From 8b84ab0ff92b59867e69a759659db8fc6965157b Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 17:04:05 +0100 Subject: [PATCH 001/178] Add public_input_only_prover module --- kimchi/src/lib.rs | 1 + kimchi/src/public_input_only_prover.rs | 1513 ++++++++++++++++++++++++ 2 files changed, 1514 insertions(+) create mode 100644 kimchi/src/public_input_only_prover.rs diff --git a/kimchi/src/lib.rs b/kimchi/src/lib.rs index 7acab89b91..edfadac7fb 100644 --- a/kimchi/src/lib.rs +++ b/kimchi/src/lib.rs @@ -22,6 +22,7 @@ pub mod plonk_sponge; pub mod proof; pub mod prover; pub mod prover_index; +pub mod public_input_only_prover; pub mod snarky; pub mod verifier; pub mod verifier_index; diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs new file mode 100644 index 0000000000..a449de97b6 --- /dev/null +++ b/kimchi/src/public_input_only_prover.rs @@ -0,0 +1,1513 @@ +//! This module implements prover's zk-proof primitive. + +use crate::{ + circuits::{ + argument::{Argument, ArgumentType}, + expr::{self, l0_1, Constants, Environment, LookupEnvironment}, + gate::GateType, + lookup::{self, runtime_tables::RuntimeTable, tables::combine_table_entry}, + polynomials::{ + complete_add::CompleteAdd, + endomul_scalar::EndomulScalar, + endosclmul::EndosclMul, + foreign_field_add::circuitgates::ForeignFieldAdd, + foreign_field_mul::{self, circuitgates::ForeignFieldMul}, + generic, permutation, + permutation::ZK_ROWS, + poseidon::Poseidon, + range_check::circuitgates::{RangeCheck0, RangeCheck1}, + rot::Rot64, + varbasemul::VarbaseMul, + xor::Xor16, + }, + wires::{COLUMNS, PERMUTS}, + }, + curve::KimchiCurve, + error::ProverError, + lagrange_basis_evaluations::LagrangeBasisEvaluations, + plonk_sponge::FrSponge, + proof::{ + LookupCommitments, PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, + RecursionChallenge, + }, + prover_index::ProverIndex, +}; +use ark_ec::ProjectiveCurve; +use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; +use ark_poly::{ + univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, + Radix2EvaluationDomain as D, UVPolynomial, +}; +use itertools::Itertools; +use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; +use o1_utils::ExtendedDensePolynomial as _; +use poly_commitment::{ + commitment::{ + absorb_commitment, b_poly_coefficients, BlindedCommitment, CommitmentCurve, PolyComm, + }, + evaluation_proof::DensePolynomialOrEvaluations, +}; +use rayon::prelude::*; +use std::array; +use std::collections::HashMap; + +/// The result of a proof creation or verification. +type Result = std::result::Result; + +/// Helper to quickly test if a witness satisfies a constraint +macro_rules! check_constraint { + ($index:expr, $evaluation:expr) => {{ + check_constraint!($index, stringify!($evaluation), $evaluation); + }}; + ($index:expr, $label:expr, $evaluation:expr) => {{ + if cfg!(debug_assertions) { + let (_, res) = $evaluation + .interpolate_by_ref() + .divide_by_vanishing_poly($index.cs.domain.d1) + .unwrap(); + if !res.is_zero() { + panic!("couldn't divide by vanishing polynomial: {}", $label); + } + } + }}; +} + +/// Contains variables needed for lookup in the prover algorithm. +#[derive(Default)] +struct LookupContext +where + G: CommitmentCurve, + F: FftField, +{ + /// The joint combiner used to join the columns of lookup tables + joint_combiner: Option, + + /// The power of the joint_combiner that can be used to add a table_id column + /// to the concatenated lookup tables. 
+ table_id_combiner: Option, + + /// The combined lookup entry that can be used as dummy value + dummy_lookup_value: Option, + + /// The combined lookup table + joint_lookup_table: Option>, + joint_lookup_table_d8: Option>>, + + /// The sorted polynomials `s` in different forms + sorted: Option>>>, + sorted_coeffs: Option>>, + sorted_comms: Option>>, + sorted8: Option>>>, + + /// The aggregation polynomial in different forms + aggreg_coeffs: Option>, + aggreg_comm: Option>, + aggreg8: Option>>, + + // lookup-related evaluations + /// evaluation of lookup aggregation polynomial + pub lookup_aggregation_eval: Option>>, + /// evaluation of lookup table polynomial + pub lookup_table_eval: Option>>, + /// evaluation of lookup sorted polynomials + pub lookup_sorted_eval: [Option>>; 5], + /// evaluation of runtime lookup table polynomial + pub runtime_lookup_table_eval: Option>>, + + /// Runtime table + runtime_table: Option>, + runtime_table_d8: Option>>, + runtime_table_comm: Option>, + runtime_second_col_d8: Option>>, +} + +impl ProverProof +where + G::BaseField: PrimeField, +{ + /// This function constructs prover's zk-proof from the witness & the `ProverIndex` against SRS instance + /// + /// # Errors + /// + /// Will give error if `create_recursive` process fails. + pub fn create_public_input_only< + EFqSponge: Clone + FqSponge, + EFrSponge: FrSponge, + >( + groupmap: &G::Map, + witness: [Vec; COLUMNS], + runtime_tables: &[RuntimeTable], + index: &ProverIndex, + ) -> Result { + Self::create_recursive_public_input_only::( + groupmap, + witness, + runtime_tables, + index, + Vec::new(), + None, + ) + } + + /// This function constructs prover's recursive zk-proof from the witness & the `ProverIndex` against SRS instance + /// + /// # Errors + /// + /// Will give error if inputs(like `lookup_context.joint_lookup_table_d8`) are None. + /// + /// # Panics + /// + /// Will panic if `lookup_context.joint_lookup_table_d8` is None. + pub fn create_recursive_public_input_only< + EFqSponge: Clone + FqSponge, + EFrSponge: FrSponge, + >( + group_map: &G::Map, + mut witness: [Vec; COLUMNS], + runtime_tables: &[RuntimeTable], + index: &ProverIndex, + prev_challenges: Vec>, + blinders: Option<[Option>; COLUMNS]>, + ) -> Result { + // make sure that the SRS is not smaller than the domain size + let d1_size = index.cs.domain.d1.size(); + if index.srs.max_degree() < d1_size { + return Err(ProverError::SRSTooSmall); + } + + let (_, endo_r) = G::endos(); + + // TODO: rng should be passed as arg + let rng = &mut rand::rngs::OsRng; + + // Verify the circuit satisfiability by the computed witness (baring plookup constraints) + // Catch mistakes before proof generation. + if cfg!(debug_assertions) && !index.cs.disable_gates_checks { + let public = witness[0][0..index.cs.public].to_vec(); + index.verify(&witness, &public).expect("incorrect witness"); + } + + //~ 1. Ensure we have room in the witness for the zero-knowledge rows. + //~ We currently expect the witness not to be of the same length as the domain, + //~ but instead be of the length of the (smaller) circuit. + //~ If we cannot add `ZK_ROWS` rows to the columns of the witness before reaching + //~ the size of the domain, abort. + let length_witness = witness[0].len(); + let length_padding = d1_size + .checked_sub(length_witness) + .ok_or(ProverError::NoRoomForZkInWitness)?; + + if length_padding < ZK_ROWS as usize { + return Err(ProverError::NoRoomForZkInWitness); + } + + //~ 1. 
Pad the witness columns with Zero gates to make them the same length as the domain.
+        //~ Then, randomize the last `ZK_ROWS` of each column.
+        for w in &mut witness {
+            if w.len() != length_witness {
+                return Err(ProverError::WitnessCsInconsistent);
+            }
+
+            // padding
+            w.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding));
+
+            // zk-rows
+            for row in w.iter_mut().rev().take(ZK_ROWS as usize) {
+                *row = <G::ScalarField as UniformRand>::rand(rng);
+            }
+        }
+
+        //~ 1. Setup the Fq-Sponge.
+        let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params());
+
+        //~ 1. Absorb the digest of the VerifierIndex.
+        let verifier_index_digest = index.verifier_index_digest::<EFqSponge>();
+        fq_sponge.absorb_fq(&[verifier_index_digest]);
+
+        //~ 1. Absorb the commitments of the previous challenges with the Fq-sponge.
+        for RecursionChallenge { comm, .. } in &prev_challenges {
+            absorb_commitment(&mut fq_sponge, comm)
+        }
+
+        //~ 1. Compute the negated public input polynomial as
+        //~ the polynomial that evaluates to $-p_i$ for the first `public_input_size` values of the domain,
+        //~ and $0$ for the rest.
+        let public = witness[0][0..index.cs.public].to_vec();
+        let public_poly = -Evaluations::<G::ScalarField, D<G::ScalarField>>::from_vec_and_domain(
+            public,
+            index.cs.domain.d1,
+        )
+        .interpolate();
+
+        //~ 1. Commit (non-hiding) to the negated public input polynomial.
+        let public_comm = index.srs.commit_non_hiding(&public_poly, None);
+        let public_comm = {
+            index
+                .srs
+                .mask_custom(
+                    public_comm.clone(),
+                    &public_comm.map(|_| G::ScalarField::one()),
+                )
+                .unwrap()
+                .commitment
+        };
+
+        //~ 1. Absorb the commitment to the public polynomial with the Fq-Sponge.
+        //~
+        //~ Note: unlike the original PLONK protocol,
+        //~ the prover also provides evaluations of the public polynomial to help the verifier circuit.
+        //~ This is why we need to absorb the commitment to the public polynomial at this point.
+        absorb_commitment(&mut fq_sponge, &public_comm);
+
+        //~ 1. Commit to the witness columns by creating `COLUMNS` hiding commitments.
+        //~
+        //~ Note: since the witness is in evaluation form,
+        //~ we can use the `commit_evaluation` optimization.
+        let mut w_comm = vec![];
+        for col in 0..COLUMNS {
+            // witness coeff -> witness eval
+            let witness_eval =
+                Evaluations::<G::ScalarField, D<G::ScalarField>>::from_vec_and_domain(
+                    witness[col].clone(),
+                    index.cs.domain.d1,
+                );
+
+            let com = match blinders.as_ref().and_then(|b| b[col].as_ref()) {
+                // no blinders: blind the witness
+                None => index
+                    .srs
+                    .commit_evaluations(index.cs.domain.d1, &witness_eval, rng),
+                // blinders: blind the witness with them
+                Some(blinder) => {
+                    // TODO: turn this into a function, e.g. mask_with_custom()
+                    let witness_com = index
+                        .srs
+                        .commit_evaluations_non_hiding(index.cs.domain.d1, &witness_eval);
+                    index
+                        .srs
+                        .mask_custom(witness_com, blinder)
+                        .map_err(ProverError::WrongBlinders)?
+                }
+            };
+
+            w_comm.push(com);
+        }
+
+        let w_comm: [BlindedCommitment<G>; COLUMNS] = w_comm
+            .try_into()
+            .expect("previous loop is of the correct length");
+
+        //~ 1. Absorb the witness commitments with the Fq-Sponge.
+        w_comm
+            .iter()
+            .for_each(|c| absorb_commitment(&mut fq_sponge, &c.commitment));
+
+        //~ 1. Compute the witness polynomials by interpolating each of the `COLUMNS` witness columns.
+        //~ As mentioned above, we commit using the evaluations form rather than the coefficients
+        //~ form so we can take advantage of the sparsity of the evaluations (i.e., there are many
+        //~ 0 entries and entries that have less-than-full-size field elements.)
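+        // (Editor's sketch, not part of the original change.) The claim above
+        // can be sanity-checked directly: committing to a column in evaluation
+        // form agrees with committing to its interpolation in coefficient
+        // form. Using only functions already called in this file:
+        //
+        //     let evals = Evaluations::<G::ScalarField, D<G::ScalarField>>::from_vec_and_domain(
+        //         witness[0].clone(),
+        //         index.cs.domain.d1,
+        //     );
+        //     let c_evals = index.srs.commit_evaluations_non_hiding(index.cs.domain.d1, &evals);
+        //     let c_coeffs = index.srs.commit_non_hiding(&evals.interpolate(), None);
+        //     debug_assert_eq!(c_evals.unshifted, c_coeffs.unshifted);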
+ let witness_poly: [DensePolynomial; COLUMNS] = array::from_fn(|i| { + Evaluations::>::from_vec_and_domain( + witness[i].clone(), + index.cs.domain.d1, + ) + .interpolate() + }); + + let mut lookup_context = LookupContext::default(); + + //~ 1. If using lookup: + if let Some(lcs) = &index.cs.lookup_constraint_system { + //~~ * if using runtime table: + if let Some(cfg_runtime_tables) = &lcs.runtime_tables { + //~~~ * check that all the provided runtime tables have length and IDs that match the runtime table configuration of the index + //~~~ we expect the given runtime tables to be sorted as configured, this makes it easier afterwards + let expected_runtime: Vec<_> = cfg_runtime_tables + .iter() + .map(|rt| (rt.id, rt.len)) + .collect(); + let runtime: Vec<_> = runtime_tables + .iter() + .map(|rt| (rt.id, rt.data.len())) + .collect(); + if expected_runtime != runtime { + return Err(ProverError::RuntimeTablesInconsistent); + } + + //~~~ * calculate the contribution to the second column of the lookup table + //~~~ (the runtime vector) + let (runtime_table_contribution, runtime_table_contribution_d8) = { + let mut offset = lcs + .runtime_table_offset + .expect("runtime configuration missing offset"); + + let mut evals = vec![G::ScalarField::zero(); d1_size]; + for rt in runtime_tables { + let range = offset..(offset + rt.data.len()); + evals[range].copy_from_slice(&rt.data); + offset += rt.data.len(); + } + + // zero-knowledge + for e in evals.iter_mut().rev().take(ZK_ROWS as usize) { + *e = ::rand(rng); + } + + // get coeff and evaluation form + let runtime_table_contribution = + Evaluations::from_vec_and_domain(evals, index.cs.domain.d1).interpolate(); + + let runtime_table_contribution_d8 = + runtime_table_contribution.evaluate_over_domain_by_ref(index.cs.domain.d8); + + (runtime_table_contribution, runtime_table_contribution_d8) + }; + + // commit the runtime polynomial + // (and save it to the proof) + let runtime_table_comm = index.srs.commit(&runtime_table_contribution, None, rng); + + // absorb the commitment + absorb_commitment(&mut fq_sponge, &runtime_table_comm.commitment); + + // pre-compute the updated second column of the lookup table + let mut second_column_d8 = runtime_table_contribution_d8.clone(); + second_column_d8 + .evals + .par_iter_mut() + .enumerate() + .for_each(|(row, e)| { + *e += lcs.lookup_table8[1][row]; + }); + + lookup_context.runtime_table = Some(runtime_table_contribution); + lookup_context.runtime_table_d8 = Some(runtime_table_contribution_d8); + lookup_context.runtime_table_comm = Some(runtime_table_comm); + lookup_context.runtime_second_col_d8 = Some(second_column_d8); + } + + //~~ * If queries involve a lookup table with multiple columns + //~~ then squeeze the Fq-Sponge to obtain the joint combiner challenge $j'$, + //~~ otherwise set the joint combiner challenge $j'$ to $0$. + let joint_combiner = if lcs.configuration.lookup_info.features.joint_lookup_used { + fq_sponge.challenge() + } else { + G::ScalarField::zero() + }; + + //~~ * Derive the scalar joint combiner $j$ from $j'$ using the endomorphism (TOOD: specify) + let joint_combiner: G::ScalarField = ScalarChallenge(joint_combiner).to_field(endo_r); + + //~~ * If multiple lookup tables are involved, + //~~ set the `table_id_combiner` as the $j^i$ with $i$ the maximum width of any used table. + //~~ Essentially, this is to add a last column of table ids to the concatenated lookup tables. 
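+            // (Editor's note, illustrative only.) Concretely, a table row
+            // `(t_0, ..., t_{w-1})` with identifier `id` is flattened into the
+            // single field element
+            //
+            //     t_0 + j * t_1 + ... + j^{w-1} * t_{w-1} + table_id_combiner * id
+            //
+            // where `j` is the joint combiner and, when table IDs are present,
+            // `table_id_combiner = j^w` with `w = max_joint_size`; this is
+            // exactly what `combine_table_entry` computes below.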
+ let table_id_combiner: G::ScalarField = if lcs.table_ids8.as_ref().is_some() { + joint_combiner.pow([lcs.configuration.lookup_info.max_joint_size as u64]) + } else { + // TODO: just set this to None in case multiple tables are not used + G::ScalarField::zero() + }; + lookup_context.table_id_combiner = Some(table_id_combiner); + + //~~ * Compute the dummy lookup value as the combination of the last entry of the XOR table (so `(0, 0, 0)`). + //~~ Warning: This assumes that we always use the XOR table when using lookups. + let dummy_lookup_value = lcs + .configuration + .dummy_lookup + .evaluate(&joint_combiner, &table_id_combiner); + lookup_context.dummy_lookup_value = Some(dummy_lookup_value); + + //~~ * Compute the lookup table values as the combination of the lookup table entries. + let joint_lookup_table_d8 = { + let mut evals = Vec::with_capacity(d1_size); + + for idx in 0..(d1_size * 8) { + let table_id = match lcs.table_ids8.as_ref() { + Some(table_ids8) => table_ids8.evals[idx], + None => + // If there is no `table_ids8` in the constraint system, + // every table ID is identically 0. + { + G::ScalarField::zero() + } + }; + + let combined_entry = + if !lcs.configuration.lookup_info.features.uses_runtime_tables { + let table_row = lcs.lookup_table8.iter().map(|e| &e.evals[idx]); + + combine_table_entry( + &joint_combiner, + &table_id_combiner, + table_row, + &table_id, + ) + } else { + // if runtime table are used, the second row is modified + let second_col = lookup_context.runtime_second_col_d8.as_ref().unwrap(); + + let table_row = lcs.lookup_table8.iter().enumerate().map(|(col, e)| { + if col == 1 { + &second_col.evals[idx] + } else { + &e.evals[idx] + } + }); + + combine_table_entry( + &joint_combiner, + &table_id_combiner, + table_row, + &table_id, + ) + }; + evals.push(combined_entry); + } + + Evaluations::from_vec_and_domain(evals, index.cs.domain.d8) + }; + + // TODO: This interpolation is avoidable. + let joint_lookup_table = joint_lookup_table_d8.interpolate_by_ref(); + + //~~ * Compute the sorted evaluations. + // TODO: Once we switch to committing using lagrange commitments, + // `witness` will be consumed when we interpolate, so interpolation will + // have to moved below this. + let sorted: Vec<_> = lookup::constraints::sorted( + dummy_lookup_value, + &joint_lookup_table_d8, + index.cs.domain.d1, + &index.cs.gates, + &witness, + joint_combiner, + table_id_combiner, + &lcs.configuration.lookup_info, + )?; + + //~~ * Randomize the last `EVALS` rows in each of the sorted polynomials + //~~ in order to add zero-knowledge to the protocol. + let sorted: Vec<_> = sorted + .into_iter() + .map(|chunk| lookup::constraints::zk_patch(chunk, index.cs.domain.d1, rng)) + .collect(); + + //~~ * Commit each of the sorted polynomials. + let sorted_comms: Vec<_> = sorted + .iter() + .map(|v| index.srs.commit_evaluations(index.cs.domain.d1, v, rng)) + .collect(); + + //~~ * Absorb each commitments to the sorted polynomials. + sorted_comms + .iter() + .for_each(|c| absorb_commitment(&mut fq_sponge, &c.commitment)); + + // precompute different forms of the sorted polynomials for later + // TODO: We can avoid storing these coefficients. 
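+            // (Editor's note, illustrative only.) Three forms of the same data
+            // are kept around: `sorted` (evaluations over d1, already committed
+            // above), `sorted_coeffs` (coefficient form, used later for the
+            // chunked openings at $\zeta$ and $\zeta\omega$), and `sorted8`
+            // (evaluations over the 8x domain d8, used when evaluating the
+            // lookup constraint expressions).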
+ let sorted_coeffs: Vec<_> = sorted.iter().map(|e| e.clone().interpolate()).collect(); + let sorted8: Vec<_> = sorted_coeffs + .iter() + .map(|v| v.evaluate_over_domain_by_ref(index.cs.domain.d8)) + .collect(); + + lookup_context.joint_combiner = Some(joint_combiner); + lookup_context.sorted = Some(sorted); + lookup_context.sorted_coeffs = Some(sorted_coeffs); + lookup_context.sorted_comms = Some(sorted_comms); + lookup_context.sorted8 = Some(sorted8); + lookup_context.joint_lookup_table_d8 = Some(joint_lookup_table_d8); + lookup_context.joint_lookup_table = Some(joint_lookup_table); + } + + //~ 1. Sample $\beta$ with the Fq-Sponge. + let beta = fq_sponge.challenge(); + + //~ 1. Sample $\gamma$ with the Fq-Sponge. + let gamma = fq_sponge.challenge(); + + //~ 1. If using lookup: + if let Some(lcs) = &index.cs.lookup_constraint_system { + //~~ * Compute the lookup aggregation polynomial. + let joint_lookup_table_d8 = lookup_context.joint_lookup_table_d8.as_ref().unwrap(); + + let aggreg = lookup::constraints::aggregation::<_, G::ScalarField>( + lookup_context.dummy_lookup_value.unwrap(), + joint_lookup_table_d8, + index.cs.domain.d1, + &index.cs.gates, + &witness, + &lookup_context.joint_combiner.unwrap(), + &lookup_context.table_id_combiner.unwrap(), + beta, + gamma, + lookup_context.sorted.as_ref().unwrap(), + rng, + &lcs.configuration.lookup_info, + )?; + + //~~ * Commit to the aggregation polynomial. + let aggreg_comm = index + .srs + .commit_evaluations(index.cs.domain.d1, &aggreg, rng); + + //~~ * Absorb the commitment to the aggregation polynomial with the Fq-Sponge. + absorb_commitment(&mut fq_sponge, &aggreg_comm.commitment); + + // precompute different forms of the aggregation polynomial for later + let aggreg_coeffs = aggreg.interpolate(); + // TODO: There's probably a clever way to expand the domain without + // interpolating + let aggreg8 = aggreg_coeffs.evaluate_over_domain_by_ref(index.cs.domain.d8); + + lookup_context.aggreg_comm = Some(aggreg_comm); + lookup_context.aggreg_coeffs = Some(aggreg_coeffs); + lookup_context.aggreg8 = Some(aggreg8); + } + + //~ 1. Compute the permutation aggregation polynomial $z$. + let z_poly = index.perm_aggreg(&witness, &beta, &gamma, rng)?; + + //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. + let z_comm = index.srs.commit(&z_poly, None, rng); + + //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. + absorb_commitment(&mut fq_sponge, &z_comm.commitment); + + //~ 1. Sample $\alpha'$ with the Fq-Sponge. + let alpha_chal = ScalarChallenge(fq_sponge.challenge()); + + //~ 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details) + let alpha: G::ScalarField = alpha_chal.to_field(endo_r); + + //~ 1. TODO: instantiate alpha? + let mut all_alphas = index.powers_of_alpha.clone(); + all_alphas.instantiate(alpha); + + //~ 1. Compute the quotient polynomial (the $t$ in $f = Z_H \cdot t$). + //~ The quotient polynomial is computed by adding all these polynomials together: + //~~ * the combined constraints for all the gates + //~~ * the combined constraints for the permutation + //~~ * TODO: lookup + //~~ * the negated public polynomial + //~ and by then dividing the resulting polynomial with the vanishing polynomial $Z_H$. + //~ TODO: specify the split of the permutation polynomial into perm and bnd? 
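+        // (Editor's sketch, not part of the original change.) With
+        // $Z_H(X) = X^n - 1$ the vanishing polynomial of the d1 domain, the
+        // quotient computed below is, schematically,
+        //
+        //     t(X) = (gates(X) + perm(X) + public(X)) / Z_H(X) + bnd(X)
+        //
+        // where `public` is the negated public-input polynomial and `bnd` is
+        // the boundary part of the permutation argument, which `perm_quot`
+        // returns already divided by $Z_H$. The division is exact only when
+        // the witness satisfies every constraint, which is what the
+        // `check_constraint!` macro verifies in debug builds.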
+ let lookup_env = if let Some(lcs) = &index.cs.lookup_constraint_system { + let joint_lookup_table_d8 = lookup_context.joint_lookup_table_d8.as_ref().unwrap(); + + Some(LookupEnvironment { + aggreg: lookup_context.aggreg8.as_ref().unwrap(), + sorted: lookup_context.sorted8.as_ref().unwrap(), + selectors: &lcs.lookup_selectors, + table: joint_lookup_table_d8, + runtime_selector: lcs.runtime_selector.as_ref(), + runtime_table: lookup_context.runtime_table_d8.as_ref(), + }) + } else { + None + }; + + let lagrange = index.cs.evaluate(&witness_poly, &z_poly); + let env = { + let mut index_evals = HashMap::new(); + use GateType::*; + index_evals.insert(Generic, &index.column_evaluations.generic_selector4); + index_evals.insert(Poseidon, &index.column_evaluations.poseidon_selector8); + index_evals.insert( + CompleteAdd, + &index.column_evaluations.complete_add_selector4, + ); + index_evals.insert(VarBaseMul, &index.column_evaluations.mul_selector8); + index_evals.insert(EndoMul, &index.column_evaluations.emul_selector8); + index_evals.insert( + EndoMulScalar, + &index.column_evaluations.endomul_scalar_selector8, + ); + + if let Some(selector) = &index.column_evaluations.range_check0_selector8.as_ref() { + index_evals.insert(GateType::RangeCheck0, selector); + } + + if let Some(selector) = &index.column_evaluations.range_check1_selector8.as_ref() { + index_evals.insert(GateType::RangeCheck1, selector); + } + + if let Some(selector) = index + .column_evaluations + .foreign_field_add_selector8 + .as_ref() + { + index_evals.insert(GateType::ForeignFieldAdd, selector); + } + + if let Some(selector) = index + .column_evaluations + .foreign_field_mul_selector8 + .as_ref() + { + index_evals.extend( + foreign_field_mul::gadget::circuit_gates() + .iter() + .enumerate() + .map(|(_, gate_type)| (*gate_type, selector)), + ); + } + + if let Some(selector) = index.column_evaluations.xor_selector8.as_ref() { + index_evals.insert(GateType::Xor16, selector); + } + + if let Some(selector) = index.column_evaluations.rot_selector8.as_ref() { + index_evals.insert(GateType::Rot64, selector); + } + + let mds = &G::sponge_params().mds; + Environment { + constants: Constants { + alpha, + beta, + gamma, + joint_combiner: lookup_context.joint_combiner, + endo_coefficient: index.cs.endo, + mds, + }, + witness: &lagrange.d8.this.w, + coefficient: &index.column_evaluations.coefficients8, + vanishes_on_last_4_rows: &index.cs.precomputations().vanishes_on_last_4_rows, + z: &lagrange.d8.this.z, + l0_1: l0_1(index.cs.domain.d1), + domain: index.cs.domain, + index: index_evals, + lookup: lookup_env, + } + }; + + let mut cache = expr::Cache::default(); + + let quotient_poly = { + // generic + let mut t4 = { + let generic_constraint = + generic::Generic::combined_constraints(&all_alphas, &mut cache); + let generic4 = generic_constraint.evaluations(&env); + + if cfg!(debug_assertions) { + let p4 = public_poly.evaluate_over_domain_by_ref(index.cs.domain.d4); + let gen_minus_pub = &generic4 + &p4; + + check_constraint!(index, gen_minus_pub); + } + + generic4 + }; + // permutation + let (mut t8, bnd) = { + let alphas = + all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); + let (perm, bnd) = index.perm_quot(&lagrange, beta, gamma, &z_poly, alphas)?; + + check_constraint!(index, perm); + + (perm, bnd) + }; + + { + use crate::circuits::argument::DynArgument; + + let range_check0_enabled = + index.column_evaluations.range_check0_selector8.is_some(); + let range_check1_enabled = + 
index.column_evaluations.range_check1_selector8.is_some(); + let foreign_field_addition_enabled = index + .column_evaluations + .foreign_field_add_selector8 + .is_some(); + let foreign_field_multiplication_enabled = index + .column_evaluations + .foreign_field_mul_selector8 + .is_some(); + let xor_enabled = index.column_evaluations.xor_selector8.is_some(); + let rot_enabled = index.column_evaluations.rot_selector8.is_some(); + + for gate in [ + ( + (&CompleteAdd::default() as &dyn DynArgument), + true, + ), + (&VarbaseMul::default(), true), + (&EndosclMul::default(), true), + (&EndomulScalar::default(), true), + (&Poseidon::default(), true), + // Range check gates + (&RangeCheck0::default(), range_check0_enabled), + (&RangeCheck1::default(), range_check1_enabled), + // Foreign field addition gate + (&ForeignFieldAdd::default(), foreign_field_addition_enabled), + // Foreign field multiplication gate + ( + &ForeignFieldMul::default(), + foreign_field_multiplication_enabled, + ), + // Xor gate + (&Xor16::default(), xor_enabled), + // Rot gate + (&Rot64::default(), rot_enabled), + ] + .into_iter() + .filter_map(|(gate, is_enabled)| if is_enabled { Some(gate) } else { None }) + { + let constraint = gate.combined_constraints(&all_alphas, &mut cache); + let eval = constraint.evaluations(&env); + if eval.domain().size == t4.domain().size { + t4 += &eval; + } else if eval.domain().size == t8.domain().size { + t8 += &eval; + } else { + panic!("Bad evaluation") + } + check_constraint!(index, format!("{:?}", gate.argument_type()), eval); + } + }; + + // lookup + { + if let Some(lcs) = index.cs.lookup_constraint_system.as_ref() { + let constraints = lookup::constraints::constraints(&lcs.configuration, false); + let constraints_len = u32::try_from(constraints.len()) + .expect("not expecting a large amount of constraints"); + let lookup_alphas = + all_alphas.get_alphas(ArgumentType::Lookup, constraints_len); + + // as lookup constraints are computed with the expression framework, + // each of them can result in Evaluations of different domains + for (ii, (constraint, alpha_pow)) in + constraints.into_iter().zip_eq(lookup_alphas).enumerate() + { + let mut eval = constraint.evaluations(&env); + eval.evals.par_iter_mut().for_each(|x| *x *= alpha_pow); + + if eval.domain().size == t4.domain().size { + t4 += &eval; + } else if eval.domain().size == t8.domain().size { + t8 += &eval; + } else if eval.evals.iter().all(|x| x.is_zero()) { + // Skip any 0-valued evaluations + } else { + panic!("Bad evaluation") + } + + check_constraint!(index, format!("lookup constraint #{ii}"), eval); + } + } + } + + // public polynomial + let mut f = t4.interpolate() + t8.interpolate(); + f += &public_poly; + + // divide contributions with vanishing polynomial + let (mut quotient, res) = f + .divide_by_vanishing_poly(index.cs.domain.d1) + .ok_or(ProverError::Prover("division by vanishing polynomial"))?; + if !res.is_zero() { + return Err(ProverError::Prover( + "rest of division by vanishing polynomial", + )); + } + + quotient += &bnd; // already divided by Z_H + quotient + }; + + //~ 1. 
Commit (hiding) to the quotient polynomial $t$
+        //~ TODO: specify the dummies
+        let t_comm = {
+            let mut t_comm = index.srs.commit(&quotient_poly, None, rng);
+
+            let expected_t_size = PERMUTS;
+            let dummies = expected_t_size - t_comm.commitment.unshifted.len();
+            // Add `dummies` many hiding commitments to the 0 polynomial, since if the
+            // number of commitments in `t_comm` is less than the max size, it means that
+            // the higher degree coefficients of `t` are 0.
+            for _ in 0..dummies {
+                let w = <G::ScalarField as UniformRand>::rand(rng);
+                t_comm
+                    .commitment
+                    .unshifted
+                    .push(index.srs.h.mul(w).into_affine());
+                t_comm.blinders.unshifted.push(w);
+            }
+            t_comm
+        };
+
+        //~ 1. Absorb the commitment of the quotient polynomial with the Fq-Sponge.
+        absorb_commitment(&mut fq_sponge, &t_comm.commitment);
+
+        //~ 1. Sample $\zeta'$ with the Fq-Sponge.
+        let zeta_chal = ScalarChallenge(fq_sponge.challenge());
+
+        //~ 1. Derive $\zeta$ from $\zeta'$ using the endomorphism (TODO: specify)
+        let zeta = zeta_chal.to_field(endo_r);
+
+        let omega = index.cs.domain.d1.group_gen;
+        let zeta_omega = zeta * omega;
+
+        //~ 1. If lookup is used, evaluate the following polynomials at $\zeta$ and $\zeta \omega$:
+        if index.cs.lookup_constraint_system.is_some() {
+            //~~ * the aggregation polynomial
+            let aggreg = lookup_context
+                .aggreg_coeffs
+                .as_ref()
+                .unwrap()
+                .to_chunked_polynomial(index.max_poly_size);
+
+            //~~ * the sorted polynomials
+            let sorted = lookup_context
+                .sorted_coeffs
+                .as_ref()
+                .unwrap()
+                .iter()
+                .map(|c| c.to_chunked_polynomial(index.max_poly_size))
+                .collect::<Vec<_>>();
+
+            //~~ * the table polynomial
+            let joint_table = lookup_context.joint_lookup_table.as_ref().unwrap();
+            let joint_table = joint_table.to_chunked_polynomial(index.max_poly_size);
+
+            lookup_context.lookup_aggregation_eval = Some(PointEvaluations {
+                zeta: aggreg.evaluate_chunks(zeta),
+                zeta_omega: aggreg.evaluate_chunks(zeta_omega),
+            });
+            lookup_context.lookup_table_eval = Some(PointEvaluations {
+                zeta: joint_table.evaluate_chunks(zeta),
+                zeta_omega: joint_table.evaluate_chunks(zeta_omega),
+            });
+            lookup_context.lookup_sorted_eval = array::from_fn(|i| {
+                if i < sorted.len() {
+                    let sorted = &sorted[i];
+                    Some(PointEvaluations {
+                        zeta: sorted.evaluate_chunks(zeta),
+                        zeta_omega: sorted.evaluate_chunks(zeta_omega),
+                    })
+                } else {
+                    None
+                }
+            });
+            lookup_context.runtime_lookup_table_eval =
+                lookup_context.runtime_table.as_ref().map(|runtime_table| {
+                    let runtime_table = runtime_table.to_chunked_polynomial(index.max_poly_size);
+                    PointEvaluations {
+                        zeta: runtime_table.evaluate_chunks(zeta),
+                        zeta_omega: runtime_table.evaluate_chunks(zeta_omega),
+                    }
+                });
+        }
+
+        //~ 1. Chunk evaluate the following polynomials at both $\zeta$ and $\zeta \omega$:
+        //~~ * $s_i$
+        //~~ * $w_i$
+        //~~ * $z$
+        //~~ * lookup (TODO)
+        //~~ * generic selector
+        //~~ * poseidon selector
+        //~
+        //~ By "chunk evaluate" we mean that the evaluation of each polynomial can potentially be a vector of values.
+        //~ This is because the index's `max_poly_size` parameter dictates the maximum size of a polynomial in the protocol.
+        //~ If a polynomial $f$ exceeds this size, it must be split into several polynomials like so:
+        //~ $$f(x) = f_0(x) + x^n f_1(x) + x^{2n} f_2(x) + \cdots$$
+        //~
+        //~ And the evaluation of such a polynomial is the following list for $x \in \{\zeta, \zeta\omega\}$:
+        //~
+        //~ $$(f_0(x), f_1(x), f_2(x), \ldots)$$
+        //~
+        //~ TODO: do we want to specify more on that?
It seems unecessary except for the t polynomial (or if for some reason someone sets that to a low value) + + let zeta_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta); + let zeta_omega_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta_omega); + + let chunked_evals_for_selector = + |p: &Evaluations>| PointEvaluations { + zeta: vec![zeta_evals.evaluate_boolean(p)], + zeta_omega: vec![zeta_omega_evals.evaluate_boolean(p)], + }; + + let chunked_evals_for_evaluations = + |p: &Evaluations>| PointEvaluations { + zeta: vec![zeta_evals.evaluate(p)], + zeta_omega: vec![zeta_omega_evals.evaluate(p)], + }; + + let chunked_evals = ProofEvaluations::>> { + s: array::from_fn(|i| { + chunked_evals_for_evaluations( + &index.column_evaluations.permutation_coefficients8[i], + ) + }), + coefficients: array::from_fn(|i| { + chunked_evals_for_evaluations(&index.column_evaluations.coefficients8[i]) + }), + w: array::from_fn(|i| { + let chunked = witness_poly[i].to_chunked_polynomial(index.max_poly_size); + PointEvaluations { + zeta: chunked.evaluate_chunks(zeta), + zeta_omega: chunked.evaluate_chunks(zeta_omega), + } + }), + + z: { + let chunked = z_poly.to_chunked_polynomial(index.max_poly_size); + PointEvaluations { + zeta: chunked.evaluate_chunks(zeta), + zeta_omega: chunked.evaluate_chunks(zeta_omega), + } + }, + + lookup_aggregation: lookup_context.lookup_aggregation_eval.take(), + lookup_table: lookup_context.lookup_table_eval.take(), + lookup_sorted: array::from_fn(|i| lookup_context.lookup_sorted_eval[i].take()), + runtime_lookup_table: lookup_context.runtime_lookup_table_eval.take(), + generic_selector: chunked_evals_for_selector( + &index.column_evaluations.generic_selector4, + ), + poseidon_selector: chunked_evals_for_selector( + &index.column_evaluations.poseidon_selector8, + ), + complete_add_selector: chunked_evals_for_selector( + &index.column_evaluations.complete_add_selector4, + ), + mul_selector: chunked_evals_for_selector(&index.column_evaluations.mul_selector8), + emul_selector: chunked_evals_for_selector(&index.column_evaluations.emul_selector8), + endomul_scalar_selector: chunked_evals_for_selector( + &index.column_evaluations.endomul_scalar_selector8, + ), + + range_check0_selector: index + .column_evaluations + .range_check0_selector8 + .as_ref() + .map(chunked_evals_for_selector), + range_check1_selector: index + .column_evaluations + .range_check1_selector8 + .as_ref() + .map(chunked_evals_for_selector), + foreign_field_add_selector: index + .column_evaluations + .foreign_field_add_selector8 + .as_ref() + .map(chunked_evals_for_selector), + foreign_field_mul_selector: index + .column_evaluations + .foreign_field_mul_selector8 + .as_ref() + .map(chunked_evals_for_selector), + xor_selector: index + .column_evaluations + .xor_selector8 + .as_ref() + .map(chunked_evals_for_selector), + rot_selector: index + .column_evaluations + .rot_selector8 + .as_ref() + .map(chunked_evals_for_selector), + + runtime_lookup_table_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.runtime_selector + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + xor_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then(|lcs| { + lcs.lookup_selectors + .xor + .as_ref() + .map(chunked_evals_for_selector) + }), + lookup_gate_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .lookup + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + range_check_lookup_selector: 
index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .range_check + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + foreign_field_mul_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then( + |lcs| { + lcs.lookup_selectors + .ffmul + .as_ref() + .map(chunked_evals_for_selector) + }, + ), + }; + + let zeta_to_srs_len = zeta.pow([index.max_poly_size as u64]); + let zeta_omega_to_srs_len = zeta_omega.pow([index.max_poly_size as u64]); + let zeta_to_domain_size = zeta.pow([d1_size as u64]); + + //~ 1. Evaluate the same polynomials without chunking them + //~ (so that each polynomial should correspond to a single value this time). + let evals = { + let powers_of_eval_points_for_chunks = PointEvaluations { + zeta: zeta_to_srs_len, + zeta_omega: zeta_omega_to_srs_len, + }; + chunked_evals.combine(&powers_of_eval_points_for_chunks) + }; + + //~ 1. Compute the ft polynomial. + //~ This is to implement [Maller's optimization](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html). + let ft: DensePolynomial = { + let f_chunked = { + // TODO: compute the linearization polynomial in evaluation form so + // that we can drop the coefficient forms of the index polynomials from + // the constraint system struct + + // permutation (not part of linearization yet) + let alphas = + all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); + let f = index.perm_lnrz(&evals, zeta, beta, gamma, alphas); + + // the circuit polynomial + let f = { + let (_lin_constant, mut lin) = + index.linearization.to_polynomial(&env, zeta, &evals); + lin += &f; + lin.interpolate() + }; + + drop(env); + + // see https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#the-prover-side + f.to_chunked_polynomial(index.max_poly_size) + .linearize(zeta_to_srs_len) + }; + + let t_chunked = quotient_poly + .to_chunked_polynomial(index.max_poly_size) + .linearize(zeta_to_srs_len); + + &f_chunked - &t_chunked.scale(zeta_to_domain_size - G::ScalarField::one()) + }; + + //~ 1. construct the blinding part of the ft polynomial commitment + //~ [see this section](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#evaluation-proof-and-blinding-factors) + let blinding_ft = { + let blinding_t = t_comm.blinders.chunk_blinding(zeta_to_srs_len); + let blinding_f = G::ScalarField::zero(); + + PolyComm { + // blinding_f - Z_H(zeta) * blinding_t + unshifted: vec![ + blinding_f - (zeta_to_domain_size - G::ScalarField::one()) * blinding_t, + ], + shifted: None, + } + }; + + //~ 1. Evaluate the ft polynomial at $\zeta\omega$ only. + let ft_eval1 = ft.evaluate(&zeta_omega); + + //~ 1. Setup the Fr-Sponge + let fq_sponge_before_evaluations = fq_sponge.clone(); + let mut fr_sponge = EFrSponge::new(G::sponge_params()); + + //~ 1. Squeeze the Fq-sponge and absorb the result with the Fr-Sponge. + fr_sponge.absorb(&fq_sponge.digest()); + + //~ 1. Absorb the previous recursion challenges. + let prev_challenge_digest = { + // Note: we absorb in a new sponge here to limit the scope in which we need the + // more-expensive 'optional sponge'. + let mut fr_sponge = EFrSponge::new(G::sponge_params()); + for RecursionChallenge { chals, .. } in &prev_challenges { + fr_sponge.absorb_multiple(chals); + } + fr_sponge.digest() + }; + fr_sponge.absorb(&prev_challenge_digest); + + //~ 1. Compute evaluations for the previous recursion challenges. 
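+        // (Editor's sketch; the precise ordering convention lives in
+        // `b_poly_coefficients`.) For IPA challenges `chals = [c_0, ..., c_{k-1}]`,
+        // the recursion polynomial is, roughly,
+        //
+        //     b(X) = prod_{i=0}^{k-1} (1 + c_i * X^{2^{k-1-i}})
+        //
+        // and `b_poly_coefficients(chals)` expands this product into dense
+        // coefficient form so that it can be opened at $\zeta$ and
+        // $\zeta\omega$ alongside the other polynomials below.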
+        let polys = prev_challenges
+            .iter()
+            .map(|RecursionChallenge { chals, comm }| {
+                (
+                    DensePolynomial::from_coefficients_vec(b_poly_coefficients(chals)),
+                    comm.unshifted.len(),
+                )
+            })
+            .collect::<Vec<_>>();
+
+        //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$.
+        let public_evals = if public_poly.is_zero() {
+            [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]]
+        } else {
+            [
+                vec![public_poly.evaluate(&zeta)],
+                vec![public_poly.evaluate(&zeta_omega)],
+            ]
+        };
+
+        //~ 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$.
+        fr_sponge.absorb(&ft_eval1);
+
+        //~ 1. Absorb all the polynomial evaluations in $\zeta$ and $\zeta\omega$:
+        //~~ * the public polynomial
+        //~~ * z
+        //~~ * generic selector
+        //~~ * poseidon selector
+        //~~ * the 15 registers/witness columns
+        //~~ * the 6 sigma evaluations (the last one is not evaluated)
+        fr_sponge.absorb_multiple(&public_evals[0]);
+        fr_sponge.absorb_multiple(&public_evals[1]);
+        fr_sponge.absorb_evaluations(&chunked_evals);
+
+        //~ 1. Sample $v'$ with the Fr-Sponge
+        let v_chal = fr_sponge.challenge();
+
+        //~ 1. Derive $v$ from $v'$ using the endomorphism (TODO: specify)
+        let v = v_chal.to_field(endo_r);
+
+        //~ 1. Sample $u'$ with the Fr-Sponge
+        let u_chal = fr_sponge.challenge();
+
+        //~ 1. Derive $u$ from $u'$ using the endomorphism (TODO: specify)
+        let u = u_chal.to_field(endo_r);
+
+        //~ 1. Create a list of all polynomials that will require evaluations
+        //~ (and evaluation proofs) in the protocol.
+        //~ First, include the previous challenges, in case we are in a recursive prover.
+        let non_hiding = |d1_size: usize| PolyComm {
+            unshifted: vec![G::ScalarField::zero(); d1_size],
+            shifted: None,
+        };
+
+        let coefficients_form = DensePolynomialOrEvaluations::DensePolynomial;
+        let evaluations_form = |e| DensePolynomialOrEvaluations::Evaluations(e, index.cs.domain.d1);
+
+        let mut polynomials = polys
+            .iter()
+            .map(|(p, d1_size)| (coefficients_form(p), None, non_hiding(*d1_size)))
+            .collect::<Vec<_>>();
+
+        let fixed_hiding = |d1_size: usize| PolyComm {
+            unshifted: vec![G::ScalarField::one(); d1_size],
+            shifted: None,
+        };
+
+        //~ 1.
Then, include: + //~~ * the negated public polynomial + //~~ * the ft polynomial + //~~ * the permutation aggregation polynomial z polynomial + //~~ * the generic selector + //~~ * the poseidon selector + //~~ * the 15 registers/witness columns + //~~ * the 6 sigmas + polynomials.push((coefficients_form(&public_poly), None, fixed_hiding(1))); + polynomials.push((coefficients_form(&ft), None, blinding_ft)); + polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); + polynomials.push(( + evaluations_form(&index.column_evaluations.generic_selector4), + None, + fixed_hiding(1), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.poseidon_selector8), + None, + fixed_hiding(1), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.complete_add_selector4), + None, + fixed_hiding(1), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.mul_selector8), + None, + fixed_hiding(1), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.emul_selector8), + None, + fixed_hiding(1), + )); + polynomials.push(( + evaluations_form(&index.column_evaluations.endomul_scalar_selector8), + None, + fixed_hiding(1), + )); + polynomials.extend( + witness_poly + .iter() + .zip(w_comm.iter()) + .map(|(w, c)| (coefficients_form(w), None, c.blinders.clone())) + .collect::>(), + ); + polynomials.extend( + index + .column_evaluations + .coefficients8 + .iter() + .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(1))) + .collect::>(), + ); + polynomials.extend( + index.column_evaluations.permutation_coefficients8[0..PERMUTS - 1] + .iter() + .map(|w| (evaluations_form(w), None, non_hiding(1))) + .collect::>(), + ); + + //~~ * the optional gates + if let Some(range_check0_selector8) = + index.column_evaluations.range_check0_selector8.as_ref() + { + polynomials.push(( + evaluations_form(range_check0_selector8), + None, + non_hiding(1), + )); + } + if let Some(range_check1_selector8) = + index.column_evaluations.range_check1_selector8.as_ref() + { + polynomials.push(( + evaluations_form(range_check1_selector8), + None, + non_hiding(1), + )); + } + if let Some(foreign_field_add_selector8) = index + .column_evaluations + .foreign_field_add_selector8 + .as_ref() + { + polynomials.push(( + evaluations_form(foreign_field_add_selector8), + None, + non_hiding(1), + )); + } + if let Some(foreign_field_mul_selector8) = index + .column_evaluations + .foreign_field_mul_selector8 + .as_ref() + { + polynomials.push(( + evaluations_form(foreign_field_mul_selector8), + None, + non_hiding(1), + )); + } + if let Some(xor_selector8) = index.column_evaluations.xor_selector8.as_ref() { + polynomials.push((evaluations_form(xor_selector8), None, non_hiding(1))); + } + if let Some(rot_selector8) = index.column_evaluations.rot_selector8.as_ref() { + polynomials.push((evaluations_form(rot_selector8), None, non_hiding(1))); + } + + //~~ * optionally, the runtime table + //~ 1. 
if using lookup: + if let Some(lcs) = &index.cs.lookup_constraint_system { + //~~ * add the lookup sorted polynomials + let sorted_poly = lookup_context.sorted_coeffs.as_ref().unwrap(); + let sorted_comms = lookup_context.sorted_comms.as_ref().unwrap(); + + for (poly, comm) in sorted_poly.iter().zip(sorted_comms) { + polynomials.push((coefficients_form(poly), None, comm.blinders.clone())); + } + + //~~ * add the lookup aggreg polynomial + let aggreg_poly = lookup_context.aggreg_coeffs.as_ref().unwrap(); + let aggreg_comm = lookup_context.aggreg_comm.as_ref().unwrap(); + polynomials.push(( + coefficients_form(aggreg_poly), + None, + aggreg_comm.blinders.clone(), + )); + + //~~ * add the combined table polynomial + let table_blinding = if lcs.runtime_selector.is_some() { + let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); + let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); + + let blinding = runtime_comm.blinders.unshifted[0]; + + PolyComm { + unshifted: vec![*joint_combiner * blinding], + shifted: None, + } + } else { + non_hiding(1) + }; + + let joint_lookup_table = lookup_context.joint_lookup_table.as_ref().unwrap(); + + polynomials.push((coefficients_form(joint_lookup_table), None, table_blinding)); + + //~~ * if present, add the runtime table polynomial + if lcs.runtime_selector.is_some() { + let runtime_table_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); + let runtime_table = lookup_context.runtime_table.as_ref().unwrap(); + + polynomials.push(( + coefficients_form(runtime_table), + None, + runtime_table_comm.blinders.clone(), + )); + } + + //~~ * the lookup selectors + + if let Some(runtime_lookup_table_selector) = lcs.runtime_selector.as_ref() { + polynomials.push(( + evaluations_form(runtime_lookup_table_selector), + None, + non_hiding(1), + )) + } + if let Some(xor_lookup_selector) = lcs.lookup_selectors.xor.as_ref() { + polynomials.push((evaluations_form(xor_lookup_selector), None, non_hiding(1))) + } + if let Some(lookup_gate_selector) = lcs.lookup_selectors.lookup.as_ref() { + polynomials.push((evaluations_form(lookup_gate_selector), None, non_hiding(1))) + } + if let Some(range_check_lookup_selector) = lcs.lookup_selectors.range_check.as_ref() { + polynomials.push(( + evaluations_form(range_check_lookup_selector), + None, + non_hiding(1), + )) + } + if let Some(foreign_field_mul_lookup_selector) = lcs.lookup_selectors.ffmul.as_ref() { + polynomials.push(( + evaluations_form(foreign_field_mul_lookup_selector), + None, + non_hiding(1), + )) + } + } + + //~ 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. 
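+        // (Editor's note, illustrative only.) `open` produces a single
+        // inner-product argument for the whole list: the polynomials are
+        // folded into one using powers of `v` (the polyscale), and the two
+        // evaluation points $\zeta$ and $\zeta\omega$ are folded using `u`
+        // (the evalscale), so one opening certifies all of the evaluations
+        // absorbed above.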
+ let proof = index.srs.open( + group_map, + &polynomials, + &[zeta, zeta_omega], + v, + u, + fq_sponge_before_evaluations, + rng, + ); + + let lookup = lookup_context + .aggreg_comm + .zip(lookup_context.sorted_comms) + .map(|(a, s)| LookupCommitments { + aggreg: a.commitment, + sorted: s.iter().map(|c| c.commitment.clone()).collect(), + runtime: lookup_context.runtime_table_comm.map(|x| x.commitment), + }); + + Ok(Self { + commitments: ProverCommitments { + w_comm: array::from_fn(|i| w_comm[i].commitment.clone()), + z_comm: z_comm.commitment, + t_comm: t_comm.commitment, + lookup, + }, + proof, + evals: chunked_evals, + ft_eval1, + prev_challenges, + }) + } +} + +#[test] +fn test_public_input_only_prover() { + use crate::{prover_index::testing::new_index_for_test_with_lookups, verifier::verify}; + use groupmap::GroupMap; + use mina_curves::pasta::{Fq, Pallas, PallasParameters}; + use mina_poseidon::{ + constants::PlonkSpongeConstantsKimchi, + sponge::{DefaultFqSponge, DefaultFrSponge}, + }; + use std::time::Instant; + + type SpongeParams = PlonkSpongeConstantsKimchi; + type BaseSponge = DefaultFqSponge; + type ScalarSponge = DefaultFrSponge; + + let start = Instant::now(); + + let mut idx = 0; + + let mut gate = || { + let res = crate::circuits::gate::CircuitGate { + coeffs: vec![], + typ: crate::circuits::gate::GateType::Zero, + wires: std::array::from_fn(|i| crate::circuits::wires::Wire { row: idx, col: i }), + }; + idx += 1; + res + }; + + let gates = vec![gate(), gate()]; + + let num_prev_challenges = 0; + + let num_public_inputs = 1; + + let index = new_index_for_test_with_lookups::( + gates, + num_public_inputs, + num_prev_challenges, + vec![], + None, + false, + ); + println!( + "- time to create prover index: {:?}s", + start.elapsed().as_millis() + ); + + let verifier_index = index.verifier_index(); + let prover_index = index; + + let prover = prover_index; + let witness = std::array::from_fn(|_| vec![]); + + let public_inputs = vec![Fq::zero()]; + + // add the proof to the batch + let start = Instant::now(); + + let group_map = ::Map::setup(); + + let proof = ProverProof::create_recursive_public_input_only::( + &group_map, + witness, + &vec![], + &prover, + vec![], + None, + ) + .unwrap(); + println!("- time to create proof: {:?}s", start.elapsed().as_millis()); + + // verify the proof (propagate any errors) + let start = Instant::now(); + verify::(&group_map, &verifier_index, &proof, &public_inputs) + .unwrap(); + println!("- time to verify: {}ms", start.elapsed().as_millis()); +} From 5fd2ea5b38bfc336a3b69886a06653b0dae17028 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 17:12:46 +0100 Subject: [PATCH 002/178] Purge lookups from public_input_only_prover --- kimchi/src/public_input_only_prover.rs | 558 +------------------------ 1 file changed, 17 insertions(+), 541 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index a449de97b6..26313cd2d6 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -3,9 +3,8 @@ use crate::{ circuits::{ argument::{Argument, ArgumentType}, - expr::{self, l0_1, Constants, Environment, LookupEnvironment}, + expr::{self, l0_1, Constants, Environment}, gate::GateType, - lookup::{self, runtime_tables::RuntimeTable, tables::combine_table_entry}, polynomials::{ complete_add::CompleteAdd, endomul_scalar::EndomulScalar, @@ -27,27 +26,22 @@ use crate::{ lagrange_basis_evaluations::LagrangeBasisEvaluations, plonk_sponge::FrSponge, proof::{ - 
LookupCommitments, PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, - RecursionChallenge, + PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, RecursionChallenge, }, prover_index::ProverIndex, }; use ark_ec::ProjectiveCurve; -use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; +use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, UVPolynomial, }; -use itertools::Itertools; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; use o1_utils::ExtendedDensePolynomial as _; use poly_commitment::{ - commitment::{ - absorb_commitment, b_poly_coefficients, BlindedCommitment, CommitmentCurve, PolyComm, - }, + commitment::{absorb_commitment, b_poly_coefficients, BlindedCommitment, PolyComm}, evaluation_proof::DensePolynomialOrEvaluations, }; -use rayon::prelude::*; use std::array; use std::collections::HashMap; @@ -72,55 +66,6 @@ macro_rules! check_constraint { }}; } -/// Contains variables needed for lookup in the prover algorithm. -#[derive(Default)] -struct LookupContext -where - G: CommitmentCurve, - F: FftField, -{ - /// The joint combiner used to join the columns of lookup tables - joint_combiner: Option, - - /// The power of the joint_combiner that can be used to add a table_id column - /// to the concatenated lookup tables. - table_id_combiner: Option, - - /// The combined lookup entry that can be used as dummy value - dummy_lookup_value: Option, - - /// The combined lookup table - joint_lookup_table: Option>, - joint_lookup_table_d8: Option>>, - - /// The sorted polynomials `s` in different forms - sorted: Option>>>, - sorted_coeffs: Option>>, - sorted_comms: Option>>, - sorted8: Option>>>, - - /// The aggregation polynomial in different forms - aggreg_coeffs: Option>, - aggreg_comm: Option>, - aggreg8: Option>>, - - // lookup-related evaluations - /// evaluation of lookup aggregation polynomial - pub lookup_aggregation_eval: Option>>, - /// evaluation of lookup table polynomial - pub lookup_table_eval: Option>>, - /// evaluation of lookup sorted polynomials - pub lookup_sorted_eval: [Option>>; 5], - /// evaluation of runtime lookup table polynomial - pub runtime_lookup_table_eval: Option>>, - - /// Runtime table - runtime_table: Option>, - runtime_table_d8: Option>>, - runtime_table_comm: Option>, - runtime_second_col_d8: Option>>, -} - impl ProverProof where G::BaseField: PrimeField, @@ -136,13 +81,11 @@ where >( groupmap: &G::Map, witness: [Vec; COLUMNS], - runtime_tables: &[RuntimeTable], index: &ProverIndex, ) -> Result { Self::create_recursive_public_input_only::( groupmap, witness, - runtime_tables, index, Vec::new(), None, @@ -164,7 +107,6 @@ where >( group_map: &G::Map, mut witness: [Vec; COLUMNS], - runtime_tables: &[RuntimeTable], index: &ProverIndex, prev_challenges: Vec>, blinders: Option<[Option>; COLUMNS]>, @@ -314,257 +256,12 @@ where .interpolate() }); - let mut lookup_context = LookupContext::default(); - - //~ 1. 
If using lookup: - if let Some(lcs) = &index.cs.lookup_constraint_system { - //~~ * if using runtime table: - if let Some(cfg_runtime_tables) = &lcs.runtime_tables { - //~~~ * check that all the provided runtime tables have length and IDs that match the runtime table configuration of the index - //~~~ we expect the given runtime tables to be sorted as configured, this makes it easier afterwards - let expected_runtime: Vec<_> = cfg_runtime_tables - .iter() - .map(|rt| (rt.id, rt.len)) - .collect(); - let runtime: Vec<_> = runtime_tables - .iter() - .map(|rt| (rt.id, rt.data.len())) - .collect(); - if expected_runtime != runtime { - return Err(ProverError::RuntimeTablesInconsistent); - } - - //~~~ * calculate the contribution to the second column of the lookup table - //~~~ (the runtime vector) - let (runtime_table_contribution, runtime_table_contribution_d8) = { - let mut offset = lcs - .runtime_table_offset - .expect("runtime configuration missing offset"); - - let mut evals = vec![G::ScalarField::zero(); d1_size]; - for rt in runtime_tables { - let range = offset..(offset + rt.data.len()); - evals[range].copy_from_slice(&rt.data); - offset += rt.data.len(); - } - - // zero-knowledge - for e in evals.iter_mut().rev().take(ZK_ROWS as usize) { - *e = ::rand(rng); - } - - // get coeff and evaluation form - let runtime_table_contribution = - Evaluations::from_vec_and_domain(evals, index.cs.domain.d1).interpolate(); - - let runtime_table_contribution_d8 = - runtime_table_contribution.evaluate_over_domain_by_ref(index.cs.domain.d8); - - (runtime_table_contribution, runtime_table_contribution_d8) - }; - - // commit the runtime polynomial - // (and save it to the proof) - let runtime_table_comm = index.srs.commit(&runtime_table_contribution, None, rng); - - // absorb the commitment - absorb_commitment(&mut fq_sponge, &runtime_table_comm.commitment); - - // pre-compute the updated second column of the lookup table - let mut second_column_d8 = runtime_table_contribution_d8.clone(); - second_column_d8 - .evals - .par_iter_mut() - .enumerate() - .for_each(|(row, e)| { - *e += lcs.lookup_table8[1][row]; - }); - - lookup_context.runtime_table = Some(runtime_table_contribution); - lookup_context.runtime_table_d8 = Some(runtime_table_contribution_d8); - lookup_context.runtime_table_comm = Some(runtime_table_comm); - lookup_context.runtime_second_col_d8 = Some(second_column_d8); - } - - //~~ * If queries involve a lookup table with multiple columns - //~~ then squeeze the Fq-Sponge to obtain the joint combiner challenge $j'$, - //~~ otherwise set the joint combiner challenge $j'$ to $0$. - let joint_combiner = if lcs.configuration.lookup_info.features.joint_lookup_used { - fq_sponge.challenge() - } else { - G::ScalarField::zero() - }; - - //~~ * Derive the scalar joint combiner $j$ from $j'$ using the endomorphism (TOOD: specify) - let joint_combiner: G::ScalarField = ScalarChallenge(joint_combiner).to_field(endo_r); - - //~~ * If multiple lookup tables are involved, - //~~ set the `table_id_combiner` as the $j^i$ with $i$ the maximum width of any used table. - //~~ Essentially, this is to add a last column of table ids to the concatenated lookup tables. 
- let table_id_combiner: G::ScalarField = if lcs.table_ids8.as_ref().is_some() { - joint_combiner.pow([lcs.configuration.lookup_info.max_joint_size as u64]) - } else { - // TODO: just set this to None in case multiple tables are not used - G::ScalarField::zero() - }; - lookup_context.table_id_combiner = Some(table_id_combiner); - - //~~ * Compute the dummy lookup value as the combination of the last entry of the XOR table (so `(0, 0, 0)`). - //~~ Warning: This assumes that we always use the XOR table when using lookups. - let dummy_lookup_value = lcs - .configuration - .dummy_lookup - .evaluate(&joint_combiner, &table_id_combiner); - lookup_context.dummy_lookup_value = Some(dummy_lookup_value); - - //~~ * Compute the lookup table values as the combination of the lookup table entries. - let joint_lookup_table_d8 = { - let mut evals = Vec::with_capacity(d1_size); - - for idx in 0..(d1_size * 8) { - let table_id = match lcs.table_ids8.as_ref() { - Some(table_ids8) => table_ids8.evals[idx], - None => - // If there is no `table_ids8` in the constraint system, - // every table ID is identically 0. - { - G::ScalarField::zero() - } - }; - - let combined_entry = - if !lcs.configuration.lookup_info.features.uses_runtime_tables { - let table_row = lcs.lookup_table8.iter().map(|e| &e.evals[idx]); - - combine_table_entry( - &joint_combiner, - &table_id_combiner, - table_row, - &table_id, - ) - } else { - // if runtime table are used, the second row is modified - let second_col = lookup_context.runtime_second_col_d8.as_ref().unwrap(); - - let table_row = lcs.lookup_table8.iter().enumerate().map(|(col, e)| { - if col == 1 { - &second_col.evals[idx] - } else { - &e.evals[idx] - } - }); - - combine_table_entry( - &joint_combiner, - &table_id_combiner, - table_row, - &table_id, - ) - }; - evals.push(combined_entry); - } - - Evaluations::from_vec_and_domain(evals, index.cs.domain.d8) - }; - - // TODO: This interpolation is avoidable. - let joint_lookup_table = joint_lookup_table_d8.interpolate_by_ref(); - - //~~ * Compute the sorted evaluations. - // TODO: Once we switch to committing using lagrange commitments, - // `witness` will be consumed when we interpolate, so interpolation will - // have to moved below this. - let sorted: Vec<_> = lookup::constraints::sorted( - dummy_lookup_value, - &joint_lookup_table_d8, - index.cs.domain.d1, - &index.cs.gates, - &witness, - joint_combiner, - table_id_combiner, - &lcs.configuration.lookup_info, - )?; - - //~~ * Randomize the last `EVALS` rows in each of the sorted polynomials - //~~ in order to add zero-knowledge to the protocol. - let sorted: Vec<_> = sorted - .into_iter() - .map(|chunk| lookup::constraints::zk_patch(chunk, index.cs.domain.d1, rng)) - .collect(); - - //~~ * Commit each of the sorted polynomials. - let sorted_comms: Vec<_> = sorted - .iter() - .map(|v| index.srs.commit_evaluations(index.cs.domain.d1, v, rng)) - .collect(); - - //~~ * Absorb each commitments to the sorted polynomials. - sorted_comms - .iter() - .for_each(|c| absorb_commitment(&mut fq_sponge, &c.commitment)); - - // precompute different forms of the sorted polynomials for later - // TODO: We can avoid storing these coefficients. 
- let sorted_coeffs: Vec<_> = sorted.iter().map(|e| e.clone().interpolate()).collect(); - let sorted8: Vec<_> = sorted_coeffs - .iter() - .map(|v| v.evaluate_over_domain_by_ref(index.cs.domain.d8)) - .collect(); - - lookup_context.joint_combiner = Some(joint_combiner); - lookup_context.sorted = Some(sorted); - lookup_context.sorted_coeffs = Some(sorted_coeffs); - lookup_context.sorted_comms = Some(sorted_comms); - lookup_context.sorted8 = Some(sorted8); - lookup_context.joint_lookup_table_d8 = Some(joint_lookup_table_d8); - lookup_context.joint_lookup_table = Some(joint_lookup_table); - } - //~ 1. Sample $\beta$ with the Fq-Sponge. let beta = fq_sponge.challenge(); //~ 1. Sample $\gamma$ with the Fq-Sponge. let gamma = fq_sponge.challenge(); - //~ 1. If using lookup: - if let Some(lcs) = &index.cs.lookup_constraint_system { - //~~ * Compute the lookup aggregation polynomial. - let joint_lookup_table_d8 = lookup_context.joint_lookup_table_d8.as_ref().unwrap(); - - let aggreg = lookup::constraints::aggregation::<_, G::ScalarField>( - lookup_context.dummy_lookup_value.unwrap(), - joint_lookup_table_d8, - index.cs.domain.d1, - &index.cs.gates, - &witness, - &lookup_context.joint_combiner.unwrap(), - &lookup_context.table_id_combiner.unwrap(), - beta, - gamma, - lookup_context.sorted.as_ref().unwrap(), - rng, - &lcs.configuration.lookup_info, - )?; - - //~~ * Commit to the aggregation polynomial. - let aggreg_comm = index - .srs - .commit_evaluations(index.cs.domain.d1, &aggreg, rng); - - //~~ * Absorb the commitment to the aggregation polynomial with the Fq-Sponge. - absorb_commitment(&mut fq_sponge, &aggreg_comm.commitment); - - // precompute different forms of the aggregation polynomial for later - let aggreg_coeffs = aggreg.interpolate(); - // TODO: There's probably a clever way to expand the domain without - // interpolating - let aggreg8 = aggreg_coeffs.evaluate_over_domain_by_ref(index.cs.domain.d8); - - lookup_context.aggreg_comm = Some(aggreg_comm); - lookup_context.aggreg_coeffs = Some(aggreg_coeffs); - lookup_context.aggreg8 = Some(aggreg8); - } - //~ 1. Compute the permutation aggregation polynomial $z$. let z_poly = index.perm_aggreg(&witness, &beta, &gamma, rng)?; @@ -588,24 +285,9 @@ where //~ The quotient polynomial is computed by adding all these polynomials together: //~~ * the combined constraints for all the gates //~~ * the combined constraints for the permutation - //~~ * TODO: lookup //~~ * the negated public polynomial //~ and by then dividing the resulting polynomial with the vanishing polynomial $Z_H$. //~ TODO: specify the split of the permutation polynomial into perm and bnd? 
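// Editorial sketch (not part of the patch; `t4`, `t8`, `public_poly` and `bnd`
// are the names this file uses further down): concretely, the division
// described above amounts to
//
//     let mut f = t4.interpolate() + t8.interpolate();
//     f += &public_poly; // the public polynomial is already negated
//     let (mut quotient, res) = f
//         .divide_by_vanishing_poly(index.cs.domain.d1)
//         .ok_or(ProverError::Prover("division by vanishing polynomial"))?;
//     // `res` must be zero, or the witness does not satisfy the circuit;
//     // `bnd`, the permutation boundary term, is added afterwards because it
//     // has already been divided by $Z_H$.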
- let lookup_env = if let Some(lcs) = &index.cs.lookup_constraint_system { - let joint_lookup_table_d8 = lookup_context.joint_lookup_table_d8.as_ref().unwrap(); - - Some(LookupEnvironment { - aggreg: lookup_context.aggreg8.as_ref().unwrap(), - sorted: lookup_context.sorted8.as_ref().unwrap(), - selectors: &lcs.lookup_selectors, - table: joint_lookup_table_d8, - runtime_selector: lcs.runtime_selector.as_ref(), - runtime_table: lookup_context.runtime_table_d8.as_ref(), - }) - } else { - None - }; let lagrange = index.cs.evaluate(&witness_poly, &z_poly); let env = { @@ -667,7 +349,7 @@ where alpha, beta, gamma, - joint_combiner: lookup_context.joint_combiner, + joint_combiner: None, endo_coefficient: index.cs.endo, mds, }, @@ -678,7 +360,7 @@ where l0_1: l0_1(index.cs.domain.d1), domain: index.cs.domain, index: index_evals, - lookup: lookup_env, + lookup: None, } }; @@ -769,38 +451,6 @@ where } }; - // lookup - { - if let Some(lcs) = index.cs.lookup_constraint_system.as_ref() { - let constraints = lookup::constraints::constraints(&lcs.configuration, false); - let constraints_len = u32::try_from(constraints.len()) - .expect("not expecting a large amount of constraints"); - let lookup_alphas = - all_alphas.get_alphas(ArgumentType::Lookup, constraints_len); - - // as lookup constraints are computed with the expression framework, - // each of them can result in Evaluations of different domains - for (ii, (constraint, alpha_pow)) in - constraints.into_iter().zip_eq(lookup_alphas).enumerate() - { - let mut eval = constraint.evaluations(&env); - eval.evals.par_iter_mut().for_each(|x| *x *= alpha_pow); - - if eval.domain().size == t4.domain().size { - t4 += &eval; - } else if eval.domain().size == t8.domain().size { - t8 += &eval; - } else if eval.evals.iter().all(|x| x.is_zero()) { - // Skip any 0-valued evaluations - } else { - panic!("Bad evaluation") - } - - check_constraint!(index, format!("lookup constraint #{ii}"), eval); - } - } - } - // public polynomial let mut f = t4.interpolate() + t8.interpolate(); f += &public_poly; @@ -852,57 +502,6 @@ where let omega = index.cs.domain.d1.group_gen; let zeta_omega = zeta * omega; - //~ 1. 
If lookup is used, evaluate the following polynomials at $\zeta$ and $\zeta \omega$:
- if index.cs.lookup_constraint_system.is_some() {
- //~~ * the aggregation polynomial
- let aggreg = lookup_context
- .aggreg_coeffs
- .as_ref()
- .unwrap()
- .to_chunked_polynomial(index.max_poly_size);
-
- //~~ * the sorted polynomials
- let sorted = lookup_context
- .sorted_coeffs
- .as_ref()
- .unwrap()
- .iter()
- .map(|c| c.to_chunked_polynomial(index.max_poly_size))
- .collect::<Vec<_>>();
-
- //~~ * the table polynomial
- let joint_table = lookup_context.joint_lookup_table.as_ref().unwrap();
- let joint_table = joint_table.to_chunked_polynomial(index.max_poly_size);
-
- lookup_context.lookup_aggregation_eval = Some(PointEvaluations {
- zeta: aggreg.evaluate_chunks(zeta),
- zeta_omega: aggreg.evaluate_chunks(zeta_omega),
- });
- lookup_context.lookup_table_eval = Some(PointEvaluations {
- zeta: joint_table.evaluate_chunks(zeta),
- zeta_omega: joint_table.evaluate_chunks(zeta_omega),
- });
- lookup_context.lookup_sorted_eval = array::from_fn(|i| {
- if i < sorted.len() {
- let sorted = &sorted[i];
- Some(PointEvaluations {
- zeta: sorted.evaluate_chunks(zeta),
- zeta_omega: sorted.evaluate_chunks(zeta_omega),
- })
- } else {
- None
- }
- });
- lookup_context.runtime_lookup_table_eval =
- lookup_context.runtime_table.as_ref().map(|runtime_table| {
- let runtime_table = runtime_table.to_chunked_polynomial(index.max_poly_size);
- PointEvaluations {
- zeta: runtime_table.evaluate_chunks(zeta),
- zeta_omega: runtime_table.evaluate_chunks(zeta_omega),
- }
- });
- }
-
 //~ 1. Chunk evaluate the following polynomials at both $\zeta$ and $\zeta \omega$:
 //~~ * $s_i$
 //~~ * $w_i$
@@ -962,10 +561,10 @@ where
 }
 },
- lookup_aggregation: lookup_context.lookup_aggregation_eval.take(),
- lookup_table: lookup_context.lookup_table_eval.take(),
- lookup_sorted: array::from_fn(|i| lookup_context.lookup_sorted_eval[i].take()),
- runtime_lookup_table: lookup_context.runtime_lookup_table_eval.take(),
+ lookup_aggregation: None,
+ lookup_table: None,
+ lookup_sorted: array::from_fn(|_| None),
+ runtime_lookup_table: None,
 generic_selector: chunked_evals_for_selector(
 &index.column_evaluations.generic_selector4,
 ),
@@ -1012,43 +611,11 @@ where
 .as_ref()
 .map(chunked_evals_for_selector),
- runtime_lookup_table_selector: index.cs.lookup_constraint_system.as_ref().and_then(
- |lcs| {
- lcs.runtime_selector
- .as_ref()
- .map(chunked_evals_for_selector)
- },
- ),
- xor_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then(|lcs| {
- lcs.lookup_selectors
- .xor
- .as_ref()
- .map(chunked_evals_for_selector)
- }),
- lookup_gate_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then(
- |lcs| {
- lcs.lookup_selectors
- .lookup
- .as_ref()
- .map(chunked_evals_for_selector)
- },
- ),
- range_check_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then(
- |lcs| {
- lcs.lookup_selectors
- .range_check
- .as_ref()
- .map(chunked_evals_for_selector)
- },
- ),
- foreign_field_mul_lookup_selector: index.cs.lookup_constraint_system.as_ref().and_then(
- |lcs| {
- lcs.lookup_selectors
- .ffmul
- .as_ref()
- .map(chunked_evals_for_selector)
- },
- ),
+ runtime_lookup_table_selector: None,
+ xor_lookup_selector: None,
+ lookup_gate_lookup_selector: None,
+ range_check_lookup_selector: None,
+ foreign_field_mul_lookup_selector: None,
 };
 let zeta_to_srs_len = zeta.pow([index.max_poly_size as u64]);
@@ -1316,88 +883,6 @@ where
 polynomials.push((evaluations_form(rot_selector8), None, non_hiding(1)));
 }
- //~~ *
optionally, the runtime table - //~ 1. if using lookup: - if let Some(lcs) = &index.cs.lookup_constraint_system { - //~~ * add the lookup sorted polynomials - let sorted_poly = lookup_context.sorted_coeffs.as_ref().unwrap(); - let sorted_comms = lookup_context.sorted_comms.as_ref().unwrap(); - - for (poly, comm) in sorted_poly.iter().zip(sorted_comms) { - polynomials.push((coefficients_form(poly), None, comm.blinders.clone())); - } - - //~~ * add the lookup aggreg polynomial - let aggreg_poly = lookup_context.aggreg_coeffs.as_ref().unwrap(); - let aggreg_comm = lookup_context.aggreg_comm.as_ref().unwrap(); - polynomials.push(( - coefficients_form(aggreg_poly), - None, - aggreg_comm.blinders.clone(), - )); - - //~~ * add the combined table polynomial - let table_blinding = if lcs.runtime_selector.is_some() { - let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); - let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); - - let blinding = runtime_comm.blinders.unshifted[0]; - - PolyComm { - unshifted: vec![*joint_combiner * blinding], - shifted: None, - } - } else { - non_hiding(1) - }; - - let joint_lookup_table = lookup_context.joint_lookup_table.as_ref().unwrap(); - - polynomials.push((coefficients_form(joint_lookup_table), None, table_blinding)); - - //~~ * if present, add the runtime table polynomial - if lcs.runtime_selector.is_some() { - let runtime_table_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); - let runtime_table = lookup_context.runtime_table.as_ref().unwrap(); - - polynomials.push(( - coefficients_form(runtime_table), - None, - runtime_table_comm.blinders.clone(), - )); - } - - //~~ * the lookup selectors - - if let Some(runtime_lookup_table_selector) = lcs.runtime_selector.as_ref() { - polynomials.push(( - evaluations_form(runtime_lookup_table_selector), - None, - non_hiding(1), - )) - } - if let Some(xor_lookup_selector) = lcs.lookup_selectors.xor.as_ref() { - polynomials.push((evaluations_form(xor_lookup_selector), None, non_hiding(1))) - } - if let Some(lookup_gate_selector) = lcs.lookup_selectors.lookup.as_ref() { - polynomials.push((evaluations_form(lookup_gate_selector), None, non_hiding(1))) - } - if let Some(range_check_lookup_selector) = lcs.lookup_selectors.range_check.as_ref() { - polynomials.push(( - evaluations_form(range_check_lookup_selector), - None, - non_hiding(1), - )) - } - if let Some(foreign_field_mul_lookup_selector) = lcs.lookup_selectors.ffmul.as_ref() { - polynomials.push(( - evaluations_form(foreign_field_mul_lookup_selector), - None, - non_hiding(1), - )) - } - } - //~ 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. 
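// Editorial note (a sketch of the aggregation, not part of the patch): each
// entry pushed into `polynomials` above is a (polynomial, optional degree
// bound, blinders) triple. `srs.open` below folds all of them into a single
// inner-product-argument opening at the two points $\zeta$ and $\zeta\omega$,
// roughly
//
//     combined(X) = sum_i v^i * f_i(X),  opened at both points, folded with u
//
// so one opening proof attests to every claimed evaluation at once.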
let proof = index.srs.open( group_map, @@ -1409,21 +894,12 @@ where rng, ); - let lookup = lookup_context - .aggreg_comm - .zip(lookup_context.sorted_comms) - .map(|(a, s)| LookupCommitments { - aggreg: a.commitment, - sorted: s.iter().map(|c| c.commitment.clone()).collect(), - runtime: lookup_context.runtime_table_comm.map(|x| x.commitment), - }); - Ok(Self { commitments: ProverCommitments { w_comm: array::from_fn(|i| w_comm[i].commitment.clone()), z_comm: z_comm.commitment, t_comm: t_comm.commitment, - lookup, + lookup: None, }, proof, evals: chunked_evals, @@ -1442,6 +918,7 @@ fn test_public_input_only_prover() { constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; + use poly_commitment::commitment::CommitmentCurve; use std::time::Instant; type SpongeParams = PlonkSpongeConstantsKimchi; @@ -1497,7 +974,6 @@ fn test_public_input_only_prover() { let proof = ProverProof::create_recursive_public_input_only::( &group_map, witness, - &vec![], &prover, vec![], None, From 3be08574547e96a59d97fd4883f359e3ff8e2b6c Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 17:17:42 +0100 Subject: [PATCH 003/178] Remove zk rows from public input only prover --- kimchi/src/public_input_only_prover.rs | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 26313cd2d6..5c5095a3e1 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -12,7 +12,6 @@ use crate::{ foreign_field_add::circuitgates::ForeignFieldAdd, foreign_field_mul::{self, circuitgates::ForeignFieldMul}, generic, permutation, - permutation::ZK_ROWS, poseidon::Poseidon, range_check::circuitgates::{RangeCheck0, RangeCheck1}, rot::Rot64, @@ -122,29 +121,11 @@ where // TODO: rng should be passed as arg let rng = &mut rand::rngs::OsRng; - // Verify the circuit satisfiability by the computed witness (baring plookup constraints) - // Catch mistakes before proof generation. - if cfg!(debug_assertions) && !index.cs.disable_gates_checks { - let public = witness[0][0..index.cs.public].to_vec(); - index.verify(&witness, &public).expect("incorrect witness"); - } - - //~ 1. Ensure we have room in the witness for the zero-knowledge rows. - //~ We currently expect the witness not to be of the same length as the domain, - //~ but instead be of the length of the (smaller) circuit. - //~ If we cannot add `ZK_ROWS` rows to the columns of the witness before reaching - //~ the size of the domain, abort. let length_witness = witness[0].len(); let length_padding = d1_size .checked_sub(length_witness) .ok_or(ProverError::NoRoomForZkInWitness)?; - if length_padding < ZK_ROWS as usize { - return Err(ProverError::NoRoomForZkInWitness); - } - - //~ 1. Pad the witness columns with Zero gates to make them the same length as the domain. - //~ Then, randomize the last `ZK_ROWS` of each columns. for w in &mut witness { if w.len() != length_witness { return Err(ProverError::WitnessCsInconsistent); @@ -152,11 +133,6 @@ where // padding w.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); - - // zk-rows - for row in w.iter_mut().rev().take(ZK_ROWS as usize) { - *row = ::rand(rng); - } } //~ 1. Setup the Fq-Sponge. 
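Editorial note: after this patch the witness columns are padded deterministically
and nothing is randomized, so the resulting proof is no longer zero-knowledge.
That is acceptable here because the only "witness" is the public input itself,
which the verifier sees anyway. A minimal sketch of the remaining padding
behaviour, reusing `witness` and `d1_size` from the hunk above (ignoring the
checked subtraction for brevity):

    let pad = d1_size - witness[0].len();
    for w in &mut witness {
        // zero-pad every column up to the domain size; no zk rows
        w.extend(std::iter::repeat(G::ScalarField::zero()).take(pad));
    }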
From 47d47aeaaf4704539c6275686e8640735eeb04f6 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 17:24:04 +0100 Subject: [PATCH 004/178] Only provide 1 witness column --- kimchi/src/public_input_only_prover.rs | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 5c5095a3e1..dd9a013f2e 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -79,7 +79,7 @@ where EFrSponge: FrSponge, >( groupmap: &G::Map, - witness: [Vec; COLUMNS], + witness: Vec, index: &ProverIndex, ) -> Result { Self::create_recursive_public_input_only::( @@ -105,7 +105,7 @@ where EFrSponge: FrSponge, >( group_map: &G::Map, - mut witness: [Vec; COLUMNS], + witness: Vec, index: &ProverIndex, prev_challenges: Vec>, blinders: Option<[Option>; COLUMNS]>, @@ -121,11 +121,29 @@ where // TODO: rng should be passed as arg let rng = &mut rand::rngs::OsRng; - let length_witness = witness[0].len(); + let length_witness = witness.len(); let length_padding = d1_size .checked_sub(length_witness) .ok_or(ProverError::NoRoomForZkInWitness)?; + let mut witness: [Vec; COLUMNS] = [ + witness, + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + vec![], + ]; + for w in &mut witness { if w.len() != length_witness { return Err(ProverError::WitnessCsInconsistent); @@ -938,7 +956,7 @@ fn test_public_input_only_prover() { let prover_index = index; let prover = prover_index; - let witness = std::array::from_fn(|_| vec![]); + let witness = vec![]; let public_inputs = vec![Fq::zero()]; From 2b42287bbcac85f6d1ba53582328784d7ece7f86 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 17:30:32 +0100 Subject: [PATCH 005/178] Remove unused gates --- kimchi/src/public_input_only_prover.rs | 76 +------------------------- 1 file changed, 3 insertions(+), 73 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index dd9a013f2e..50bb2bc9d6 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -5,19 +5,7 @@ use crate::{ argument::{Argument, ArgumentType}, expr::{self, l0_1, Constants, Environment}, gate::GateType, - polynomials::{ - complete_add::CompleteAdd, - endomul_scalar::EndomulScalar, - endosclmul::EndosclMul, - foreign_field_add::circuitgates::ForeignFieldAdd, - foreign_field_mul::{self, circuitgates::ForeignFieldMul}, - generic, permutation, - poseidon::Poseidon, - range_check::circuitgates::{RangeCheck0, RangeCheck1}, - rot::Rot64, - varbasemul::VarbaseMul, - xor::Xor16, - }, + polynomials::{foreign_field_mul, generic, permutation}, wires::{COLUMNS, PERMUTS}, }, curve::KimchiCurve, @@ -362,7 +350,7 @@ where let quotient_poly = { // generic - let mut t4 = { + let t4 = { let generic_constraint = generic::Generic::combined_constraints(&all_alphas, &mut cache); let generic4 = generic_constraint.evaluations(&env); @@ -377,7 +365,7 @@ where generic4 }; // permutation - let (mut t8, bnd) = { + let (t8, bnd) = { let alphas = all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); let (perm, bnd) = index.perm_quot(&lagrange, beta, gamma, &z_poly, alphas)?; @@ -387,64 +375,6 @@ where (perm, bnd) }; - { - use crate::circuits::argument::DynArgument; - - let range_check0_enabled = - index.column_evaluations.range_check0_selector8.is_some(); - let range_check1_enabled = - 
index.column_evaluations.range_check1_selector8.is_some(); - let foreign_field_addition_enabled = index - .column_evaluations - .foreign_field_add_selector8 - .is_some(); - let foreign_field_multiplication_enabled = index - .column_evaluations - .foreign_field_mul_selector8 - .is_some(); - let xor_enabled = index.column_evaluations.xor_selector8.is_some(); - let rot_enabled = index.column_evaluations.rot_selector8.is_some(); - - for gate in [ - ( - (&CompleteAdd::default() as &dyn DynArgument), - true, - ), - (&VarbaseMul::default(), true), - (&EndosclMul::default(), true), - (&EndomulScalar::default(), true), - (&Poseidon::default(), true), - // Range check gates - (&RangeCheck0::default(), range_check0_enabled), - (&RangeCheck1::default(), range_check1_enabled), - // Foreign field addition gate - (&ForeignFieldAdd::default(), foreign_field_addition_enabled), - // Foreign field multiplication gate - ( - &ForeignFieldMul::default(), - foreign_field_multiplication_enabled, - ), - // Xor gate - (&Xor16::default(), xor_enabled), - // Rot gate - (&Rot64::default(), rot_enabled), - ] - .into_iter() - .filter_map(|(gate, is_enabled)| if is_enabled { Some(gate) } else { None }) - { - let constraint = gate.combined_constraints(&all_alphas, &mut cache); - let eval = constraint.evaluations(&env); - if eval.domain().size == t4.domain().size { - t4 += &eval; - } else if eval.domain().size == t8.domain().size { - t8 += &eval; - } else { - panic!("Bad evaluation") - } - check_constraint!(index, format!("{:?}", gate.argument_type()), eval); - } - }; - // public polynomial let mut f = t4.interpolate() + t8.interpolate(); f += &public_poly; From 8356aabf35bd36ad9b906b17aea9aef3ac9650cd Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 17:32:43 +0100 Subject: [PATCH 006/178] Remove optional gates --- kimchi/src/public_input_only_prover.rs | 85 ++------------------------ 1 file changed, 6 insertions(+), 79 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 50bb2bc9d6..cbb04745c3 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -504,37 +504,12 @@ where &index.column_evaluations.endomul_scalar_selector8, ), - range_check0_selector: index - .column_evaluations - .range_check0_selector8 - .as_ref() - .map(chunked_evals_for_selector), - range_check1_selector: index - .column_evaluations - .range_check1_selector8 - .as_ref() - .map(chunked_evals_for_selector), - foreign_field_add_selector: index - .column_evaluations - .foreign_field_add_selector8 - .as_ref() - .map(chunked_evals_for_selector), - foreign_field_mul_selector: index - .column_evaluations - .foreign_field_mul_selector8 - .as_ref() - .map(chunked_evals_for_selector), - xor_selector: index - .column_evaluations - .xor_selector8 - .as_ref() - .map(chunked_evals_for_selector), - rot_selector: index - .column_evaluations - .rot_selector8 - .as_ref() - .map(chunked_evals_for_selector), - + range_check0_selector: None, + range_check1_selector: None, + foreign_field_add_selector: None, + foreign_field_mul_selector: None, + xor_selector: None, + rot_selector: None, runtime_lookup_table_selector: None, xor_lookup_selector: None, lookup_gate_lookup_selector: None, @@ -759,54 +734,6 @@ where .collect::>(), ); - //~~ * the optional gates - if let Some(range_check0_selector8) = - index.column_evaluations.range_check0_selector8.as_ref() - { - polynomials.push(( - evaluations_form(range_check0_selector8), - None, - non_hiding(1), - )); - } - if 
let Some(range_check1_selector8) =
- index.column_evaluations.range_check1_selector8.as_ref()
- {
- polynomials.push((
- evaluations_form(range_check1_selector8),
- None,
- non_hiding(1),
- ));
- }
- if let Some(foreign_field_add_selector8) = index
- .column_evaluations
- .foreign_field_add_selector8
- .as_ref()
- {
- polynomials.push((
- evaluations_form(foreign_field_add_selector8),
- None,
- non_hiding(1),
- ));
- }
- if let Some(foreign_field_mul_selector8) = index
- .column_evaluations
- .foreign_field_mul_selector8
- .as_ref()
- {
- polynomials.push((
- evaluations_form(foreign_field_mul_selector8),
- None,
- non_hiding(1),
- ));
- }
- if let Some(xor_selector8) = index.column_evaluations.xor_selector8.as_ref() {
- polynomials.push((evaluations_form(xor_selector8), None, non_hiding(1)));
- }
- if let Some(rot_selector8) = index.column_evaluations.rot_selector8.as_ref() {
- polynomials.push((evaluations_form(rot_selector8), None, non_hiding(1)));
- }

From 7d7f0711b19ded87bde1d618326fff24dcd2a407 Mon Sep 17 00:00:00 2001
From: mrmr1993
Date: Fri, 18 Aug 2023 17:44:00 +0100
Subject: [PATCH 007/178] Replace z_poly with the constant polynomial 1

---
 kimchi/src/public_input_only_prover.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs
index cbb04745c3..a8d2a17459 100644
--- a/kimchi/src/public_input_only_prover.rs
+++ b/kimchi/src/public_input_only_prover.rs
@@ -244,8 +244,7 @@ where
 //~ 1. Sample $\gamma$ with the Fq-Sponge.
 let gamma = fq_sponge.challenge();
- //~ 1. Compute the permutation aggregation polynomial $z$.
- let z_poly = index.perm_aggreg(&witness, &beta, &gamma, rng)?;
+ let z_poly = DensePolynomial::from_coefficients_vec(vec![G::ScalarField::one()]);
 //~ 1. Commit (hiding) to the permutation aggregation polynomial $z$.
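// Editorial note (why a constant $z$ is sound here, a sketch): with the wiring
// being the identity permutation — as in the test below — each factor
//     (w_i(x) + beta * shift_i * x + gamma) / (w_i(x) + beta * sigma_i(x) + gamma)
// of the permutation grand product has sigma_i(x) = shift_i * x, so numerator
// and denominator agree, every factor is 1, and the aggregation polynomial is
// identically 1. Committing to the constant polynomial below is therefore
// exact, not an approximation.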
let z_comm = index.srs.commit(&z_poly, None, rng); From df13da271bc5d68b568b14f80e8bbc1c856f3a92 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 17:49:26 +0100 Subject: [PATCH 008/178] Inline contents of new_index_for_test_with_lookups --- kimchi/src/public_input_only_prover.rs | 32 ++++++++++++++++---------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index a8d2a17459..39eae6d9c8 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -761,15 +761,18 @@ where #[test] fn test_public_input_only_prover() { - use crate::{prover_index::testing::new_index_for_test_with_lookups, verifier::verify}; + use crate::{circuits::constraints::ConstraintSystem, verifier::verify}; use groupmap::GroupMap; - use mina_curves::pasta::{Fq, Pallas, PallasParameters}; + use mina_curves::pasta::{Fq, Pallas, PallasParameters, Vesta}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; - use poly_commitment::commitment::CommitmentCurve; - use std::time::Instant; + use poly_commitment::{ + commitment::CommitmentCurve, + srs::{endos, SRS}, + }; + use std::{sync::Arc, time::Instant}; type SpongeParams = PlonkSpongeConstantsKimchi; type BaseSponge = DefaultFqSponge; @@ -795,14 +798,19 @@ fn test_public_input_only_prover() { let num_public_inputs = 1; - let index = new_index_for_test_with_lookups::( - gates, - num_public_inputs, - num_prev_challenges, - vec![], - None, - false, - ); + let index = { + let cs = ConstraintSystem::::create(gates) + .public(num_public_inputs) + .prev_challenges(num_prev_challenges) + .build() + .unwrap(); + let mut srs = SRS::::create(cs.domain.d1.size()); + srs.add_lagrange_basis(cs.domain.d1); + let srs = Arc::new(srs); + + let (endo_q, _endo_r) = endos::(); + ProverIndex::::create(cs, endo_q, srs) + }; println!( "- time to create prover index: {:?}s", start.elapsed().as_millis() From 6ae7048afeb5e5701262144cce1e7a33a023106e Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 18:12:25 +0100 Subject: [PATCH 009/178] Inline ConstraintSystem::build --- kimchi/src/circuits/constraints.rs | 2 +- kimchi/src/public_input_only_prover.rs | 52 ++++++++++++++++++++++---- 2 files changed, 45 insertions(+), 9 deletions(-) diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 238e725bc1..25ab00b326 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -166,7 +166,7 @@ pub struct ConstraintSystem { pub lookup_constraint_system: Option>, /// precomputes #[serde(skip)] - precomputations: OnceCell>>, + pub(crate) precomputations: OnceCell>>, /// Disable gates checks (for testing; only enables with development builds) pub disable_gates_checks: bool, diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 39eae6d9c8..c9052776ff 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -761,13 +761,21 @@ where #[test] fn test_public_input_only_prover() { - use crate::{circuits::constraints::ConstraintSystem, verifier::verify}; + use crate::{ + circuits::{ + constraints::{ConstraintSystem, FeatureFlags}, + domains::EvaluationDomains, + lookup::lookups::{LookupFeatures, LookupPatterns}, + }, + verifier::verify, + }; use groupmap::GroupMap; use mina_curves::pasta::{Fq, Pallas, PallasParameters, Vesta}; use mina_poseidon::{ 
constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; + use once_cell::sync::OnceCell; use poly_commitment::{ commitment::CommitmentCurve, srs::{endos, SRS}, @@ -799,11 +807,39 @@ fn test_public_input_only_prover() { let num_public_inputs = 1; let index = { - let cs = ConstraintSystem::::create(gates) - .public(num_public_inputs) - .prev_challenges(num_prev_challenges) - .build() - .unwrap(); + let domain = EvaluationDomains::::create(gates.len() + num_public_inputs).unwrap(); + let shifts = permutation::Shifts::new(&domain.d1); + let sid = shifts.map[0].clone(); + let cs = ConstraintSystem { + domain, + public: num_public_inputs, + prev_challenges: num_prev_challenges, + sid, + gates, + shift: shifts.shifts, + endo: Fq::zero(), + lookup_constraint_system: None, + feature_flags: FeatureFlags { + range_check0: false, + range_check1: false, + lookup_features: LookupFeatures { + patterns: LookupPatterns { + xor: false, + lookup: false, + range_check: false, + foreign_field_mul: false, + }, + joint_lookup_used: false, + uses_runtime_tables: false, + }, + foreign_field_add: false, + foreign_field_mul: false, + xor: false, + rot: false, + }, + precomputations: OnceCell::new(), + disable_gates_checks: false, + }; let mut srs = SRS::::create(cs.domain.d1.size()); srs.add_lagrange_basis(cs.domain.d1); let srs = Arc::new(srs); @@ -812,7 +848,7 @@ fn test_public_input_only_prover() { ProverIndex::::create(cs, endo_q, srs) }; println!( - "- time to create prover index: {:?}s", + "- time to create prover index: {:?}ms", start.elapsed().as_millis() ); @@ -837,7 +873,7 @@ fn test_public_input_only_prover() { None, ) .unwrap(); - println!("- time to create proof: {:?}s", start.elapsed().as_millis()); + println!("- time to create proof: {:?}ms", start.elapsed().as_millis()); // verify the proof (propagate any errors) let start = Instant::now(); From cdd3ad78145038458706dfe4d39bccc4b488b1e4 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 18:39:16 +0100 Subject: [PATCH 010/178] Begin using public input directly --- kimchi/src/public_input_only_prover.rs | 58 ++++++++++++-------------- 1 file changed, 26 insertions(+), 32 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index c9052776ff..b74dd6a13b 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -93,7 +93,7 @@ where EFrSponge: FrSponge, >( group_map: &G::Map, - witness: Vec, + mut witness: Vec, index: &ProverIndex, prev_challenges: Vec>, blinders: Option<[Option>; COLUMNS]>, @@ -113,34 +113,26 @@ where let length_padding = d1_size .checked_sub(length_witness) .ok_or(ProverError::NoRoomForZkInWitness)?; + witness.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); - let mut witness: [Vec; COLUMNS] = [ + let witness: [Vec; COLUMNS] = [ witness, - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], - vec![], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], + 
vec![G::ScalarField::zero(); d1_size], + vec![G::ScalarField::zero(); d1_size], ]; - for w in &mut witness { - if w.len() != length_witness { - return Err(ProverError::WitnessCsInconsistent); - } - - // padding - w.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); - } - //~ 1. Setup the Fq-Sponge. let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); @@ -792,8 +784,8 @@ fn test_public_input_only_prover() { let mut gate = || { let res = crate::circuits::gate::CircuitGate { - coeffs: vec![], - typ: crate::circuits::gate::GateType::Zero, + coeffs: vec![Fq::one()], + typ: crate::circuits::gate::GateType::Generic, wires: std::array::from_fn(|i| crate::circuits::wires::Wire { row: idx, col: i }), }; idx += 1; @@ -804,7 +796,7 @@ fn test_public_input_only_prover() { let num_prev_challenges = 0; - let num_public_inputs = 1; + let num_public_inputs = 2; let index = { let domain = EvaluationDomains::::create(gates.len() + num_public_inputs).unwrap(); @@ -856,9 +848,8 @@ fn test_public_input_only_prover() { let prover_index = index; let prover = prover_index; - let witness = vec![]; - let public_inputs = vec![Fq::zero()]; + let public_inputs = vec![Fq::from(5u64), Fq::from(10u64)]; // add the proof to the batch let start = Instant::now(); @@ -867,13 +858,16 @@ fn test_public_input_only_prover() { let proof = ProverProof::create_recursive_public_input_only::( &group_map, - witness, + public_inputs.clone(), &prover, vec![], None, ) .unwrap(); - println!("- time to create proof: {:?}ms", start.elapsed().as_millis()); + println!( + "- time to create proof: {:?}ms", + start.elapsed().as_millis() + ); // verify the proof (propagate any errors) let start = Instant::now(); From 09a88a74ff03127a4c7423e9159451665d66c161 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 19:00:17 +0100 Subject: [PATCH 011/178] Tweak test to generate a constant function for generic selector --- kimchi/src/public_input_only_prover.rs | 32 ++++++++++++++------------ 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index b74dd6a13b..1214a6fa2e 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -780,26 +780,23 @@ fn test_public_input_only_prover() { let start = Instant::now(); - let mut idx = 0; + let num_prev_challenges = 0; - let mut gate = || { - let res = crate::circuits::gate::CircuitGate { - coeffs: vec![Fq::one()], - typ: crate::circuits::gate::GateType::Generic, - wires: std::array::from_fn(|i| crate::circuits::wires::Wire { row: idx, col: i }), - }; - idx += 1; - res - }; + let num_public_inputs = 4; - let gates = vec![gate(), gate()]; + let domain = EvaluationDomains::::create(num_public_inputs).unwrap(); - let num_prev_challenges = 0; + let mut gates = Vec::with_capacity(domain.d1.size()); - let num_public_inputs = 2; + for idx in 0..domain.d1.size() { + gates.push(crate::circuits::gate::CircuitGate { + coeffs: vec![Fq::one()], + typ: crate::circuits::gate::GateType::Generic, + wires: std::array::from_fn(|i| crate::circuits::wires::Wire { row: idx, col: i }), + }); + } let index = { - let domain = EvaluationDomains::::create(gates.len() + num_public_inputs).unwrap(); let shifts = permutation::Shifts::new(&domain.d1); let sid = shifts.map[0].clone(); let cs = ConstraintSystem { @@ -849,7 +846,12 @@ fn test_public_input_only_prover() { let prover = prover_index; - let public_inputs = vec![Fq::from(5u64), Fq::from(10u64)]; + let 
public_inputs = vec![ + Fq::from(5u64), + Fq::from(10u64), + Fq::from(15u64), + Fq::from(20u64), + ]; // add the proof to the batch let start = Instant::now(); From b34953faa35516685804a1057a3c0b6cc40622de Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 19:29:08 +0100 Subject: [PATCH 012/178] Skip unnecessary columns --- kimchi/src/public_input_only_prover.rs | 50 +------------------------- 1 file changed, 1 insertion(+), 49 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 1214a6fa2e..12eee9a217 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -5,7 +5,7 @@ use crate::{ argument::{Argument, ArgumentType}, expr::{self, l0_1, Constants, Environment}, gate::GateType, - polynomials::{foreign_field_mul, generic, permutation}, + polynomials::{generic, permutation}, wires::{COLUMNS, PERMUTS}, }, curve::KimchiCurve, @@ -267,54 +267,6 @@ where let mut index_evals = HashMap::new(); use GateType::*; index_evals.insert(Generic, &index.column_evaluations.generic_selector4); - index_evals.insert(Poseidon, &index.column_evaluations.poseidon_selector8); - index_evals.insert( - CompleteAdd, - &index.column_evaluations.complete_add_selector4, - ); - index_evals.insert(VarBaseMul, &index.column_evaluations.mul_selector8); - index_evals.insert(EndoMul, &index.column_evaluations.emul_selector8); - index_evals.insert( - EndoMulScalar, - &index.column_evaluations.endomul_scalar_selector8, - ); - - if let Some(selector) = &index.column_evaluations.range_check0_selector8.as_ref() { - index_evals.insert(GateType::RangeCheck0, selector); - } - - if let Some(selector) = &index.column_evaluations.range_check1_selector8.as_ref() { - index_evals.insert(GateType::RangeCheck1, selector); - } - - if let Some(selector) = index - .column_evaluations - .foreign_field_add_selector8 - .as_ref() - { - index_evals.insert(GateType::ForeignFieldAdd, selector); - } - - if let Some(selector) = index - .column_evaluations - .foreign_field_mul_selector8 - .as_ref() - { - index_evals.extend( - foreign_field_mul::gadget::circuit_gates() - .iter() - .enumerate() - .map(|(_, gate_type)| (*gate_type, selector)), - ); - } - - if let Some(selector) = index.column_evaluations.xor_selector8.as_ref() { - index_evals.insert(GateType::Xor16, selector); - } - - if let Some(selector) = index.column_evaluations.rot_selector8.as_ref() { - index_evals.insert(GateType::Rot64, selector); - } let mds = &G::sponge_params().mds; Environment { From 762c7511080073a33ea73bcd7a401f6e9e8f88bd Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 19:29:35 +0100 Subject: [PATCH 013/178] Hard-code the selector evaluations to the expected values --- kimchi/src/public_input_only_prover.rs | 45 ++++++++++---------------- 1 file changed, 17 insertions(+), 28 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 12eee9a217..ad50178112 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -391,18 +391,17 @@ where let zeta_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta); let zeta_omega_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta_omega); - let chunked_evals_for_selector = - |p: &Evaluations>| PointEvaluations { - zeta: vec![zeta_evals.evaluate_boolean(p)], - zeta_omega: vec![zeta_omega_evals.evaluate_boolean(p)], - }; - let chunked_evals_for_evaluations = |p: &Evaluations>| PointEvaluations { zeta: 
vec![zeta_evals.evaluate(p)], zeta_omega: vec![zeta_omega_evals.evaluate(p)], }; + let constant_evals = |x| PointEvaluations { + zeta: vec![x], + zeta_omega: vec![x], + }; + let chunked_evals = ProofEvaluations::>> { s: array::from_fn(|i| { chunked_evals_for_evaluations( @@ -410,7 +409,11 @@ where ) }), coefficients: array::from_fn(|i| { - chunked_evals_for_evaluations(&index.column_evaluations.coefficients8[i]) + if i == 0 { + constant_evals(G::ScalarField::one()) + } else { + constant_evals(G::ScalarField::zero()) + } }), w: array::from_fn(|i| { let chunked = witness_poly[i].to_chunked_polynomial(index.max_poly_size); @@ -420,32 +423,18 @@ where } }), - z: { - let chunked = z_poly.to_chunked_polynomial(index.max_poly_size); - PointEvaluations { - zeta: chunked.evaluate_chunks(zeta), - zeta_omega: chunked.evaluate_chunks(zeta_omega), - } - }, + z: constant_evals(G::ScalarField::one()), lookup_aggregation: None, lookup_table: None, lookup_sorted: array::from_fn(|_| None), runtime_lookup_table: None, - generic_selector: chunked_evals_for_selector( - &index.column_evaluations.generic_selector4, - ), - poseidon_selector: chunked_evals_for_selector( - &index.column_evaluations.poseidon_selector8, - ), - complete_add_selector: chunked_evals_for_selector( - &index.column_evaluations.complete_add_selector4, - ), - mul_selector: chunked_evals_for_selector(&index.column_evaluations.mul_selector8), - emul_selector: chunked_evals_for_selector(&index.column_evaluations.emul_selector8), - endomul_scalar_selector: chunked_evals_for_selector( - &index.column_evaluations.endomul_scalar_selector8, - ), + generic_selector: constant_evals(G::ScalarField::one()), + poseidon_selector: constant_evals(G::ScalarField::zero()), + complete_add_selector: constant_evals(G::ScalarField::zero()), + mul_selector: constant_evals(G::ScalarField::zero()), + emul_selector: constant_evals(G::ScalarField::zero()), + endomul_scalar_selector: constant_evals(G::ScalarField::zero()), range_check0_selector: None, range_check1_selector: None, From 25fff6cdd16d7e35c0f2e4716d5e7020d471973e Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 19:49:42 +0100 Subject: [PATCH 014/178] Use functions directly instead of from the index --- kimchi/src/public_input_only_prover.rs | 56 +++++++++----------------- 1 file changed, 20 insertions(+), 36 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index ad50178112..5eba7d6a61 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -403,10 +403,9 @@ where }; let chunked_evals = ProofEvaluations::>> { - s: array::from_fn(|i| { - chunked_evals_for_evaluations( - &index.column_evaluations.permutation_coefficients8[i], - ) + s: array::from_fn(|i| PointEvaluations { + zeta: vec![zeta * index.cs.shift[i]], + zeta_omega: vec![zeta_omega * index.cs.shift[i]], }), coefficients: array::from_fn(|i| { if i == 0 { @@ -611,39 +610,23 @@ where //~~ * the poseidon selector //~~ * the 15 registers/witness columns //~~ * the 6 sigmas + let one_polynomial = DensePolynomial::from_coefficients_vec(vec![G::ScalarField::one()]); + let zero_polynomial = DensePolynomial::from_coefficients_vec(vec![]); + let shifted_polys: Vec<_> = (index.cs.shift) + .iter() + .map(|shift| { + DensePolynomial::from_coefficients_vec(vec![G::ScalarField::zero(), *shift]) + }) + .collect(); polynomials.push((coefficients_form(&public_poly), None, fixed_hiding(1))); polynomials.push((coefficients_form(&ft), None, blinding_ft)); 
polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); - polynomials.push(( - evaluations_form(&index.column_evaluations.generic_selector4), - None, - fixed_hiding(1), - )); - polynomials.push(( - evaluations_form(&index.column_evaluations.poseidon_selector8), - None, - fixed_hiding(1), - )); - polynomials.push(( - evaluations_form(&index.column_evaluations.complete_add_selector4), - None, - fixed_hiding(1), - )); - polynomials.push(( - evaluations_form(&index.column_evaluations.mul_selector8), - None, - fixed_hiding(1), - )); - polynomials.push(( - evaluations_form(&index.column_evaluations.emul_selector8), - None, - fixed_hiding(1), - )); - polynomials.push(( - evaluations_form(&index.column_evaluations.endomul_scalar_selector8), - None, - fixed_hiding(1), - )); + polynomials.push((coefficients_form(&one_polynomial), None, fixed_hiding(1))); + polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.extend( witness_poly .iter() @@ -660,9 +643,10 @@ where .collect::>(), ); polynomials.extend( - index.column_evaluations.permutation_coefficients8[0..PERMUTS - 1] + shifted_polys .iter() - .map(|w| (evaluations_form(w), None, non_hiding(1))) + .take(PERMUTS - 1) + .map(|w| (coefficients_form(w), None, non_hiding(1))) .collect::>(), ); From d88137487c6762e9c0ae59c10ab895cb186edb08 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 20:24:21 +0100 Subject: [PATCH 015/178] Generate the verifier index explicitly --- kimchi/src/public_input_only_prover.rs | 84 +++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 1 deletion(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 5eba7d6a61..70a6435929 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -3,8 +3,11 @@ use crate::{ circuits::{ argument::{Argument, ArgumentType}, + constraints::FeatureFlags, + domains::EvaluationDomains, expr::{self, l0_1, Constants, Environment}, gate::GateType, + lookup::lookups::{LookupFeatures, LookupPatterns}, polynomials::{generic, permutation}, wires::{COLUMNS, PERMUTS}, }, @@ -16,6 +19,7 @@ use crate::{ PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, RecursionChallenge, }, prover_index::ProverIndex, + verifier_index::VerifierIndex, }; use ark_ec::ProjectiveCurve; use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; @@ -25,12 +29,15 @@ use ark_poly::{ }; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; use o1_utils::ExtendedDensePolynomial as _; +use once_cell::sync::OnceCell; use poly_commitment::{ commitment::{absorb_commitment, b_poly_coefficients, BlindedCommitment, PolyComm}, evaluation_proof::DensePolynomialOrEvaluations, + srs::{endos, SRS}, }; use std::array; use std::collections::HashMap; +use std::sync::Arc; /// The result of a proof creation or verification. type Result = std::result::Result; @@ -53,6 +60,76 @@ macro_rules! 
check_constraint { }}; } +pub fn verifier_index( + srs: Arc>, + domain: EvaluationDomains, + num_public_inputs: usize, + num_prev_challenges: usize, +) -> VerifierIndex { + let shifts = permutation::Shifts::new(&domain.d1); + let (endo_q, _endo_r) = endos::(); + let feature_flags = FeatureFlags { + range_check0: false, + range_check1: false, + lookup_features: LookupFeatures { + patterns: LookupPatterns { + xor: false, + lookup: false, + range_check: false, + foreign_field_mul: false, + }, + joint_lookup_used: false, + uses_runtime_tables: false, + }, + foreign_field_add: false, + foreign_field_mul: false, + xor: false, + rot: false, + }; + let (linearization, powers_of_alpha) = + crate::linearization::expr_linearization(Some(&feature_flags), true); + + let make_comm = |comm| PolyComm { + unshifted: vec![comm], + shifted: None, + }; + VerifierIndex { + domain: domain.d1, + max_poly_size: srs.g.len(), + srs: srs.clone().into(), + public: num_public_inputs, + prev_challenges: num_prev_challenges, + + sigma_comm: array::from_fn(|i| PolyComm { + unshifted: vec![srs.g[1].mul(shifts.shifts[i]).into_affine()], + shifted: None, + }), + coefficients_comm: array::from_fn(|i| make_comm(if i == 0 { srs.g[0] } else { G::zero() })), + generic_comm: make_comm(srs.g[0] + srs.h), + psm_comm: make_comm(srs.h), + complete_add_comm: make_comm(srs.h), + mul_comm: make_comm(srs.h), + emul_comm: make_comm(srs.h), + endomul_scalar_comm: make_comm(srs.h), + + range_check0_comm: None, + range_check1_comm: None, + foreign_field_add_comm: None, + foreign_field_mul_comm: None, + xor_comm: None, + rot_comm: None, + + shift: shifts.shifts.clone(), + zkpm: OnceCell::new(), + w: OnceCell::new(), + endo: endo_q, + lookup_index: None, + + linearization, + powers_of_alpha, + } +} + impl ProverProof where G::BaseField: PrimeField, @@ -766,7 +843,12 @@ fn test_public_input_only_prover() { start.elapsed().as_millis() ); - let verifier_index = index.verifier_index(); + let verifier_index = verifier_index::( + index.srs.clone(), + domain, + num_public_inputs, + num_prev_challenges, + ); let prover_index = index; let prover = prover_index; From cbdbfa387a2355345d28667df5b5f2b5b2e9516f Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 20:25:17 +0100 Subject: [PATCH 016/178] Remove unused code --- kimchi/src/public_input_only_prover.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 70a6435929..035839f0b1 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -13,7 +13,6 @@ use crate::{ }, curve::KimchiCurve, error::ProverError, - lagrange_basis_evaluations::LagrangeBasisEvaluations, plonk_sponge::FrSponge, proof::{ PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, RecursionChallenge, @@ -465,15 +464,6 @@ where //~ //~ TODO: do we want to specify more on that? 
It seems unecessary except for the t polynomial (or if for some reason someone sets that to a low value) - let zeta_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta); - let zeta_omega_evals = LagrangeBasisEvaluations::new(index.cs.domain.d1, zeta_omega); - - let chunked_evals_for_evaluations = - |p: &Evaluations>| PointEvaluations { - zeta: vec![zeta_evals.evaluate(p)], - zeta_omega: vec![zeta_omega_evals.evaluate(p)], - }; - let constant_evals = |x| PointEvaluations { zeta: vec![x], zeta_omega: vec![x], From 929b2886602fcf6dd9398927f6db117680c2ed25 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 20:25:57 +0100 Subject: [PATCH 017/178] Add more representative timing info --- kimchi/src/public_input_only_prover.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 035839f0b1..f9a99b77a1 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -833,12 +833,19 @@ fn test_public_input_only_prover() { start.elapsed().as_millis() ); + let start = Instant::now(); + let verifier_index = verifier_index::( index.srs.clone(), domain, num_public_inputs, num_prev_challenges, ); + println!( + "- time to create verifier index: {:?}ms", + start.elapsed().as_millis() + ); + let prover_index = index; let prover = prover_index; From ae68310a445a5ee9e8e0473678525c0d19e6a685 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 20:29:00 +0100 Subject: [PATCH 018/178] Remove constraints that entirely cancel out --- kimchi/src/public_input_only_prover.rs | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index f9a99b77a1..da7bf188c1 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -365,39 +365,18 @@ where } }; - let mut cache = expr::Cache::default(); - let quotient_poly = { - // generic - let t4 = { - let generic_constraint = - generic::Generic::combined_constraints(&all_alphas, &mut cache); - let generic4 = generic_constraint.evaluations(&env); - - if cfg!(debug_assertions) { - let p4 = public_poly.evaluate_over_domain_by_ref(index.cs.domain.d4); - let gen_minus_pub = &generic4 + &p4; - - check_constraint!(index, gen_minus_pub); - } - - generic4 - }; // permutation - let (t8, bnd) = { + let (f, bnd) = { let alphas = all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); let (perm, bnd) = index.perm_quot(&lagrange, beta, gamma, &z_poly, alphas)?; check_constraint!(index, perm); - (perm, bnd) + (perm.interpolate(), bnd) }; - // public polynomial - let mut f = t4.interpolate() + t8.interpolate(); - f += &public_poly; - // divide contributions with vanishing polynomial let (mut quotient, res) = f .divide_by_vanishing_poly(index.cs.domain.d1) From df134741656b8add54177ebe4fdd476aa4cbc3a5 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Fri, 18 Aug 2023 20:32:44 +0100 Subject: [PATCH 019/178] Skip linearization apart from permutation --- kimchi/src/public_input_only_prover.rs | 51 +++----------------------- 1 file changed, 5 insertions(+), 46 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index da7bf188c1..50685a0c83 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -2,13 +2,11 @@ use crate::{ circuits::{ - argument::{Argument, ArgumentType}, + 
argument::ArgumentType, constraints::FeatureFlags, domains::EvaluationDomains, - expr::{self, l0_1, Constants, Environment}, - gate::GateType, lookup::lookups::{LookupFeatures, LookupPatterns}, - polynomials::{generic, permutation}, + polynomials::permutation, wires::{COLUMNS, PERMUTS}, }, curve::KimchiCurve, @@ -35,7 +33,6 @@ use poly_commitment::{ srs::{endos, SRS}, }; use std::array; -use std::collections::HashMap; use std::sync::Arc; /// The result of a proof creation or verification. @@ -339,31 +336,6 @@ where //~ TODO: specify the split of the permutation polynomial into perm and bnd? let lagrange = index.cs.evaluate(&witness_poly, &z_poly); - let env = { - let mut index_evals = HashMap::new(); - use GateType::*; - index_evals.insert(Generic, &index.column_evaluations.generic_selector4); - - let mds = &G::sponge_params().mds; - Environment { - constants: Constants { - alpha, - beta, - gamma, - joint_combiner: None, - endo_coefficient: index.cs.endo, - mds, - }, - witness: &lagrange.d8.this.w, - coefficient: &index.column_evaluations.coefficients8, - vanishes_on_last_4_rows: &index.cs.precomputations().vanishes_on_last_4_rows, - z: &lagrange.d8.this.z, - l0_1: l0_1(index.cs.domain.d1), - domain: index.cs.domain, - index: index_evals, - lookup: None, - } - }; let quotient_poly = { // permutation @@ -512,24 +484,11 @@ where //~ This is to implement [Maller's optimization](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html). let ft: DensePolynomial = { let f_chunked = { - // TODO: compute the linearization polynomial in evaluation form so - // that we can drop the coefficient forms of the index polynomials from - // the constraint system struct - - // permutation (not part of linearization yet) let alphas = all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); - let f = index.perm_lnrz(&evals, zeta, beta, gamma, alphas); - - // the circuit polynomial - let f = { - let (_lin_constant, mut lin) = - index.linearization.to_polynomial(&env, zeta, &evals); - lin += &f; - lin.interpolate() - }; - - drop(env); + let f = index + .perm_lnrz(&evals, zeta, beta, gamma, alphas) + .interpolate(); // see https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#the-prover-side f.to_chunked_polynomial(index.max_poly_size) From a9509fb94c340666b0e3fe39364eccee53a039b5 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:11:49 +0100 Subject: [PATCH 020/178] Remove quotient_poly, hard-code to 0 --- kimchi/src/public_input_only_prover.rs | 58 +++++--------------------- 1 file changed, 10 insertions(+), 48 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 50685a0c83..673010f572 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -337,51 +337,17 @@ where let lagrange = index.cs.evaluate(&witness_poly, &z_poly); - let quotient_poly = { - // permutation - let (f, bnd) = { - let alphas = - all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); - let (perm, bnd) = index.perm_quot(&lagrange, beta, gamma, &z_poly, alphas)?; - - check_constraint!(index, perm); - - (perm.interpolate(), bnd) - }; - - // divide contributions with vanishing polynomial - let (mut quotient, res) = f - .divide_by_vanishing_poly(index.cs.domain.d1) - .ok_or(ProverError::Prover("division by vanishing polynomial"))?; - if !res.is_zero() { - return Err(ProverError::Prover( - "rest of division by vanishing polynomial", - )); - } - - quotient += &bnd; // already 
divided by Z_H - quotient - }; - //~ 1. commit (hiding) to the quotient polynomial $t$ //~ TODO: specify the dummies - let t_comm = { - let mut t_comm = index.srs.commit("ient_poly, None, rng); - - let expected_t_size = PERMUTS; - let dummies = expected_t_size - t_comm.commitment.unshifted.len(); - // Add `dummies` many hiding commitments to the 0 polynomial, since if the - // number of commitments in `t_comm` is less than the max size, it means that - // the higher degree coefficients of `t` are 0. - for _ in 0..dummies { - let w = ::rand(rng); - t_comm - .commitment - .unshifted - .push(index.srs.h.mul(w).into_affine()); - t_comm.blinders.unshifted.push(w); - } - t_comm + let t_comm = BlindedCommitment { + commitment: PolyComm { + unshifted: vec![index.srs.h; 7], + shifted: None, + }, + blinders: PolyComm { + unshifted: vec![G::ScalarField::one(); 7], + shifted: None, + }, }; //~ 1. Absorb the the commitment of the quotient polynomial with the Fq-Sponge. @@ -495,11 +461,7 @@ where .linearize(zeta_to_srs_len) }; - let t_chunked = quotient_poly - .to_chunked_polynomial(index.max_poly_size) - .linearize(zeta_to_srs_len); - - &f_chunked - &t_chunked.scale(zeta_to_domain_size - G::ScalarField::one()) + f_chunked }; //~ 1. construct the blinding part of the ft polynomial commitment From 7d7f0711b19ded87bde1d618326fff24dcd2a407 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:13:18 +0100 Subject: [PATCH 021/178] Remove unused code --- kimchi/src/public_input_only_prover.rs | 30 +------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 673010f572..f13a6fe5fa 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -19,7 +19,7 @@ use crate::{ verifier_index::VerifierIndex, }; use ark_ec::ProjectiveCurve; -use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; +use ark_ff::{Field, One, PrimeField, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, UVPolynomial, @@ -38,24 +38,6 @@ use std::sync::Arc; /// The result of a proof creation or verification. type Result = std::result::Result; -/// Helper to quickly test if a witness satisfies a constraint -macro_rules! check_constraint { - ($index:expr, $evaluation:expr) => {{ - check_constraint!($index, stringify!($evaluation), $evaluation); - }}; - ($index:expr, $label:expr, $evaluation:expr) => {{ - if cfg!(debug_assertions) { - let (_, res) = $evaluation - .interpolate_by_ref() - .divide_by_vanishing_poly($index.cs.domain.d1) - .unwrap(); - if !res.is_zero() { - panic!("couldn't divide by vanishing polynomial: {}", $label); - } - } - }}; -} - pub fn verifier_index( srs: Arc>, domain: EvaluationDomains, @@ -327,16 +309,6 @@ where let mut all_alphas = index.powers_of_alpha.clone(); all_alphas.instantiate(alpha); - //~ 1. Compute the quotient polynomial (the $t$ in $f = Z_H \cdot t$). - //~ The quotient polynomial is computed by adding all these polynomials together: - //~~ * the combined constraints for all the gates - //~~ * the combined constraints for the permutation - //~~ * the negated public polynomial - //~ and by then dividing the resulting polynomial with the vanishing polynomial $Z_H$. - //~ TODO: specify the split of the permutation polynomial into perm and bnd? - - let lagrange = index.cs.evaluate(&witness_poly, &z_poly); - //~ 1. 
commit (hiding) to the quotient polynomial $t$ //~ TODO: specify the dummies let t_comm = BlindedCommitment { From 971aa2a82af559f5974617168467ad68380d6c2d Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:21:14 +0100 Subject: [PATCH 022/178] Only evaluate the non-zero witness column --- kimchi/src/public_input_only_prover.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index f13a6fe5fa..6be73fd96e 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -371,10 +371,14 @@ where } }), w: array::from_fn(|i| { - let chunked = witness_poly[i].to_chunked_polynomial(index.max_poly_size); - PointEvaluations { - zeta: chunked.evaluate_chunks(zeta), - zeta_omega: chunked.evaluate_chunks(zeta_omega), + if i == 0 { + let chunked = witness_poly[i].to_chunked_polynomial(index.max_poly_size); + PointEvaluations { + zeta: chunked.evaluate_chunks(zeta), + zeta_omega: chunked.evaluate_chunks(zeta_omega), + } + } else { + constant_evals(G::ScalarField::zero()) } }), From a402e4eb7596260e0141506ce62af04c72fb70c3 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:33:20 +0100 Subject: [PATCH 023/178] Only commit directly to the first witness column --- kimchi/src/public_input_only_prover.rs | 32 +++++++++++++++++++------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 6be73fd96e..db2fdeaf5c 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -235,15 +235,15 @@ where //~ Note: since the witness is in evaluation form, //~ we can use the `commit_evaluation` optimization. let mut w_comm = vec![]; - for col in 0..COLUMNS { + { // witness coeff -> witness eval let witness_eval = Evaluations::>::from_vec_and_domain( - witness[col].clone(), + witness[0].clone(), index.cs.domain.d1, ); - let com = match blinders.as_ref().and_then(|b| b[col].as_ref()) { + let com = match blinders.as_ref().and_then(|b| b[0].as_ref()) { // no blinders: blind the witness None => index .srs @@ -263,6 +263,18 @@ where w_comm.push(com); } + for _ in 1..COLUMNS { + w_comm.push(BlindedCommitment { + commitment: PolyComm { + unshifted: vec![index.srs.h], + shifted: None, + }, + blinders: PolyComm { + unshifted: vec![G::ScalarField::one()], + shifted: None, + }, + }); + } let w_comm: [BlindedCommitment; COLUMNS] = w_comm .try_into() @@ -278,11 +290,15 @@ where //~ form so we can take advantage of the sparsity of the evaluations (i.e., there are many //~ 0 entries and entries that have less-than-full-size field elemnts.) let witness_poly: [DensePolynomial; COLUMNS] = array::from_fn(|i| { - Evaluations::>::from_vec_and_domain( - witness[i].clone(), - index.cs.domain.d1, - ) - .interpolate() + if i == 0 { + Evaluations::>::from_vec_and_domain( + witness[0].clone(), + index.cs.domain.d1, + ) + .interpolate() + } else { + DensePolynomial::from_coefficients_vec(vec![]) + } }); //~ 1. Sample $\beta$ with the Fq-Sponge. 
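// Why a bare `index.srs.h` can stand in for every all-zero column: with a
// Pedersen-style commitment com(f; r) = sum_i f_i * g_i + r * h, taking f = 0
// and blinding r = 1 gives com(0; 1) = h, with no MSM over the domain at all.
// A minimal sketch of the fixed commitment used for columns 1..COLUMNS
// (the binding name is illustrative, the types are those of the surrounding code):
//
//     let zero_column_comm = BlindedCommitment {
//         commitment: PolyComm { unshifted: vec![index.srs.h], shifted: None },
//         blinders: PolyComm { unshifted: vec![G::ScalarField::one()], shifted: None },
//     };
//
// so only witness column 0 ever costs a real commitment.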
From c91a14158b55c8c9665bc8b9bb38157c91529c16 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:38:07 +0100 Subject: [PATCH 024/178] Use the explicit zero polynomial for other witness columns --- kimchi/src/public_input_only_prover.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index db2fdeaf5c..9df443e42c 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -586,12 +586,13 @@ where polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials.push(( + coefficients_form(&witness_poly[0]), + None, + w_comm[0].blinders.clone(), + )); polynomials.extend( - witness_poly - .iter() - .zip(w_comm.iter()) - .map(|(w, c)| (coefficients_form(w), None, c.blinders.clone())) - .collect::>(), + (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), ); polynomials.extend( index From fac2ad1be48d01afdb3a650b13d7774991417590 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:40:44 +0100 Subject: [PATCH 025/178] Remove other witness_poly terms --- kimchi/src/public_input_only_prover.rs | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 9df443e42c..5ed423b678 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -289,17 +289,12 @@ where //~ As mentioned above, we commit using the evaluations form rather than the coefficients //~ form so we can take advantage of the sparsity of the evaluations (i.e., there are many //~ 0 entries and entries that have less-than-full-size field elemnts.) - let witness_poly: [DensePolynomial; COLUMNS] = array::from_fn(|i| { - if i == 0 { - Evaluations::>::from_vec_and_domain( - witness[0].clone(), - index.cs.domain.d1, - ) - .interpolate() - } else { - DensePolynomial::from_coefficients_vec(vec![]) - } - }); + let witness_poly: DensePolynomial = + Evaluations::>::from_vec_and_domain( + witness[0].clone(), + index.cs.domain.d1, + ) + .interpolate(); //~ 1. Sample $\beta$ with the Fq-Sponge. 
let beta = fq_sponge.challenge(); @@ -388,7 +383,7 @@ where }), w: array::from_fn(|i| { if i == 0 { - let chunked = witness_poly[i].to_chunked_polynomial(index.max_poly_size); + let chunked = witness_poly.to_chunked_polynomial(index.max_poly_size); PointEvaluations { zeta: chunked.evaluate_chunks(zeta), zeta_omega: chunked.evaluate_chunks(zeta_omega), @@ -587,7 +582,7 @@ where polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push(( - coefficients_form(&witness_poly[0]), + coefficients_form(&witness_poly), None, w_comm[0].blinders.clone(), )); From beea102ca44ead6a5a483a4137b56063b22c1ca5 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:42:14 +0100 Subject: [PATCH 026/178] Remove additional unused witness columns --- kimchi/src/public_input_only_prover.rs | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 5ed423b678..0ec3e02c6b 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -170,24 +170,6 @@ where .ok_or(ProverError::NoRoomForZkInWitness)?; witness.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); - let witness: [Vec; COLUMNS] = [ - witness, - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - vec![G::ScalarField::zero(); d1_size], - ]; - //~ 1. Setup the Fq-Sponge. let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); @@ -203,7 +185,7 @@ where //~ 1. Compute the negated public input polynomial as //~ the polynomial that evaluates to $-p_i$ for the first `public_input_size` values of the domain, //~ and $0$ for the rest. - let public = witness[0][0..index.cs.public].to_vec(); + let public = witness[0..index.cs.public].to_vec(); let public_poly = -Evaluations::>::from_vec_and_domain( public, index.cs.domain.d1, @@ -239,7 +221,7 @@ where // witness coeff -> witness eval let witness_eval = Evaluations::>::from_vec_and_domain( - witness[0].clone(), + witness.clone(), index.cs.domain.d1, ); @@ -291,7 +273,7 @@ where //~ 0 entries and entries that have less-than-full-size field elemnts.) 
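// Only column 0 carries data from here on, so a single interpolation (one
// inverse FFT over d1) replaces the COLUMNS-many interpolations of the generic
// prover. A hedged sketch of the zero polynomial that replaces the other
// columns (this is ark-poly's canonical empty-coefficient representation;
// the binding name is illustrative):
//
//     let zero: DensePolynomial<G::ScalarField> = DensePolynomial::from_coefficients_vec(vec![]);
//
// `from_coefficients_vec(vec![])` is the zero polynomial, which is why the
// remaining columns need no interpolation at all.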
let witness_poly: DensePolynomial = Evaluations::>::from_vec_and_domain( - witness[0].clone(), + witness.clone(), index.cs.domain.d1, ) .interpolate(); From 2e2a182745775900561345a157e038064f83141f Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 00:49:45 +0100 Subject: [PATCH 027/178] Use explicit coefficient polynomials --- kimchi/src/public_input_only_prover.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 0ec3e02c6b..4bcd71a423 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -525,8 +525,7 @@ where shifted: None, }; - let coefficients_form = DensePolynomialOrEvaluations::DensePolynomial; - let evaluations_form = |e| DensePolynomialOrEvaluations::Evaluations(e, index.cs.domain.d1); + let coefficients_form = DensePolynomialOrEvaluations::<_, D<_>>::DensePolynomial; let mut polynomials = polys .iter() @@ -571,13 +570,9 @@ where polynomials.extend( (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), ); + polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); polynomials.extend( - index - .column_evaluations - .coefficients8 - .iter() - .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(1))) - .collect::>(), + (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, non_hiding(1))), ); polynomials.extend( shifted_polys From 687a22ccdd92088186b2748c1a4861d49250b047 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 01:01:17 +0100 Subject: [PATCH 028/178] Label inputs to opening proof --- kimchi/src/public_input_only_prover.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 4bcd71a423..030ce1ae3a 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -537,14 +537,6 @@ where shifted: None, }; - //~ 1. 
Then, include: - //~~ * the negated public polynomial - //~~ * the ft polynomial - //~~ * the permutation aggregation polynomial z polynomial - //~~ * the generic selector - //~~ * the poseidon selector - //~~ * the 15 registers/witness columns - //~~ * the 6 sigmas let one_polynomial = DensePolynomial::from_coefficients_vec(vec![G::ScalarField::one()]); let zero_polynomial = DensePolynomial::from_coefficients_vec(vec![]); let shifted_polys: Vec<_> = (index.cs.shift) @@ -553,15 +545,21 @@ where DensePolynomial::from_coefficients_vec(vec![G::ScalarField::zero(), *shift]) }) .collect(); + // public polynomial polynomials.push((coefficients_form(&public_poly), None, fixed_hiding(1))); + // ft polynomial polynomials.push((coefficients_form(&ft), None, blinding_ft)); + // permutation aggregation polynomial polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); + // generic selector polynomials.push((coefficients_form(&one_polynomial), None, fixed_hiding(1))); + // other selectors polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + // witness columns polynomials.push(( coefficients_form(&witness_poly), None, @@ -570,10 +568,12 @@ where polynomials.extend( (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), ); + // coefficients polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); polynomials.extend( (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, non_hiding(1))), ); + // permutation coefficients polynomials.extend( shifted_polys .iter() From b90ec83c74ea60245e8b6ae1b0ecb6545386b2d9 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 01:11:19 +0100 Subject: [PATCH 029/178] Remove calculation for z_comm --- kimchi/src/public_input_only_prover.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 030ce1ae3a..5bfd7c4dd3 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -284,10 +284,17 @@ where //~ 1. Sample $\gamma$ with the Fq-Sponge. let gamma = fq_sponge.challenge(); - let z_poly = DensePolynomial::from_coefficients_vec(vec![G::ScalarField::one()]); - //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. - let z_comm = index.srs.commit(&z_poly, None, rng); + let z_comm = BlindedCommitment { + commitment: PolyComm { + unshifted: vec![index.srs.g[0]], + shifted: None, + }, + blinders: PolyComm { + unshifted: vec![G::ScalarField::zero()], + shifted: None, + }, + }; //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. 
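// With the identity permutation and no zero-knowledge rows, the aggregation
// polynomial z is identically 1, and a commitment to the constant polynomial 1
// needs no computation. A minimal check, assuming the usual Pedersen form
// com(f; r) = sum_i f_i * g_i + r * h:
//
//     // f(x) = 1 has coefficient vector [1], so
//     // com(f; 0) = 1 * srs.g[0] + 0 * srs.h = srs.g[0]
//
// which is exactly the hard-coded `unshifted: vec![index.srs.g[0]]` above.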
absorb_commitment(&mut fq_sponge, &z_comm.commitment); @@ -550,7 +557,7 @@ where // ft polynomial polynomials.push((coefficients_form(&ft), None, blinding_ft)); // permutation aggregation polynomial - polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); + polynomials.push((coefficients_form(&one_polynomial), None, z_comm.blinders)); // generic selector polynomials.push((coefficients_form(&one_polynomial), None, fixed_hiding(1))); // other selectors From e28bc1354a66420553830e9c79ffd66cd68eba1d Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 01:58:02 +0100 Subject: [PATCH 030/178] Commit only once for public input and witness polynomial --- kimchi/src/public_input_only_prover.rs | 74 ++++++-------------------- 1 file changed, 16 insertions(+), 58 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 5bfd7c4dd3..23967da59e 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -130,7 +130,6 @@ where witness, index, Vec::new(), - None, ) } @@ -151,7 +150,6 @@ where mut witness: Vec, index: &ProverIndex, prev_challenges: Vec>, - blinders: Option<[Option>; COLUMNS]>, ) -> Result { // make sure that the SRS is not smaller than the domain size let d1_size = index.cs.domain.d1.size(); @@ -185,24 +183,24 @@ where //~ 1. Compute the negated public input polynomial as //~ the polynomial that evaluates to $-p_i$ for the first `public_input_size` values of the domain, //~ and $0$ for the rest. - let public = witness[0..index.cs.public].to_vec(); - let public_poly = -Evaluations::>::from_vec_and_domain( - public, + let witness_evals = Evaluations::>::from_vec_and_domain( + witness, index.cs.domain.d1, - ) - .interpolate(); + ); + let witness_com = index + .srs + .commit_evaluations_non_hiding(index.cs.domain.d1, &witness_evals); + let witness_poly = witness_evals.interpolate(); + let public_poly = -witness_poly.clone(); //~ 1. Commit (non-hiding) to the negated public input polynomial. - let public_comm = index.srs.commit_non_hiding(&public_poly, None); - let public_comm = { - index - .srs - .mask_custom( - public_comm.clone(), - &public_comm.map(|_| G::ScalarField::one()), - ) - .unwrap() - .commitment + let public_comm = witness_com.map(|x| index.srs.h + x.neg()); + let witness_comm = BlindedCommitment { + commitment: witness_com.map(|x| x + index.srs.h), + blinders: PolyComm { + unshifted: vec![G::ScalarField::one()], + shifted: None, + }, }; //~ 1. Absorb the commitment to the public polynomial with the Fq-Sponge. @@ -216,35 +214,7 @@ where //~ //~ Note: since the witness is in evaluation form, //~ we can use the `commit_evaluation` optimization. - let mut w_comm = vec![]; - { - // witness coeff -> witness eval - let witness_eval = - Evaluations::>::from_vec_and_domain( - witness.clone(), - index.cs.domain.d1, - ); - - let com = match blinders.as_ref().and_then(|b| b[0].as_ref()) { - // no blinders: blind the witness - None => index - .srs - .commit_evaluations(index.cs.domain.d1, &witness_eval, rng), - // blinders: blind the witness with them - Some(blinder) => { - // TODO: make this a function rather no? mask_with_custom() - let witness_com = index - .srs - .commit_evaluations_non_hiding(index.cs.domain.d1, &witness_eval); - index - .srs - .mask_custom(witness_com, blinder) - .map_err(ProverError::WrongBlinders)? 
- } - }; - - w_comm.push(com); - } + let mut w_comm = vec![witness_comm]; for _ in 1..COLUMNS { w_comm.push(BlindedCommitment { commitment: PolyComm { @@ -267,17 +237,6 @@ where .iter() .for_each(|c| absorb_commitment(&mut fq_sponge, &c.commitment)); - //~ 1. Compute the witness polynomials by interpolating each `COLUMNS` of the witness. - //~ As mentioned above, we commit using the evaluations form rather than the coefficients - //~ form so we can take advantage of the sparsity of the evaluations (i.e., there are many - //~ 0 entries and entries that have less-than-full-size field elemnts.) - let witness_poly: DensePolynomial = - Evaluations::>::from_vec_and_domain( - witness.clone(), - index.cs.domain.d1, - ) - .interpolate(); - //~ 1. Sample $\beta$ with the Fq-Sponge. let beta = fq_sponge.challenge(); @@ -739,7 +698,6 @@ fn test_public_input_only_prover() { public_inputs.clone(), &prover, vec![], - None, ) .unwrap(); println!( From 65abba12e39765cbc8d3750bbf07e385f22b6eef Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 02:02:09 +0100 Subject: [PATCH 031/178] Don't store blinders for witness commitments --- kimchi/src/public_input_only_prover.rs | 32 +++++++------------------- 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 23967da59e..9c9c018f54 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -195,13 +195,7 @@ where //~ 1. Commit (non-hiding) to the negated public input polynomial. let public_comm = witness_com.map(|x| index.srs.h + x.neg()); - let witness_comm = BlindedCommitment { - commitment: witness_com.map(|x| x + index.srs.h), - blinders: PolyComm { - unshifted: vec![G::ScalarField::one()], - shifted: None, - }, - }; + let witness_comm = witness_com.map(|x| x + index.srs.h); //~ 1. Absorb the commitment to the public polynomial with the Fq-Sponge. //~ @@ -216,26 +210,20 @@ where //~ we can use the `commit_evaluation` optimization. let mut w_comm = vec![witness_comm]; for _ in 1..COLUMNS { - w_comm.push(BlindedCommitment { - commitment: PolyComm { - unshifted: vec![index.srs.h], - shifted: None, - }, - blinders: PolyComm { - unshifted: vec![G::ScalarField::one()], - shifted: None, - }, + w_comm.push(PolyComm { + unshifted: vec![index.srs.h], + shifted: None, }); } - let w_comm: [BlindedCommitment; COLUMNS] = w_comm + let w_comm: [PolyComm; COLUMNS] = w_comm .try_into() .expect("previous loop is of the correct length"); //~ 1. Absorb the witness commitments with the Fq-Sponge. w_comm .iter() - .for_each(|c| absorb_commitment(&mut fq_sponge, &c.commitment)); + .for_each(|c| absorb_commitment(&mut fq_sponge, &c)); //~ 1. Sample $\beta$ with the Fq-Sponge. 
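// Since public_poly = -witness_poly by construction, both absorbed commitments
// derive from a single unblinded MSM over the witness evaluations; only two
// group additions remain. A hedged sketch of the identities being used
// (additive notation, h the blinding base):
//
//     // public_comm  = h - com(witness; 0)   i.e. com(-witness; 1)
//     // witness_comm = h + com(witness; 0)   i.e. com( witness; 1)
//
// so one MSM serves both the public input commitment and the witness
// commitment.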
let beta = fq_sponge.challenge(); @@ -526,11 +514,7 @@ where polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); // witness columns - polynomials.push(( - coefficients_form(&witness_poly), - None, - w_comm[0].blinders.clone(), - )); + polynomials.push((coefficients_form(&witness_poly), None, fixed_hiding(1))); polynomials.extend( (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), ); @@ -561,7 +545,7 @@ where Ok(Self { commitments: ProverCommitments { - w_comm: array::from_fn(|i| w_comm[i].commitment.clone()), + w_comm, z_comm: z_comm.commitment, t_comm: t_comm.commitment, lookup: None, From 6b163c06233d5a444ab3776df74cdccca63f0a26 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 02:05:54 +0100 Subject: [PATCH 032/178] Don't store blinders for z_comm --- kimchi/src/public_input_only_prover.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 9c9c018f54..bb01b3e5bf 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -232,19 +232,13 @@ where let gamma = fq_sponge.challenge(); //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. - let z_comm = BlindedCommitment { - commitment: PolyComm { - unshifted: vec![index.srs.g[0]], - shifted: None, - }, - blinders: PolyComm { - unshifted: vec![G::ScalarField::zero()], - shifted: None, - }, + let z_comm = PolyComm { + unshifted: vec![index.srs.g[0]], + shifted: None, }; //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. - absorb_commitment(&mut fq_sponge, &z_comm.commitment); + absorb_commitment(&mut fq_sponge, &z_comm); //~ 1. Sample $\alpha'$ with the Fq-Sponge. let alpha_chal = ScalarChallenge(fq_sponge.challenge()); @@ -504,7 +498,7 @@ where // ft polynomial polynomials.push((coefficients_form(&ft), None, blinding_ft)); // permutation aggregation polynomial - polynomials.push((coefficients_form(&one_polynomial), None, z_comm.blinders)); + polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); // generic selector polynomials.push((coefficients_form(&one_polynomial), None, fixed_hiding(1))); // other selectors @@ -546,7 +540,7 @@ where Ok(Self { commitments: ProverCommitments { w_comm, - z_comm: z_comm.commitment, + z_comm, t_comm: t_comm.commitment, lookup: None, }, From d2deb9d0fc703aad4a26b37c732c091e0226e5ff Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 02:21:03 +0100 Subject: [PATCH 033/178] Explicitly expand permutation linearization --- kimchi/src/public_input_only_prover.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index bb01b3e5bf..c955a45893 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -366,19 +366,19 @@ where //~ 1. Compute the ft polynomial. //~ This is to implement [Maller's optimization](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html). 
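// Everything else in the linearization is statically zero here, so only the
// permutation argument's term against the last permutation coefficient
// survives. Since sigma_comm[PERMUTS - 1] encodes f(x) = shift[PERMUTS - 1] * x,
// the whole linearized polynomial collapses to a single degree-1 term. A
// sketch of the algebra (scalar as returned by perm_scalars below):
//
//     // ft(x) = scalar * (shift[PERMUTS - 1] * x) = (scalar * shift[PERMUTS - 1]) * x
//
// which is the two-coefficient vector built in the block below.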
let ft: DensePolynomial = { - let f_chunked = { - let alphas = - all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); - let f = index - .perm_lnrz(&evals, zeta, beta, gamma, alphas) - .interpolate(); - - // see https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#the-prover-side - f.to_chunked_polynomial(index.max_poly_size) - .linearize(zeta_to_srs_len) - }; - - f_chunked + let alphas = all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); + let scalar = + crate::circuits::constraints::ConstraintSystem::::perm_scalars( + &evals, + beta, + gamma, + alphas, + permutation::eval_zk_polynomial(index.cs.domain.d1, zeta), + ); + DensePolynomial::from_coefficients_vec(vec![ + G::ScalarField::zero(), + scalar * index.cs.shift[PERMUTS - 1], + ]) }; //~ 1. construct the blinding part of the ft polynomial commitment From b4ba2046e722c544e4efc8fd5fa5ad04831792fe Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 02:44:17 +0100 Subject: [PATCH 034/178] Remove now-unneeded prover index, run test with 2^16 circuit --- kimchi/src/public_input_only_prover.rs | 141 +++++++------------------ 1 file changed, 36 insertions(+), 105 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index c955a45893..d14b8dc832 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -15,7 +15,6 @@ use crate::{ proof::{ PointEvaluations, ProofEvaluations, ProverCommitments, ProverProof, RecursionChallenge, }, - prover_index::ProverIndex, verifier_index::VerifierIndex, }; use ark_ec::ProjectiveCurve; @@ -123,7 +122,7 @@ where >( groupmap: &G::Map, witness: Vec, - index: &ProverIndex, + index: &VerifierIndex, ) -> Result { Self::create_recursive_public_input_only::( groupmap, @@ -148,17 +147,16 @@ where >( group_map: &G::Map, mut witness: Vec, - index: &ProverIndex, + index: &VerifierIndex, prev_challenges: Vec>, ) -> Result { // make sure that the SRS is not smaller than the domain size - let d1_size = index.cs.domain.d1.size(); - if index.srs.max_degree() < d1_size { - return Err(ProverError::SRSTooSmall); - } + let d1_size = index.domain.size(); let (_, endo_r) = G::endos(); + let srs = index.srs.get().unwrap(); + // TODO: rng should be passed as arg let rng = &mut rand::rngs::OsRng; @@ -172,7 +170,7 @@ where let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); //~ 1. Absorb the digest of the VerifierIndex. - let verifier_index_digest = index.verifier_index_digest::(); + let verifier_index_digest = index.digest::(); fq_sponge.absorb_fq(&[verifier_index_digest]); //~ 1. Absorb the commitments of the previous challenges with the Fq-sponge. @@ -185,17 +183,15 @@ where //~ and $0$ for the rest. let witness_evals = Evaluations::>::from_vec_and_domain( witness, - index.cs.domain.d1, + index.domain, ); - let witness_com = index - .srs - .commit_evaluations_non_hiding(index.cs.domain.d1, &witness_evals); + let witness_com = srs.commit_evaluations_non_hiding(index.domain, &witness_evals); let witness_poly = witness_evals.interpolate(); let public_poly = -witness_poly.clone(); //~ 1. Commit (non-hiding) to the negated public input polynomial. - let public_comm = witness_com.map(|x| index.srs.h + x.neg()); - let witness_comm = witness_com.map(|x| x + index.srs.h); + let public_comm = witness_com.map(|x| srs.h + x.neg()); + let witness_comm = witness_com.map(|x| x + srs.h); //~ 1. Absorb the commitment to the public polynomial with the Fq-Sponge. 
//~ @@ -211,7 +207,7 @@ where let mut w_comm = vec![witness_comm]; for _ in 1..COLUMNS { w_comm.push(PolyComm { - unshifted: vec![index.srs.h], + unshifted: vec![srs.h], shifted: None, }); } @@ -233,7 +229,7 @@ where //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. let z_comm = PolyComm { - unshifted: vec![index.srs.g[0]], + unshifted: vec![srs.g[0]], shifted: None, }; @@ -254,7 +250,7 @@ where //~ TODO: specify the dummies let t_comm = BlindedCommitment { commitment: PolyComm { - unshifted: vec![index.srs.h; 7], + unshifted: vec![srs.h; 7], shifted: None, }, blinders: PolyComm { @@ -272,7 +268,7 @@ where //~ 1. Derive $\zeta$ from $\zeta'$ using the endomorphism (TODO: specify) let zeta = zeta_chal.to_field(endo_r); - let omega = index.cs.domain.d1.group_gen; + let omega = index.domain.group_gen; let zeta_omega = zeta * omega; //~ 1. Chunk evaluate the following polynomials at both $\zeta$ and $\zeta \omega$: @@ -301,8 +297,8 @@ where let chunked_evals = ProofEvaluations::>> { s: array::from_fn(|i| PointEvaluations { - zeta: vec![zeta * index.cs.shift[i]], - zeta_omega: vec![zeta_omega * index.cs.shift[i]], + zeta: vec![zeta * index.shift[i]], + zeta_omega: vec![zeta_omega * index.shift[i]], }), coefficients: array::from_fn(|i| { if i == 0 { @@ -373,11 +369,11 @@ where beta, gamma, alphas, - permutation::eval_zk_polynomial(index.cs.domain.d1, zeta), + permutation::eval_zk_polynomial(index.domain, zeta), ); DensePolynomial::from_coefficients_vec(vec![ G::ScalarField::zero(), - scalar * index.cs.shift[PERMUTS - 1], + scalar * index.shift[PERMUTS - 1], ]) }; @@ -487,7 +483,8 @@ where let one_polynomial = DensePolynomial::from_coefficients_vec(vec![G::ScalarField::one()]); let zero_polynomial = DensePolynomial::from_coefficients_vec(vec![]); - let shifted_polys: Vec<_> = (index.cs.shift) + let shifted_polys: Vec<_> = index + .shift .iter() .map(|shift| { DensePolynomial::from_coefficients_vec(vec![G::ScalarField::zero(), *shift]) @@ -527,7 +524,7 @@ where ); //~ 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. 
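// All evaluation claims are folded into a single IPA opening: the listed
// polynomials are combined with powers of v, and the two evaluation points
// zeta and zeta * omega with u, so proof size and verifier cost stay those of
// one opening. A hedged sketch of the call shape (argument names abbreviated;
// see poly-commitment's `SRS::open`):
//
//     // srs.open(group_map, &polynomials, &[zeta, zeta_omega], v, u, fq_sponge, rng)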
- let proof = index.srs.open( + let proof = srs.open( group_map, &polynomials, &[zeta, zeta_omega], @@ -554,25 +551,14 @@ where #[test] fn test_public_input_only_prover() { - use crate::{ - circuits::{ - constraints::{ConstraintSystem, FeatureFlags}, - domains::EvaluationDomains, - lookup::lookups::{LookupFeatures, LookupPatterns}, - }, - verifier::verify, - }; + use crate::{circuits::domains::EvaluationDomains, verifier::verify}; use groupmap::GroupMap; - use mina_curves::pasta::{Fq, Pallas, PallasParameters, Vesta}; + use mina_curves::pasta::{Fq, Pallas, PallasParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; - use once_cell::sync::OnceCell; - use poly_commitment::{ - commitment::CommitmentCurve, - srs::{endos, SRS}, - }; + use poly_commitment::{commitment::CommitmentCurve, srs::SRS}; use std::{sync::Arc, time::Instant}; type SpongeParams = PlonkSpongeConstantsKimchi; @@ -581,84 +567,29 @@ fn test_public_input_only_prover() { let start = Instant::now(); - let num_prev_challenges = 0; - - let num_public_inputs = 4; + let circuit_size = (2 << 16) - 1; - let domain = EvaluationDomains::::create(num_public_inputs).unwrap(); + let domain = EvaluationDomains::::create(circuit_size).unwrap(); - let mut gates = Vec::with_capacity(domain.d1.size()); + let mut srs = SRS::::create(domain.d1.size()); + srs.add_lagrange_basis(domain.d1); + let srs = Arc::new(srs); - for idx in 0..domain.d1.size() { - gates.push(crate::circuits::gate::CircuitGate { - coeffs: vec![Fq::one()], - typ: crate::circuits::gate::GateType::Generic, - wires: std::array::from_fn(|i| crate::circuits::wires::Wire { row: idx, col: i }), - }); - } + println!("- time to create srs: {:?}ms", start.elapsed().as_millis()); - let index = { - let shifts = permutation::Shifts::new(&domain.d1); - let sid = shifts.map[0].clone(); - let cs = ConstraintSystem { - domain, - public: num_public_inputs, - prev_challenges: num_prev_challenges, - sid, - gates, - shift: shifts.shifts, - endo: Fq::zero(), - lookup_constraint_system: None, - feature_flags: FeatureFlags { - range_check0: false, - range_check1: false, - lookup_features: LookupFeatures { - patterns: LookupPatterns { - xor: false, - lookup: false, - range_check: false, - foreign_field_mul: false, - }, - joint_lookup_used: false, - uses_runtime_tables: false, - }, - foreign_field_add: false, - foreign_field_mul: false, - xor: false, - rot: false, - }, - precomputations: OnceCell::new(), - disable_gates_checks: false, - }; - let mut srs = SRS::::create(cs.domain.d1.size()); - srs.add_lagrange_basis(cs.domain.d1); - let srs = Arc::new(srs); + let start = Instant::now(); - let (endo_q, _endo_r) = endos::(); - ProverIndex::::create(cs, endo_q, srs) - }; - println!( - "- time to create prover index: {:?}ms", - start.elapsed().as_millis() - ); + let num_prev_challenges = 0; - let start = Instant::now(); + let num_public_inputs = 4; - let verifier_index = verifier_index::( - index.srs.clone(), - domain, - num_public_inputs, - num_prev_challenges, - ); + let verifier_index = + verifier_index::(srs, domain, num_public_inputs, num_prev_challenges); println!( "- time to create verifier index: {:?}ms", start.elapsed().as_millis() ); - let prover_index = index; - - let prover = prover_index; - let public_inputs = vec![ Fq::from(5u64), Fq::from(10u64), @@ -674,7 +605,7 @@ fn test_public_input_only_prover() { let proof = ProverProof::create_recursive_public_input_only::( &group_map, public_inputs.clone(), - &prover, + &verifier_index, 
vec![], ) .unwrap(); From 8e3bee18cb44ce21c36c66966473944bd0adc2a2 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 02:55:21 +0100 Subject: [PATCH 035/178] Tweak hiding in verifier index to avoid zeroed commitments --- kimchi/src/public_input_only_prover.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index d14b8dc832..fd16eec0e6 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -81,8 +81,8 @@ pub fn verifier_index( unshifted: vec![srs.g[1].mul(shifts.shifts[i]).into_affine()], shifted: None, }), - coefficients_comm: array::from_fn(|i| make_comm(if i == 0 { srs.g[0] } else { G::zero() })), - generic_comm: make_comm(srs.g[0] + srs.h), + coefficients_comm: array::from_fn(|i| make_comm(if i == 0 { srs.g[0] } else { srs.h })), + generic_comm: make_comm(srs.g[0]), psm_comm: make_comm(srs.h), complete_add_comm: make_comm(srs.h), mul_comm: make_comm(srs.h), @@ -497,7 +497,7 @@ where // permutation aggregation polynomial polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); // generic selector - polynomials.push((coefficients_form(&one_polynomial), None, fixed_hiding(1))); + polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); // other selectors polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); @@ -512,7 +512,7 @@ where // coefficients polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); polynomials.extend( - (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, non_hiding(1))), + (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), ); // permutation coefficients polynomials.extend( From 110b71abd770c2e8b586c99ade3bfc42a812f4c1 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 13:13:45 +0100 Subject: [PATCH 036/178] Comments for public_input_only_prover --- kimchi/src/public_input_only_prover.rs | 554 +++++++++++++------------ 1 file changed, 278 insertions(+), 276 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index fd16eec0e6..a5af4e3992 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -1,4 +1,9 @@ -//! This module implements prover's zk-proof primitive. +//! This module implements a prover specialized to generate a proof where only the public input is +//! considered. All other gates are disabled, as well as zero-knowledge, and the permutation +//! argument is hard-coded as the identity function, to minimize the overhead of proving. +//! +//! Proofs generated by this module are explicitly designed to be compatible with the kimchi +//! verifier, and hence the pickles verifier circuit. use crate::{ circuits::{ @@ -37,6 +42,8 @@ use std::sync::Arc; /// The result of a proof creation or verification. type Result = std::result::Result; +/// Creates a hard-coded verifier index that proofs by this proofs generated by +/// `create_recursive_public_input_only` will satisfy. pub fn verifier_index( srs: Arc>, domain: EvaluationDomains, @@ -45,6 +52,7 @@ pub fn verifier_index( ) -> VerifierIndex { let shifts = permutation::Shifts::new(&domain.d1); let (endo_q, _endo_r) = endos::(); + // TODO: Create `FeatureFlags::default`, and use it here and elsewhere. 
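// Every fixed commitment in this index is either srs.g[0] (the constant
// polynomial 1, unblinded) or srs.h (the zero polynomial with blinding factor
// 1); the point at infinity never appears, so every commitment keeps an affine
// representation. A minimal sketch of the helper this relies on (mirroring the
// `make_comm` used below; single-chunk commitments assumed):
//
//     let make_comm = |point: G| PolyComm { unshifted: vec![point], shifted: None };
//
// Disabled selectors then become make_comm(srs.h) rather than a zeroed point.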
let feature_flags = FeatureFlags { range_check0: false, range_check1: false, @@ -78,17 +86,34 @@ pub fn verifier_index( prev_challenges: num_prev_challenges, sigma_comm: array::from_fn(|i| PolyComm { + // Encodes the polynomial `f(x) = x * shifts[i]`, with no blinding. + // This represents the identity permutation. unshifted: vec![srs.g[1].mul(shifts.shifts[i]).into_affine()], shifted: None, }), - coefficients_comm: array::from_fn(|i| make_comm(if i == 0 { srs.g[0] } else { srs.h })), + coefficients_comm: array::from_fn(|i| { + make_comm(if i == 0 { + // The polynomial `f(x) = 1`, without blinding. + srs.g[0] + } else { + // The polynomial `f(x) = 0`, with blinding factor 1. + // This blinding allows us to represent the commitment in affine coordinates. + srs.h + }) + }), + // The polynomial `f(x) = 1`, without blinding. + // The generic gate is enabled on every row; combined with the coefficients, and with the + // mixin for public inputs, this encodes the equation `witness[0] = public_input`. generic_comm: make_comm(srs.g[0]), + // The polynomials `f(x) = 0`, with blinding factor 1. + // This disables these gates. psm_comm: make_comm(srs.h), complete_add_comm: make_comm(srs.h), mul_comm: make_comm(srs.h), emul_comm: make_comm(srs.h), endomul_scalar_comm: make_comm(srs.h), + // Disable all optional gates explicitly. range_check0_comm: None, range_check1_comm: None, foreign_field_add_comm: None, @@ -111,11 +136,14 @@ impl ProverProof where G::BaseField: PrimeField, { - /// This function constructs prover's zk-proof from the witness & the `ProverIndex` against SRS instance + /// Generate a proof where the witness column is identical to the public input, and all other + /// columns are 0. + /// Proofs generated by this function are compatible with the kimchi verifier, and hence the + /// pickles verifier circuit. /// /// # Errors /// - /// Will give error if `create_recursive` process fails. + /// Will give error if `create_recursive_public_input_only` process fails. pub fn create_public_input_only< EFqSponge: Clone + FqSponge, EFrSponge: FrSponge, @@ -132,15 +160,15 @@ where ) } - /// This function constructs prover's recursive zk-proof from the witness & the `ProverIndex` against SRS instance + /// Generate a proof where the witness column is identical to the public input, and all other + /// columns are 0, including any recursion challenges provided. + /// Proofs generated by this function are compatible with the kimchi verifier, and hence the + /// pickles verifier circuit. /// /// # Errors /// - /// Will give error if inputs(like `lookup_context.joint_lookup_table_d8`) are None. - /// - /// # Panics - /// - /// Will panic if `lookup_context.joint_lookup_table_d8` is None. + /// Will give an error if the witness vector is too large for the domain specified in the + /// verifier index. 
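/// A sketch of intended usage, mirroring the test at the bottom of this file
/// (the sponge types shown are the Pallas configuration used there, and are
/// assumptions for any other curve):
///
///     let proof = ProverProof::create_recursive_public_input_only::<
///         DefaultFqSponge<PallasParameters, SpongeParams>,
///         DefaultFrSponge<Fq, SpongeParams>,
///     >(&group_map, public_inputs.clone(), &verifier_index, vec![])?;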
pub fn create_recursive_public_input_only< EFqSponge: Clone + FqSponge, EFrSponge: FrSponge, @@ -150,117 +178,112 @@ where index: &VerifierIndex, prev_challenges: Vec>, ) -> Result { - // make sure that the SRS is not smaller than the domain size let d1_size = index.domain.size(); let (_, endo_r) = G::endos(); let srs = index.srs.get().unwrap(); - // TODO: rng should be passed as arg - let rng = &mut rand::rngs::OsRng; - - let length_witness = witness.len(); - let length_padding = d1_size - .checked_sub(length_witness) - .ok_or(ProverError::NoRoomForZkInWitness)?; - witness.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); + // Pad the witness to the full domain size, or raise an error if the witness is too large. + { + let length_witness = witness.len(); + let length_padding = d1_size + .checked_sub(length_witness) + .ok_or(ProverError::NoRoomForZkInWitness)?; + witness.extend(std::iter::repeat(G::ScalarField::zero()).take(length_padding)); + } - //~ 1. Setup the Fq-Sponge. let mut fq_sponge = EFqSponge::new(G::OtherCurve::sponge_params()); - //~ 1. Absorb the digest of the VerifierIndex. + // TODO: This could be cached for most of the relevant use-cases. let verifier_index_digest = index.digest::(); fq_sponge.absorb_fq(&[verifier_index_digest]); - //~ 1. Absorb the commitments of the previous challenges with the Fq-sponge. for RecursionChallenge { comm, .. } in &prev_challenges { absorb_commitment(&mut fq_sponge, comm) } - //~ 1. Compute the negated public input polynomial as - //~ the polynomial that evaluates to $-p_i$ for the first `public_input_size` values of the domain, - //~ and $0$ for the rest. - let witness_evals = Evaluations::>::from_vec_and_domain( - witness, - index.domain, - ); - let witness_com = srs.commit_evaluations_non_hiding(index.domain, &witness_evals); - let witness_poly = witness_evals.interpolate(); + let (witness_poly, unblinded_witness_comm) = { + let witness_evals = + Evaluations::>::from_vec_and_domain( + witness, + index.domain, + ); + // We commit using evaluations, because nearly all will be 0, and so we can skip most of + // the domain size. + let unblinded_witness_comm = + srs.commit_evaluations_non_hiding(index.domain, &witness_evals); + (witness_evals.interpolate(), unblinded_witness_comm) + }; + + // The goal of this circuit is to represent that `witness[0] = public`, so we can + // explicitly compute the negated public polynomial from the witness. let public_poly = -witness_poly.clone(); - //~ 1. Commit (non-hiding) to the negated public input polynomial. - let public_comm = witness_com.map(|x| srs.h + x.neg()); - let witness_comm = witness_com.map(|x| x + srs.h); - - //~ 1. Absorb the commitment to the public polynomial with the Fq-Sponge. - //~ - //~ Note: unlike the original PLONK protocol, - //~ the prover also provides evaluations of the public polynomial to help the verifier circuit. - //~ This is why we need to absorb the commitment to the public polynomial at this point. - absorb_commitment(&mut fq_sponge, &public_comm); - - //~ 1. Commit to the witness columns by creating `COLUMNS` hidding commitments. - //~ - //~ Note: since the witness is in evaluation form, - //~ we can use the `commit_evaluation` optimization. - let mut w_comm = vec![witness_comm]; - for _ in 1..COLUMNS { - w_comm.push(PolyComm { - unshifted: vec![srs.h], - shifted: None, - }); + // Create and absorb a blinded commitment to the negated public polynomial. We use blinding factor 1 + // to keep compatibility with the logic in the kimchi verifier. 
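// The kimchi verifier reconstructs the public input commitment with a fixed
// blinder of 1 (it masks via `srs.mask_custom` with an all-ones blinding
// commitment, as in the code removed a few patches earlier), so the prover
// must use exactly that blinder or the aggregated opening check fails. In
// additive notation the absorbed point is:
//
//     // public_comm = h + com(-witness; 0) = h - unblinded_witness_comm
//
// matching the `map(|x| srs.h + x.neg())` in the block below.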
+ { + let public_comm = unblinded_witness_comm.map(|x| srs.h + x.neg()); + absorb_commitment(&mut fq_sponge, &public_comm); } - let w_comm: [PolyComm; COLUMNS] = w_comm - .try_into() - .expect("previous loop is of the correct length"); + let w_comm: [PolyComm; COLUMNS] = { + let mut w_comm = Vec::with_capacity(COLUMNS); + + // Blind the witness commitment with blinding factor 1, to allow for a zero public input + // vector. + w_comm.push(unblinded_witness_comm.map(|x| x + srs.h)); + + for _ in 1..COLUMNS { + w_comm.push(PolyComm { + unshifted: vec![srs.h], // `f(x) = 0` with blinding factor 1 + shifted: None, + }); + } + + w_comm + .try_into() + .expect("previous loop is of the correct length") + }; - //~ 1. Absorb the witness commitments with the Fq-Sponge. w_comm .iter() .for_each(|c| absorb_commitment(&mut fq_sponge, &c)); - //~ 1. Sample $\beta$ with the Fq-Sponge. let beta = fq_sponge.challenge(); - - //~ 1. Sample $\gamma$ with the Fq-Sponge. let gamma = fq_sponge.challenge(); - //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. - let z_comm = PolyComm { - unshifted: vec![srs.g[0]], - shifted: None, - }; - - //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. - absorb_commitment(&mut fq_sponge, &z_comm); - - //~ 1. Sample $\alpha'$ with the Fq-Sponge. - let alpha_chal = ScalarChallenge(fq_sponge.challenge()); - - //~ 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details) - let alpha: G::ScalarField = alpha_chal.to_field(endo_r); - - //~ 1. TODO: instantiate alpha? - let mut all_alphas = index.powers_of_alpha.clone(); - all_alphas.instantiate(alpha); - - //~ 1. commit (hiding) to the quotient polynomial $t$ - //~ TODO: specify the dummies - let t_comm = BlindedCommitment { - commitment: PolyComm { - unshifted: vec![srs.h; 7], + let z_comm = { + // Due to the identity permutation, we know statically that all of the non-zk rows will + // evaluate to 1. Since we also don't care about zero-knowledge here, we can use the + // constant polynomial `f(x) = 1` as a satisfying instance. + let z_comm = PolyComm { + unshifted: vec![srs.g[0]], // `f(x) = 1` with no blinding. shifted: None, - }, - blinders: PolyComm { - unshifted: vec![G::ScalarField::one(); 7], - shifted: None, - }, + }; + absorb_commitment(&mut fq_sponge, &z_comm); + z_comm }; - //~ 1. Absorb the the commitment of the quotient polynomial with the Fq-Sponge. - absorb_commitment(&mut fq_sponge, &t_comm.commitment); + let alpha = ScalarChallenge(fq_sponge.challenge()).to_field(endo_r); + + let t_comm = { + // All polynomials in the circuit evaluate to exactly the 0 polynomial. + // We use this fact to ommit the calculation of the quotient and emit a (blinded) zero + // commitment directly. + let t_comm = BlindedCommitment { + commitment: PolyComm { + unshifted: vec![srs.h; 7], // `f(x) = 0`, with blinding factor 1, in 7 chunks. + shifted: None, + }, + blinders: PolyComm { + unshifted: vec![G::ScalarField::one(); 7], + shifted: None, + }, + }; + absorb_commitment(&mut fq_sponge, &t_comm.commitment); + t_comm + }; //~ 1. Sample $\zeta'$ with the Fq-Sponge. let zeta_chal = ScalarChallenge(fq_sponge.challenge()); @@ -271,86 +294,82 @@ where let omega = index.domain.group_gen; let zeta_omega = zeta * omega; - //~ 1. 
Chunk evaluate the following polynomials at both $\zeta$ and $\zeta \omega$: - //~~ * $s_i$ - //~~ * $w_i$ - //~~ * $z$ - //~~ * lookup (TODO) - //~~ * generic selector - //~~ * poseidon selector - //~ - //~ By "chunk evaluate" we mean that the evaluation of each polynomial can potentially be a vector of values. - //~ This is because the index's `max_poly_size` parameter dictates the maximum size of a polynomial in the protocol. - //~ If a polynomial $f$ exceeds this size, it must be split into several polynomials like so: - //~ $$f(x) = f_0(x) + x^n f_1(x) + x^{2n} f_2(x) + \cdots$$ - //~ - //~ And the evaluation of such a polynomial is the following list for $x \in {\zeta, \zeta\omega}$: - //~ - //~ $$(f_0(x), f_1(x), f_2(x), \ldots)$$ - //~ - //~ TODO: do we want to specify more on that? It seems unecessary except for the t polynomial (or if for some reason someone sets that to a low value) - - let constant_evals = |x| PointEvaluations { - zeta: vec![x], - zeta_omega: vec![x], - }; + let chunked_evals = { + let constant_evals = |x| PointEvaluations { + zeta: vec![x], + zeta_omega: vec![x], + }; - let chunked_evals = ProofEvaluations::>> { - s: array::from_fn(|i| PointEvaluations { - zeta: vec![zeta * index.shift[i]], - zeta_omega: vec![zeta_omega * index.shift[i]], - }), - coefficients: array::from_fn(|i| { - if i == 0 { - constant_evals(G::ScalarField::one()) - } else { - constant_evals(G::ScalarField::zero()) - } - }), - w: array::from_fn(|i| { - if i == 0 { - let chunked = witness_poly.to_chunked_polynomial(index.max_poly_size); + ProofEvaluations::>> { + s: array::from_fn(|i| { + // Inlined computations of `f(x) = x * shift[i]`. PointEvaluations { - zeta: chunked.evaluate_chunks(zeta), - zeta_omega: chunked.evaluate_chunks(zeta_omega), + zeta: vec![zeta * index.shift[i]], + zeta_omega: vec![zeta_omega * index.shift[i]], } - } else { - constant_evals(G::ScalarField::zero()) - } - }), - - z: constant_evals(G::ScalarField::one()), - - lookup_aggregation: None, - lookup_table: None, - lookup_sorted: array::from_fn(|_| None), - runtime_lookup_table: None, - generic_selector: constant_evals(G::ScalarField::one()), - poseidon_selector: constant_evals(G::ScalarField::zero()), - complete_add_selector: constant_evals(G::ScalarField::zero()), - mul_selector: constant_evals(G::ScalarField::zero()), - emul_selector: constant_evals(G::ScalarField::zero()), - endomul_scalar_selector: constant_evals(G::ScalarField::zero()), - - range_check0_selector: None, - range_check1_selector: None, - foreign_field_add_selector: None, - foreign_field_mul_selector: None, - xor_selector: None, - rot_selector: None, - runtime_lookup_table_selector: None, - xor_lookup_selector: None, - lookup_gate_lookup_selector: None, - range_check_lookup_selector: None, - foreign_field_mul_lookup_selector: None, + }), + coefficients: array::from_fn(|i| { + if i == 0 { + // The first coefficient column represents `f(x) = 1`. + constant_evals(G::ScalarField::one()) + } else { + // The remaining coefficient columns represent `f(x) = 0`. + constant_evals(G::ScalarField::zero()) + } + }), + w: array::from_fn(|i| { + if i == 0 { + // Compute the evaluations for our non-zero witness column. + let chunked = witness_poly.to_chunked_polynomial(index.max_poly_size); + PointEvaluations { + zeta: chunked.evaluate_chunks(zeta), + zeta_omega: chunked.evaluate_chunks(zeta_omega), + } + } else { + // The rest of the witness columns are 0, by construction. 
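// A zero polynomial evaluates to 0 at every point, so both requested
// evaluations can be written down directly, without touching any polynomial.
// A sketch of what the helper expands to for these columns (constant_evals as
// defined at the top of this block):
//
//     // constant_evals(0) == PointEvaluations { zeta: vec![0], zeta_omega: vec![0] }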
+ constant_evals(G::ScalarField::zero()) + } + }), + + // As above in `z_comm`, we have selected the polynomial `f(x) = 1` as our + // satisfying witness, so we can hard-code the evaluation 1 here. + z: constant_evals(G::ScalarField::one()), + + // Enabled on every row, via `f(x) = 1`. + generic_selector: constant_evals(G::ScalarField::one()), + // Disabled everywhere, via `f(x) = 0`. + poseidon_selector: constant_evals(G::ScalarField::zero()), + complete_add_selector: constant_evals(G::ScalarField::zero()), + mul_selector: constant_evals(G::ScalarField::zero()), + emul_selector: constant_evals(G::ScalarField::zero()), + endomul_scalar_selector: constant_evals(G::ScalarField::zero()), + + // All optional gates are disabled. + range_check0_selector: None, + range_check1_selector: None, + foreign_field_add_selector: None, + foreign_field_mul_selector: None, + xor_selector: None, + rot_selector: None, + runtime_lookup_table_selector: None, + xor_lookup_selector: None, + lookup_gate_lookup_selector: None, + range_check_lookup_selector: None, + foreign_field_mul_lookup_selector: None, + + // The lookup argument is disabled. + lookup_aggregation: None, + lookup_table: None, + lookup_sorted: array::from_fn(|_| None), + runtime_lookup_table: None, + } }; let zeta_to_srs_len = zeta.pow([index.max_poly_size as u64]); let zeta_omega_to_srs_len = zeta_omega.pow([index.max_poly_size as u64]); let zeta_to_domain_size = zeta.pow([d1_size as u64]); - //~ 1. Evaluate the same polynomials without chunking them - //~ (so that each polynomial should correspond to a single value this time). + // TODO: We know statically that all chunks are of size 1, so this is technically unnecessary. let evals = { let powers_of_eval_points_for_chunks = PointEvaluations { zeta: zeta_to_srs_len, @@ -359,9 +378,13 @@ where chunked_evals.combine(&powers_of_eval_points_for_chunks) }; - //~ 1. Compute the ft polynomial. - //~ This is to implement [Maller's optimization](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html). + // Compute the difference between the linearization polynomial and `(zeta^n - 1) * quotient`. let ft: DensePolynomial = { + // We know statically that the quotient polynomial is 0, and the only linearized part + // of the proof is the permutation argument, so we compute that part of the + // linearization explicitly here. + let mut all_alphas = index.powers_of_alpha.clone(); + all_alphas.instantiate(alpha); let alphas = all_alphas.get_alphas(ArgumentType::Permutation, permutation::CONSTRAINTS); let scalar = crate::circuits::constraints::ConstraintSystem::::perm_scalars( @@ -371,14 +394,17 @@ where alphas, permutation::eval_zk_polynomial(index.domain, zeta), ); + + // Construct the linearized polynomial `scalar * permutation_coefficients[PERMUS-1]` + // explicitly. In particular, since we know that + // `permutation_coefficients[PERMUTS-1](x) = x * shifts[PERMUTS-1]`, we know that the + // desired polynomial will be `f(x) = x * (scalar * shifts[PERMUTS-1])`. DensePolynomial::from_coefficients_vec(vec![ G::ScalarField::zero(), scalar * index.shift[PERMUTS - 1], ]) }; - //~ 1. construct the blinding part of the ft polynomial commitment - //~ [see this section](https://o1-labs.github.io/mina-book/crypto/plonk/maller_15.html#evaluation-proof-and-blinding-factors) let blinding_ft = { let blinding_t = t_comm.blinders.chunk_blinding(zeta_to_srs_len); let blinding_f = G::ScalarField::zero(); @@ -392,95 +418,52 @@ where } }; - //~ 1. Evaluate the ft polynomial at $\zeta\omega$ only. 
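// ft is a degree-1 polynomial here, so this evaluation is a single field
// multiplication rather than a full polynomial evaluation. A sketch of the
// identity, with the coefficient vector [0, scalar * shift[PERMUTS - 1]]
// constructed above:
//
//     // ft(zeta * omega) = scalar * shift[PERMUTS - 1] * (zeta * omega)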
let ft_eval1 = ft.evaluate(&zeta_omega); - //~ 1. Setup the Fr-Sponge let fq_sponge_before_evaluations = fq_sponge.clone(); - let mut fr_sponge = EFrSponge::new(G::sponge_params()); - - //~ 1. Squeeze the Fq-sponge and absorb the result with the Fr-Sponge. - fr_sponge.absorb(&fq_sponge.digest()); - //~ 1. Absorb the previous recursion challenges. - let prev_challenge_digest = { - // Note: we absorb in a new sponge here to limit the scope in which we need the - // more-expensive 'optional sponge'. + let mut fr_sponge = { let mut fr_sponge = EFrSponge::new(G::sponge_params()); - for RecursionChallenge { chals, .. } in &prev_challenges { - fr_sponge.absorb_multiple(chals); - } - fr_sponge.digest() + fr_sponge.absorb(&fq_sponge.digest()); + fr_sponge }; - fr_sponge.absorb(&prev_challenge_digest); - - //~ 1. Compute evaluations for the previous recursion challenges. - let polys = prev_challenges - .iter() - .map(|RecursionChallenge { chals, comm }| { - ( - DensePolynomial::from_coefficients_vec(b_poly_coefficients(chals)), - comm.unshifted.len(), - ) - }) - .collect::>(); - //~ 1. Evaluate the negated public polynomial (if present) at $\zeta$ and $\zeta\omega$. - let public_evals = if public_poly.is_zero() { - [vec![G::ScalarField::zero()], vec![G::ScalarField::zero()]] - } else { - [ - vec![public_poly.evaluate(&zeta)], - vec![public_poly.evaluate(&zeta_omega)], - ] - }; + { + let prev_challenge_digest = { + let mut fr_sponge = EFrSponge::new(G::sponge_params()); + for RecursionChallenge { chals, .. } in &prev_challenges { + fr_sponge.absorb_multiple(chals); + } + fr_sponge.digest() + }; + fr_sponge.absorb(&prev_challenge_digest); + } - //~ 1. Absorb the unique evaluation of ft: $ft(\zeta\omega)$. fr_sponge.absorb(&ft_eval1); - //~ 1. Absorb all the polynomial evaluations in $\zeta$ and $\zeta\omega$: - //~~ * the public polynomial - //~~ * z - //~~ * generic selector - //~~ * poseidon selector - //~~ * the 15 register/witness - //~~ * 6 sigmas evaluations (the last one is not evaluated) - fr_sponge.absorb_multiple(&public_evals[0]); - fr_sponge.absorb_multiple(&public_evals[1]); - fr_sponge.absorb_evaluations(&chunked_evals); - - //~ 1. Sample $v'$ with the Fr-Sponge - let v_chal = fr_sponge.challenge(); - - //~ 1. Derive $v$ from $v'$ using the endomorphism (TODO: specify) - let v = v_chal.to_field(endo_r); - - //~ 1. Sample $u'$ with the Fr-Sponge - let u_chal = fr_sponge.challenge(); - - //~ 1. Derive $u$ from $u'$ using the endomorphism (TODO: specify) - let u = u_chal.to_field(endo_r); + { + let public_input_eval_zeta = vec![-evals.w[0].zeta]; + fr_sponge.absorb_multiple(&public_input_eval_zeta); + let public_input_eval_zeta_omega = vec![-evals.w[0].zeta_omega]; + fr_sponge.absorb_multiple(&public_input_eval_zeta_omega); + } - //~ 1. Create a list of all polynomials that will require evaluations - //~ (and evaluation proofs) in the protocol. - //~ First, include the previous challenges, in case we are in a recursive prover. - let non_hiding = |d1_size: usize| PolyComm { - unshifted: vec![G::ScalarField::zero(); d1_size], - shifted: None, - }; + fr_sponge.absorb_evaluations(&chunked_evals); - let coefficients_form = DensePolynomialOrEvaluations::<_, D<_>>::DensePolynomial; + let v = fr_sponge.challenge().to_field(endo_r); + let u = fr_sponge.challenge().to_field(endo_r); - let mut polynomials = polys + // `DensePolynomialOrEvaluation` takes a reference, so we have to allocate the polynomials + // that we will use here to make sure they live long enough. 
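// Each previous recursion challenge vector c = (c_1, ..., c_k) defines the
// challenge polynomial b_c(x) = prod_{i=1}^{k} (1 + c_{k+1-i} * x^(2^(i-1))),
// whose coefficients b_poly_coefficients expands. A hedged sketch for k = 2:
//
//     // chals = [c1, c2]  =>  b(x) = (1 + c2 * x) * (1 + c1 * x^2)
//
// so each recursion challenge is opened below like any other committed
// polynomial.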
+ let recursion_polynomials = prev_challenges .iter() - .map(|(p, d1_size)| (coefficients_form(p), None, non_hiding(*d1_size))) + .map(|RecursionChallenge { chals, comm }| { + ( + DensePolynomial::from_coefficients_vec(b_poly_coefficients(chals)), + comm.unshifted.len(), + ) + }) .collect::>(); - - let fixed_hiding = |d1_size: usize| PolyComm { - unshifted: vec![G::ScalarField::one(); d1_size], - shifted: None, - }; - let one_polynomial = DensePolynomial::from_coefficients_vec(vec![G::ScalarField::one()]); let zero_polynomial = DensePolynomial::from_coefficients_vec(vec![]); let shifted_polys: Vec<_> = index @@ -490,43 +473,64 @@ where DensePolynomial::from_coefficients_vec(vec![G::ScalarField::zero(), *shift]) }) .collect(); - // public polynomial - polynomials.push((coefficients_form(&public_poly), None, fixed_hiding(1))); - // ft polynomial - polynomials.push((coefficients_form(&ft), None, blinding_ft)); - // permutation aggregation polynomial - polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); - // generic selector - polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); - // other selectors - polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); - polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); - polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); - polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); - polynomials.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); - // witness columns - polynomials.push((coefficients_form(&witness_poly), None, fixed_hiding(1))); - polynomials.extend( - (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), - ); - // coefficients - polynomials.push((coefficients_form(&one_polynomial), None, non_hiding(1))); - polynomials.extend( - (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), - ); - // permutation coefficients - polynomials.extend( - shifted_polys + + let polynomials_to_open = { + // Helpers + let non_hiding = |d1_size: usize| PolyComm { + unshifted: vec![G::ScalarField::zero(); d1_size], + shifted: None, + }; + let fixed_hiding = |d1_size: usize| PolyComm { + unshifted: vec![G::ScalarField::one(); d1_size], + shifted: None, + }; + let coefficients_form = DensePolynomialOrEvaluations::<_, D<_>>::DensePolynomial; + + let mut polynomials_to_open = recursion_polynomials .iter() - .take(PERMUTS - 1) - .map(|w| (coefficients_form(w), None, non_hiding(1))) - .collect::>(), - ); + .map(|(p, d1_size)| (coefficients_form(p), None, non_hiding(*d1_size))) + .collect::>(); + // public polynomial + polynomials_to_open.push((coefficients_form(&public_poly), None, fixed_hiding(1))); + // ft polynomial + polynomials_to_open.push((coefficients_form(&ft), None, blinding_ft)); + // permutation aggregation polynomial + polynomials_to_open.push((coefficients_form(&one_polynomial), None, non_hiding(1))); + // generic selector + polynomials_to_open.push((coefficients_form(&one_polynomial), None, non_hiding(1))); + // other selectors + polynomials_to_open.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials_to_open.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials_to_open.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + polynomials_to_open.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + 
polynomials_to_open.push((coefficients_form(&zero_polynomial), None, fixed_hiding(1))); + // witness columns + polynomials_to_open.push((coefficients_form(&witness_poly), None, fixed_hiding(1))); + polynomials_to_open.extend( + (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), + ); + // coefficients + polynomials_to_open.push((coefficients_form(&one_polynomial), None, non_hiding(1))); + polynomials_to_open.extend( + (1..COLUMNS).map(|_| (coefficients_form(&zero_polynomial), None, fixed_hiding(1))), + ); + // permutation coefficients + polynomials_to_open.extend( + shifted_polys + .iter() + .take(PERMUTS - 1) + .map(|w| (coefficients_form(w), None, non_hiding(1))) + .collect::>(), + ); + polynomials_to_open + }; + + // TODO: rng should be passed as arg + let rng = &mut rand::rngs::OsRng; - //~ 1. Create an aggregated evaluation proof for all of these polynomials at $\zeta$ and $\zeta\omega$ using $u$ and $v$. - let proof = srs.open( + let opening_proof = srs.open( group_map, - &polynomials, + &polynomials_to_open, &[zeta, zeta_omega], v, u, @@ -541,7 +545,7 @@ where t_comm: t_comm.commitment, lookup: None, }, - proof, + proof: opening_proof, evals: chunked_evals, ft_eval1, prev_challenges, @@ -597,7 +601,6 @@ fn test_public_input_only_prover() { Fq::from(20u64), ]; - // add the proof to the batch let start = Instant::now(); let group_map = ::Map::setup(); @@ -614,7 +617,6 @@ fn test_public_input_only_prover() { start.elapsed().as_millis() ); - // verify the proof (propagate any errors) let start = Instant::now(); verify::(&group_map, &verifier_index, &proof, &public_inputs) .unwrap(); From d0499deb3d2c8f87aaed2f09172c116227845b80 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 13:17:08 +0100 Subject: [PATCH 037/178] Revert change to precomputations field visibility --- kimchi/src/circuits/constraints.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 25ab00b326..238e725bc1 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -166,7 +166,7 @@ pub struct ConstraintSystem { pub lookup_constraint_system: Option>, /// precomputes #[serde(skip)] - pub(crate) precomputations: OnceCell>>, + precomputations: OnceCell>>, /// Disable gates checks (for testing; only enables with development builds) pub disable_gates_checks: bool, From 4a81e062a9e7ddc7b4b6cef24a9162a508a9bcf8 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Sat, 19 Aug 2023 15:25:17 +0100 Subject: [PATCH 038/178] Fix clippy lints --- kimchi/src/public_input_only_prover.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index a5af4e3992..d752118341 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -121,7 +121,7 @@ pub fn verifier_index( xor_comm: None, rot_comm: None, - shift: shifts.shifts.clone(), + shift: shifts.shifts, zkpm: OnceCell::new(), w: OnceCell::new(), endo: endo_q, @@ -248,7 +248,7 @@ where w_comm .iter() - .for_each(|c| absorb_commitment(&mut fq_sponge, &c)); + .for_each(|c| absorb_commitment(&mut fq_sponge, c)); let beta = fq_sponge.challenge(); let gamma = fq_sponge.challenge(); From 6698089be752da34ed5e977bfb5f307301be8bd0 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Wed, 6 Mar 2024 17:13:07 +0000 Subject: [PATCH 039/178] Add Default for FeatureFlags, cosmetics --- 
kimchi/src/circuits/constraints.rs | 31 +++++++++++++++++++++++++- kimchi/src/public_input_only_prover.rs | 31 +++++++------------------- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 238e725bc1..ec1b951fa4 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -5,7 +5,11 @@ use crate::{ domain_constant_evaluation::DomainConstantEvaluations, domains::EvaluationDomains, gate::{CircuitGate, GateType}, - lookup::{index::LookupConstraintSystem, lookups::LookupFeatures, tables::LookupTable}, + lookup::{ + index::LookupConstraintSystem, + lookups::{LookupFeatures, LookupPatterns}, + tables::LookupTable, + }, polynomial::{WitnessEvals, WitnessOverDomains, WitnessShifts}, polynomials::permutation::{Shifts, ZK_ROWS}, wires::*, @@ -24,6 +28,7 @@ use once_cell::sync::OnceCell; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; use std::array; +use std::default::Default; use std::sync::Arc; // @@ -49,6 +54,30 @@ pub struct FeatureFlags { pub lookup_features: LookupFeatures, } +impl Default for FeatureFlags { + /// Returns an instance with all features disabled. + fn default() -> FeatureFlags { + FeatureFlags { + range_check0: false, + range_check1: false, + lookup_features: LookupFeatures { + patterns: LookupPatterns { + xor: false, + lookup: false, + range_check: false, + foreign_field_mul: false, + }, + joint_lookup_used: false, + uses_runtime_tables: false, + }, + foreign_field_add: false, + foreign_field_mul: false, + xor: false, + rot: false, + } + } +} + /// The polynomials representing evaluated columns, in coefficient form. #[serde_as] #[derive(Clone, Serialize, Deserialize, Debug)] diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index d752118341..d0e1ffde39 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -10,7 +10,6 @@ use crate::{ argument::ArgumentType, constraints::FeatureFlags, domains::EvaluationDomains, - lookup::lookups::{LookupFeatures, LookupPatterns}, polynomials::permutation, wires::{COLUMNS, PERMUTS}, }, @@ -40,7 +39,7 @@ use std::array; use std::sync::Arc; /// The result of a proof creation or verification. -type Result = std::result::Result; +type ProverResult = std::result::Result; /// Creates a hard-coded verifier index that proofs by this proofs generated by /// `create_recursive_public_input_only` will satisfy. @@ -52,25 +51,10 @@ pub fn verifier_index( ) -> VerifierIndex { let shifts = permutation::Shifts::new(&domain.d1); let (endo_q, _endo_r) = endos::(); - // TODO: Create `FeatureFlags::default`, and use it here and elsewhere. - let feature_flags = FeatureFlags { - range_check0: false, - range_check1: false, - lookup_features: LookupFeatures { - patterns: LookupPatterns { - xor: false, - lookup: false, - range_check: false, - foreign_field_mul: false, - }, - joint_lookup_used: false, - uses_runtime_tables: false, - }, - foreign_field_add: false, - foreign_field_mul: false, - xor: false, - rot: false, - }; + + // FeatureFlags with all features disabled. 
+ let feature_flags: FeatureFlags = std::default::Default::default(); + let (linearization, powers_of_alpha) = crate::linearization::expr_linearization(Some(&feature_flags), true); @@ -78,6 +62,7 @@ pub fn verifier_index( unshifted: vec![comm], shifted: None, }; + VerifierIndex { domain: domain.d1, max_poly_size: srs.g.len(), @@ -151,7 +136,7 @@ where groupmap: &G::Map, witness: Vec, index: &VerifierIndex, - ) -> Result { + ) -> ProverResult { Self::create_recursive_public_input_only::( groupmap, witness, @@ -177,7 +162,7 @@ where mut witness: Vec, index: &VerifierIndex, prev_challenges: Vec>, - ) -> Result { + ) -> ProverResult { let d1_size = index.domain.size(); let (_, endo_r) = G::endos(); From 707e5c8641301c70e15e7deb20d31fa07193ab94 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Wed, 6 Mar 2024 17:23:15 +0000 Subject: [PATCH 040/178] Pass RNG from the outside of the prover --- kimchi/src/public_input_only_prover.rs | 30 +++++++++++++++++--------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index d0e1ffde39..941ae4bbe3 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -35,13 +35,14 @@ use poly_commitment::{ evaluation_proof::DensePolynomialOrEvaluations, srs::{endos, SRS}, }; +use rand::{CryptoRng, RngCore}; use std::array; use std::sync::Arc; /// The result of a proof creation or verification. type ProverResult = std::result::Result; -/// Creates a hard-coded verifier index that proofs by this proofs generated by +/// Creates a hard-coded verifier index that proofs generated by /// `create_recursive_public_input_only` will satisfy. pub fn verifier_index( srs: Arc>, @@ -132,16 +133,19 @@ where pub fn create_public_input_only< EFqSponge: Clone + FqSponge, EFrSponge: FrSponge, + RNG: RngCore + CryptoRng, >( groupmap: &G::Map, witness: Vec, index: &VerifierIndex, + rng: &mut RNG, ) -> ProverResult { - Self::create_recursive_public_input_only::( + Self::create_recursive_public_input_only::( groupmap, witness, index, Vec::new(), + rng, ) } @@ -157,11 +161,13 @@ where pub fn create_recursive_public_input_only< EFqSponge: Clone + FqSponge, EFrSponge: FrSponge, + RNG: RngCore + CryptoRng, >( group_map: &G::Map, mut witness: Vec, index: &VerifierIndex, prev_challenges: Vec>, + rng: &mut RNG, ) -> ProverResult { let d1_size = index.domain.size(); @@ -510,9 +516,6 @@ where polynomials_to_open }; - // TODO: rng should be passed as arg - let rng = &mut rand::rngs::OsRng; - let opening_proof = srs.open( group_map, &polynomials_to_open, @@ -548,12 +551,17 @@ fn test_public_input_only_prover() { sponge::{DefaultFqSponge, DefaultFrSponge}, }; use poly_commitment::{commitment::CommitmentCurve, srs::SRS}; + use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; use std::{sync::Arc, time::Instant}; type SpongeParams = PlonkSpongeConstantsKimchi; type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); + let start = Instant::now(); let circuit_size = (2 << 16) - 1; @@ -564,7 +572,7 @@ fn test_public_input_only_prover() { srs.add_lagrange_basis(domain.d1); let srs = Arc::new(srs); - println!("- time to create srs: {:?}ms", start.elapsed().as_millis()); + eprintln!("- time to create srs: {:?}ms", start.elapsed().as_millis()); let start = Instant::now(); @@ -574,7 +582,7 @@ fn test_public_input_only_prover() { let 
verifier_index = verifier_index::(srs, domain, num_public_inputs, num_prev_challenges); - println!( + eprintln!( "- time to create verifier index: {:?}ms", start.elapsed().as_millis() ); @@ -590,20 +598,22 @@ fn test_public_input_only_prover() { let group_map = ::Map::setup(); - let proof = ProverProof::create_recursive_public_input_only::( + let proof = ProverProof::create_recursive_public_input_only::( &group_map, public_inputs.clone(), &verifier_index, vec![], + &mut rng, ) .unwrap(); - println!( + eprintln!( "- time to create proof: {:?}ms", start.elapsed().as_millis() ); + // Checking validity of the proof with the actual Kimchi verifier. let start = Instant::now(); verify::(&group_map, &verifier_index, &proof, &public_inputs) .unwrap(); - println!("- time to verify: {}ms", start.elapsed().as_millis()); + eprintln!("- time to verify: {}ms", start.elapsed().as_millis()); } From f028db69e162e339462cd339331e83aa8dc98f9a Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Wed, 6 Mar 2024 17:41:53 +0000 Subject: [PATCH 041/178] Generate random numbers for test, clippy lint --- kimchi/src/public_input_only_prover.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 941ae4bbe3..49035d6096 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -559,7 +559,7 @@ fn test_public_input_only_prover() { type ScalarSponge = DefaultFrSponge; let seed: [u8; 32] = thread_rng().gen(); - eprintln!("Seed: {:?}", seed); + eprintln!("Seed: {seed:?}"); let mut rng = StdRng::from_seed(seed); let start = Instant::now(); @@ -588,10 +588,10 @@ fn test_public_input_only_prover() { ); let public_inputs = vec![ - Fq::from(5u64), - Fq::from(10u64), - Fq::from(15u64), - Fq::from(20u64), + From::from(rng.gen_range(0..(1 << 4))), + From::from(rng.gen_range(0..(1 << 4))), + From::from(rng.gen_range(0..(1 << 4))), + From::from(rng.gen_range(0..(1 << 4))), ]; let start = Instant::now(); From 9e4756f9b697e5d857aafd01a7491637732111f6 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Wed, 6 Mar 2024 18:08:35 +0000 Subject: [PATCH 042/178] Add some comments --- kimchi/src/public_input_only_prover.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/kimchi/src/public_input_only_prover.rs b/kimchi/src/public_input_only_prover.rs index 49035d6096..57a98e4093 100644 --- a/kimchi/src/public_input_only_prover.rs +++ b/kimchi/src/public_input_only_prover.rs @@ -1,9 +1,14 @@ -//! This module implements a prover specialized to generate a proof where only the public input is -//! considered. All other gates are disabled, as well as zero-knowledge, and the permutation -//! argument is hard-coded as the identity function, to minimize the overhead of proving. +//! This module implements a prover specialized to generate a proof +//! where only the public input is considered. Intended for testing. //! -//! Proofs generated by this module are explicitly designed to be compatible with the kimchi -//! verifier, and hence the pickles verifier circuit. +//! This is done by using the generic gate with only first coefficient +//! enabled. All other gates are disabled, as well as zero-knowledge, +//! and the permutation argument is hard-coded as the identity +//! function, to minimize the overhead of proving. +//! +//! Proofs generated by this module are explicitly designed to be +//! compatible with the kimchi verifier, and hence the pickles +//! 
verifier circuit.
 
 use crate::{
     circuits::{
@@ -90,6 +95,9 @@ pub fn verifier_index(
         // The polynomial `f(x) = 1`, without blinding.
         // The generic gate is enabled on every row; combined with the coefficients, and with the
         // mixin for public inputs, this encodes the equation `witness[0] = public_input`.
+        // The generic gate's equation is:
+        // c_0 \cdot l + c_1 \cdot r + c_2 \cdot o + c_3 \cdot (l \times r) + c_4
+        // so we set c_0 = 1, and everything else to zero.
         generic_comm: make_comm(srs.g[0]),
         // The polynomials `f(x) = 0`, with blinding factor 1.
         // This disables these gates.

From 588fd6adbec18b9e11f0bc7d7f2d827f764b8f02 Mon Sep 17 00:00:00 2001
From: Danny Willems
Date: Sun, 17 Mar 2024 11:41:21 +0100
Subject: [PATCH 043/178] CI: use cargo-nextest@=0.9.67 instead of latest

From 0.9.68, nextest is only available from 1.73.
---
 .github/workflows/rust.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 85366f1983..82f1ecd840 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -16,6 +16,8 @@ jobs:
   run_checks:
     strategy:
       matrix:
+        # FIXME: use the latest version of cargo nextest when we get rid of 1.71
+        # and 1.72
         rust_toolchain_version: ["1.71", "1.72"]
         # FIXME: currently not available for 5.0.0.
         # It might be related to boxroot dependency, and we would need to bump
@@ -107,7 +109,8 @@ jobs:
       - name: Install latest nextest release
         run: |
           eval $(opam env)
-          cargo install cargo-nextest --locked
+          # FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72.
+          cargo install cargo-nextest@=0.9.67 --locked
 
       - name: Test with latest nextest release (faster than cargo test)
         run: |

From 0b6340cf591081740f775cc9c5a2fd9e91fd319c Mon Sep 17 00:00:00 2001
From: Danny Willems
Date: Mon, 18 Mar 2024 18:07:03 +0100
Subject: [PATCH 044/178] Utils: ensuring that the requested number of chunks
 is returned

As noted in the trait documentation.

When we do Vec::with_capacity(n), it does NOT mean that the vector is
going to be of size n, and not more. It only means that, initially, the
vector will point to an allocated contiguous piece of memory that can
handle n elements of the type parameter of your vector. It may
reallocate if you try to push n + 1 elements.

Source: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.with_capacity.

Therefore, if you want to enforce your vector to be of size n, and not
more, you must verify it yourself.
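(Editor's note: the `Vec::with_capacity` behaviour that this commit message relies on is easy to check in isolation. The following standalone sketch is illustrative only and is not part of the patch.)

```rust
fn main() {
    let mut v: Vec<u64> = Vec::with_capacity(4);
    assert!(v.capacity() >= 4); // at least 4 slots were pre-allocated...
    assert_eq!(v.len(), 0);     // ...but the vector still holds no elements

    // Pushing a fifth element is perfectly fine: the vector reallocates.
    for i in 0..5 {
        v.push(i);
    }
    assert_eq!(v.len(), 5); // nothing ever enforced "exactly 4 elements"
}
```

This is why the patch below adds an explicit `assert_eq!(chunk_polys.len(), num_chunks)` instead of relying on the capacity requested at construction.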
--- utils/src/dense_polynomial.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/src/dense_polynomial.rs b/utils/src/dense_polynomial.rs index 72560f1057..895e227fe0 100644 --- a/utils/src/dense_polynomial.rs +++ b/utils/src/dense_polynomial.rs @@ -57,6 +57,10 @@ impl ExtendedDensePolynomial for DensePolynomial { chunk_polys.push(DensePolynomial::from_coefficients_vec(vec![])); } + // Ensuring that the number of chunks is the one requested, following + // trait documentation + assert_eq!(chunk_polys.len(), num_chunks); + ChunkedPolynomial { polys: chunk_polys, size: chunk_size, From e25e379141757c43dbbaceb6ae49ac0e153e0563 Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Mon, 18 Mar 2024 18:09:17 +0100 Subject: [PATCH 045/178] Handle the case of n < max_poly_size, and if one of the var is not a power of 2 --- poly-commitment/src/evaluation_proof.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 5b42c53206..447e6bd5fd 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -119,7 +119,11 @@ pub fn combine_polys>( if !plnm_evals_part.is_empty() { let n = plnm_evals_part.len(); let max_poly_size = srs_length; - let num_chunks = n / max_poly_size; + let num_chunks = if n % max_poly_size == 0 { + n / max_poly_size + } else { + n / max_poly_size + 1 + }; plnm += &Evaluations::from_vec_and_domain(plnm_evals_part, D::new(n).unwrap()) .interpolate() .to_chunked_polynomial(num_chunks, max_poly_size) From de77b85914b9432eb8ffffa19156f92bbcda3bb2 Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Mon, 18 Mar 2024 18:22:49 +0100 Subject: [PATCH 046/178] Tests/Commitments: compute correct number of chunks --- poly-commitment/src/tests/commitment.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs index 7971b99401..77a7d78db7 100644 --- a/poly-commitment/src/tests/commitment.rs +++ b/poly-commitment/src/tests/commitment.rs @@ -150,8 +150,14 @@ fn test_randomised(mut rng: &mut RNG) { let mut chunked_evals = vec![]; for point in eval_points.clone() { + let n = poly.len(); + let num_chunks = if n % srs.g.len() == 0 { + n / srs.g.len() + } else { + n / srs.g.len() + 1 + }; chunked_evals.push( - poly.to_chunked_polynomial(1, srs.g.len()) + poly.to_chunked_polynomial(num_chunks, srs.g.len()) .evaluate_chunks(point), ); } From ff3bf8a59cf290c22ec799e6de3deecc7b6a6472 Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Mon, 18 Mar 2024 18:51:36 +0100 Subject: [PATCH 047/178] Tests/batch_15_wires: compute correct number of chunks --- poly-commitment/src/tests/batch_15_wires.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs index 4acf9d1e5d..6337866ead 100644 --- a/poly-commitment/src/tests/batch_15_wires.rs +++ b/poly-commitment/src/tests/batch_15_wires.rs @@ -30,8 +30,6 @@ where let size = 1 << 7; let srs = SRS::::create(size); - let num_chunks = 1; - let group_map = ::Map::setup(); let sponge = DefaultFqSponge::::new( @@ -83,10 +81,19 @@ where let mut start = Instant::now(); let comm = (0..a.len()) .map(|i| { + let n = a[i].len(); + let num_chunks = if n % srs.g.len() == 0 { + n / size + } else { + n / size + 1 + }; ( srs.commit(&a[i].clone(), num_chunks, rng), x.iter() - .map(|xx| a[i].to_chunked_polynomial(1, 
size).evaluate_chunks(*xx)) + .map(|xx| { + a[i].to_chunked_polynomial(num_chunks, size) + .evaluate_chunks(*xx) + }) .collect::>(), bounds[i], ) From 6682dfdd4599c3ba7a6ec60d9b14f667ef33f449 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Wed, 20 Mar 2024 18:51:50 +0000 Subject: [PATCH 048/178] Fix test_commit chunking issue After enforcing chunking size to be as expected (See PR #2010) the test_commit became flaky. The issue was related to the sometimes appearing polynomials of zero degree: whereas previous code would always create at least one chunk, the new code was sometimes requiring and creating zero chunks. This commit restores the behaviour of the code before while preserving the explicit nature of requesting chunks. --- poly-commitment/src/evaluation_proof.rs | 6 +++--- poly-commitment/src/tests/batch_15_wires.rs | 6 +++--- poly-commitment/src/tests/commitment.rs | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 447e6bd5fd..6b2e9dcfc3 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -119,10 +119,10 @@ pub fn combine_polys>( if !plnm_evals_part.is_empty() { let n = plnm_evals_part.len(); let max_poly_size = srs_length; - let num_chunks = if n % max_poly_size == 0 { - n / max_poly_size + let num_chunks = if n == 0 { + 1 } else { - n / max_poly_size + 1 + n / max_poly_size + if n % max_poly_size == 0 { 0 } else { 1 } }; plnm += &Evaluations::from_vec_and_domain(plnm_evals_part, D::new(n).unwrap()) .interpolate() diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs index 6337866ead..545a788fd8 100644 --- a/poly-commitment/src/tests/batch_15_wires.rs +++ b/poly-commitment/src/tests/batch_15_wires.rs @@ -82,10 +82,10 @@ where let comm = (0..a.len()) .map(|i| { let n = a[i].len(); - let num_chunks = if n % srs.g.len() == 0 { - n / size + let num_chunks = if n == 0 { + 1 } else { - n / size + 1 + n / srs.g.len() + if n % srs.g.len() == 0 { 0 } else { 1 } }; ( srs.commit(&a[i].clone(), num_chunks, rng), diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs index 77a7d78db7..38d57994ec 100644 --- a/poly-commitment/src/tests/commitment.rs +++ b/poly-commitment/src/tests/commitment.rs @@ -151,10 +151,10 @@ fn test_randomised(mut rng: &mut RNG) { let mut chunked_evals = vec![]; for point in eval_points.clone() { let n = poly.len(); - let num_chunks = if n % srs.g.len() == 0 { - n / srs.g.len() + let num_chunks = if n == 0 { + 1 } else { - n / srs.g.len() + 1 + n / srs.g.len() + if n % srs.g.len() == 0 { 0 } else { 1 } }; chunked_evals.push( poly.to_chunked_polynomial(num_chunks, srs.g.len()) From 8c091d5952c31dd81bc9bf6942446ed0c804fe9f Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Wed, 3 Apr 2024 13:28:48 +0100 Subject: [PATCH 049/178] Expand max table width to include the required width for gates --- kimchi/src/circuits/lookup/index.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index ab3e4545ab..f0be99289d 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -300,6 +300,11 @@ impl LookupConstraintSystem { .max() .unwrap_or(0); + let max_table_width = std::cmp::max( + max_table_width, + lookup_info.max_joint_size.try_into().unwrap(), + ); + //~ 5. Create the concatenated table of all the fixed lookup tables. 
//~ It will be of height the size of the domain, //~ and of width the maximum width of any of the lookup tables. From fa69db17c5351c5eb35117a3eb33c506bc5aa453 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 9 Oct 2023 05:24:47 +0100 Subject: [PATCH 050/178] Add fixed blinding to table ID --- kimchi/src/prover.rs | 37 +++++++++++++++++++++--------------- kimchi/src/verifier_index.rs | 2 +- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 45fb2e6304..3f109d0ceb 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -1388,23 +1388,30 @@ where )); //~~ * add the combined table polynomial - let table_blinding = if lcs.runtime_selector.is_some() { - let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); - let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); - - let unshifted = runtime_comm - .blinders - .unshifted - .iter() - .map(|blinding| *joint_combiner * blinding) - .collect(); + let table_blinding = { + let table_id_combiner = lookup_context.table_id_combiner.as_ref().unwrap(); + let base_blinding = *table_id_combiner; + if lcs.runtime_selector.is_some() { + let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); + let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); + + let unshifted = runtime_comm + .blinders + .unshifted + .iter() + .map(|blinding| *joint_combiner * blinding + base_blinding) + .collect(); - PolyComm { - unshifted, - shifted: None, + PolyComm { + unshifted, + shifted: None, + } + } else { + PolyComm { + unshifted: vec![base_blinding; num_chunks], + shifted: None, + } } - } else { - non_hiding(num_chunks) }; let joint_lookup_table = lookup_context.joint_lookup_table.as_ref().unwrap(); diff --git a/kimchi/src/verifier_index.rs b/kimchi/src/verifier_index.rs index d0e5f89172..2fa177eb0a 100644 --- a/kimchi/src/verifier_index.rs +++ b/kimchi/src/verifier_index.rs @@ -195,7 +195,7 @@ where .map(|e| self.srs.commit_evaluations_non_hiding(domain, e)) .collect(), table_ids: cs.table_ids8.as_ref().map(|table_ids8| { - self.srs.commit_evaluations_non_hiding(domain, table_ids8) + mask_fixed(self.srs.commit_evaluations_non_hiding(domain, table_ids8)) }), runtime_tables_selector: cs .runtime_selector From 4d3080ec50e9c693df910dabcc0e2fa3a9076433 Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 9 Oct 2023 05:29:29 +0100 Subject: [PATCH 051/178] Add fixed blinding to lookup table columns --- kimchi/src/prover.rs | 7 +++++-- kimchi/src/verifier_index.rs | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 3f109d0ceb..4cddf31ccd 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -1389,11 +1389,14 @@ where //~~ * add the combined table polynomial let table_blinding = { + let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); let table_id_combiner = lookup_context.table_id_combiner.as_ref().unwrap(); - let base_blinding = *table_id_combiner; + let max_joint_size = lcs.configuration.lookup_info.max_joint_size; + let base_blinding = (1..max_joint_size).fold(G::ScalarField::one(), |acc, _| { + G::ScalarField::one() + *joint_combiner * acc + }) + *table_id_combiner; if lcs.runtime_selector.is_some() { let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); - let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); let unshifted = runtime_comm .blinders diff --git a/kimchi/src/verifier_index.rs b/kimchi/src/verifier_index.rs index 
2fa177eb0a..1b0511d800 100644 --- a/kimchi/src/verifier_index.rs +++ b/kimchi/src/verifier_index.rs @@ -192,7 +192,7 @@ where lookup_table: cs .lookup_table8 .iter() - .map(|e| self.srs.commit_evaluations_non_hiding(domain, e)) + .map(|e| mask_fixed(self.srs.commit_evaluations_non_hiding(domain, e))) .collect(), table_ids: cs.table_ids8.as_ref().map(|table_ids8| { mask_fixed(self.srs.commit_evaluations_non_hiding(domain, table_ids8)) From 2a17572205acccc9bdbea01b918fd7f1e7056dae Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Mon, 9 Oct 2023 07:04:35 +0100 Subject: [PATCH 052/178] Wholely exposition batman! --- kimchi/src/prover.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 4cddf31ccd..18974c30eb 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -1391,10 +1391,25 @@ where let table_blinding = { let joint_combiner = lookup_context.joint_combiner.as_ref().unwrap(); let table_id_combiner = lookup_context.table_id_combiner.as_ref().unwrap(); - let max_joint_size = lcs.configuration.lookup_info.max_joint_size; - let base_blinding = (1..max_joint_size).fold(G::ScalarField::one(), |acc, _| { - G::ScalarField::one() + *joint_combiner * acc - }) + *table_id_combiner; + let max_fixed_lookup_table_size = { + // CAUTION: This is not `lcs.configuration.lookup_info.max_joint_size` because + // the lookup table may be strictly narrower, and as such will not contribute + // the associated blinders. + // For example, using a runtime table with the lookup gate (width 2), but only + // width-1 fixed tables (e.g. range check), it would be incorrect to use the + // wider width (2) because there are no such contributing commitments! + lcs.lookup_table8.len() + }; + let base_blinding = { + let fixed_table_blinding = if max_fixed_lookup_table_size == 0 { + G::ScalarField::zero() + } else { + (1..max_fixed_lookup_table_size).fold(G::ScalarField::one(), |acc, _| { + G::ScalarField::one() + *joint_combiner * acc + }) + }; + fixed_table_blinding + *table_id_combiner + }; if lcs.runtime_selector.is_some() { let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); From 63a32fca86678da41e35bcf78356dfee7e3bb51d Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Wed, 21 Feb 2024 21:28:14 +0100 Subject: [PATCH 053/178] Adding a simple comment As a reminder that the method len returns the number of polynomials, and not the domain size or something else. It might be confusing when we don't remember what lookup_table8 is. --- kimchi/src/prover.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 18974c30eb..14fe30d365 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -1398,6 +1398,7 @@ where // For example, using a runtime table with the lookup gate (width 2), but only // width-1 fixed tables (e.g. range check), it would be incorrect to use the // wider width (2) because there are no such contributing commitments! + // Note that lookup_table8 is a list of polynomials lcs.lookup_table8.len() }; let base_blinding = { From 065ba4dd5202b2aaf61403151bc3ebe2f3e18fd4 Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Sun, 17 Mar 2024 11:41:21 +0100 Subject: [PATCH 054/178] CI: use cargo-nextest@=0.9.67 instead of latest From 0.9.68, nextest is only available from 1.73. 
--- .github/workflows/rust.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 3490171728..13ec23a3ac 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -16,6 +16,8 @@ jobs: run_checks: strategy: matrix: + # FIXME: use the latest version of cargo nextest when we get rid of 1.71 + # and 1.72 rust_toolchain_version: ["1.71", "1.72"] # FIXME: currently not available for 5.0.0. # It might be related to boxroot dependency, and we would need to bump @@ -107,7 +109,8 @@ jobs: - name: Install latest nextest release run: | eval $(opam env) - cargo install cargo-nextest --locked + # FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72. + cargo install cargo-nextest@=0.9.67 --locked - name: Test with latest nextest release (faster than cargo test) run: | From 10d5e706fe220320eb62868c154efbdfabd9765e Mon Sep 17 00:00:00 2001 From: Gregor Date: Wed, 10 Apr 2024 12:52:59 +0200 Subject: [PATCH 055/178] Merge develop --- .github/workflows/benches.yml | 2 +- .github/workflows/coverage.yml.disabled | 2 +- .github/workflows/gh-page.yml | 2 +- .github/workflows/rust.yml | 4 +- .gitignore | 4 +- CONTRIBUTING.md | 21 ++ Cargo.toml | 1 + book/Cargo.toml | 4 +- book/src/SUMMARY.md | 73 ++-- book/src/fundamentals/custom_constraints.md | 59 +--- book/src/fundamentals/proof_systems.md | 24 +- book/src/kimchi/custom_constraints.md | 58 ++++ .../extended-lookup-tables.md | 4 +- book/src/{plonk => kimchi}/final_check.md | 0 .../src/{rfcs => kimchi}/foreign_field_add.md | 0 .../src/{rfcs => kimchi}/foreign_field_mul.md | 0 book/src/kimchi/keccak.md | 168 +++++++++ book/src/kimchi/lookup.md | 319 +++++++++++++++++- book/src/{plonk => kimchi}/maller_15.md | 0 book/src/kimchi/overview.md | 22 +- book/src/kimchi/permut.md | 5 +- book/src/kimchi/zkpm.md | 1 + book/src/pickles/passthrough.md | 1 - book/src/plonk/fiat_shamir.md | 8 +- book/src/plonk/glossary.md | 14 +- book/src/plonk/zkpm.md | 25 +- book/src/rfcs/3-lookup.md | 291 ---------------- book/src/rfcs/keccak.md | 167 +-------- book/src/snarky/api.md | 2 - book/src/snarky/booleans.md | 73 ---- book/src/snarky/circuit-generation.md | 29 -- book/src/snarky/kimchi-backend.md | 234 ------------- book/src/snarky/overview.md | 32 -- book/src/snarky/snarky-wrapper.md | 70 ---- book/src/snarky/vars.md | 135 -------- book/src/snarky/witness-generation.md | 21 -- book/src/specs/kimchi.md | 12 +- kimchi/Cargo.toml | 4 +- kimchi/src/alphas.rs | 7 +- kimchi/src/circuits/constraints.rs | 131 ++++--- kimchi/src/circuits/domains.rs | 29 +- kimchi/src/circuits/expr.rs | 84 ++++- kimchi/src/circuits/gate.rs | 2 + kimchi/src/circuits/lookup/constraints.rs | 20 +- kimchi/src/circuits/lookup/index.rs | 160 ++++++++- kimchi/src/circuits/lookup/lookups.rs | 1 - kimchi/src/circuits/lookup/tables/mod.rs | 8 +- kimchi/src/error.rs | 16 +- kimchi/src/linearization.rs | 11 +- kimchi/src/prover.rs | 105 ++---- kimchi/src/prover_index.rs | 3 +- kimchi/src/tests/foreign_field_add.rs | 1 - kimchi/src/tests/lookup.rs | 169 +++++++++- kimchi/src/tests/range_check.rs | 10 +- kimchi/src/tests/recursion.rs | 2 +- kimchi/src/tests/rot.rs | 1 - kimchi/src/tests/xor.rs | 1 - kimchi/src/verifier.rs | 67 ++-- kimchi/src/verifier_index.rs | 42 +-- poly-commitment/src/chunked.rs | 9 +- poly-commitment/src/commitment.rs | 308 +++++------------ poly-commitment/src/evaluation_proof.rs | 106 ++---- poly-commitment/src/lib.rs | 11 +- poly-commitment/src/pairing_proof.rs | 47 +-- poly-commitment/src/srs.rs 
| 35 +- poly-commitment/src/tests/batch_15_wires.rs | 35 +- poly-commitment/src/tests/commitment.rs | 61 +--- utils/src/dense_polynomial.rs | 4 + 68 files changed, 1469 insertions(+), 1908 deletions(-) create mode 100644 book/src/kimchi/custom_constraints.md rename book/src/{rfcs => kimchi}/extended-lookup-tables.md (99%) rename book/src/{plonk => kimchi}/final_check.md (100%) rename book/src/{rfcs => kimchi}/foreign_field_add.md (100%) rename book/src/{rfcs => kimchi}/foreign_field_mul.md (100%) create mode 100644 book/src/kimchi/keccak.md rename book/src/{plonk => kimchi}/maller_15.md (100%) create mode 100644 book/src/kimchi/zkpm.md delete mode 100644 book/src/pickles/passthrough.md delete mode 100644 book/src/rfcs/3-lookup.md delete mode 100644 book/src/snarky/api.md delete mode 100644 book/src/snarky/booleans.md delete mode 100644 book/src/snarky/circuit-generation.md delete mode 100644 book/src/snarky/kimchi-backend.md delete mode 100644 book/src/snarky/overview.md delete mode 100644 book/src/snarky/snarky-wrapper.md delete mode 100644 book/src/snarky/vars.md delete mode 100644 book/src/snarky/witness-generation.md diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index e8ac86e6b9..7fd1fe8e2e 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -17,7 +17,7 @@ jobs: if: github.event.label.name == 'benchmark' steps: - name: Checkout PR - uses: actions/checkout@v2 + uses: actions/checkout@v4.1.1 # as action-rs does not seem to be maintained anymore, building from # scratch the environment using rustup diff --git a/.github/workflows/coverage.yml.disabled b/.github/workflows/coverage.yml.disabled index f9f00e3503..837124ea24 100644 --- a/.github/workflows/coverage.yml.disabled +++ b/.github/workflows/coverage.yml.disabled @@ -17,7 +17,7 @@ jobs: timeout-minutes: 60 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/gh-page.yml b/.github/workflows/gh-page.yml index edbccf8ca3..7077069ded 100644 --- a/.github/workflows/gh-page.yml +++ b/.github/workflows/gh-page.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Checkout Repository - uses: actions/checkout@v2 + uses: actions/checkout@v4.1.1 # as action-rs does not seem to be maintained anymore, building from # scratch the environment using rustup diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 13ec23a3ac..82f1ecd840 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -28,7 +28,7 @@ jobs: name: Run some basic checks and tests steps: - name: Checkout PR - uses: actions/checkout@v3 + uses: actions/checkout@v4.1.1 # as action-rs does not seem to be maintained anymore, building from # scratch the environment using rustup @@ -62,7 +62,7 @@ jobs: - name: Install cargo-spec for specifications run: | eval $(opam env) - cargo install cargo-spec + cargo install --locked cargo-spec - name: Build the kimchi specification run: | diff --git a/.gitignore b/.gitignore index 3a29d80589..fb03a39692 100644 --- a/.gitignore +++ b/.gitignore @@ -25,4 +25,6 @@ _build *.html # If symlink created for kimchi-visu -tools/srs \ No newline at end of file +tools/srs + +.ignore diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3d3c479436..ed881acc75 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -48,3 +48,24 @@ cargo fmt ``` These are enforced by GitHub PR checks, so be sure to have any errors produced by the above tools fixed before pushing the code to 
your pull request branch. Refer to `.github/workflows` for all PR checks. + +## Branching policy + +Generally, proof-systems intends to be synchronized with the mina repository (see their [README-branching.md](https://github.com/MinaProtocol/mina/blob/develop/README-branching.md)), and so its branching policy is quite similar. However several important (some, temporary) distinctions exist: + +- `compatible`: + - Compatible with `rampup` in `mina`. + - Mina's `compatible`, similarly to mina's `master`, does not have `proof-systems`. +- `berkley`: future hardfork release, will be going out to berkeley. + - This is where hotfixes go. +- `develop`: matches mina's `develop`, soft fork-compatibility. + - Also used by `mina/o1js-main` and `o1js/main`. +- `master`: future feature work development, containing breaking changes. Anything that does not need to be released alongside mina. + - Note that `mina`'s `master` does not depend on `proof-systems` at all. +- `izmir`: next hardfork release after berkeley. +- In the future: + - `master`/`develop` will reverse roles and become something like gitflow. + - After Berkeley release `compatible` will become properly synced with `mina/compatible`. +- Direction of merge: + - Back-merging: `compatible` into `berkeley` into `develop` into `master`. + - Front-merging (introducing new features): other direction, but where you start depends on where the feature belongs. diff --git a/Cargo.toml b/Cargo.toml index c50da37068..3d0d91c9cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,7 @@ members = [ "utils", "internal-tracing", ] +resolver = "2" [profile.release] lto = true diff --git a/book/Cargo.toml b/book/Cargo.toml index 8bf747f208..2a0d222bc7 100644 --- a/book/Cargo.toml +++ b/book/Cargo.toml @@ -10,4 +10,6 @@ edition = "2021" license = "Apache-2.0" [build-dependencies] -cargo-spec = { version = "0.5.0" } \ No newline at end of file +cargo-spec = { version = "0.5.0" } +time = { version = "~0.3.23" } # This crate is a known bad-actor for breaking rust version support. +plist = { version = "~1.5.0" } # This crate improperly constrains its bad-actor dependency (`time`). 
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 8b6e482e3e..f30839da67 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -9,15 +9,15 @@ - [Rings](./fundamentals/zkbook_rings.md) - [Fields](./fundamentals/zkbook.md) - [Polynomials](./fundamentals/zkbook_polynomials.md) - - [Multiplying polynomials](./fundamentals/zkbook_multiplying_polynomials.md) - - [Fast Fourier transform](./fundamentals/zkbook_fft.md) + - [Multiplying Polynomials](./fundamentals/zkbook_multiplying_polynomials.md) + - [Fast Fourier Transform](./fundamentals/zkbook_fft.md) -# Cryptographic tools +# Cryptographic Tools - [Commitments](./fundamentals/zkbook_commitment.md) -- [Polynomial commitments](./plonk/polynomial_commitments.md) - - [Inner product argument](./plonk/inner_product.md) - - [Different functionnalities](./plonk/inner_product_api.md) +- [Polynomial Commitments](./plonk/polynomial_commitments.md) + - [Inner Product Argument](./plonk/inner_product.md) + - [Different Functionnalities](./plonk/inner_product_api.md) - [Two Party Computation](./fundamentals/zkbook_2pc/overview.md) - [Garbled Circuits](./fundamentals/zkbook_2pc/gc.md) - [Basics](./fundamentals/zkbook_2pc/basics.md) @@ -27,71 +27,50 @@ - [Half Gate](./fundamentals/zkbook_2pc/halfgate.md) - [Full Description](./fundamentals/zkbook_2pc/fulldesc.md) - [Fixed-Key-AES Hashes](./fundamentals/zkbook_2pc/fkaes.md) - - [Oblivious Transfer](./fundamentals/zkbook_2pc/ot.md) - [Base OT](./fundamentals/zkbook_2pc/baseot.md) - [OT Extension](./fundamentals/zkbook_2pc/ote.md) - - [Full Protocol](./fundamentals/zkbook_2pc/2pc.md) - -# Proof systems - -- [Overview](./fundamentals/proof_systems.md) -- [zk-SNARKs](./fundamentals/zkbook_plonk.md) -- [Custom constraints](./fundamentals/custom_constraints.md) +- [Proof Systems](./fundamentals/proof_systems.md) + - [zk-SNARKs](./fundamentals/zkbook_plonk.md) # Background on PLONK - [Overview](./plonk/overview.md) -- [Glossary](./plonk/glossary.md) + - [Glossary](./plonk/glossary.md) - [Domain](./plonk/domain.md) -- [Lagrange basis in multiplicative subgroups](./plonk/lagrange.md) -- [Non-interaction with fiat-shamir](./plonk/fiat_shamir.md) +- [Lagrange Basis in Multiplicative Subgroups](./plonk/lagrange.md) +- [Non-Interactivity via Fiat-Shamir](./plonk/fiat_shamir.md) - [Plookup](./plonk/plookup.md) -- [Maller's optimization](./plonk/maller.md) +- [Maller's Optimization](./plonk/maller.md) +- [Zero-Column Approach to Zero-Knowledge](./plonk/zkpm.md) # Kimchi - [Overview](./kimchi/overview.md) - - [Arguments](./kimchi/arguments.md) - - [Custom gates](./kimchi/gates.md) - - [Permutation](./kimchi/permut.md) - - [Lookup](./kimchi/lookup.md) - -# Snarky +- [Arguments](./kimchi/arguments.md) +- [Final Check](./kimchi/final_check.md) +- [Maller's Optimization for Kimchi](./kimchi/maller_15.md) +- [Lookup Tables](./kimchi/lookup.md) + - [Extended Lookup Tables](./kimchi/extended-lookup-tables.md) +- [Custom Constraints](./kimchi/custom_constraints.md) +- [Custom Gates](./kimchi/gates.md) + - [Foreign Field Addition](./kimchi/foreign_field_add.md) + - [Foreign Field Multiplication](./kimchi/foreign_field_mul.md) + - [Keccak](./kimchi/keccak.md) -- [Overview](./snarky/overview.md) -- [API](./snarky/api.md) -- [snarky wrapper](./snarky/snarky-wrapper.md) -- [Kimchi backend](./snarky/kimchi-backend.md) -- [Vars](./snarky/vars.md) -- [Booleans](./snarky/booleans.md) -- [Circuit generation](./snarky/circuit-generation.md) -- [Witness generation](./snarky/witness-generation.md) # Pickles & Inductive Proof 
Systems - [Overview](./fundamentals/zkbook_ips.md) - [Accumulation](./pickles/accumulation.md) - [Deferred Computation](./pickles/deferred.md) -- [Passthough & Me-Only](./pickles/passthrough.md) - -# RFCs -- [RFC 0: Alternative zero-knowledge](./plonk/zkpm.md) -- [RFC 1: Final check](./plonk/final_check.md) -- [RFC 2: Maller's optimization for kimchi](./plonk/maller_15.md) -- [RFC 3: Plookup integration in kimchi](./rfcs/3-lookup.md) -- [RFC 4: Extended lookup tables](./rfcs/extended-lookup-tables.md) -- [RFC 5: Foreign Field Addition](./rfcs/foreign_field_add.md) -- [RFC 6: Foreign Field Multiplication](./rfcs/foreign_field_mul.md) -- [RFC 7: Keccak](./rfcs/keccak.md) - -# Specifications +# Technical Specifications - [Poseidon hash](./specs/poseidon.md) -- [Polynomial commitment](./specs/poly-commitment.md) -- [Pasta curves](./specs/pasta.md) +- [Polynomial Commitment](./specs/poly-commitment.md) +- [Pasta Curves](./specs/pasta.md) - [Kimchi](./specs/kimchi.md) - [Universal Reference String (URS)](./specs/urs.md) - [Pickles](./specs/pickles.md) diff --git a/book/src/fundamentals/custom_constraints.md b/book/src/fundamentals/custom_constraints.md index e2c7256d68..8773465d1c 100644 --- a/book/src/fundamentals/custom_constraints.md +++ b/book/src/fundamentals/custom_constraints.md @@ -1,58 +1 @@ -This section explains how to design and add a custom constraint to our `proof-systems` library. - -PLONK is an AIOP. That is, it is a protocol in which the prover sends polynomials as messages and the verifier sends random challenges, and then evaluates the prover's polynomials and performs some final checks on the outputs. - -PLONK is very flexible. It can be customized with constraints specific to computations of interest. For example, in Mina, we use a PLONK configuration called kimchi that has custom constraints for poseidon hashing, doing elliptic curve operations, and more. - -A "PLONK configuration" specifies -- The set of types of constraints that you would like to be able to enforce. We will describe below how these types of constraints are specified. -- A number of "eq-able" columns `W` -- A number of "advice" columns `A` - -Under such configuration, a circuit is specified by -- A number of rows `n` -- A vector `cs` of constraint-types of length `n`. I.e., a vector that specifies, for each row, which types of constraints should be enforced on that row. -- A vector `eqs : Vec<(Position, Position)>` of equalities to enforce, where `struct Position { row: usize, column: usize }`. E.g., if the pair `(Position { row: 0, col: 8 }, Position { row: 10, col: 2 })` is in `eqs`, then the circuit is saying the entries in those two positions should be equal, or in other words that they refer to the same value. This is where the distinction between "eq-able" and "advice" columns comes in. The `column` field of a position in the `eqs` array can only refer to one of the first `W` columns. Equalities cannot be enforced on entries in the `A` columns after that. - -Then, given such a circuit, PLONK lets you produce proofs for the statement - -> I know `W + A` "column vectors" of field elements `vs: [Vec; W + A]` such that for each row index `i < n`, the constraint of type `cs[i]` holds on the values `[vs[0][i], ..., vs[W+A - 1][i], vs[0][i+1], ..., vs[W+A - 1][i+1]` and all the equalities in `eqs` hold. I.e., for `(p1, p2)` in `eqs` we have `vs[p1.col][p1.row] == vs[p2.col][p2.row]`. So, a constraint can check the values in two adjacent rows. 
- -## Specifying a constraint - -Mathematically speaking, a constraint is a multivariate polynomial over the variables $c_{\mathsf{Curr},i}, \dots, v_{\mathsf{Curr}, W+A-1}, v_{\mathsf{Next}, 0}, \dots, v_{\mathsf{Next}, W+A-1}$. In other words, there is one variable corresponding to the value of each column in the "current row" and one variable correspond to the value of each column in the "next row". - -In Rust, $v_{r, i}$ is written `E::cell(Column::Witness(i), r)`. So, for example, the variable $v_{\mathsf{Next}, 3}$ is written -`E::cell(Column::Witness(3), CurrOrNext::Next)`. - - - - let w = |i| v(Column::Witness(i)); -Let's - -## Defining a PLONK configuration - -The art in proof systems comes from knowing how to design a PLONK configuration to ensure maximal efficiency for the sorts of computations you are trying to prove. That is, how to choose the numbers of columns `W` and `A`, and how to define the set of constraint types. - -Let's describe the trade-offs involved here. - -The majority of the proving time for the PLONK prover is in -- committing to the `W + A` column polynomials, which have length equal to the number of rows `n` -- committing to the "permutation accumulator polynomial, which has length `n`. -- committing to the quotient polynomial, which reduces to computing `max(k, W)` MSMs of size `n`, where `k` is the max degree of a constraint. -- performing the commitment opening proof, which is mostly dependent on the number of rows `n`. - -So all in all, the proving time is approximately equal to the time to perform `W + A + 1 + max(k - 1, W)` MSMs of size `n`, plus the cost of an opening proof for polynomials of degree `n - 1`. - -and maybe -- computing the combined constraint polynomial, which has degree `k * n` where `k` is the maximum degree of a constraint - -- Increasing `W` and `A` increase proof size, and they potentially impact the prover-time as the prover must compute polynomial commitments to each column, and computing a polynomial commitment corresponds to doing one MSM (multi-scalar multiplication, also called a multi-exponentiation.) - - However, often increasing the number of columns allows you to decrease the number of rows required for a given computation. For example, if you can perform one Poseidon hash in 36 rows with 5 total columns, then you can also perform it in 12 (= 36 / 3) rows with 15 (= 5 * 3) total columns. - - **Decreasing the number of rows (even while keeping the total number of table entries the same) is desirable because it reduces the cost of the polynomial commitment opening proof, which is dominated by a factor linear in the number of rows, and barely depends on the number of columns.** - - Increasing the number of columns also increases verifier time, as the verifier must perform one scalar-multiplication and one hash per column. Proof length is also affected by a larger number of columns, as more polynomials need to be committed and sent along to the verifier. - -There is typically some interplay between these +# Custom constraints diff --git a/book/src/fundamentals/proof_systems.md b/book/src/fundamentals/proof_systems.md index 84b5f8fe97..d281048974 100644 --- a/book/src/fundamentals/proof_systems.md +++ b/book/src/fundamentals/proof_systems.md @@ -1,31 +1,29 @@ -# Overview +# Proof Systems Design Overview Many modern proof systems (and I think all that are in use) are constructed according to the following recipe. 1. You start out with a class of computations. 2. You devise a way to *arithmetize* those computations. 
That is, to express your computation as a statement about polynomials. - + More specifically, you describe what is often called an "algebraic interactive oracle proof" (AIOP) that encodes your computation. An AIOP is a protocol describing an interaction between a prover and a verifier, in which the prover sends the verifier some "polynomial oracles" (basically a black box function that given a point evaluates a polynomial at that point), the verifier sends the prover random challenges, and at the end, the verifier queries the prover's polynomials at points of its choosing and makes a decision as to whether it has been satisfied by the proof. 3. An AIOP is an imagined interaction between parties. It is an abstract description of the protocol that will be "compiled" into a SNARK. There are several "non-realistic" aspects about it. One is that the prover sends the verifier black-box polynomials that the verifier can evaluate. These polynomials have degree comparable to the size of the computation being verified. If we implemented these "polynomial oracles" by having the prover really send the $O(n)$ size polynomials (say by sending all their coefficients), then we would not have a zk-SNARK at all, since the verifier would have to read this linearly sized polynomial so we would lose succinctness, and the polynomials would not be black-box functions, so we may lose zero-knowledge. - + Instead, when we concretely instantiate the AIOP, we have the prover send constant-sized, hiding *polynomial commitments*. Then, in the phase of the AIOP where the verifier queries the polynomials, the prover sends an *opening proof* for the polynomial commitments which the verifier can check, thus simulating the activity of evaluating the prover's polynomials on your own. - + So this is the next step of making a SNARK: instantiating the AIOP with a polynomial commitment scheme of one's choosing. There are several choices here and these affect the properties of the SNARK you are constructing, as the SNARK will inherit efficiency and setup properties of the polynomial commitment scheme used. -4. An AIOP describes an interactive protocol between the verifier and the prover. In reality, typically, we also want our proofs to be non-interactive. - +4. An AIOP describes an interactive protocol between the verifier and the prover. In reality, typically, we also want our proofs to be non-interactive. + This is accomplished by what is called the [Fiat--Shamir transformation](). The basic idea is this: all that the verifier is doing is sampling random values to send to the prover. Instead, to generate a "random" value, the prover simulates the verifier by hashing its messages. The resulting hash is used as the "random" challenge. - + At this point we have a fully non-interactive proof. Let's review our steps. - + 1. Start with a computation. - + 2. Translate the computation into a statement about polynomials and design a corresponding AIOP. - - 3. Compile the AIOP into an interactive protocol by having the prover send hiding polynomial commitments instead of polynomial oracles. - - 4. Get rid of the verifier-interaction by replacing it with a hash function. I.e., apply the Fiat--Shamir transform. + 3. Compile the AIOP into an interactive protocol by having the prover send hiding polynomial commitments instead of polynomial oracles. + 4. Get rid of the verifier-interaction by replacing it with a hash function. I.e., apply the Fiat--Shamir transform. 
diff --git a/book/src/kimchi/custom_constraints.md b/book/src/kimchi/custom_constraints.md new file mode 100644 index 0000000000..e2c7256d68 --- /dev/null +++ b/book/src/kimchi/custom_constraints.md @@ -0,0 +1,58 @@ +This section explains how to design and add a custom constraint to our `proof-systems` library. + +PLONK is an AIOP. That is, it is a protocol in which the prover sends polynomials as messages and the verifier sends random challenges, and then evaluates the prover's polynomials and performs some final checks on the outputs. + +PLONK is very flexible. It can be customized with constraints specific to computations of interest. For example, in Mina, we use a PLONK configuration called kimchi that has custom constraints for poseidon hashing, doing elliptic curve operations, and more. + +A "PLONK configuration" specifies +- The set of types of constraints that you would like to be able to enforce. We will describe below how these types of constraints are specified. +- A number of "eq-able" columns `W` +- A number of "advice" columns `A` + +Under such configuration, a circuit is specified by +- A number of rows `n` +- A vector `cs` of constraint-types of length `n`. I.e., a vector that specifies, for each row, which types of constraints should be enforced on that row. +- A vector `eqs : Vec<(Position, Position)>` of equalities to enforce, where `struct Position { row: usize, column: usize }`. E.g., if the pair `(Position { row: 0, col: 8 }, Position { row: 10, col: 2 })` is in `eqs`, then the circuit is saying the entries in those two positions should be equal, or in other words that they refer to the same value. This is where the distinction between "eq-able" and "advice" columns comes in. The `column` field of a position in the `eqs` array can only refer to one of the first `W` columns. Equalities cannot be enforced on entries in the `A` columns after that. + +Then, given such a circuit, PLONK lets you produce proofs for the statement + +> I know `W + A` "column vectors" of field elements `vs: [Vec; W + A]` such that for each row index `i < n`, the constraint of type `cs[i]` holds on the values `[vs[0][i], ..., vs[W+A - 1][i], vs[0][i+1], ..., vs[W+A - 1][i+1]` and all the equalities in `eqs` hold. I.e., for `(p1, p2)` in `eqs` we have `vs[p1.col][p1.row] == vs[p2.col][p2.row]`. So, a constraint can check the values in two adjacent rows. + +## Specifying a constraint + +Mathematically speaking, a constraint is a multivariate polynomial over the variables $c_{\mathsf{Curr},i}, \dots, v_{\mathsf{Curr}, W+A-1}, v_{\mathsf{Next}, 0}, \dots, v_{\mathsf{Next}, W+A-1}$. In other words, there is one variable corresponding to the value of each column in the "current row" and one variable correspond to the value of each column in the "next row". + +In Rust, $v_{r, i}$ is written `E::cell(Column::Witness(i), r)`. So, for example, the variable $v_{\mathsf{Next}, 3}$ is written +`E::cell(Column::Witness(3), CurrOrNext::Next)`. + + + + let w = |i| v(Column::Witness(i)); +Let's + +## Defining a PLONK configuration + +The art in proof systems comes from knowing how to design a PLONK configuration to ensure maximal efficiency for the sorts of computations you are trying to prove. That is, how to choose the numbers of columns `W` and `A`, and how to define the set of constraint types. + +Let's describe the trade-offs involved here. 
The majority of the proving time for the PLONK prover is spent in
- committing to the `W + A` column polynomials, which have length equal to the number of rows `n`
- committing to the "permutation accumulator" polynomial, which has length `n`
- committing to the quotient polynomial, which reduces to computing `max(k, W)` MSMs of size `n`, where `k` is the max degree of a constraint
- performing the commitment opening proof, which is mostly dependent on the number of rows `n`

So all in all, the proving time is approximately equal to the time to perform `W + A + 1 + max(k - 1, W)` MSMs of size `n`, plus the cost of an opening proof for polynomials of degree `n - 1`. On top of this, one may count the cost of computing the combined constraint polynomial, which has degree `k * n`, where `k` is the maximum degree of a constraint.

- Increasing `W` and `A` increases proof size, and potentially impacts the prover time, as the prover must compute polynomial commitments to each column, and computing a polynomial commitment corresponds to doing one MSM (multi-scalar multiplication, also called a multi-exponentiation).

  However, often increasing the number of columns allows you to decrease the number of rows required for a given computation. For example, if you can perform one Poseidon hash in 36 rows with 5 total columns, then you can also perform it in 12 (= 36 / 3) rows with 15 (= 5 * 3) total columns.

  **Decreasing the number of rows (even while keeping the total number of table entries the same) is desirable because it reduces the cost of the polynomial commitment opening proof, which is dominated by a factor linear in the number of rows, and barely depends on the number of columns.**

  Increasing the number of columns also increases verifier time, as the verifier must perform one scalar-multiplication and one hash per column. Proof length is also affected by a larger number of columns, as more polynomials need to be committed and sent along to the verifier.

There is typically some interplay between these factors, as sketched in the cost-model example below.
diff --git a/book/src/rfcs/extended-lookup-tables.md b/book/src/kimchi/extended-lookup-tables.md
similarity index 99%
rename from book/src/rfcs/extended-lookup-tables.md
rename to book/src/kimchi/extended-lookup-tables.md
index ed8d29bf7c..a2205ddebd 100644
--- a/book/src/rfcs/extended-lookup-tables.md
+++ b/book/src/kimchi/extended-lookup-tables.md
@@ -1,6 +1,6 @@
-# RFC: Extended lookup tables
+# Extended lookup tables

-This RFC proposes an extension to our use of lookup tables using the PLOOKUP
+This (old) RFC proposes an extension to our use of lookup tables using the PLOOKUP
 multiset inclusion argument, so that values within lookup tables can be
 chosen after the constraint system for a circuit has been fixed.
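Returning to the cost model sketched in the trade-offs above, a rough way to compare configurations is to count size-`n` MSMs. The helper below is hypothetical and the sample numbers are invented purely for illustration; it only exercises the `W + A + 1 + max(k - 1, W)` approximation and the rows-vs-columns trade-off from the Poseidon example.

```rust
/// Rough number of size-`n` MSMs the prover performs, per the
/// approximation above: one per column (W + A), one for the
/// permutation accumulator, and max(k - 1, W) for the quotient.
fn msm_count(w: usize, a: usize, k: usize) -> usize {
    w + a + 1 + std::cmp::max(k - 1, w)
}

fn main() {
    // Invented numbers: a narrow circuit vs. a 3x wider one that
    // (per the Poseidon example above) needs 3x fewer rows.
    let narrow = (msm_count(3, 2, 5), 36_000); // (MSMs, rows n)
    let wide = (msm_count(9, 6, 5), 12_000);
    println!("narrow: {} MSMs of size {}", narrow.0, narrow.1);
    println!("wide:   {} MSMs of size {}", wide.0, wide.1);
    // Total scalar multiplications are roughly MSMs * n:
    println!("narrow work ~ {}", narrow.0 * narrow.1);
    println!("wide work   ~ {}", wide.0 * wide.1);
}
```

With these made-up numbers the wider circuit does more MSMs but each is a third of the size, which is the interplay the text describes.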
diff --git a/book/src/plonk/final_check.md b/book/src/kimchi/final_check.md
similarity index 100%
rename from book/src/plonk/final_check.md
rename to book/src/kimchi/final_check.md
diff --git a/book/src/rfcs/foreign_field_add.md b/book/src/kimchi/foreign_field_add.md
similarity index 100%
rename from book/src/rfcs/foreign_field_add.md
rename to book/src/kimchi/foreign_field_add.md
diff --git a/book/src/rfcs/foreign_field_mul.md b/book/src/kimchi/foreign_field_mul.md
similarity index 100%
rename from book/src/rfcs/foreign_field_mul.md
rename to book/src/kimchi/foreign_field_mul.md
diff --git a/book/src/kimchi/keccak.md b/book/src/kimchi/keccak.md
new file mode 100644
index 0000000000..9c00ad6d7c
--- /dev/null
+++ b/book/src/kimchi/keccak.md
@@ -0,0 +1,168 @@
# Keccak Gate

The Keccak gadget comprises 3 circuit gates (`Xor16`, `Rot64`, and `Zero`).

Keccak works with 64-bit words. The state is represented using a $5\times 5$ matrix
of 64-bit words. Each compression step of Keccak consists of 24 rounds. Let us
denote the state matrix with $A$ (indexing elements as $A[x,y]$), from which we derive
further states as follows in each round. Each round then consists of the following 5 steps:

$$
\begin{align}
C[x] &= A[x,0] \oplus A[x,1] \oplus A[x,2] \oplus A[x,3] \oplus A[x,4] \\
D[x] &= C[x-1] \oplus ROT(C[x+1],1) \\
E[x,y] &= A[x,y] \oplus D[x] \\
B[y,2x+3y] &= ROT(E[x,y],\rho[x,y]) \\
F[x,y] &= B[x,y] \oplus ((NOT\ B[x+1,y])\ AND\ B[x+2,y]) \\
Fp[0,0] &= F[0,0] \oplus RC
\end{align}
$$

for $0\leq x, y \leq 4$, where $\rho[x,y]$ is the rotation offset defined for Keccak.
The values are given in the table below, extracted from the Keccak reference
(the offsets are applied modulo the 64-bit word size):

| | x = 3 | x = 4 | x = 0 | x = 1 | x = 2 |
| ----- | ----- | ----- | ----- | ----- | ----- |
| y = 2 | 153 | 231 | 3 | 10 | 171 |
| y = 1 | 55 | 276 | 36 | 300 | 6 |
| y = 0 | 28 | 91 | 0 | 1 | 190 |
| y = 4 | 120 | 78 | 210 | 66 | 253 |
| y = 3 | 21 | 136 | 105 | 45 | 15 |

## Design Approach:

The atomic operations are XOR, ROT, NOT, AND. In the sections below, we will describe
the gates for these operations. Below are some common approaches followed in their design.

To fit within 15 wires, we first decompose each word into its lower and upper 32-bit
components. A gate for an atomic operation works with those 32-bit components at a time.

Before we describe the specific gate design approaches, below are some constraints in the
Kimchi framework that dictated those approaches.
* only 4 lookups per row
* only the first 7 columns are available to the permutation polynomial

## Rot64

It is clear from the definition of the rotation gate that its constraints are complete
(meaning that honest instances always satisfy the constraints). It is left to be proven
that the proposal is sound. In this section, we will give a proof that as soon as we
perform the range checks on the excess and shifted parts of the input, only one possible
assignment satisfies the constraints. This means that there is no dishonest instance that
can make the constraints pass. We will also give an example where one could find wrong
rotation witnesses that would satisfy the constraints if we did not check the range.

### Necessity of range checks

First of all, we will illustrate the necessity of range checks with a simple example.
For the sake of readability, we will use some toy field lengths. In particular, let us
assume that our words have 4 bits, meaning all of the elements between `0x0` and `0xF`.
Next, we will be using the native field $\mathbb{F}_{32}$.

As we will later see, this choice of field lengths is not enough to perform any 4-bit rotation, since the operations in the constraints would overflow the native field. Nonetheless, it will be sufficient for our example, where we will only rotate by 1 bit.

Assume we want to rotate the word `0b1101` (meaning 13) by 1 bit to the left. This gives us the rotated word `0b1011` (meaning 11). The excess part of the word is `0b1`, whereas the shifted part corresponds to `0b1010`. We recall the constraints for the rotation gate:

$$
\begin{align*}
word \cdot 2^{rot} &= excess \cdot 2^{len} + shifted \\
rotated &= excess + shifted
\end{align*}
$$

Applied to our example, this results in the following equations:

$$
\begin{align*}
13 \cdot 2 &= excess \cdot 16 + shifted \\
11 &= excess + shifted
\end{align*}
$$

We can easily check that the proposed shifted value `0b1010=10` and excess value `0b1=1` satisfy the above constraints, because $26 = 1 \cdot 16 + 10$ and $11 = 1 + 10$.
Now, the question is: _can we find another value for excess and shifted, such that their addition results in an incorrect rotated word?_

The answer to this question is yes, due to __diophantine equations__. We basically want to find $x,y$ such that $26 = x \cdot 16 + y (\text{ mod } 32)$. The solution to this equation is:

$$
\begin{align*}
\forall k \in [0 \ldots 31]: x &= k \ \land \\
y &= 26 - 16 \cdot k
\end{align*}
$$

We chose these word and field lengths to better understand the behaviour of the solution. Here, we can see two "classes" of evaluations.

- If we choose an even $k$, then $y$ will have the following shape:
  - $$26 - 16 \cdot (2 \cdot n) \iff 26 - 32n \equiv_{32} 26 $$
  - Meaning, if $x = 2n$ then $y = 26$.

- If, on the other hand, we choose an odd $k$, then $y$ will have the following shape instead:
  - $$26 - 16 \cdot (2 \cdot n + 1) \iff 26 - 32n - 16 \equiv_{32} 26 - 16 = 10$$
  - Meaning, if $x = 2n+1$ then $y = 10$.

Thus, possible solutions to the diophantine equation are:

$$
\begin{align*}
x &= 0, 1, 2, 3, 4, 5 \ldots \\
y &= 26, 10, 26, 10, 26, 10 \ldots
\end{align*}
$$

Note that our valid witness is part of that set of solutions, meaning $x=1$ and $y=10$. Of course, we can also find another dishonest instantiation such as $x=0$ and $y=26$. Perhaps one could think that we do not need to worry about this case, because the resulting rotation word would be $0+26=26$, and if we later use that result as an input to a subsequent gate such as XOR, the value $26$ would not fit and at some point the constraint system would complain. Nonetheless, we still have other solutions to worry about, such as $(x=3, y=10)$ or $(x=5, y=10)$, since they would result in a rotated word that would fit in the word length of 4 bits, yet would be incorrect (not equal to $11$).

All of the above incorrect solutions differ in one thing: they have different bit lengths. This means that we need to range check the values for the excess and shifted witnesses to make sure they have the correct length.
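Before moving on to the proof, the toy example above can be checked mechanically. The following sketch (with made-up helper names, working over the integers mod 32 as in the example) confirms that both the honest witness and a dishonest one satisfy the two rotation constraints, while only the honest one survives the range checks.

```rust
/// 4-bit toy version of the Rot64 constraints, with arithmetic
/// mod 32 as in the example above.
const MODULUS: u64 = 32;
const WORD_BITS: u32 = 4;

/// Check the two rotation constraints:
///   word * 2^rot = excess * 2^len + shifted   (mod 32)
///   rotated      = excess + shifted           (mod 32)
fn constraints_hold(word: u64, rot: u32, excess: u64, shifted: u64, rotated: u64) -> bool {
    let lhs = (word << rot) % MODULUS;
    let rhs = (excess * (1 << WORD_BITS) + shifted) % MODULUS;
    lhs == rhs && rotated % MODULUS == (excess + shifted) % MODULUS
}

/// The range checks that make the gate sound: the excess part has
/// `rot` bits and the shifted part has `len` (here 4) bits.
fn range_checks_hold(rot: u32, excess: u64, shifted: u64) -> bool {
    excess < (1 << rot) && shifted < (1 << WORD_BITS)
}

fn main() {
    let (word, rot) = (0b1101, 1); // rotate 13 left by 1 -> 11

    // Honest witness: excess = 1, shifted = 10, rotated = 11.
    assert!(constraints_hold(word, rot, 1, 10, 11));
    assert!(range_checks_hold(rot, 1, 10));

    // Dishonest witness from the text: x = 3, y = 10 gives a
    // "rotated" word of 13, which still satisfies the constraints...
    assert!(constraints_hold(word, rot, 3, 10, 13));
    // ...but fails the range check on the excess part (3 >= 2^1).
    assert!(!range_checks_hold(rot, 3, 10));
}
```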
### Sufficiency of range checks

In the following, we will give a proof that performing range checks for these values is not only necessary but also sufficient to prove that the rotation gate is sound. In other words, we will prove there are no two possible solutions of the decomposition constraint that have the correct bit lengths. Now, for the sake of robustness, we will consider 64-bit words and fields with at least twice the bit length of the words (as is our case).

We will proceed by __contradiction__. Suppose there are two different solutions to the following diophantine equation:

$$
\begin{align*}
\forall k \in \mathbb{F}_n: x &= k \ \land \\
y &= w \cdot 2^r - 2^{64} \cdot k
\end{align*}
$$

where $k$ is a parameter to instantiate the solutions, $w$ is the word to be rotated, $r$ is the rotation amount, and $n$ is the field length.

Then, that means that there are two different solutions, $(0\leq x=a<2^r, 0\leq y=b<2^{64})$ and $(0\leq x=a'<2^r, 0\leq y=b'<2^{64})$, with at least $a \neq a'$ or $b \neq b'$. We will show that this is impossible.

If both are solutions to the same equation, then:
$$
\begin{align*}
w \cdot 2^r &= a \cdot 2^{64} + b \\
w \cdot 2^r &= a'\cdot 2^{64} + b'
\end{align*}
$$
means that $a \cdot 2^{64} + b = a'\cdot 2^{64} + b'$. Moving terms to the left side, we have an equivalent equality: $2^{64}(a-a') + (b-b')=0 \mod{n}$. There are three cases to consider:

- $a = a'$ and $b \neq b'$: then $(b - b') \equiv_n 0$, and this can only happen if $b' = b + kn$. But since $n > 2^{64}$, $b'$ cannot be smaller than $2^{64}$ as was assumed. CONTRADICTION.

- $b = b'$ and $a \neq a'$: then $2^{64}(a - a') \equiv_n 0$, and this can only happen if $a' = a + kn$. But since $n > 2^r$, $a'$ cannot be smaller than $2^r$ as was assumed. CONTRADICTION.

- $a\neq a'$ and $b \neq b'$: then we have something like $2^{64} \alpha + \beta \equiv_n 0$.
    - This means $\beta \equiv_n -2^{64} \alpha = k \cdot n - 2^{64} \alpha$ for any $k$.
    - According to the assumption, both $0\leq a<2^r$ and $0\leq a'<2^r$. This means the difference $\alpha:=(a - a')$ lies anywhere in between the following interval:
    $$1 - 2^r \leq \alpha \leq 2^r - 1$$
    - We plug this interval into the above equation to obtain the following interval for $\beta$:
    $$k\cdot n - 2^{64}(1-2^r)\leq \beta \leq k\cdot n - 2^{64}(2^r - 1) $$
    - We look at this interval from both sides of the inequality: $\beta \geq kn - 2^{64} + 2^{64+r}$ and $\beta \leq kn + 2^{64} - 2^{64+r}$, and we wonder if $kn - 2^{64} + 2^{64+r} \leq kn + 2^{64} - 2^{64+r}$ is at all possible. We rewrite as follows:
    $$
    \begin{align*}
    2^{64+r} - 2^{64} &\leq 2^{64} - 2^{64+r}\\
    2\cdot2^{64+r} &\leq 2\cdot2^{64} \\
    2^{64+r} &\leq 2^{64}
    \end{align*}
    $$
    - But this can only happen if $r\leq 0$, which is impossible since we assume $0 < r < 64$. CONTRADICTION.
- This completes the proof.
diff --git a/book/src/kimchi/lookup.md b/book/src/kimchi/lookup.md
index 9989ac5557..a261672716 100644
--- a/book/src/kimchi/lookup.md
+++ b/book/src/kimchi/lookup.md
@@ -1,3 +1,318 @@
-## Lookup
-
-TO-DO
\ No newline at end of file
+# Plookup in Kimchi

In 2020, [plookup](https://eprint.iacr.org/2020/315.pdf) showed how to create lookup proofs, that is, proofs that some witness values are part of a [lookup table](https://en.wikipedia.org/wiki/Lookup_table). Two years later, an independent team published [plonkup](https://eprint.iacr.org/2022/086), showing how to integrate Plookup into Plonk.

This document specifies how we integrate plookup in kimchi. It assumes that the reader understands the basics behind plookup.
## Overview

We integrate plookup in kimchi with the following differences:

* we snake-ify the sorted table instead of wrapping it around (see later)
* we allow fixed-ahead-of-time linear combinations of columns of the queries we make
* we only use a single table (XOR) at the moment of this writing
* we allow several lookups (or queries) to be performed within the same row
* zero-knowledgeness is added in a specific way (see later)

The following document explains the protocol in more detail.

### Recap on the grand product argument of plookup

As per the Plookup paper, the prover will have to compute three vectors:

* $f$, the (secret) **query vector**, containing the witness values that the prover wants to prove are part of the lookup table.
* $t$, the (public) **lookup table**.
* $s$, the (secret) concatenation of $f$ and $t$, sorted by $t$ (where elements are listed in the order they are listed in $t$).

Essentially, plookup proves that all the elements in $f$ are indeed in the lookup table $t$ if and only if the following multisets are equal:

* $\{(1+\beta)f, \text{diff}(t)\}$
* $\text{diff}(\text{sorted}(f, t))$

where $\text{diff}$ is a new multiset derived by applying a "randomized difference" to every pair of successive elements of a vector. For example:

* $f = \{5, 4, 1, 5\}$
* $t = \{1, 4, 5\}$
* $\{\color{blue}{(1+\beta)f}, \color{green}{\text{diff}(t)}\} = \{\color{blue}{(1+\beta)5, (1+\beta)4, (1+\beta)1, (1+\beta)5}, \color{green}{1+\beta 4, 4+\beta 5}\}$
* $\text{diff}(\text{sorted}(f, t)) = \{1+\beta 1, 1+\beta 4, 4+\beta 4, 4+\beta 5, 5+\beta 5, 5+\beta 5\}$

> Note: This assumes that the lookup table is a single column. You will see in the next section how to address lookup tables with more than one column.

The equality between the multisets can be proved with the permutation argument of plonk, which would look like enforcing constraints on the following accumulator:

* init: $\mathsf{acc}_0 = 1$
* final: $\mathsf{acc}_n = 1$
* for every $0 < i \leq n$:
    $$
    \mathsf{acc}_i = \mathsf{acc}_{i-1} \cdot \frac{(\gamma + (1+\beta) f_{i-1}) \cdot (\gamma + t_{i-1} + \beta t_i)}{(\gamma + s_{i-1} + \beta s_{i})(\gamma + s_{n+i-1} + \beta s_{n+i})}
    $$

Note that the plookup paper uses a slightly different equation to make the proof work. It is possible that the proof would work with the above equation, but for simplicity let's just use the equation published in plookup:

$$
\mathsf{acc}_i = \mathsf{acc}_{i-1} \cdot \frac{(1+\beta) \cdot (\gamma + f_{i-1}) \cdot (\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})(\gamma(1+\beta) + s_{n+i-1} + \beta s_{n+i})}
$$

> Note: in plookup $s$ is longer than $n$ ($|s| = |f| + |t|$), and thus it needs to be split into multiple vectors to enforce the constraint at every $i \in [[0;n]]$. This leads to the two terms in the denominator as shown above, so that the degree of $\gamma (1 + \beta)$ is equal in the numerator and denominator.

### Lookup tables

Kimchi uses a single **lookup table** at the moment of this writing: the XOR table. The XOR table for values of 1 bit is the following:

| l | r | o |
| --- | --- | --- |
| 1 | 0 | 1 |
| 0 | 1 | 1 |
| 1 | 1 | 0 |
| 0 | 0 | 0 |

Kimchi, however, uses the XOR table for values of $4$ bits, which has $2^{8}$ entries.

Note: the $(0, 0, 0)$ **entry** is at the very end on purpose, as it will be used as the dummy entry for rows of the witness that don't care about lookups.
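The multiset identity from the recap above can be verified numerically on the running example ($f = \{5, 4, 1, 5\}$, $t = \{1, 4, 5\}$). The sketch below works modulo a small prime purely for illustration; the helpers are hypothetical and are not kimchi's actual field arithmetic.

```rust
/// Check the plookup multiset identity on the running example,
/// working modulo a small prime (illustration only).
const P: u128 = 1_000_000_007;

fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

/// "Query side": (1+beta)^|f| * prod(gamma + f_i), times the
/// "table side": prod(gamma*(1+beta) + t_i + beta*t_{i+1}).
fn lhs(f: &[u128], t: &[u128], beta: u128, gamma: u128) -> u128 {
    let mut acc = 1;
    for &fi in f {
        acc = mul(acc, mul(1 + beta, gamma + fi));
    }
    for w in t.windows(2) {
        acc = mul(acc, (mul(gamma, 1 + beta) + w[0] + mul(beta, w[1])) % P);
    }
    acc
}

/// Product over the sorted vector s = sorted(f, t).
fn rhs(s: &[u128], beta: u128, gamma: u128) -> u128 {
    let mut acc = 1;
    for w in s.windows(2) {
        acc = mul(acc, (mul(gamma, 1 + beta) + w[0] + mul(beta, w[1])) % P);
    }
    acc
}

fn main() {
    let f = [5, 4, 1, 5];
    let t = [1, 4, 5];
    let s = [1, 1, 4, 4, 5, 5, 5]; // f and t merged, sorted by t

    // Since every element of f is in t, the identity holds as a
    // polynomial identity, so any (beta, gamma) should satisfy it.
    for (beta, gamma) in [(7, 11), (123, 456), (31337, 271828)] {
        assert_eq!(lhs(&f, &t, beta, gamma), rhs(&s, beta, gamma));
    }
    println!("multiset identity holds on the example");
}
```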
### Querying the table

The plookup paper handles a vector of lookups $f$ which we do not have. So the first step is to create such a table from the witness columns (or registers). To do this, we define the following objects:

* a **query** tells us what registers, in what order, and scaled by how much, are part of a query
* a **query selector** tells us which rows are using the query. It is pretty much the same as a [gate selector]().

Let's go over the first item in this section.

For example, the following **query** tells us that we want to check if $r_0 \oplus r_2 = 2\cdot r_1$

| l | r | o |
| :---: | :---: | :---: |
| 1, $r_0$ | 1, $r_2$ | 2, $r_1$ |

The grand product argument for the lookup constraint will look like this at this point:

$$
\mathsf{acc}_i = \mathsf{acc}_{i-1} \cdot \frac{(1+\beta) \cdot {\color{green}(\gamma + w_0(g^i) + j \cdot w_2(g^i) + j^2 \cdot 2 \cdot w_1(g^i))} \cdot (\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})(\gamma(1+\beta) + s_{n+i-1} + \beta s_{n+i})}
$$

Not all rows need to perform queries into a lookup table. We will use a query selector in the next section to make the constraints work with this in mind.

### Query selector

The associated **query selector** tells us on which rows the query into the XOR lookup table occurs.

| row | query selector |
| :---: | :------------: |
| 0 | 1 |
| 1 | 0 |

Both the (XOR) lookup table and the query are built-ins in kimchi. The query selector is derived from the circuit at setup time. Currently only the ChaCha gates make use of the lookups.

With the selectors, the grand product argument for the lookup constraint has the following form:

$$
\mathsf{acc}_i = \mathsf{acc}_{i-1} \cdot \frac{(1+\beta) \cdot {\color{green}\mathsf{query}} \cdot (\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})}
$$

where $\color{green}{\mathsf{query}}$ is constructed so that a dummy query ($0 \oplus 0 = 0$) is used on rows that don't have a query.

$$
\begin{align}
\mathsf{query} := &\ \mathsf{selector} \cdot (\gamma + w_0(g^i) + j \cdot w_2(g^i) + j^2 \cdot 2 \cdot w_1(g^i)) + \\
&\ (1- \mathsf{selector}) \cdot (\gamma + 0 + j \cdot 0 + j^2 \cdot 0)
\end{align}
$$

### Supporting multiple queries

Since we would like to allow multiple table lookups per row, we define multiple **queries**, where each query is associated with a **lookup selector**.

At the moment of this writing, the `ChaCha` gates all perform $4$ queries in a row. Thus, $4$ is trivially the largest number of queries that happen in a row.

**Important**: to make constraints work, this means that each row must make $4$ queries. Potentially some or all of them are dummy queries.
For example, the `ChaCha0`, `ChaCha1`, and `ChaCha2` gates will jointly apply the following 4 XOR queries on the current and following rows:

| l | r | o | - | l | r | o | - | l | r | o | - | l | r | o |
| :---: | :---: | :----: | --- | :---: | :---: | :----: | --- | :---: | :---: | :----: | --- | :---: | :----: | :----: |
| 1, $r_3$ | 1, $r_7$ | 1, $r_{11}$ | - | 1, $r_4$ | 1, $r_8$ | 1, $r_{12}$ | - | 1, $r_5$ | 1, $r_9$ | 1, $r_{13}$ | - | 1, $r_6$ | 1, $r_{10}$ | 1, $r_{14}$ |

which you can understand as checking, for the current and following row, that

$$
\begin{align}
r_3 \oplus r_7 &= r_{11}\\
r_4 \oplus r_8 &= r_{12}\\
r_5 \oplus r_9 &= r_{13}\\
r_6 \oplus r_{10} &= r_{14}
\end{align}
$$

The `ChaChaFinal` gate also performs $4$ (somewhat similar) queries in the XOR lookup table. In total this is $8$ different queries that could be associated to $8$ selector polynomials.

### Grouping queries by query patterns

Associating each query with a selector polynomial is not necessarily efficient. To summarize:

* the `ChaCha0`, `ChaCha1`, and `ChaCha2` gates, which in total make $4$ queries into the XOR table
* the `ChaChaFinal` gate, which makes another $4$ different queries into the XOR table

Using the previous section's method, we'd have to use $8$ different lookup selector polynomials for each of the $8$ different queries. Since there are only $2$ use-cases, we can simply group them by **query patterns** to reduce the number of lookup selector polynomials to $2$.

The grand product argument for the lookup constraint looks like this now:

$$
\mathsf{acc}_i = \mathsf{acc}_{i-1} \cdot \frac{{\color{green}(1+\beta)^4 \cdot \mathsf{query}} \cdot (\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})\times \ldots}
$$

where $\color{green}{\mathsf{query}}$ is constructed as:

$$
\begin{align}
\mathsf{query} = &\ \mathsf{selector}_1 \cdot \mathsf{pattern}_1 + \\
&\ \mathsf{selector}_2 \cdot \mathsf{pattern}_2 + \\
&\ (1 - \mathsf{selector}_1 - \mathsf{selector}_2) \cdot (\gamma + 0 + j \cdot 0 + j^2 \cdot 0)^4
\end{align}
$$

where, for example, the first pattern for the `ChaCha0`, `ChaCha1`, and `ChaCha2` gates looks like this:

$$
\begin{align}
\mathsf{pattern}_1 = &\ (\gamma + w_3(g^i) + j \cdot w_7(g^i) + j^2 \cdot w_{11}(g^i)) \cdot \\
&\ (\gamma + w_4(g^i) + j \cdot w_8(g^i) + j^2 \cdot w_{12}(g^i)) \cdot \\
&\ (\gamma + w_5(g^i) + j \cdot w_9(g^i) + j^2 \cdot w_{13}(g^i)) \cdot \\
&\ (\gamma + w_6(g^i) + j \cdot w_{10}(g^i) + j^2 \cdot w_{14}(g^i))
\end{align}
$$

Note that there are now $4$ dummy queries, and they only appear when none of the lookup selectors are active.
If a pattern uses fewer than $4$ queries, it has to be padded with dummy queries as well.

Finally, note that the denominator of the grand product argument is incomplete in the formula above.
Since the numerator has degree $5$ in $\gamma (1 + \beta)$, the denominator must match too.
This is achieved by having a longer $s$, and referring to it $5$ times.
The denominator thus becomes $\prod_{k=1}^{5} (\gamma (1+\beta) + s_{kn+i-1} + \beta s_{kn+i})$.
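The selector in the formulas above acts as a multiplexer between a real combined query and the dummy query. The following sketch shows the simpler one-query version from the earlier section; the helper and its parameters are hypothetical, and arithmetic is done modulo a small prime for illustration only.

```rust
const P: u128 = 1_000_000_007; // illustrative modulus

/// One row's combined query: `selector` (0 or 1) multiplexes between
/// the real combined value gamma + w0 + j*w2 + j^2*(2*w1) and the
/// dummy query gamma + 0 + j*0 + j^2*0.
fn query(selector: u128, gamma: u128, j: u128, w0: u128, w1: u128, w2: u128) -> u128 {
    let combined = (gamma + w0 + j * w2 % P + j * j % P * (2 * w1) % P) % P;
    let dummy = gamma; // gamma + 0 + j*0 + j^2*0
    (selector * combined + (1 - selector) * dummy) % P
}

fn main() {
    // Active row: the query checking r0 XOR r2 = 2*r1 contributes.
    assert_ne!(query(1, 7, 3, 5, 6, 9), query(0, 7, 3, 5, 6, 9));
    // Inactive row: the contribution collapses to the dummy, gamma.
    assert_eq!(query(0, 7, 3, 5, 6, 9), 7);
}
```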
## Back to the grand product argument

There are two things that we haven't touched on:

* The vector $t$ representing the **combined lookup table** (after its columns have been combined with a joint combiner $j$). The **non-combined lookup table** is fixed at setup time and derived based on the lookup tables used in the circuit (for now only one, the XOR lookup table, can be used in the circuit).
* The vector $s$ representing the sorted multiset of both the queries and the lookup table. This is created by the prover and sent as a commitment to the verifier.

The first vector $t$ is quite straightforward to think about:

* if it is smaller than the domain (of size $n$), then we can repeat the last entry enough times to make the table of size $n$.
* if it is larger than the domain, then we can either increase the domain or split the vector in two (or more) vectors. This is most likely what we will have to do to support multiple lookup tables later.

What about the second vector $s$?

## The sorted vector $s$

We said earlier that in the original plookup the size of $s$ is equal to $|s| = |f|+|t|$, where $f$ encodes queries, and $t$ encodes the lookup table.
With our multi-query approach, the second vector $s$ is of the size

$$n \cdot \#\text{queries} + |\text{lookup\_table}|$$

That is, it contains the $n$ elements of each **query vector** (the actual values being looked up, after being combined with the joint combinator, that's $4$ per row), as well as the elements of our lookup table (after being combined as well).

Because the vector $s$ is larger than the domain size $n$, it is split into several vectors of size $n$. Specifically, in the plonkup paper, $s$ is split into two halves, which are then interpolated as $h_1$ and $h_2$.
The denominator of plookup, in vector form, is
$$
\big(\gamma(1+\beta) + s_{i-1} + \beta s_{i}\big)\big(\gamma(1+\beta)+s_{n+i-1} + \beta s_{n+i}\big)
$$
which, when interpolated into $h_1$ and $h_2$, becomes
$$
\big(\gamma(1+\beta) + h_1(x) + \beta h_1(g \cdot x)\big)\big(\gamma(1+\beta) + h_2(x) + \beta h_2(g \cdot x)\big)
$$

Since one has to compute the difference of every pair of contiguous elements, the last element of the first half is replicated as the first element of the second half ($s_{n-1} = s_{n}$).
Hence, a separate constraint must be added to enforce that continuity on the interpolated polynomials $h_1$ and $h_2$:

$$L_{n-1}(X)\cdot\big(h_1(X) - h_2(g \cdot X)\big) \equiv 0$$

which is equivalent to checking that $h_1(g^{n-1}) = h_2(1)$.

## The sorted vector $s$ in kimchi

Since this vector is known only by the prover, and is evaluated as part of the protocol, zero-knowledge must be added to the corresponding polynomials (in the case of the plookup approach, to $h_1(X), h_2(X)$). To do this in kimchi, we use the same technique as with the other prover polynomials: we randomize the last evaluations (or rows, on the domain) of the polynomial.

This means two things for the lookup grand product argument:

1. We cannot use the wrap-around trick to make sure that the list is split in two correctly (enforced by $L_{n-1}(h_1(x) - h_2(g \cdot x)) = 0$, which is equivalent to $h_1(g^{n-1}) = h_2(1)$ in the plookup paper)
2. We have even less space to store an entire query vector. This is actually consistent, as the witness also has some zero-knowledge rows at the end that should not be part of the queries anyway.

The first problem can be solved in two ways:

* **Zig-zag technique**. By reorganizing $s$ to alternate its values between the columns.
  For example, $h_1 = (s_0, s_2, s_4, \cdots)$ and $h_2 = (s_1, s_3, s_5, \cdots)$, so that you can simply write the denominator of the grand product argument as
  $$(\gamma(1+\beta) + h_1(x) + \beta h_2(x))(\gamma(1+\beta)+ h_2(x) + \beta h_1(x \cdot g))$$
  This approach is taken by the [plonkup](https://eprint.iacr.org/2022/086) paper.
* **Snake technique**. By reorganizing $s$ as a snake. This is what is currently implemented in kimchi.

The snake technique rearranges $s$ into the following shape:

```
         __    _
 s_0 | s_{2n-1} | | | |
 ... |   ...    | | | |
 s_{n-1} | s_n  | | | |
 ‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾  ‾‾  ‾
   h1      h2     h3 ...
```

Assuming that for now we have only one bend and two polynomials $h_1(X), h_2(X)$, the denominator has the following form:

$$\big(\gamma(1+\beta) + h_1(x) + \beta h_1(x \cdot g)\big)\big(\gamma(1+\beta)+ h_2(x \cdot g) + \beta h_2(x)\big)$$

and the snake doing a U-turn is constrained via $s_{n-1} = s_n$, enforced by the following equation:

$$L_{n-1}(X) \cdot (h_1(X) - h_2(X)) = 0$$

In practice, $s$ will have more sections than just two.
Assume that we have $k$ sections in total; then the denominator generalizes to

$$
\prod_{i=1}^k \Big(\gamma(1+\beta) + h_i(x \cdot g^{\delta_{0,\ i \text{ mod } 2}}) + \beta h_i(x \cdot g^{\delta_{1,\ i \text{ mod } 2}})\Big)
$$

where $\delta_{i,j}$ is the Kronecker delta: the exponent in the first term is $1$ when $i$ is even, the exponent in the second term is $1$ when $i$ is odd, and both are $0$ otherwise.

Similarly, the U-turn constraints now become

$$
\begin{align*}
L_{n-1}(X) \cdot (h_2(X) - h_1(X)) &\equiv 0\\
\color{green}L_{0}(X) \cdot (h_3(X) - h_2(X)) &\color{green}\equiv 0\\
\color{green}L_{n-1}(X) \cdot (h_4(X) - h_3(X)) &\color{green}\equiv 0\\
\ldots
\end{align*}
$$

In our concrete case with $4$ simultaneous lookups, the vector $s$ has to be split into $k = 5$ sections: each denominator term in the accumulator accounts for $4$ queries ($f$) and $1$ table consistency check ($t$).

## Unsorted $t$ in $s$

Note that at setup time, $t$ cannot be sorted lexicographically as it is not combined yet. Since $s$ must be sorted by $t$ (in other words, the sorting of $s$ must follow the order of the elements of $t$), there are two solutions:

1. Both the prover and the verifier can sort the combined $t$ lexicographically, so that $s$ can be sorted lexicographically too, using typical sorting algorithms.
2. The prover can directly sort $s$ by $t$, so that the verifier doesn't have to do any pre-sorting and can just rely on the commitment of the columns of $t$ (which the prover can evaluate in the protocol).

We take the second approach (see the sketch below).
However, this must be done carefully, since the combined $t$ entries can repeat. For some $i, l$ such that $i \neq l$, we might have

$$
t_0[i] + j \cdot t_1[i] + j^2 \cdot t_2[i] = t_0[l] + j \cdot t_1[l] + j^2 \cdot t_2[l]
$$

For example, if $f = \{1, 2, 2, 3\}$ and $t = \{2, 1, 2, 3\}$, then $\text{sorted}(f, t) = \{2, 2, 2, 1, 1, 2, 3, 3\}$ would be a way of correctly sorting the combined vector $s$. At the same time $\text{sorted}(f, t) = \{ 2, 2, 2, 2, 1, 1, 3, 3 \}$ is incorrect, since it does not have a second block of $2$s, and thus such an $s$ is not sorted by $t$.
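Here is what the prover-side "sort by $t$" from option 2 might look like. The helper below is hypothetical, not the kimchi implementation, but it reproduces the example above, including the second block of $2$s.

```rust
use std::collections::HashMap;

/// Sort the concatenation of `f` and `t` "by t": every element of
/// `t` is kept in table order, and all matching queries from `f`
/// are attached to the *first* occurrence of their value in `t`.
/// Hypothetical helper; not the actual kimchi code.
fn sort_by_table(f: &[u64], t: &[u64]) -> Vec<u64> {
    // Count how many times each value is queried.
    let mut counts: HashMap<u64, usize> = HashMap::new();
    for &v in f {
        *counts.entry(v).or_default() += 1;
    }
    let mut s = Vec::with_capacity(f.len() + t.len());
    for &v in t {
        s.push(v);
        // Attach the f-copies at the first occurrence only.
        if let Some(c) = counts.remove(&v) {
            s.extend(std::iter::repeat(v).take(c));
        }
    }
    assert!(counts.is_empty(), "some query is not in the table");
    s
}

fn main() {
    // The example from the text: note the second block of 2s.
    let s = sort_by_table(&[1, 2, 2, 3], &[2, 1, 2, 3]);
    assert_eq!(s, vec![2, 2, 2, 1, 1, 2, 3, 3]);
}
```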
## Recap

So to recap, to create the sorted polynomials $h_i$, the prover:

1. creates a large query vector which contains the concatenation of the $4$ per-row (combined with the joint combinator) queries (that might contain dummy queries) for all rows
2. creates the (combined with the joint combinator) table vector
3. sorts all of that into a big vector $s$
4. divides that vector $s$ into as many $h_i$ vectors as necessary, following the snake method
5. interpolates these $h_i$ vectors into $h_i$ polynomials
6. commits to them, and evaluates them as part of the protocol.
diff --git a/book/src/plonk/maller_15.md b/book/src/kimchi/maller_15.md
similarity index 100%
rename from book/src/plonk/maller_15.md
rename to book/src/kimchi/maller_15.md
diff --git a/book/src/kimchi/overview.md b/book/src/kimchi/overview.md
index b8ad73a78e..6ecf0f95e4 100644
--- a/book/src/kimchi/overview.md
+++ b/book/src/kimchi/overview.md
@@ -1,11 +1,11 @@
# Overview

Here we explain how the Kimchi protocol design is translated into the `proof-systems` repository, from a high level perspective, touching briefly on all the involved aspects of cryptography. The concepts that we will be introducing can be studied more thoroughly by accessing the specific sections in the book.

In brief, the Kimchi protocol requires three different types of arguments `Argument`:
- **Custom gates:** they correspond to each of the specific functions performed by the circuit, which are represented by gate constraints.
- **Permutation:** the equality between different cells is constrained by copy constraints, which are represented by a permutation argument. It represents the wiring between gates, the connections from/to inputs and outputs.
- **Lookup tables:** for efficiency reasons, some public information can be stored by both parties (prover and verifier) instead of wired in the circuit. Examples of these are boolean functions.

All of these arguments are translated into equations that must hold for a correct witness for the full relation. Equivalently, this is to say that a number of expressions need to evaluate to zero on a certain set of numbers. So there are two problems to tackle here:

@@ -24,31 +24,31 @@
$$q(X) := \frac{p(X)}{v_S(X)}$$

And still, where's the hype? If you can provide such a quotient polynomial, one could easily check that if $q(a) = p(a) / v_S(a)$ for a random number $a\in\mathbb{F}$ \ $S$ (recall you will check at a point outside the set, otherwise you would get a $0/0$), then with very high probability that would mean that actually $p(X) = q(X) \cdot v_S(X)$, meaning that $p(X)$ vanishes on the whole set $S$, with **just one point**!

Let's take a deeper look into the _"magic"_ going on here. First, what do we mean by _high probability_? Is this even good enough? And the answer to this question is: as good as you want it to be.

**First** we analyse the math in this check.
If the polynomial form of $p(X) = q(X) \cdot v_S(X)$ actually holds, then of course for any possible $a\in\mathbb{F}$ \ $S$ the check $p(a) =_? q(a) \cdot v_S(a)$ will hold. But is there any unlucky instantiation of the point $a$ such that $p(a) = q(a) \cdot v_S(a)$ but $p(X) \neq q(X) \cdot v_S(X)$? And the answer is, yes, there are, BUT not many. But how many? How unlikely is this? You already know the answer to this: **Schwartz-Zippel**. Recalling this lemma:

> Given two different polynomials $f(X)$ and $g(X)$ of degree $d$, they can at most intersect (i.e. _coincide_) in $d$ points. Or what's equivalent, let $h(X) := f(X) - g(X)$, the polynomial $h(X)$ can only evaluate to $0$ in at most $d$ points (its roots).

Thus, if we interchange $p(X) \rightarrow f(X)$ and $q(X)\cdot v_S(X) \rightarrow g(X)$, both of degree $d$, there are at most $d$ unlucky points $a$ that could trick you into thinking that $p(X)$ was a multiple of the vanishing polynomial (and thus being equal to zero on all of $S$); i.e., the probability of picking one is at most $\frac{d}{|\mathbb{F} \setminus S|}$. So, how can you make this error probability negligible? By having a field size that is big enough (the formal definition says that the inverse of its size should decrease faster than any polynomial expression). Since we are working with fields of size $2^{255}$, we are safe on this side!

**Second**, is this really faster than checking that $p(x)=0$ for all $x\in S$? At the end of the day, it seems like we need to evaluate $v_S(a)$, and since this is a degree $|S|$ polynomial it looks like we are still performing about the same order of computations. But here comes math again. _In practice_, we want to define this set $S$ to have a _nice structure_ that allows us to perform some computations more efficiently than with arbitrary sets of numbers. Indeed, this set will normally be a **multiplicative group** (normally represented as $\mathbb{G}$ or $\mathbb{H}$), because in such groups the vanishing polynomial $v_\mathbb{G}(X):=\prod_{\omega\in\mathbb{G}}(X-\omega)$ has an efficient representation $v_\mathbb{G}(X)=X^{|\mathbb{G}|}-1$, which is much faster to evaluate than the above product.
For example, in Kate-like commitments, committing to a polynomial takes a constant number of group elements (normally one), whereas in Bootleproof it is logarithmic. But in any case this will be shorter than sending $O(d)$ elements. +**Third**, we may want to understand what happens with the evaluation of $p(a)$ instead. Since this is a degree $d ≥ |\mathbb{G}|$, it may look like this will as well take a lot of effort. But here's where cryptography comes into play, since the verifier will _never_ get to evaluate the actual polynomial by themselves. Various reasons why. One, if the verifier had access to the full polynomial $p(X)$, then the prover should have sent it along with the proof, which would require $d+1$ coefficients to be represented (and this is no longer succinct for a SNARK). Two, this polynomial could carry some secret information, and if the verifier could recompute evaluations of it, they could learn some private data by evaluating on specific points. So instead, these evaluations will be a "mental game" thanks to **polynomial commitments** and **proofs of evaluation** sent by the prover (for whom a computation in the order of $d$ is not only acceptable, but necessary). The actual proof length will depend heavily on the type of polynomial commitments we are using. For example, in Kate-like commitments, committing to a polynomial takes a constant number of group elements (normally one), whereas in Bootleproof it is logarithmic. But in any case this will be shorter than sending $O(d)$ elements. ### Aggregation -So far we have seen how to check that a polynomial equals zero on all of $\mathbb{G}$, with just a single point. This is somehow an aggregation _per se_. But we are left to analyse how we can prove such a thing, for many polynomials. Altogether, if they hold, this will mean that the polynomials encode a correct witness and the relation would be satisfied. These checks can be performed one by one (checking that each of the quotients are indeed correct), or using an efficient aggregation mechanism and checking only **one longer equation at once**. +So far we have seen how to check that a polynomial equals zero on all of $\mathbb{G}$, with just a single point. This is somehow an aggregation _per se_. But we are left to analyse how we can prove such a thing, for many polynomials. Altogether, if they hold, this will mean that the polynomials encode a correct witness and the relation would be satisfied. These checks can be performed one by one (checking that each of the quotients are indeed correct), or using an efficient aggregation mechanism and checking only **one longer equation at once**. So what is the simplest way one could think of to perform this one-time check? Perhaps one could come up with the idea of adding up all of the equations $p_0(X),...,p_n(X)$ into a longer one $\sum_{i=0}^{n} p_i(X)$. But by doing this, we may be cancelling out terms and we could get an incorrect statemement. So instead, we can multiply each term in the sum by a random number. The reason why this trick works is the independence between random numbers. That is, if two different polynomials $f(X)$ and $g(X)$ are both equal to zero on a given $X=x$, then with very high probability the same $x$ will be a root of the random combination $\alpha\cdot f(x) + \beta\cdot g(x) = 0$. If applied to the whole statement, we could transform the $n$ equations into a single equation, -$$\bigwedge_{i_n} p_i(X) =_? 0 \iff_{w.h.p.} \sum_{i=0}^{n} \rho_i \cdot p_i(X) =_? 
+$$\bigwedge_{i_n} p_i(X) \stackrel{?}{=} 0 \text{\quad iff w.h.p. \quad} \sum_{i=0}^{n} \rho_i \cdot p_i(X) \stackrel{?}{=} 0$$

This sounds great so far. But we are forgetting about an important part of proof systems, which is proof length. For the above claim to be sound, the random values used for aggregation should be verifier-chosen, or at least prover-independent. So if the verifier had to communicate with the prover to inform it about the random values being used, we would get an overhead of $n$ field elements.

Instead, we take advantage of another technique that is called **powers-of-alpha**. Here, we make the assumption that powers of a random value $\alpha^i$ are indistinguishable from actual random values $\rho_i$. Then, we can twist the above claim to use only one random element $\alpha$ to be agreed with the prover as:

-$$\bigwedge_{i_n} p_i(X) =_? 0 \iff_{w.h.p.} \sum_{i=0}^{n} \alpha^i \cdot p_i(X) =_? 0 $$
+$$\bigwedge_{i_n} p_i(X) \stackrel{?}{=} 0 \text{\quad iff w.h.p. \quad} \sum_{i=0}^{n} \alpha^i \cdot p_i(X) \stackrel{?}{=} 0$$
diff --git a/book/src/kimchi/permut.md b/book/src/kimchi/permut.md
index 3d62293eae..cc82360ef6 100644
--- a/book/src/kimchi/permut.md
+++ b/book/src/kimchi/permut.md
@@ -1,4 +1 @@
-## Permutation
-
-TO-DO
-
+# Permutation
diff --git a/book/src/kimchi/zkpm.md b/book/src/kimchi/zkpm.md
new file mode 100644
index 0000000000..947a334538
--- /dev/null
+++ b/book/src/kimchi/zkpm.md
@@ -0,0 +1 @@
+# Zero-Column Approach to Zero-Knowledge
diff --git a/book/src/pickles/passthrough.md b/book/src/pickles/passthrough.md
deleted file mode 100644
index cc182c4e51..0000000000
--- a/book/src/pickles/passthrough.md
+++ /dev/null
@@ -1 +0,0 @@
-# Passthrough and Me-Only
diff --git a/book/src/plonk/fiat_shamir.md b/book/src/plonk/fiat_shamir.md
index fd486f6aba..f1e2b264ef 100644
--- a/book/src/plonk/fiat_shamir.md
+++ b/book/src/plonk/fiat_shamir.md
@@ -1,4 +1,4 @@
-# non-interaction with fiat-shamir
+# Non-Interactivity via Fiat-Shamir

So far we've talked about an interactive protocol between a prover and a verifier. The zero-knowledge proof was also in the honest verifier zero-knowledge (HVZK) model, which is problematic.

@@ -15,7 +15,7 @@ This is important as our technique to transform an interactive protocol to a non-interactive one

The whole idea is to replace the verifier by a random oracle, which in practice is a hash function. Note that by doing this, we remove potential leaks that can happen when the verifier acts dishonestly.

-Initially the Fiat-Shamir transformation was only applied to sigma protocols, named after the greek letter $\Sigma$ due to its shape resembling the direction of messages (prover sends a commit to a verifier, verifier sends a challenge to a prover, prover sends the final proof to a verifier).
+Initially the Fiat-Shamir transformation was only applied to sigma protocols, named after the Greek letter $\Sigma$ due to its shape resembling the direction of messages (prover sends a commit to a verifier, verifier sends a challenge to a prover, prover sends the final proof to a verifier). A $Z$ would have made more sense but here we are.
## Generalization of Fiat-Shamir

@@ -27,6 +27,6 @@ This is simple: every verifier move can be replaced by a hash of the transcript

While we use a hash function for that, a different construction called the [duplex construction](https://keccak.team/sponge_duplex.html) is particularly useful in such situations, as it allows one to continuously absorb the transcript and produce challenges, while automatically authenticating the fact that a challenge was produced.

[Merlin](https://merlin.cool/) is a standardization of such a construction using the [Strobe protocol framework](https://strobe.sourceforge.io/) (a framework to make use of a duplex construction).
Note that the more recent [Xoodyak](https://keccak.team/xoodyak.html) (part of NIST's lightweight competition) could have been used for this as well.
Note also that Mina uses none of these standards, instead it simply uses Poseidon (see section on poseidon).
diff --git a/book/src/plonk/glossary.md b/book/src/plonk/glossary.md
index 4880894fb4..6769ac5f83 100644
--- a/book/src/plonk/glossary.md
+++ b/book/src/plonk/glossary.md
@@ -1,9 +1,9 @@
# Glossary

-* size = number of rows
-* columns = number of variables per rows
-* cell = a pair (row, column)
-* witness = the values assigned in all the cells
-* gate = polynomials that act on the variables in a row
-* selector vector = a vector of values 1 or 0 (not true for constant vector I think) that toggles gates and variables in a row
-* gadget = a series of contiguous rows with some specific gates set (via selector vectors)
+* Size: number of rows
+* Columns: number of variables per row
+* Cell: a pair (row, column)
+* Witness: the values assigned in all the cells
+* Gate: polynomials that act on the variables in a row
+* Selector vector: a vector of values 1 or 0 that toggles gates and variables in a row (the constant selector is an exception, as it may hold arbitrary values)
+* Gadget: a series of contiguous rows with some specific gates set (via selector vectors)
diff --git a/book/src/plonk/zkpm.md b/book/src/plonk/zkpm.md
index a4a1ba291c..84f46f3ec0 100644
--- a/book/src/plonk/zkpm.md
+++ b/book/src/plonk/zkpm.md
@@ -19,7 +19,9 @@ Let $f(X)$ be an interpolation polynomial of degree $n-1$ such that $f(h_i) = v_

**Proof sketch.** Recall that the interpolation polynomial is

-$f(X) = \sum_{j = 1}^n \prod_{k \neq j} \frac{(X-h_k)}{(h_j-h_k)} v_j$
+$$
+f(X) = \sum_{j = 1}^n \prod_{k \neq j} \frac{(X-h_k)}{(h_j-h_k)} v_j
+$$

With $V_{w+1}, \ldots, V_{w+k}$ as random variables, we have $f(X) = a_{w+1} V_{w+1} + a_{w+2} V_{w+2} + \ldots + a_{w+k} V_{w+k} + a$

@@ -32,16 +34,22 @@ The idea here is to set the last $k$ evaluations to be uniformly random elements

**Modified permutation polynomial.** Specifically, set $z(X)$ as follows.

-$z(X) = L_1(X) + \sum_{i = 1}^{\blue{n-k-2}} \left(L_{i+1} \prod_{j=1}^i \mathsf{frac}_{i,j} \right) + \blue{t_1 L_{n-k}(X) + \ldots + t_k L_{n}(X) }$
+$$
+z(X) = L_1(X) + \sum_{i = 1}^{\blue{n-k-2}} \left(L_{i+1} \prod_{j=1}^i \mathsf{frac}_{i,j} \right) + \blue{t_1 L_{n-k}(X) + \ldots + t_k L_{n}(X) }
+$$

From Lemma 1, the above $z(X)$ has the desired zero-knowledge property when $k$ evaluations are revealed.
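To make the "randomize the last $k$ evaluations" idea concrete, here is a toy sketch. The helper names are hypothetical, and the tiny LCG is emphatically not a secure randomness source; it only stands in for the cryptographic sampling the real prover would perform.

```rust
/// Toy sketch: hide a column by replacing its last `k` evaluations
/// with random "field" elements, as in the zero-knowledge idea above.
const P: u64 = 1_000_000_007; // illustrative modulus

/// NOT a secure RNG; a stand-in for real cryptographic randomness.
fn lcg(state: &mut u64) -> u64 {
    *state = state
        .wrapping_mul(6364136223846793005)
        .wrapping_add(1442695040888963407);
    *state % P
}

/// Overwrite the last `k` evaluations of a column with random values.
fn randomize_tail(column: &mut [u64], k: usize, seed: u64) {
    let mut state = seed;
    let n = column.len();
    for v in &mut column[n - k..] {
        *v = lcg(&mut state);
    }
}

fn main() {
    // A column of n = 8 evaluations; the last k = 2 get blinded.
    let mut column = vec![3, 1, 4, 1, 5, 9, 2, 6];
    randomize_tail(&mut column, 2, 42);
    println!("{column:?}"); // the first 6 entries are unchanged
}
```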
However, we need to modify the other parts of the protocol so that the last $k$ elements are not subject to the permutation evaluation, since they will no longer satisfy the permutation check. Specifically, we will need to modify the permutation polynomial to disregard those random elements, as follows.

-$ \begin{aligned} & t(X) = \\
- & (a(X)b(X)q_M(X) + a(X)q_L(X) + b(X)q_R(X) + c(X)q_O(X) + PI(X) + q_C(X)) \frac{1}{z_H(X)} \\
- &+ ((a(X) + \beta X + \gamma)(b(X) + \beta k_1 X + \gamma)(c(X) + \beta k_2X + \gamma)z(X) \blue{(X-h_{n-k}) \ldots (X-h_{n-1})(X-h_n)} ) \frac{\alpha}{z_{H}(X)} \\
- & - ((a(X) + \beta S_{\sigma1}(X) + \gamma)(b(X) + \beta S_{\sigma2}(X) + \gamma)(c(X) + \beta S_{\sigma3}(X) + \gamma)z(X\omega) \blue{(X-h_{n-k}) \ldots (X-h_{n-1})(X-h_n)}) \frac{\alpha}{z_{H}(X)} \\
- & + (z(X)-1)L_1(X) \frac{\alpha^2}{z_H(X)} \\
- & + \blue{(z(X)-1)L_{n-k}(X) \frac{\alpha^3}{z_H(X)} } \end{aligned} $
+$$
+\begin{aligned} & t(X) = \\
+ & \Big(a(X)b(X)q_M(X) + a(X)q_L(X) + b(X)q_R(X) + c(X)q_O(X) + PI(X) + q_C(X)\Big) \frac{1}{z_H(X)} \\
+ &+ \Big((a(X) + \beta X + \gamma)(b(X) + \beta k_1 X + \gamma)(c(X) + \beta k_2X + \gamma)z(X)\\
+ &\qquad\qquad\qquad\times{\blue{(X-h_{n-k}) \ldots (X-h_{n-1})(X-h_n)}} \Big) \frac{\alpha}{z_{H}(X)} \\
+ & - \Big((a(X) + \beta S_{\sigma1}(X) + \gamma)(b(X) + \beta S_{\sigma2}(X) + \gamma)(c(X) + \beta S_{\sigma3}(X) + \gamma)z(X\omega)\\
+ &\qquad\qquad\qquad\times{\blue{(X-h_{n-k}) \ldots (X-h_{n-1})(X-h_n)}}\Big) \frac{\alpha}{z_{H}(X)} \\
+ & + \Big(z(X)-1\Big)\cdot L_1(X) \frac{\alpha^2}{z_H(X)} \\
+ & + \blue{\Big(z(X)-1\Big)\cdot L_{n-k}(X) \frac{\alpha^3}{z_H(X)} } \end{aligned}
+$$

**Modified permutation checks.** To recall, the permutation check was originally as follows. For all $h \in H$,

@@ -50,7 +58,6 @@
= Z(\omega h)[(a(h) + \beta S_{\sigma1}(h) + \gamma)(b(h) + \beta S_{\sigma2}(h) + \gamma)(c(h) + \beta S_{\sigma3}(h) + \gamma)]$

-
The modified permutation checks, which ensure that the check is performed on all the values except the last $k$ elements of the witness polynomials, are as follows.

* For all $h \in H$, $L_1(h)(Z(h) - 1) = 0$
diff --git a/book/src/rfcs/3-lookup.md b/book/src/rfcs/3-lookup.md
deleted file mode 100644
index 6f256e6a6f..0000000000
--- a/book/src/rfcs/3-lookup.md
+++ /dev/null
@@ -1,291 +0,0 @@
-# RFC: Plookup in kimchi
-
-In 2020, [plookup](https://eprint.iacr.org/2020/315.pdf) showed how to create lookup proofs. Proofs that some witness values are part of a [lookup table](https://en.wikipedia.org/wiki/Lookup_table). Two years later, an independent team published [plonkup](https://eprint.iacr.org/2022/086) showing how to integrate Plookup into Plonk.
-
-This document specifies how we integrate plookup in kimchi. It assumes that the reader understands the basics behind plookup.
- -## Overview - -We integrate plookup in kimchi with the following differences: - -* we snake-ify the sorted table instead of wrapping it around (see later) -* we allow fixed-ahead-of-time linear combinations of columns of the queries we make -* we only use a single table (XOR) at the moment of this writing -* we allow several lookups (or queries) to be performed within the same row -* zero-knowledgeness is added in a specific way (see later) - -The following document explains the protocol in more detail - -### Recap on the grand product argument of plookup - -As per the Plookup paper, the prover will have to compute three vectors: - -* $f$, the (secret) **query vector**, containing the witness values that the prover wants to prove are part of the lookup table. -* $t$, the (public) **lookup table**. -* $s$, the (secret) concatenation of $f$ and $t$, sorted by $t$ (where elements are listed in the order they are listed in $t$). - -Essentially, plookup proves that all the elements in $f$ are indeed in the lookup table $t$ if and only if the following multisets are equal: - -* $\{(1+\beta)f, \text{diff}(t)\}$ -* $\text{diff}(\text{sorted}(f, t))$ - -where $\text{diff}$ is a new set derived by applying a "randomized difference" between every successive pairs of a vector. For example: - -* $f = \{5, 4, 1, 5\}$ -* $t = \{1, 4, 5\}$ -* $\{\color{blue}{(1+\beta)f}, \color{green}{\text{diff}(t)}\} = \{\color{blue}{(1+\beta)5, (1+\beta)4, (1+\beta)1, (1+\beta)5}, \color{green}{1+\beta 4, 4+\beta 5}\}$ -* $\text{diff}(\text{sorted}(f, t)) = \{1+\beta 1, 1+\beta 4, 4+\beta 4, 4+\beta 5, 5+\beta 5, 5+\beta 5\}$ - -> Note: This assumes that the lookup table is a single column. You will see in the next section how to address lookup tables with more than one column. - -The equality between the multisets can be proved with the permutation argument of plonk, which would look like enforcing constraints on the following accumulator: - -* init: $acc_0 = 1$ -* final: $acc_n = 1$ -* for every $0 < i \leq n$: - $$ - acc_i = acc_{i-1} \cdot \frac{(\gamma + (1+\beta) f_{i-1})(\gamma + t_{i-1} + \beta t_i)}{(\gamma + s_{i-1} + \beta s_{i})} - $$ - -Note that the plookup paper uses a slightly different equation to make the proof work. I believe the proof would work with the above equation, but for simplicity let's just use the equation published in plookup: - -$$ -acc_i = acc_{i-1} \cdot \frac{(1+\beta)(\gamma + f_{i-1})(\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})} -$$ - -> Note: in plookup $s$ is too large, and so needs to be split into multiple vectors to enforce the constraint at every $i \in [[0;n]]$. We ignore this for now. - -### Lookup tables - -Kimchi uses a single **lookup table** at the moment of this writing; the XOR table. The XOR table for values of 1 bit is the following: - - -| l | r | o | -| --- | --- | --- | -| 1 | 0 | 1 | -| 0 | 1 | 1 | -| 1 | 1 | 0 | -| 0 | 0 | 0 | - -Whereas kimchi uses the XOR table for values of 4 bits, which has $2^{8}$ entries. - -Note: the (0, 0, 0) **entry** is at the very end on purpose (as it will be used as dummy entry for rows of the witness that don't care about lookups). - -### Querying the table - -The plookup paper handles a vector of lookups $f$ which we do not have. So the first step is to create such a table from the witness columns (or registers). 
To do this, we define the following objects: - -* a **query** tells us what registers, in what order, and scaled by how much, are part of a query -* a **query selector** tells us which rows are using the query. It is pretty much the same as a [gate selector](). - -Let's go over the first item in this section. - -For example, the following **query** tells us that we want to check if $r_0 \oplus r_2 = 2r_1$ - -| l | r | o | -| :---: | :---: | :---: | -| 1, r0 | 1, r2 | 2, r1 | - -The grand product argument for the lookup consraint will look like this at this point: - -$$ -acc_i = acc_{i-1} \cdot \frac{\color{green}{(1+\beta)(\gamma + w_0(g^i) + j \cdot w_2(g^i) + j^2 \cdot 2 \cdot w_1(g^i))}(\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})} -$$ - -Not all rows need to perform queries into a lookup table. We will use a query selector in the next section to make the constraints work with this in mind. - -### Query selector - -The associated **query selector** tells us on which rows the query into the XOR lookup table occurs. - -| row | query selector | -| :---: | :------------: | -| 0 | 1 | -| 1 | 0 | - - -Both the (XOR) lookup table and the query are built-ins in kimchi. The query selector is derived from the circuit at setup time. Currently only the ChaCha gates make use of the lookups. - -The grand product argument for the lookup constraint looks like this now: - -$$ -acc_i = acc_{i-1} \cdot \frac{\color{green}{(1+\beta) \cdot query} \cdot (\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})} -$$ - -where $\color{green}{query}$ is constructed so that a dummy query ($0 \oplus 0 = 0$) is used on rows that don't have a query. - -$$ -\begin{align} -query = &\ selector \cdot (\gamma + w_0(g^i) + j \cdot w_2(g^i) + j^2 \cdot 2 \cdot w_1(g^i)) + \\ -&\ (1- selector) \cdot (\gamma + 0 + j \cdot 0 + j^2 \cdot 0) -\end{align} -$$ - -### Queries, not query - -Since we allow multiple queries per row, we define multiple **queries**, where each query is associated with a **lookup selector**. - -At the moment of this writing, the `ChaCha` gates all perform $4$ queries in a row. Thus, $4$ is trivially the largest number of queries that happen in a row. - -**Important**: to make constraints work, this means that each row must make 4 queries. (Potentially some or all of them are dummy queries.) - -For example, the `ChaCha0`, `ChaCha1`, and `ChaCha2` gates will apply the following 4 XOR queries on the current and following rows: - -| l | r | o | - | l | r | o | - | l | r | o | - | l | r | o | -| :---: | :---: | :----: | --- | :---: | :---: | :----: | --- | :---: | :---: | :----: | --- | :---: | :----: | :----: | -| 1, r3 | 1, r7 | 1, r11 | - | 1, r4 | 1, r8 | 1, r12 | - | 1, r5 | 1, r9 | 1, r13 | - | 1, r6 | 1, r10 | 1, r14 | - -which you can understand as checking for the current and following row that - -* $r_3 \oplus r7 = r_{11}$ -* $r_4 \oplus r8 = r_{12}$ -* $r_5 \oplus r9 = r_{13}$ -* $r_6 \oplus r10 = r_{14}$ - -The `ChaChaFinal` also performs $4$ (somewhat similar) queries in the XOR lookup table. In total this is 8 different queries that could be associated to 8 selector polynomials. - -### Grouping queries by queries pattern - -Associating each query with a selector polynomial is not necessarily efficient. 
To summarize: - -* the `ChaCha0`, `ChaCha1`, and `ChaCha2` gates that make $4$ queries into the XOR table -* the `ChaChaFinal` gate makes $4$ different queries into the XOR table - -Using the previous section's method, we'd have to use $8$ different lookup selector polynomials for each of the different $8$ queries. Since there's only $2$ use-cases, we can simply group them by **queries patterns** to reduce the number of lookup selector polynomials to $2$. - -The grand product argument for the lookup constraint looks like this now: - -$$ -acc_i = acc_{i-1} \cdot \frac{\color{green}{(1+\beta)^4 \cdot query} \cdot (\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})} -$$ - -where $\color{green}{query}$ is constructed as: - -$$ -\begin{align} -query = &\ selector_1 \cdot pattern_1 + \\ -&\ selector_2 \cdot pattern_2 + \\ -&\ (1 - selector_1 - selector_2) \cdot (\gamma + 0 + j \cdot 0 + j^2 \cdot 0)^4 -\end{align} -$$ - -where, for example the first pattern for the `ChaCha0`, `ChaCha1`, and `ChaCha2` gates looks like this: - -$$ -\begin{align} -pattern_1 = &\ (\gamma + w_3(g^i) + j \cdot w_7(g^i) + j^2 \cdot w_{11}(g^i)) \cdot \\ -&\ (\gamma + w_4(g^i) + j \cdot w_8(g^i) + j^2 \cdot w_{12}(g^i)) \cdot \\ -&\ (\gamma + w_5(g^i) + j \cdot w_9(g^i) + j^2 \cdot w_{13}(g^i)) \cdot \\ -&\ (\gamma + w_6(g^i) + j \cdot w_{10}(g^i) + j^2 \cdot w_{14}(g^i)) \cdot \\ -\end{align} -$$ - -Note: - -* there's now 4 dummy queries, and they only appear when none of the lookup selectors are active -* if a pattern uses less than 4 queries, they'd have to pad their queries with dummy queries as well - -## Back to the grand product argument - -There are two things that we haven't touched on: - -* The vector $t$ representing the **combined lookup table** (after its columns have been combined with a joint combiner $j$). The **non-combined loookup table** is fixed at setup time and derived based on the lookup tables used in the circuit (for now only one, the XOR lookup table, can be used in the circuit). -* The vector $s$ representing the sorted multiset of both the queries and the lookup table. This is created by the prover and sent as commitment to the verifier. - -The first vector $t$ is quite straightforward to think about: - -* if it is smaller than the domain (of size $n$), then we can repeat the last entry enough times to make the table of size $n$. -* if it is larger than the domain, then we can either increase the domain or split the vector in two (or more) vectors. This is most likely what we will have to do to support multiple lookup tables later. - -What about the second vector? - -## The sorted vector $s$ - -The second vector $s$ is of size - -$$n \cdot |\text{queries}| + |\text{lookup\_table}|$$ - -That is, it contains the $n$ elements of each **query vectors** (the actual values being looked up, after being combined with the joint combinator, that's $4$ per row), as well as the elements of our lookup table (after being combined as well). - -Because the vector $s$ is larger than the domain size $n$, it is split into several vectors of size $n$. Specifically, in the plonkup paper, the two halves of $s$ (which are then interpolated as $h_1$ and $h_2$). 
-Specifically, in the plookup paper, $s$ is split into two halves (which are then interpolated as $h_1$ and $h_2$), and the grand product argument is adapted to take both halves into account:
-
-$$
-acc_i = acc_{i-1} \cdot \frac{\color{green}{(1+\beta)^4 \cdot query} \cdot (\gamma(1 + \beta) + t_{i-1} + \beta t_i)}{(\gamma(1+\beta) + s_{i-1} + \beta s_{i})(\gamma(1+\beta)+s_{n+i-1} + \beta s_{n+i})}
-$$
-
-Since you must compute the difference of every contiguous pair, the last element of the first half is replicated as the first element of the second half ($s_{n-1} = s_{n}$), and a separate constraint enforces that continuity on the interpolated polynomials $h_1$ and $h_2$:
-
-$$L_{n-1}(h_1(x) - h_2(g \cdot x)) = 0$$
-
-which is equivalent to checking that
-
-$$h_1(g^{n-1}) = h_2(1)$$
-
-## The sorted vector $s$ in kimchi
-
-Since this vector is known only to the prover, and is evaluated as part of the protocol, zero-knowledge must be added to the polynomial. To do this in kimchi, we use the same technique as with the other prover polynomials: we randomize the last evaluations (or rows, on the domain) of the polynomial.
-
-This means two things for the lookup grand product argument:
-
-1. we cannot use the wrap-around trick to make sure that the list is split in two correctly (enforced by $L_{n-1}(h_1(x) - h_2(g \cdot x)) = 0$, which is equivalent to $h_1(g^{n-1}) = h_2(1)$ in the plookup paper)
-2. we have even less space to store an entire query vector; this is actually fine, as the witness also has some zero-knowledge rows at the end that should not be part of the queries anyway
-
-The first problem can be solved in two ways:
-
-* **Zig-zag technique**. By reorganizing $s$ to alternate its values between the columns. For example, $h_1 = (s_0, s_2, s_4, \cdots)$ and $h_2 = (s_1, s_3, s_5, \cdots)$, so that you can simply write the denominator of the grand product argument as
-  $$(\gamma(1+\beta) + h_1(x) + \beta h_2(x))(\gamma(1+\beta)+ h_2(x) + \beta h_1(x \cdot g))$$
-  This is what the [plonkup](https://eprint.iacr.org/2022/086) paper does.
-* **Snake technique**. By reorganizing $s$ as a snake. This is what is done in kimchi currently.
-
-The snake technique rearranges $s$ into the following shape:
-
-```
- _ _
-| | | | |
-| | | | |
-|_| |_| |
-```
-
-so that the denominator becomes the following equation:
-
-$$(\gamma(1+\beta) + h_1(x) + \beta h_1(x \cdot g))(\gamma(1+\beta)+ h_2(x \cdot g) + \beta h_2(x))$$
-
-and the snake doing a U-turn is constrained via something like
-
-$$L_{n-1} \cdot (h_1(x) - h_2(x)) = 0$$
-
-If there's an $h_3$ (because the table is very large, for example), then you'd have something like:
-
-$$(\gamma(1+\beta) + h_1(x) + \beta h_1(x \cdot g))(\gamma(1+\beta)+ h_2(x \cdot g) + \beta h_2(x))\color{green}{(\gamma(1+\beta)+ h_3(x) + \beta h_3(x \cdot g))}$$
-
-with the added U-turn constraint:
-
-$$L_{0} \cdot (h_2(x) - h_3(x)) = 0$$
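The snake layout can be sketched as follows. This is an illustrative reading of the diagram above, not kimchi's actual code: even-indexed chunks are laid out forward, odd-indexed chunks are reversed, and consecutive chunks repeat their shared boundary element, which is exactly what the U-turn constraints check.

```rust
/// Sketch: lay out `s` as a snake of chunks of size at most `n`.
/// Chunk 0 reads forward, chunk 1 backward, chunk 2 forward, etc.,
/// and each chunk starts on the last element of the previous one,
/// so that h1[n-1] = h2[n-1], h2[0] = h3[0], and so on.
fn snake_chunks<F: Clone>(s: &[F], n: usize) -> Vec<Vec<F>> {
    let mut chunks = Vec::new();
    let mut start = 0;
    let mut forward = true;
    while start < s.len() {
        let end = usize::min(start + n, s.len());
        let mut chunk = s[start..end].to_vec();
        if !forward {
            chunk.reverse();
        }
        chunks.push(chunk);
        if end == s.len() {
            break;
        }
        start = end - 1; // share the boundary element with the next chunk
        forward = !forward;
    }
    chunks
}
```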
-
-## Unsorted $t$ in $s$
-
-Note that at setup time, $t$ cannot be sorted as it is not combined yet. Since $s$ needs to be sorted by $t$ (in other words, not sorted, but sorted following the elements of $t$), there are two solutions:
-
-1. both the prover and the verifier can sort the combined $t$, so that $s$ can be sorted via the typical sorting algorithms
-2. the prover can sort $s$ by $t$, so that the verifier doesn't have to do any sorting and can just rely on the commitments to the columns of $t$ (which the prover can evaluate in the protocol).
-
-We do the second one, but there is an edge case: the combined $t$ entries can repeat. For some $i, l$ such that $i \neq l$, we might have
-
-$$
-t_0[i] + j t_1[i] + j^2 t_2[i] = t_0[l] + j t_1[l] + j^2 t_2[l]
-$$
-
-For example, if $f = \{1, 2, 2, 3\}$ and $t = \{2, 1, 2, 3\}$, then $\text{sorted}(f, t) = \{2, 2, 2, 1, 1, 2, 3, 3\}$ would be one way of sorting things out. But $\text{sorted}(f, t) = \{ 2, 2, 2, 2, 1, 1, 3, 3 \}$ would be incorrect, as it merges the two occurrences of $2$ in $t$ instead of following $t$'s order.
-
-## Recap
-
-So to recap, to create the sorted polynomials $h_i$, the prover:
-
-1. creates a large query vector which contains the concatenation of the $4$ per-row (combined with the joint combiner) queries (which might contain dummy queries) for all rows
-2. creates the (combined with the joint combiner) table vector
-3. sorts all of that into a big vector $s$
-4. divides that vector $s$ into as many $h_i$ vectors as necessary, following the snake method
-5. interpolates these $h_i$ vectors into $h_i$ polynomials
-6. commits to them, and evaluates them as part of the protocol.
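Step 3 is the subtle one. Here is a toy sketch, over `u64` values rather than combined field elements, of sorting $s$ "by $t$"; it reproduces the example from the previous section. The function name and the strategy of attaching all matching queries to the first table occurrence are assumptions made for illustration, not kimchi's exact algorithm.

```rust
use std::collections::HashMap;

/// Sketch: build s = f ∪ t, ordered by the table t.
/// With f = [1, 2, 2, 3] and t = [2, 1, 2, 3], this returns
/// [2, 2, 2, 1, 1, 2, 3, 3]: the second occurrence of 2 in t keeps its
/// own position instead of being merged with the first one.
fn sort_by_t(f: &[u64], t: &[u64]) -> Vec<u64> {
    // multiplicity of each value among the queries
    let mut counts: HashMap<u64, usize> = HashMap::new();
    for x in f {
        *counts.entry(*x).or_default() += 1;
    }
    let mut s = Vec::with_capacity(f.len() + t.len());
    for x in t {
        s.push(*x); // every table entry appears once on its own
        // all matching queries go right after the value's first occurrence
        if let Some(count) = counts.remove(x) {
            s.extend(std::iter::repeat(*x).take(count));
        }
    }
    s
}
```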
diff --git a/book/src/rfcs/keccak.md b/book/src/rfcs/keccak.md
index 457c247b97..03174df193 100644
--- a/book/src/rfcs/keccak.md
+++ b/book/src/rfcs/keccak.md
@@ -1,166 +1 @@
-# RFC: Keccak
-
-The Keccak gadget is composed of 3 circuit gates (`Xor16`, `Rot64`, and `Zero`).
-
-Keccak works with 64-bit words. The state is represented using a $5\times 5$ matrix
-of 64-bit words. Each compression step of Keccak consists of 24 rounds. Let us
-denote the state matrix with $A$ (indexing elements as $A[x,y]$), from which we derive
-further states as follows in each round. Each round then consists of the following 5 steps:
-
-$$
-\begin{align}
-C[x] &= A[x,0] \oplus A[x,1] \oplus A[x,2] \oplus A[x,3] \oplus A[x,4] \\
-D[x] &= C[x-1] \oplus ROT(C[x+1],1) \\
-E[x,y] &= A[x,y] \oplus D[x] \\
-B[y,2x+3y] &= ROT(E[x,y],\rho[x,y]) \\
-F[x,y] &= B[x,y] \oplus ((NOT\ B[x+1,y])\ AND\ B[x+2,y]) \\
-Fp[0,0] &= F[0,0] \oplus RC
-\end{align}
-$$
-
-for $0\leq x, y \leq 4$, where $\rho[x,y]$ is the rotation offset defined for Keccak.
-The values are in the table below, extracted from the Keccak reference.
-
-| | x = 3 | x = 4 | x = 0 | x = 1 | x = 2 |
-| ----- | ----- | ----- | ----- | ----- | ----- |
-| y = 2 | 153 | 231 | 3 | 10 | 171 |
-| y = 1 | 55 | 276 | 36 | 300 | 6 |
-| y = 0 | 28 | 91 | 0 | 1 | 190 |
-| y = 4 | 120 | 78 | 210 | 66 | 253 |
-| y = 3 | 21 | 136 | 105 | 45 | 15 |
-
-## Design approach
-
-The atomic operations are XOR, ROT, NOT, and AND. In the sections below, we will describe
-the gates for these operations. Below are some common approaches followed in their design.
-
-To fit within 15 wires, we first decompose each word into its lower and upper 32-bit
-components. A gate for an atomic operation works with those 32-bit components at a time.
-
-Before we describe the specific gate design approaches, below are some constraints in the
-Kimchi framework that dictated those approaches:
-
-* only 4 lookups per row
-* only the first 7 columns are available to the permutation polynomial
-
-## Rot64
-
-It is clear from the definition of the rotation gate that its constraints are complete
-(meaning that honest instances always satisfy the constraints). It remains to be proven
-that the proposal is sound. In this section, we will give a proof that as soon as we
-perform the range checks on the excess and shifted parts of the input, only one possible
-assignment satisfies the constraints. This means that there is no dishonest instance that
-can make the constraints pass. We will also give an example where one could find wrong
-rotation witnesses that would satisfy the constraints if we did not check the range.
-
-### Necessity of range checks
-
-First of all, we will illustrate the necessity of range checks with a simple example.
-For the sake of readability, we will use some toy field lengths. In particular, let us
-assume that our words have 4 bits, meaning all of the elements between `0x0` and `0xF`.
-Next, we will be using the native field $\mathbb{F}_{32}$.
-
-As we will later see, this choice of field lengths is not enough to perform any 4-bit
-rotation, since the operations in the constraints would overflow the native field.
-Nonetheless, it will be sufficient for our example, where we will only rotate by 1 bit.
-
-Assume we want to rotate the word `0b1101` (meaning 13) by 1 bit to the left. This gives
-us the rotated word `0b1011` (meaning 11). The excess part of the word is `0b1`, whereas
-the shifted part corresponds to `0b1010`. We recall the constraints for the rotation gate:
-
-$$
-\begin{align*}
-word \cdot 2^{rot} &= excess \cdot 2^{len} + shifted \\
-rotated &= excess + shifted
-\end{align*}
-$$
-
-Applied to our example, this results in the following equations:
-
-$$
-\begin{align*}
-13 \cdot 2 &= excess \cdot 16 + shifted \\
-11 &= excess + shifted
-\end{align*}
-$$
-
-We can easily check that the proposed values of the shifted `0b1010 = 10` and the excess
-`0b1 = 1` satisfy the above constraints, because $26 = 1 \cdot 16 + 10$ and $11 = 1 + 10$.
-Now, the question is: _can we find other values for excess and shifted, such that their addition results in an incorrect rotated word?_
-
-The answer to this question is yes, due to __diophantine equations__. We basically want to find $x,y$ such that $26 = x \cdot 16 + y \ (\text{mod } 32)$. The solutions to this equation are:
-
-$$
-\begin{align*}
-\forall k \in [0..31]: & \\
-x &= k \\
-y &= 26 - 16 \cdot k
-\end{align*}
-$$
-
-We chose these word and field lengths to better understand the behaviour of the solution. Here, we can see two "classes" of evaluations.
-
-- If we choose an even $k$, then $y$ will have the following shape:
-
-  $$26 - 16 \cdot (2 \cdot n) \iff 26 - 32n \equiv_{32} 26 $$
-
-  Meaning, if $x = 2n$ then $y = 26$.
-
-- If, on the other hand, we choose an odd $k$, then $y$ will have the following shape instead:
-
-  $$26 - 16 \cdot (2 \cdot n + 1) \iff 26 - 32n - 16 \equiv_{32} 26 - 16 = 10$$
-
-  Meaning, if $x = 2n+1$ then $y = 10$.
-
-Thus, possible solutions to the diophantine equation are:
-
-$$
-\begin{align*}
-x &= 0, 1, 2, 3, 4, 5, \ldots \\
-y &= 26, 10, 26, 10, 26, 10, \ldots
-\end{align*}
-$$
-
-Note that our valid witness is part of that set of solutions, namely $x=1$ and $y=10$. Of course, we can also find another dishonest instantiation such as $x=0$ and $y=26$. Perhaps one could think that we do not need to worry about this case, because the resulting rotation word would be $0+26=26$, and if we later use that result as an input to a subsequent gate such as XOR, the value $26$ would not fit and at some point the constraint system would complain. Nonetheless, we still have other solutions to worry about, such as $(x=3, y=10)$ or $(x=5, y=10)$, since they would result in a rotated word that would fit in the word length of 4 bits, yet would be incorrect (not equal to $11$).
-
-All of the above incorrect solutions differ in one thing: they have different bit lengths.
-This means that we need to range check the values of the excess and shifted witnesses to make sure they have the correct bit length.
-
-### Sufficiency of range checks
-
-In the following, we will give a proof that performing range checks for these values is not only necessary but also sufficient to prove that the rotation gate is sound. In other words, we will prove that there are no two possible solutions of the decomposition constraint that have the correct bit lengths. Now, for the sake of robustness, we will consider 64-bit words and fields with at least twice the bit length of the words (as is our case).
-
-We will proceed by __contradiction__. Suppose there are two different solutions to the following diophantine equation:
-
-$$
-\begin{align*}
-\forall k \in \mathbb{F}_n: & \\
-x &= k \\
-y &= w \cdot 2^r - 2^{64} \cdot k
-\end{align*}
-$$
-
-where $k$ is a parameter to instantiate the solutions, $w$ is the word to be rotated, $r$ is the rotation amount, and $n$ is the field length.
-
-That means that there are two different solutions, $(0\leq x=a<2^r, 0\leq y=b<2^{64})$ and $(0\leq x=a'<2^r, 0\leq y=b'<2^{64})$, with $a \neq a'$ or $b \neq b'$. We will show that this is impossible.
-
-If both are solutions to the same equation, then
-
-$$
-\begin{align*}
-w \cdot 2^r &= a \cdot 2^{64} + b \\
-w \cdot 2^r &= a'\cdot 2^{64} + b'
-\end{align*}
-$$
-
-means that $a \cdot 2^{64} + b = a'\cdot 2^{64} + b'$. Moving terms to the left side, we have an equivalent equality: $2^{64}(a-a') + (b-b') = 0 \mod{n}$. There are three cases to consider:
-
-- $a = a'$ and $b \neq b'$: then $(b - b') \equiv_n 0$, and this can only happen if $b' = b + kn$. But since $n > 2^{64}$, $b'$ cannot be smaller than $2^{64}$ as assumed. CONTRADICTION.
-
-- $b = b'$ and $a \neq a'$: then $2^{64}(a - a') \equiv_n 0$, and this can only happen if $a' = a + kn$. But since $n > 2^r$, $a'$ cannot be smaller than $2^r$ as assumed. CONTRADICTION.
-
-- $a\neq a'$ and $b \neq b'$: then we have something like $2^{64} \alpha + \beta \equiv_n 0$.
-
-  This means $\beta \equiv_n -2^{64} \alpha = k \cdot n - 2^{64} \alpha$ for any $k$.
-
-  According to the assumption, both $0\leq a<2^r$ and $0\leq a'<2^r$. This means the difference $\alpha := (a - a')$ lies in the following interval:
-
-  $$1 - 2^r \leq \alpha \leq 2^r - 1$$
-
-  We plug this interval into the above equation to obtain the following interval for $\beta$:
-
-  $$k\cdot n - 2^{64}(1-2^r) \leq \beta \leq k\cdot n - 2^{64}(2^r - 1)$$
-
-  We look at this interval from both sides of the inequality: $\beta \geq kn - 2^{64} + 2^{64+r}$ and $\beta \leq kn + 2^{64} - 2^{64+r}$, and we wonder if $kn - 2^{64} + 2^{64+r} \leq kn + 2^{64} - 2^{64+r}$ is at all possible. We rewrite it as follows:
-
-  $$2^{64+r} - 2^{64} \leq 2^{64} - 2^{64+r}$$
-
-  $$2\cdot2^{64+r} \leq 2\cdot2^{64}$$
-
-  $$2^{64+r} \leq 2^{64}$$
-
-  But this can only happen if $r\leq 0$, which is impossible since we assume $0 < r < 64$. CONTRADICTION.
-
-- EOP.
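As a sanity check of the decomposition discussed above, here is a toy sketch of the honest witness computation for a 64-bit rotation, using `u128` arithmetic in place of field elements. The function and its assertions are illustrative only; they are not kimchi's `Rot64` implementation.

```rust
/// Sketch: compute (excess, shifted, rotated) for `word` rotated left by
/// `rot` bits, following `word * 2^rot = excess * 2^64 + shifted` and
/// `rotated = excess + shifted`, plus the range checks the soundness
/// argument relies on.
fn rot64_witness(word: u64, rot: u32) -> (u64, u64, u64) {
    assert!(rot > 0 && rot < 64);
    let prod = (word as u128) << rot; // word * 2^rot
    let excess = (prod >> 64) as u64; // the top `rot` bits
    let shifted = prod as u64; // the low 64 bits
    // range check: excess must fit in `rot` bits
    // (shifted < 2^64 is implicit in the u64 type)
    assert!(excess < (1u64 << rot));
    let rotated = excess + shifted;
    assert_eq!(rotated, word.rotate_left(rot));
    (excess, shifted, rotated)
}
```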
+# RFC 7: Keccak
diff --git a/book/src/snarky/api.md b/book/src/snarky/api.md
deleted file mode 100644
index e8b981a474..0000000000
--- a/book/src/snarky/api.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# API of Snarky
-
diff --git a/book/src/snarky/booleans.md b/book/src/snarky/booleans.md
deleted file mode 100644
index 7b503f0580..0000000000
--- a/book/src/snarky/booleans.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Booleans
-
-Booleans are a good example of a [snarky variable](./vars.md#snarky-vars).
-
-```rust
-pub struct Boolean<F: PrimeField>(CVar<F>);
-
-impl<F> SnarkyType<F> for Boolean<F>
-where
-    F: PrimeField,
-{
-    type Auxiliary = ();
-
-    type OutOfCircuit = bool;
-
-    const SIZE_IN_FIELD_ELEMENTS: usize = 1;
-
-    fn to_cvars(&self) -> (Vec<CVar<F>>, Self::Auxiliary) {
-        (vec![self.0.clone()], ())
-    }
-
-    fn from_cvars_unsafe(cvars: Vec<CVar<F>>, _aux: Self::Auxiliary) -> Self {
-        assert_eq!(cvars.len(), Self::SIZE_IN_FIELD_ELEMENTS);
-        Self(cvars[0].clone())
-    }
-
-    fn check(&self, cs: &mut RunState<F>) {
-        // TODO: annotation?
-        cs.assert_(Some("boolean check"), vec![BasicSnarkyConstraint::Boolean(self.0.clone())]);
-    }
-
-    fn deserialize(&self) -> (Self::OutOfCircuit, Self::Auxiliary) {
-        todo!()
-    }
-
-    fn serialize(out_of_circuit: Self::OutOfCircuit, aux: Self::Auxiliary) -> Self {
-        todo!()
-    }
-
-    fn constraint_system_auxiliary() -> Self::Auxiliary {
-        todo!()
-    }
-
-    fn value_to_field_elements(x: &Self::OutOfCircuit) -> (Vec<F>, Self::Auxiliary) {
-        todo!()
-    }
-
-    fn value_of_field_elements(x: (Vec<F>, Self::Auxiliary)) -> Self::OutOfCircuit {
-        todo!()
-    }
-}
-```
-
-## Check
-
-The `check()` function simply constrains the `CVar` $x$ to be either $0$ or $1$, using the following constraint:
-
-$$x (x - 1) = 0$$
-
-It is trivial to use the [double generic gate](../specs/kimchi.md#double-generic-gate) for this.
-
-## And
-
-$$x \land y = x \times y$$
-
-## Not
-
-$$\sim x = 1 - x$$
-
-## Or
-
-* $\sim x \land \sim y = b$
-* $x \lor y = \sim b$
diff --git a/book/src/snarky/circuit-generation.md b/book/src/snarky/circuit-generation.md
deleted file mode 100644
index e81793aa03..0000000000
--- a/book/src/snarky/circuit-generation.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Circuit generation
-
-In circuit generation mode, the `has_witness` field of `RunState` is set to the default `CircuitGeneration`, and the program of the user is run to completion.
-
-During the execution, the different snarky functions called on `RunState` will create [internal variables](./vars.md) as well as constraints.
-
-## Creation of variables
-
-[Variables](./vars.md) can be created via the `compute()` function, which takes two arguments:
-
-* A `TypeCreation` toggle, which is either set to `Checked` or `Unsafe`. We will describe this below.
-* A closure representing the actual computation behind the variable. This computation will only take place when real values are computed, and can be non-deterministic (e.g. random, or external values provided by the user). Note that the closure takes one argument: a `WitnessGeneration`, a structure that allows you to read the runtime values of any variables that were previously created in your program.
-
-The `compute()` function also needs a type hint to understand what type of [snarky type](./vars.md#snarky-vars) it is creating.
-
-It then performs the following steps:
-
-* creates enough [`CVar`](./vars#circuit-vars)s to hold the value to be created
-* retrieves the auxiliary data needed to create the snarky type (TODO: explain auxiliary data) and creates the [snarky variable](./vars.md#snarky-vars) out of the `CVar`s and the auxiliary data
-* if the `TypeCreation` is set to `Checked`, calls the `check()` function on the snarky type (which will constrain the value created); if it is set to `Unsafe`, does nothing (in which case we're trusting that the value cannot be malformed; this is mostly used internally, and it is highly likely that users directly making use of `Unsafe` are writing bugs)
-
-```admonish
-At this point we only created variables to hold future values, and made sure that they are constrained.
-The actual values will fill the room created by the `CVar` only during the [witness generation](./witness-generation.md).
-```
-
-## Constraints
-
-All other functions exposed by the API are basically here to operate on variables and create constraints in doing so.
diff --git a/book/src/snarky/kimchi-backend.md b/book/src/snarky/kimchi-backend.md
deleted file mode 100644
index 2d2ebf789a..0000000000
--- a/book/src/snarky/kimchi-backend.md
+++ /dev/null
@@ -1,234 +0,0 @@
-# Kimchi Backend
-
-![](https://i.imgur.com/KmKU5Pl.jpg)
-
-Underneath the snarky wrapper (in `snarky/checked_runner.rs`) lies what we used to call the `plonk_constraint_system` or `kimchi_backend`, in `snarky/constraint_systen.rs`.
-
-```admonish
-It is good to note that we're planning on removing this abstract separation between the snarky wrapper and the constraint system.
-```
-
-The logic in the kimchi backend serves two purposes:
-
-* **Circuit generation**. It is the logic that adds gates to our list of gates (representing the circuit). For most of these gates, the variables used are passed to the backend by the snarky wrapper, but some of them are created by the backend itself (see more in the [variables section](#variables)).
-* **Witness generation**. It is the logic that creates the witness.
-
-One can also perform two additional operations once the constraint system has been compiled:
-
-* Generate the prover and verifier index for the system.
-* Get a hash of the constraint system (this includes the circuit and the number of public inputs) (TODO: verify that this is true) (TODO: what else should be in that hash? a version of snarky and a version of kimchi?).
-
-## A circuit
-
-A circuit is either being built, or has been constructed during a circuit generation phase:
-
-```rust
-enum Circuit<F>
-where
-    F: PrimeField,
-{
-    /** A circuit still being written. */
-    Unfinalized(Vec<BasicSnarkyConstraint<CVar<F>>>),
-    /** Once finalized, a circuit is represented as a digest
-        and a list of gates that corresponds to the circuit.
-    */
-    Compiled([u8; 32], Vec<CircuitGate<F>>),
-}
-```
-
-## State
-
-The state of the kimchi backend looks like this:
-
-```rust
-pub struct SnarkyConstraintSystem<Field>
-where
-    Field: PrimeField,
-{
-    /// A counter used to track variables
-    /// (similar to the one in the snarky wrapper)
-    next_internal_var: usize,
-
-    /// Instruction on how to compute each internal variable
-    /// (as a linear combination of other variables).
-    /// Used during witness generation.
-    internal_vars: HashMap<InternalVar, (Vec<(Field, V)>, Option<Field>)>,
-
-    /// The symbolic execution trace table.
-    /// Each cell is a variable that takes a value during witness generation
-    /// (if not set, it will take the value 0).
-    rows: Vec<Vec<Option<V>>>,
-
-    /// The circuit once compiled
-    gates: Circuit<Field>,
-
-    /// The row to use the next time we add a constraint.
-    // TODO: I think we can delete this
-    next_row: usize,
-
-    /// The size of the public input
-    /// (which fills the first rows of our constraint system).
-    public_input_size: Option<usize>,
-
-    // omitted values...
-}
-```
-
-## Variables
-
-In the backend, there are two types of variables:
-
-```rust
-enum V {
-    /// An external variable
-    /// (generated by snarky, via [exists]).
-    External(usize),
-
-    /// An internal variable is generated to hold an intermediate value
-    /// (e.g. in reducing linear combinations to single PLONK positions).
-    Internal(InternalVar),
-}
-```
-
-Internal variables are basically a `usize` pointing to a hashmap in the state.
-
-That hashmap tells you how to compute the internal variable during witness generation: it is always a linear combination of other variables (and a constant).
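For illustration, evaluating such an internal variable during witness generation could look like the sketch below. The types (`u64` in place of field elements) and names are assumptions made for the example; this is not the backend's actual code.

```rust
use std::collections::HashMap;

type InternalVar = usize;

#[derive(Clone)]
enum V {
    /// An external variable (created by snarky).
    External(usize),
    /// An internal variable, defined by a linear combination.
    Internal(InternalVar),
}

/// Sketch: value = constant + sum_i coeff_i * value_of(var_i).
fn eval_internal(
    lincom: &HashMap<InternalVar, (Vec<(u64, V)>, Option<u64>)>,
    external_values: &[u64],
    var: InternalVar,
) -> u64 {
    let (terms, constant) = lincom[&var].clone();
    let mut acc = constant.unwrap_or(0);
    for (coeff, v) in &terms {
        let value = match v {
            V::External(i) => external_values[*i],
            V::Internal(j) => eval_internal(lincom, external_values, *j),
        };
        acc = acc.wrapping_add(coeff.wrapping_mul(value));
    }
    acc
}
```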
-
-## Circuit generation
-
-During circuit generation, the snarky wrapper will make calls to the `add_constraint()` or `add_basic_snarky_constraint` function of the kimchi backend, specifying what gate to use and what variables to use in that gate.
-
-At this point, the snarky wrapper might have some variables that are not yet tracked as such (with a counter).
-Rather, they are constants, or they are a combination of other variables.
-You can see that as a small AST representing how to compute a variable.
-(See the [variables section](./vars.md#circuit-vars) for more details.)
-
-For this reason, they can hide a number of operations that haven't been constrained yet.
-It is the role of the `add_constraint` logic to enforce that at this point constants, as well as linear combinations or scalings of variables, are encoded in the circuit.
-This is done by adding enough generic gates (using the `reduce_lincom()` or `reduce_to_var()` functions), as sketched after this section.
-
-```admonish
-This is a remnant of an optimization targeting R1CS (in which additions are for free).
-An issue with this approach is the following: imagine that two circuit variables are created from the same circuit variable, and imagine also that the original circuit variable contained a long AST; then both variables might end up creating the same constraints to convert that AST.
-Currently, snarkyjs and pickles expose a `seal()` function that allows you to reduce this issue, at the cost of some manual work and mental tracking on the developer.
-We should probably get rid of this, while making sure that we can continue to optimize generic gates
-(in some cases you can merge two generic gates into one (TODO: give an example of where that can happen)).
-Another solution is to keep track of what was reduced, and reuse previous reductions (similar to how we handle constants).
-```
-
-It is during this "reducing" step that internal variables (known only to the kimchi backend) are created.
-
-```admonish
-The process is quite safe, as the kimchi backend cannot use the snarky wrapper variables directly (which are of type `CVar`).
-Since the expected format (see the [variables section](#variables)) is a number (of type `usize`), the only way to convert a non-tracked variable (constant, or scale, or linear combination) is to reduce it (and in the process constrain its value).
-```
-
-Depending on the gate being used, several constraints might be added via the `add_row()` function, which does three things:
-
-1. figure out if there's any wiring to be done
-2. add a gate to our list of gates (representing the circuit)
-3. add the variables to our _symbolic_ execution trace table (symbolic in the sense that nothing has values yet)
-
-This process happens as the circuit is "parsed" and the constraint functions of the kimchi backend are called.
-
-This does not lead to a finalized circuit; see the next section to see how that is done.
-
-(TODO: ideally this should happen in the same step)
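To give an idea of what the reduction mentioned above looks like, here is a sketch of folding a linear combination into a chain of generic-gate constraints. The closure-based signature and `i64` coefficients are assumptions made for the example; this does not match the real `reduce_lincom()`.

```rust
/// Sketch: fold c0*x0 + c1*x1 + ... into a chain of generic-gate-style
/// constraints of the form 1*acc + c_i*x_i - 1*next = 0, so that the whole
/// combination ends up in a single circuit position.
fn reduce_lincom(
    terms: &[(i64, usize)],
    mut new_var: impl FnMut() -> usize,
    mut add_generic_gate: impl FnMut([i64; 3], [usize; 3]),
) -> usize {
    assert!(!terms.is_empty());
    // seed the accumulator with the first term: c0*x0 - 1*acc = 0
    let (c0, x0) = terms[0];
    let mut acc = new_var();
    add_generic_gate([c0, 0, -1], [x0, 0, acc]);
    for &(c, x) in &terms[1..] {
        // 1*acc + c*x - 1*next = 0
        let next = new_var();
        add_generic_gate([1, c, -1], [acc, x, next]);
        acc = next;
    }
    acc
}
```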
-
-## Finalization of the circuit
-
-So far we've only talked about adding specific constraints to the circuit, but not about how public inputs are handled.
-
-The `finalization()` function of the kimchi backend does the following:
-
-* adds as many generic rows as there are public inputs
-* constructs the permutation
-* computes a cache of the circuit (TODO: this is so unnecessary)
-* and other things that are not that important
-
-## Witness generation
-
-Witness generation happens by taking the finalized state (in the `compute_witness()` function) with a callback that can be used to retrieve the values of external variables (public input and public output).
-
-The algorithm follows these steps, using the symbolic execution table we built during circuit generation:
-
-1. initialize the execution trace table with zeros
-2. go through the rows related to the public input and set the left-most column values to the ones obtained by the callback
-3. go through the other rows and compute the value of the variables left in the table
-
-Variables in step 3 should either:
-
-* be absent (`None`) and evaluated to the default value 0
-* point to an external variable, in which case the closure passed can be used to retrieve the value
-* be an internal variable, in which case the value is computed by evaluating the AST that was used to create it.
-
-## Permutation
-
-The permutation is used to wire cells of the execution trace table (specifically, cells belonging to the first 7 columns).
-It is also known as "copy constraints".
-
-```admonish
-In snarky, the permutation is represented differently from kimchi, and thus needs to be converted to kimchi's format before a proof can be created.
-TODO: merge the representations
-```
-
-We use the permutation in ingenious ways to optimize circuits.
-For example, we use it to encode each constant once, and wire it to the places where it is used.
-Another example is that we use it to assert equality between two cells.
-
-## Implementation details
-
-There are two aspects to the implementation of the permutation: the first one is a hashmap of equivalence classes, which is used to track all the positions of a variable; the second one is a union-find data structure, which is used to link variables that are equivalent (we'll talk about that later).
-
-The two data structures are in the kimchi backend's state:
-
-```rust
-pub struct SnarkyConstraintSystem<Field>
-where
-    Field: PrimeField,
-{
-    equivalence_classes: HashMap<V, Vec<Position>>,
-    union_finds: disjoint_set::DisjointSet<V>,
-    // omitted fields...
-}
-```
-
-### equivalence classes
-
-As said previously, during circuit generation a symbolic execution trace table is created. It should look a bit like this (if there were only 3 columns and 4 rows):
-
-| | 0 | 1 | 2 |
-| :-: | :-: | :-: | :-: |
-| 0 | v1 | v1 | |
-| 1 | | v2 | |
-| 2 | | v2 | |
-| 3 | | | v1 |
-
-From that, it should be clear that all the cells containing the variable `v1` should be connected,
-and all the cells containing the variable `v2` should be as well.
-
-The format that the permutation expects is a [cycle](https://en.wikipedia.org/wiki/Cyclic_permutation): a list of cells where each cell is linked to the next, the last one wrapping around and linking to the first one.
-
-For example, a cycle for the `v1` variable could be:
-
-```
-(0, 0) -> (0, 1)
-(0, 1) -> (3, 2)
-(3, 2) -> (0, 0)
-```
-
-During circuit generation, a hashmap (called `equivalence_classes`) is used to track all the positions (row and column) of each variable.
-
-During finalization, all the different cycles are created by looking at all the variables existing in the hashmap.
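For example, turning the list of positions of one variable into such a cycle could be sketched as follows; the `Position` alias and function name are illustrative, not the actual backend code.

```rust
type Position = (usize, usize); // (row, column)

/// Sketch: link each position to the next one,
/// with the last position wrapping around to the first.
fn positions_to_cycle(positions: &[Position]) -> Vec<(Position, Position)> {
    let n = positions.len();
    (0..n)
        .map(|i| (positions[i], positions[(i + 1) % n]))
        .collect()
}

// positions_to_cycle(&[(0, 0), (0, 1), (3, 2)]) returns the v1 cycle above:
// [((0, 0), (0, 1)), ((0, 1), (3, 2)), ((3, 2), (0, 0))]
```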
-
-### Union finds
-
-Sometimes, we know that two variables will have equivalent values due to an `assert_equal()` being called to link them.
-Since we link two variables together, they need to be part of the same cycle, and as such we need to be able to detect that in order to construct correct cycles.
-
-To do this, we use a union-find data structure, which allows us to easily find the unions of equivalent variables.
-
-When an `assert_equal()` is called, we link the two variables together using the `union_finds` data structure.
-
-During finalization, when we create the cycles, we use the `union_finds` data structure to find the equivalent variables.
-We then create a new equivalence classes hashmap to merge the keys (variables) that are in the same set.
-This is done before using the equivalence classes hashmap to construct the cycles.
diff --git a/book/src/snarky/overview.md b/book/src/snarky/overview.md
deleted file mode 100644
index b67c1fa30b..0000000000
--- a/book/src/snarky/overview.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Snarky
-
-Snarky is a frontend to the [kimchi proof system](../kimchi/overview.md).
-
-It allows users to write circuits that can be proven using kimchi.
-
-This part of the Mina book documents both how to use snarky, and how its internals work.
-
-```admonish
-Snarky was originally an OCaml library. It is also known as a TypeScript library: SnarkyJS.
-This documentation talks about the Rust implementation, which one can refer to as snarky-rs (but we will just say snarky from now on).
-```
-
-## High-level design
-
-Snarky is divided into two parts:
-
-* **Circuit-generation**: which is also called the setup or compilation phase. It is when snarky turns code written using its library into a circuit that kimchi can understand. This can later be used by kimchi to produce prover and verifier keys.
-* **Witness-generation**: which is also called the proving, or runtime phase. It is when snarky executes the written program and records its state at various points in time to create an execution trace of the program (which we call witness here). This can later be used by kimchi, with a proving key, to produce a zero-knowledge proof.
-
-A snarky program is constructed using functions exposed by the library.
-The API of snarky that one can use to design circuits can be split into three categories:
-
-* creation of snarky variables (via `compute()`)
-* creation of constraints (via `assert`-type functions)
-* manipulation of snarky variables (which can sometimes create constraints)
-
-Snarky itself is divided into three parts:
-
-* [The high-level API](./api.md) that you can find in `api.rs` and `traits.rs`
-* [The snarky wrapper](./snarky-wrapper.md), which contains the logic for creating user variables and composed types (see the section on [Snarky vars](./vars.md#snarky-vars)).
-* [The kimchi backend](./kimchi-backend.md), which contains the logic for constructing the circuit as well as the witness.
diff --git a/book/src/snarky/snarky-wrapper.md b/book/src/snarky/snarky-wrapper.md
deleted file mode 100644
index 725f7c35ec..0000000000
--- a/book/src/snarky/snarky-wrapper.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Snarky wrapper
-
-Snarky, as of today, is constructed as two parts:
-
-* a snarky wrapper, which is explained in this document
-* a backend underneath that wrapper, explained in the [kimchi backend section](./kimchi-backend.md)
-
-```admonish
-This separation exists for legacy reasons, and ideally we should merge the two into a single library.
-```
-
-The snarky wrapper mostly exists in `checked_runner.rs`, and has the following state:
-
-```rust
-pub struct RunState<F>
-where
-    F: PrimeField,
-{
-    /// The constraint system used to build the circuit.
-    /// If not set, the constraint system is not built.
-    system: Option<SnarkyConstraintSystem<F>>,
-
-    /// The public input of the circuit used in witness generation.
-    // TODO: can we merge public_input and private_input?
-    public_input: Vec<F>,
-
-    // TODO: we could also just store `usize` here
-    pub(crate) public_output: Vec<CVar<F>>,
-
-    /// The private input of the circuit used in witness generation. Still not sure what that is, or why we care about this.
-    private_input: Vec<F>,
-
-    /// If set, the witness generation will check if the constraints are satisfied.
-    /// This is useful to simulate running the circuit and return an error if an assertion fails.
-    eval_constraints: bool,
-
-    /// The number of public inputs.
-    num_public_inputs: usize,
-
-    /// A counter used to track variables (this includes public inputs) as they're being created.
-    next_var: usize,
-
-    /// Indication that we're running the witness generation (as opposed to the circuit creation).
-    mode: Mode,
-}
-```
-
-The wrapper is designed to be used in different ways, depending on the fields set.
-
-```admonish
-Ideally, we would like to only run this once and obtain a result that's an immutable compiled artifact.
-Currently, `public_input`, `private_input`, `eval_constraints`, `next_var`, and `mode` all need to be mutable.
-In the future these should be passed as arguments to functions, and should not exist in the state.
-```
-
-## Public output
-
-The support for public output is implemented as kind of a hack.
-
-When the developer writes a circuit, they have to specify the type of the public output.
-
-This allows the API to save enough room at the end of the public input, and store the variables used in the public output in the state.
-
-When the API calls the circuit written by the developer, it expects the public output (as a snarky type) to be returned by the function.
-The compilation or proving API that ends up calling that function can thus obtain the variables of the public output.
-With that in hand, the API can continue to write the circuit to enforce an equality constraint between these variables being returned and the public output variables that it had previously stored in the state.
-
-Essentially, the kimchi backend will turn this into as many wirings as there are `CVar`s in the public output.
-
-During witness generation, we need a way to modify the witness once we know the values of the public output.
-As the public output `CVar`s were generated from the snarky wrapper (and not from the kimchi backend), the snarky wrapper should know their values after running the given circuit.
diff --git a/book/src/snarky/vars.md b/book/src/snarky/vars.md
deleted file mode 100644
index 7a1e3a3be7..0000000000
--- a/book/src/snarky/vars.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Vars
-
-In this section we will introduce two types of variables:
-
-* Circuit vars, or `CVar`s, which are low-level variables representing field elements.
-* Snarky vars, which are high-level variables that users can use to create more meaningful programs.
-
-## Circuit vars
-
-In snarky, we first define circuit variables (TODO: rename Field variable?) which represent field elements in a circuit.
-These circuit variables, or cvars, can be represented differently in the system:
-
-```rust
-pub enum CVar<F>
-where
-    F: PrimeField,
-{
-    /// A constant.
-    Constant(F),
-
-    /// A variable that can be referred to via a `usize`.
-    Var(usize),
-
-    /// The addition of two other [CVar]s.
-    Add(Box<CVar<F>>, Box<CVar<F>>),
-
-    /// Scaling of a [CVar].
-    Scale(F, Box<CVar<F>>),
-}
-```
-
-One can see a `CVar` as an AST, where two atoms exist: a `Var(usize)`, which represents a private input, and a `Constant(F)`, which represents a constant.
-Anything else represents combinations of these two atoms.
-
-### Constants
-
-Note that a circuit variable does not represent a value that has been constrained in the circuit (yet).
-This is why we need to know if a cvar is a constant, so that we can avoid constraining it too early.
-For example, the following code does not encode 2 or 1 in the circuit, but will encode 3:
-
-```rust
-let x: CVar<F> = state.exists(|_| 2) + state.exists(|_| 1);
-state.assert_eq(x, y); // 3 and y will be encoded in the circuit
-```
-
-whereas the following code will encode all variables:
-
-```rust
-let x = y + y;
-let one: CVar<F> = state.exists(|_| 1);
-assert_eq(x, one);
-```
-
-### Non-constants
-
-Right after being created, a `CVar` is not constrained yet, and needs to be constrained by the application.
-That is, unless the application wants the `CVar` to be a constant that will not need to be constrained (see the previous example), or because the application wants the `CVar` to be a random value (unlikely) (TODO: we should add a "rand" function for that).
-
-In any case, a circuit variable which is not a constant has a value that is not known yet at circuit-generation time.
-In some situations, we might not want to constrain the value right away.
-
-### When do variables get constrained?
-
-In general, a circuit variable only gets constrained by an assertion call like `assert` or `assert_equals`.
-
-When variables are added together, or scaled, they do not directly get constrained.
-This is due to optimizations targeting R1CS (which we don't support anymore) that were implemented in the original snarky library, and that we have kept in snarky-rs.
-
-Imagine the following example:
-
-```rust
-let y = x1 + x2 + x3 + ...;
-let z = y + 3;
-assert_eq(y, 6);
-assert_eq(z, 7);
-```
-
-The first two lines will not create constraints, but simply create minimal ASTs that track all of the additions.
-
-Both assert calls will then reduce the variables to a single circuit variable, creating the same constraints twice.
-
-For this reason, there's a function `seal()` defined in pickles and snarkyjs. (TODO: more about `seal()`, and why is it not in snarky?) (TODO: remove the R1CS optimization)
-
-## Snarky vars
-
-Handling `CVar`s can be cumbersome, as they can only represent a single field element.
-We might want to represent values that are either in a smaller range (e.g. [booleans](./booleans.md)) or that are made out of several `CVar`s.
-
-For this, snarky's API exposes the following trait, which allows users to define their own types:
-
-```rust
-pub trait SnarkyType<F>: Sized
-where
-    F: PrimeField,
-{
-    /// ?
-    type Auxiliary;
-
-    /// The equivalent type outside of the circuit.
-    type OutOfCircuit;
-
-    const SIZE_IN_FIELD_ELEMENTS: usize;
-
-    fn to_cvars(&self) -> (Vec<CVar<F>>, Self::Auxiliary);
-
-    fn from_cvars_unsafe(cvars: Vec<CVar<F>>, aux: Self::Auxiliary) -> Self;
-
-    fn check(&self, cs: &mut RunState<F>);
-
-    fn deserialize(&self) -> (Self::OutOfCircuit, Self::Auxiliary);
-
-    fn serialize(out_of_circuit: Self::OutOfCircuit, aux: Self::Auxiliary) -> Self;
-
-    fn constraint_system_auxiliary() -> Self::Auxiliary;
-
-    fn value_to_field_elements(x: &Self::OutOfCircuit) -> (Vec<F>, Self::Auxiliary);
-
-    fn value_of_field_elements(x: (Vec<F>, Self::Auxiliary)) -> Self::OutOfCircuit;
-}
-```
-
-Such types are always handled as `OutOfCircuit` types (e.g. `bool`) by the users, and as a type implementing `SnarkyType` by snarky (e.g. [`Boolean`](./booleans.md)).
-Thus, the user can pass them to snarky in two ways:
-
-**As public inputs**. In this case they will be serialized into field elements for snarky before [witness-generation](./witness-generation.md) (via the `value_to_field_elements()` function).
-
-**As private inputs**. In this case, they must be created using the `compute()` function with a closure returning an `OutOfCircuit` value by the user.
-The call to `compute()` will need to have some type hint, for snarky to understand what `SnarkyType` it is creating.
-This is because the relationship is currently only one-way: a `SnarkyType` knows what out-of-circuit type it relates to, but not the other way around.
-(TODO: should we implement that though?)
-
-A `SnarkyType` always implements a `check()` function, which is called by snarky when `compute()` is called to create such a type.
-The `check()` function is responsible for creating the constraints that sanitize the newly-created `SnarkyType` (and its underlying `CVar`s).
-For example, creating a boolean would make sure that the underlying `CVar` is either 0 or 1.
diff --git a/book/src/snarky/witness-generation.md b/book/src/snarky/witness-generation.md
deleted file mode 100644
index 41fbc3b5f1..0000000000
--- a/book/src/snarky/witness-generation.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Witness generation
-
-In snarky, currently, the same code is run through again to generate the witness.
-
-That is, the `RunState` contains a few changes:
-
-* **`public_input: Vec<F>`**: now contains concrete values (instead of being empty).
-* **`has_witness`**: is set to `WitnessGeneration`.
-
-Additionally, if we want to verify that the arguments are actually correct (and that the program implemented does not fail), we can also set `eval_constraints` to `true` (it defaults to `false`) to verify that the program has a correct state at all points in time.
-
-If we do not do this, the user will only detect failure during proof generation (specifically when the [composition polynomial](../specs/kimchi.md#proof-creation) is divided by the [vanishing polynomial](../specs/kimchi.md#proof-creation)).
-
-```admonish
-This is implemented by simply checking that each [generic gate](../specs/kimchi.md#double-generic-gate) encountered is correct, in relation to the witness values observed in that row.
-In other words, $c_0 l + c_1 r + c_2 o + c_3 l r + c_4 = 0$ (extrapolated to the [double generic gate](../specs/kimchi.md#double-generic-gate)).
-Note that other custom gates are not checked, as they are wrapped by [gadgets](../specs/kimchi.md#gates) which fill in witness values instead of the user.
-Thus there is no room for user error (i.e. the user entering a wrong private input).
-``` - -Due to the `has_witness` variable set to `WitnessGeneration`, functions will behave differently and compute actual values instead of generating constraints. diff --git a/book/src/specs/kimchi.md b/book/src/specs/kimchi.md index 13a51330d9..91852cdbb0 100644 --- a/book/src/specs/kimchi.md +++ b/book/src/specs/kimchi.md @@ -1641,8 +1641,8 @@ If lookup is used, the following values are added to the common index: To create the index, follow these steps: 1. If no lookup is used in the circuit, do not create a lookup index -2. Get the lookup selectors and lookup tables (TODO: how?) -3. Concatenate runtime lookup tables with the ones used by gates +2. Get the lookup selectors and lookup tables that are specified implicitly +3. Concatenate explicit runtime lookup tables with the ones (implicitly) used by gates. 4. Get the highest number of columns `max_table_width` that a lookup table can have. 5. Create the concatenated table of all the fixed lookup tables. @@ -2202,7 +2202,7 @@ The prover then follows the following steps to create the proof: * $s_i$ * $w_i$ * $z$ - * lookup (TODO) + * lookup (TODO, see [this issue](https://github.com/MinaProtocol/mina/issues/13886)) * generic selector * poseidon selector @@ -2284,11 +2284,11 @@ We run the following algorithm: * Derive the scalar joint combiner challenge $j$ from $j'$ using the endomorphism. (TODO: specify endomorphism) * absorb the commitments to the sorted polynomials. -1. Sample $\beta$ with the Fq-Sponge. -1. Sample $\gamma$ with the Fq-Sponge. +1. Sample the first permutation challenge $\beta$ with the Fq-Sponge. +1. Sample the second permutation challenge $\gamma$ with the Fq-Sponge. 1. If using lookup, absorb the commitment to the aggregation lookup polynomial. 1. Absorb the commitment to the permutation trace with the Fq-Sponge. -1. Sample $\alpha'$ with the Fq-Sponge. +1. Sample the quotient challenge $\alpha'$ with the Fq-Sponge. 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details). 1. Enforce that the length of the $t$ commitment is of size 7. 1. Absorb the commitment to the quotient polynomial $t$ into the argument. 
diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml index d9415dce18..67fc1e3c00 100644 --- a/kimchi/Cargo.toml +++ b/kimchi/Cargo.toml @@ -25,7 +25,7 @@ num-derive = "0.3" num-integer = "0.1.45" num-traits = "0.2" itertools = "0.10.3" -rand = "0.8.0" +rand = { version = "0.8.0", features = ["std_rng"] } rand_core = "0.6.3" rayon = "1.5.0" rmp-serde = "1.1.1" @@ -50,7 +50,7 @@ mina-poseidon = { path = "../poseidon", version = "0.1.0" } ocaml = { version = "0.22.2", optional = true } ocaml-gen = { version = "0.1.5", optional = true } -wasm-bindgen = { version = "0.2.81", optional = true } +wasm-bindgen = { version = "=0.2.87", optional = true } internal-tracing = { path = "../internal-tracing", version = "0.1.0" } diff --git a/kimchi/src/alphas.rs b/kimchi/src/alphas.rs index 2dbef2aaf3..0eaa047114 100644 --- a/kimchi/src/alphas.rs +++ b/kimchi/src/alphas.rs @@ -323,11 +323,8 @@ mod tests { fn get_alphas_for_spec() { let gates = vec![CircuitGate::::zero(Wire::for_row(0)); 2]; let index = new_index_for_test::(gates, 0); - let (_linearization, powers_of_alpha) = expr_linearization::( - Some(&index.cs.feature_flags), - true, - index.cs.zk_rows as usize, - ); + let (_linearization, powers_of_alpha) = + expr_linearization::(Some(&index.cs.feature_flags), true); // make sure this is present in the specification let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); let spec_path = Path::new(&manifest_dir) diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 473b84ce6d..6cef21f258 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -15,7 +15,7 @@ use crate::{ wires::*, }, curve::KimchiCurve, - error::SetupError, + error::{DomainCreationError, SetupError}, prover_index::ProverIndex, }; use ark_ff::{PrimeField, SquareRootField, Zero}; @@ -36,6 +36,11 @@ use std::sync::Arc; // /// Flags for optional features in the constraint system +#[cfg_attr( + feature = "ocaml_types", + derive(ocaml::IntoValue, ocaml::FromValue, ocaml_gen::Struct) +)] +#[cfg_attr(feature = "wasm_types", wasm_bindgen::prelude::wasm_bindgen)] #[derive(Copy, Clone, Serialize, Deserialize, Debug)] pub struct FeatureFlags { /// RangeCheck0 gate @@ -618,6 +623,47 @@ pub fn zk_rows_strict_lower_bound(num_chunks: usize) -> usize { (2 * (PERMUTS + 1) * num_chunks - 2) / PERMUTS } +impl FeatureFlags { + pub fn from_gates_and_lookup_features( + gates: &[CircuitGate], + lookup_features: LookupFeatures, + ) -> FeatureFlags { + let mut feature_flags = FeatureFlags { + range_check0: false, + range_check1: false, + lookup_features, + foreign_field_add: false, + foreign_field_mul: false, + xor: false, + rot: false, + }; + + for gate in gates { + match gate.typ { + GateType::RangeCheck0 => feature_flags.range_check0 = true, + GateType::RangeCheck1 => feature_flags.range_check1 = true, + GateType::ForeignFieldAdd => feature_flags.foreign_field_add = true, + GateType::ForeignFieldMul => feature_flags.foreign_field_mul = true, + GateType::Xor16 => feature_flags.xor = true, + GateType::Rot64 => feature_flags.rot = true, + _ => (), + } + } + + feature_flags + } + + pub fn from_gates( + gates: &[CircuitGate], + uses_runtime_tables: bool, + ) -> FeatureFlags { + FeatureFlags::from_gates_and_lookup_features( + gates, + LookupFeatures::from_gates(gates, uses_runtime_tables), + ) + } +} + impl Builder { /// Set up the number of public inputs. /// If not invoked, it equals `0` by default. @@ -637,7 +683,9 @@ impl Builder { /// If not invoked, it is `vec![]` by default. 
/// /// **Warning:** you have to make sure that the IDs of the lookup tables, - /// are unique and not colliding with IDs of built-in lookup tables + /// are unique and not colliding with IDs of built-in lookup tables, otherwise + /// the error will be raised. + /// /// (see [crate::circuits::lookup::tables]). pub fn lookup(mut self, lookup_tables: Vec>) -> Self { self.lookup_tables = lookup_tables; @@ -647,8 +695,9 @@ impl Builder { /// Set up the runtime tables. /// If not invoked, it is `None` by default. /// - /// **Warning:** you have to make sure that the IDs of the runtime lookup tables, - /// are unique and not colliding with IDs of built-in lookup tables + /// **Warning:** you have to make sure that the IDs of the runtime + /// lookup tables, are unique, i.e. not colliding internaly (with other runtime tables), + /// otherwise error will be raised. /// (see [crate::circuits::lookup::tables]). pub fn runtime(mut self, runtime_tables: Option>>) -> Self { self.runtime_tables = runtime_tables; @@ -686,30 +735,35 @@ impl Builder { // for some reason we need more than 1 gate for the circuit to work, see TODO below assert!(gates.len() > 1); - let lookup_features = LookupFeatures::from_gates(&gates, runtime_tables.is_some()); + let feature_flags = FeatureFlags::from_gates(&gates, runtime_tables.is_some()); let lookup_domain_size = { // First we sum over the lookup table size + let mut has_table_with_id_0 = false; let mut lookup_domain_size: usize = lookup_tables .iter() - .map( - |LookupTable { data, id: _ }| { - if data.is_empty() { - 0 - } else { - data[0].len() - } - }, - ) + .map(|LookupTable { id, data }| { + // See below for the reason + if *id == 0_i32 { + has_table_with_id_0 = true + } + if data.is_empty() { + 0 + } else { + data[0].len() + } + }) .sum(); // After that on the runtime tables if let Some(runtime_tables) = runtime_tables.as_ref() { + // FIXME: Check that a runtime table with ID 0 is enforced to + // contain a zero entry row. for runtime_table in runtime_tables.iter() { lookup_domain_size += runtime_table.len(); } } // And we add the built-in tables, depending on the features. - let LookupFeatures { patterns, .. } = &lookup_features; + let LookupFeatures { patterns, .. } = &feature_flags.lookup_features; let mut gate_lookup_tables = GateLookupTables { xor: false, range_check: false, @@ -722,7 +776,14 @@ impl Builder { for gate_table in gate_lookup_tables.into_iter() { lookup_domain_size += gate_table.table_size(); } - lookup_domain_size + + // A dummy zero entry will be added if there is no table with ID + // zero. Therefore we must count this in the size. + if has_table_with_id_0 { + lookup_domain_size + } else { + lookup_domain_size + 1 + } }; //~ 1. Compute the number of zero-knowledge rows (`zk_rows`) that will be required to @@ -749,6 +810,9 @@ impl Builder { //~ ``` //~ let (zk_rows, domain_size_lower_bound) = { + // We add 1 to the lookup domain size because there is one element + // used to close the permutation argument (the polynomial Z is of + // degree n + 1 where n is the order of the subgroup H). 
let circuit_lower_bound = std::cmp::max(gates.len(), lookup_domain_size + 1); let get_domain_size_lower_bound = |zk_rows: u64| circuit_lower_bound + zk_rows as usize; @@ -763,9 +827,13 @@ impl Builder { while { let domain_size = D::::compute_size_of_domain(domain_size_lower_bound) .ok_or(SetupError::DomainCreation( - "could not compute size of domain", + DomainCreationError::DomainSizeFailed(domain_size_lower_bound), ))?; - let num_chunks = domain_size / max_poly_size; + let num_chunks = if domain_size < max_poly_size { + 1 + } else { + domain_size / max_poly_size + }; zk_rows = (zk_rows_strict_lower_bound(num_chunks) + 1) as u64; domain_size_lower_bound = get_domain_size_lower_bound(zk_rows); domain_size < domain_size_lower_bound @@ -777,7 +845,8 @@ impl Builder { //~ 1. Create a domain for the circuit. That is, //~ compute the smallest subgroup of the field that //~ has order greater or equal to `n + zk_rows` elements. - let domain = EvaluationDomains::::create(domain_size_lower_bound)?; + let domain = EvaluationDomains::::create(domain_size_lower_bound) + .map_err(SetupError::DomainCreation)?; assert!(domain.d1.size > zk_rows); @@ -793,28 +862,6 @@ impl Builder { .collect(); gates.append(&mut padding); - let mut feature_flags = FeatureFlags { - range_check0: false, - range_check1: false, - lookup_features, - foreign_field_add: false, - foreign_field_mul: false, - xor: false, - rot: false, - }; - - for gate in &gates { - match gate.typ { - GateType::RangeCheck0 => feature_flags.range_check0 = true, - GateType::RangeCheck1 => feature_flags.range_check1 = true, - GateType::ForeignFieldAdd => feature_flags.foreign_field_add = true, - GateType::ForeignFieldMul => feature_flags.foreign_field_mul = true, - GateType::Xor16 => feature_flags.xor = true, - GateType::Rot64 => feature_flags.rot = true, - _ => (), - } - } - //~ 1. sample the `PERMUTS` shifts. let shifts = Shifts::new(&domain.d1); @@ -828,7 +875,7 @@ impl Builder { &domain, zk_rows as usize, ) - .map_err(|e| SetupError::ConstraintSystem(e.to_string()))?; + .map_err(SetupError::LookupCreation)?; let sid = shifts.map[0].clone(); diff --git a/kimchi/src/circuits/domains.rs b/kimchi/src/circuits/domains.rs index 7f32dd6e12..89251bea5f 100644 --- a/kimchi/src/circuits/domains.rs +++ b/kimchi/src/circuits/domains.rs @@ -3,7 +3,7 @@ use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as Domain}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use crate::error::SetupError; +use crate::error::DomainCreationError; #[serde_as] #[derive(Debug, Clone, Copy, Serialize, Deserialize)] @@ -22,26 +22,29 @@ impl EvaluationDomains { /// Creates 4 evaluation domains `d1` (of size `n`), `d2` (of size `2n`), `d4` (of size `4n`), /// and `d8` (of size `8n`). If generator of `d8` is `g`, the generator /// of `d4` is `g^2`, the generator of `d2` is `g^4`, and the generator of `d1` is `g^8`. - pub fn create(n: usize) -> Result { - let n = Domain::::compute_size_of_domain(n).ok_or(SetupError::DomainCreation( - "could not compute size of domain", - ))?; + pub fn create(n: usize) -> Result { + let n = Domain::::compute_size_of_domain(n) + .ok_or(DomainCreationError::DomainSizeFailed(n))?; - let d1 = Domain::::new(n).ok_or(SetupError::DomainCreation( - "construction of domain d1 did not work as intended", + let d1 = Domain::::new(n).ok_or(DomainCreationError::DomainConstructionFailed( + "d1".to_string(), + n, ))?; // we also create domains of larger sizes // to efficiently operate on polynomials in evaluation form. 
// (in evaluation form, the domain needs to grow as the degree of a polynomial grows) - let d2 = Domain::::new(2 * n).ok_or(SetupError::DomainCreation( - "construction of domain d2 did not work as intended", + let d2 = Domain::::new(2 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d2".to_string(), + 2 * n, ))?; - let d4 = Domain::::new(4 * n).ok_or(SetupError::DomainCreation( - "construction of domain d4 did not work as intended", + let d4 = Domain::::new(4 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d4".to_string(), + 4 * n, ))?; - let d8 = Domain::::new(8 * n).ok_or(SetupError::DomainCreation( - "construction of domain d8 did not work as intended", + let d8 = Domain::::new(8 * n).ok_or(DomainCreationError::DomainConstructionFailed( + "d8".to_string(), + 8 * n, ))?; // ensure the relationship between the three domains in case the library's behavior changes diff --git a/kimchi/src/circuits/expr.rs b/kimchi/src/circuits/expr.rs index a3bd390fcf..f331d96b1b 100644 --- a/kimchi/src/circuits/expr.rs +++ b/kimchi/src/circuits/expr.rs @@ -21,11 +21,11 @@ use o1_utils::{foreign_field::ForeignFieldHelpers, FieldHelpers}; use rayon::prelude::*; use serde::{Deserialize, Serialize}; use std::ops::{Add, AddAssign, Mul, Neg, Sub}; +use std::{cmp::Ordering, fmt, iter::FromIterator}; use std::{ collections::{HashMap, HashSet}, ops::MulAssign, }; -use std::{fmt, iter::FromIterator}; use thiserror::Error; use CurrOrNext::{Curr, Next}; @@ -459,6 +459,12 @@ impl FeatureFlag { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct RowOffset { + pub zk_rows: bool, + pub offset: i32, +} + /// An multi-variate polynomial over the base ring `C` with /// variables /// @@ -479,7 +485,7 @@ pub enum Expr { VanishesOnZeroKnowledgeAndPreviousRows, /// UnnormalizedLagrangeBasis(i) is /// (x^n - 1) / (x - omega^i) - UnnormalizedLagrangeBasis(i32), + UnnormalizedLagrangeBasis(RowOffset), Pow(Box>, u64), Cache(CacheId, Box>), /// If the feature flag is enabled, return the first expression; otherwise, return the second. @@ -649,7 +655,7 @@ pub enum PolishToken { Mul, Sub, VanishesOnZeroKnowledgeAndPreviousRows, - UnnormalizedLagrangeBasis(i32), + UnnormalizedLagrangeBasis(RowOffset), Store, Load(usize), /// Skip the given number of tokens if the feature is enabled. 
@@ -764,7 +770,12 @@ impl PolishToken { stack.push(eval_vanishes_on_last_n_rows(d, c.zk_rows + 1, pt)) } UnnormalizedLagrangeBasis(i) => { - stack.push(unnormalized_lagrange_basis(&d, *i, &pt)) + let offset = if i.zk_rows { + -(c.zk_rows as i32) + i.offset + } else { + i.offset + }; + stack.push(unnormalized_lagrange_basis(&d, offset, &pt)) } Literal(x) => stack.push(*x), Dup => stack.push(stack[stack.len() - 1]), @@ -1587,7 +1598,14 @@ impl Expr> { VanishesOnZeroKnowledgeAndPreviousRows => { Ok(eval_vanishes_on_last_n_rows(d, c.zk_rows + 1, pt)) } - UnnormalizedLagrangeBasis(i) => Ok(unnormalized_lagrange_basis(&d, *i, &pt)), + UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(c.zk_rows as i32) + i.offset + } else { + i.offset + }; + Ok(unnormalized_lagrange_basis(&d, offset, &pt)) + } Cell(v) => v.evaluate(evals), Cache(_, e) => e.evaluate_(d, pt, evals, c), IfFeature(feature, e1, e2) => { @@ -1649,7 +1667,14 @@ impl Expr { VanishesOnZeroKnowledgeAndPreviousRows => { Ok(eval_vanishes_on_last_n_rows(d, zk_rows + 1, pt)) } - UnnormalizedLagrangeBasis(i) => Ok(unnormalized_lagrange_basis(&d, *i, &pt)), + UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(zk_rows as i32) + i.offset + } else { + i.offset + }; + Ok(unnormalized_lagrange_basis(&d, offset, &pt)) + } Cell(v) => v.evaluate(evals), Cache(_, e) => e.evaluate(d, pt, zk_rows, evals), IfFeature(feature, e1, e2) => { @@ -1787,10 +1812,17 @@ impl Expr { evals: env.vanishes_on_zero_knowledge_and_previous_rows, }, Expr::Constant(x) => EvalResult::Constant(*x), - Expr::UnnormalizedLagrangeBasis(i) => EvalResult::Evals { - domain: d, - evals: unnormalized_lagrange_evals(env.l0_1, *i, d, env), - }, + Expr::UnnormalizedLagrangeBasis(i) => { + let offset = if i.zk_rows { + -(env.constants.zk_rows as i32) + i.offset + } else { + i.offset + }; + EvalResult::Evals { + domain: d, + evals: unnormalized_lagrange_evals(env.l0_1, offset, d, env), + } + } Expr::Cell(Variable { col, row }) => { let evals: &'a Evaluations> = { match env.get_column(col) { @@ -2481,7 +2513,9 @@ where Double(x) => format!("double({})", x.ocaml(cache)), Constant(x) => x.ocaml(), Cell(v) => format!("cell({})", v.ocaml()), - UnnormalizedLagrangeBasis(i) => format!("unnormalized_lagrange_basis({})", *i), + UnnormalizedLagrangeBasis(i) => { + format!("unnormalized_lagrange_basis({}, {})", i.zk_rows, i.offset) + } VanishesOnZeroKnowledgeAndPreviousRows => { "vanishes_on_zero_knowledge_and_previous_rows".to_string() } @@ -2532,7 +2566,18 @@ where Double(x) => format!("2 ({})", x.latex(cache)), Constant(x) => x.latex(), Cell(v) => v.latex(), - UnnormalizedLagrangeBasis(i) => format!("unnormalized\\_lagrange\\_basis({})", *i), + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: true, + offset: i, + }) => { + format!("unnormalized\\_lagrange\\_basis(zk\\_rows + {})", *i) + } + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: i, + }) => { + format!("unnormalized\\_lagrange\\_basis({})", *i) + } VanishesOnZeroKnowledgeAndPreviousRows => { "vanishes\\_on\\_zero\\_knowledge\\_and\\_previous\\_row".to_string() } @@ -2557,7 +2602,20 @@ where Double(x) => format!("double({})", x.text(cache)), Constant(x) => x.text(), Cell(v) => v.text(), - UnnormalizedLagrangeBasis(i) => format!("unnormalized_lagrange_basis({})", *i), + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: true, + offset: i, + }) => match i.cmp(&0) { + Ordering::Greater => format!("unnormalized_lagrange_basis(zk_rows + {})", *i), + Ordering::Equal => 
"unnormalized_lagrange_basis(zk_rows)".to_string(), + Ordering::Less => format!("unnormalized_lagrange_basis(zk_rows - {})", (-*i)), + }, + UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: i, + }) => { + format!("unnormalized_lagrange_basis({})", *i) + } VanishesOnZeroKnowledgeAndPreviousRows => { "vanishes_on_zero_knowledge_and_previous_rows".to_string() } diff --git a/kimchi/src/circuits/gate.rs b/kimchi/src/circuits/gate.rs index 212bddbbbf..61be85900d 100644 --- a/kimchi/src/circuits/gate.rs +++ b/kimchi/src/circuits/gate.rs @@ -211,6 +211,7 @@ impl CircuitGate { EndoMul => self.verify_endomul::(row, witness, &index.cs), EndoMulScalar => self.verify_endomul_scalar::(row, witness, &index.cs), // TODO: implement the verification for the lookup gate + // See https://github.com/MinaProtocol/mina/issues/14011 Lookup => Ok(()), CairoClaim | CairoInstruction | CairoFlags | CairoTransition => { self.verify_cairo_gate::(row, witness, &index.cs) @@ -299,6 +300,7 @@ impl CircuitGate { } GateType::Lookup => { // TODO: implement the verification for the lookup gate + // See https://github.com/MinaProtocol/mina/issues/14011 vec![] } GateType::CairoClaim => turshi::Claim::constraint_checks(&env, &mut cache), diff --git a/kimchi/src/circuits/lookup/constraints.rs b/kimchi/src/circuits/lookup/constraints.rs index ec4f47f7ca..3b5c38b4d4 100644 --- a/kimchi/src/circuits/lookup/constraints.rs +++ b/kimchi/src/circuits/lookup/constraints.rs @@ -1,6 +1,6 @@ use crate::{ circuits::{ - expr::{prologue::*, Column, ConstantExpr}, + expr::{prologue::*, Column, ConstantExpr, RowOffset}, gate::{CircuitGate, CurrOrNext}, lookup::lookups::{ JointLookup, JointLookupSpec, JointLookupValue, LocalPosition, LookupInfo, @@ -255,6 +255,8 @@ where .iter() .enumerate() .map(|(i, s)| { + // Snake pattern: even chunks of s are direct + // while the odd ones are reversed let (i1, i2) = if i % 2 == 0 { (row, row + 1) } else { @@ -371,7 +373,6 @@ impl LookupConfiguration { pub fn constraints( configuration: &LookupConfiguration, generate_feature_flags: bool, - zk_rows: usize, ) -> Vec> { // Something important to keep in mind is that the last 2 rows of // all columns will have random values in them to maintain zero-knowledge. 
@@ -601,14 +602,20 @@ pub fn constraints( let aggreg_equation = E::cell(Column::LookupAggreg, Next) * denominator - E::cell(Column::LookupAggreg, Curr) * numerator; - let final_lookup_row: i32 = -(zk_rows as i32) - 1; + let final_lookup_row = RowOffset { + zk_rows: true, + offset: -1, + }; let mut res = vec![ // the accumulator except for the last zk_rows+1 rows // (contains the zk-rows and the last value of the accumulator) E::VanishesOnZeroKnowledgeAndPreviousRows * aggreg_equation, // the initial value of the accumulator - E::UnnormalizedLagrangeBasis(0) * (E::cell(Column::LookupAggreg, Curr) - E::one()), + E::UnnormalizedLagrangeBasis(RowOffset { + zk_rows: false, + offset: 0, + }) * (E::cell(Column::LookupAggreg, Curr) - E::one()), // Check that the final value of the accumulator is 1 E::UnnormalizedLagrangeBasis(final_lookup_row) * (E::cell(Column::LookupAggreg, Curr) - E::one()), @@ -622,7 +629,10 @@ pub fn constraints( final_lookup_row } else { // Check compatibility of the first elements - 0 + RowOffset { + zk_rows: false, + offset: 0, + } }; let mut expr = E::UnnormalizedLagrangeBasis(first_or_last) * (column(Column::LookupSorted(i)) - column(Column::LookupSorted(i + 1))); diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index f0be99289d..e4e70a3a7b 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -21,7 +21,7 @@ use std::iter; use thiserror::Error; /// Represents an error found when computing the lookup constraint system -#[derive(Debug, Error)] +#[derive(Debug, Error, Clone)] pub enum LookupError { #[error("One of the lookup tables has columns of different lengths")] InconsistentTableLength, @@ -32,6 +32,8 @@ pub enum LookupError { }, #[error("The table with id 0 must have an entry of all zeros")] TableIDZeroMustHaveZeroEntry, + #[error("Cannot create a combined table since ids for sub-tables are colliding. The collision type is: {collision_type}")] + LookupTableIdCollision { collision_type: String }, } /// Lookup selectors @@ -200,7 +202,7 @@ impl LookupConstraintSystem { /// Will give error if inputs validation do not match. pub fn create( gates: &[CircuitGate], - lookup_tables: Vec>, + fixed_lookup_tables: Vec>, runtime_tables: Option>>, domain: &EvaluationDomains, zk_rows: usize, @@ -217,14 +219,42 @@ impl LookupConstraintSystem { // product is 1, we cannot use those rows to store any values. let max_num_entries = d1_size - zk_rows - 1; - //~ 2. Get the lookup selectors and lookup tables (TODO: how?) + //~ 2. Get the lookup selectors and lookup tables that are specified implicitly + // by the lookup gates. let (lookup_selectors, gate_lookup_tables) = lookup_info.selector_polynomials_and_tables(domain, gates); - //~ 3. Concatenate runtime lookup tables with the ones used by gates - let mut lookup_tables: Vec<_> = gate_lookup_tables + // Checks whether an iterator contains any duplicates, and if yes, raises + // a corresponding LookupTableIdCollision error. + fn check_id_duplicates<'a, I: Iterator>( + iter: I, + msg: &str, + ) -> Result<(), LookupError> { + use itertools::Itertools; + match iter.duplicates().collect::>() { + dups if !dups.is_empty() => Err(LookupError::LookupTableIdCollision { + collision_type: format!("{}: {:?}", msg, dups).to_string(), + }), + _ => Ok(()), + } + } + + // If there is a gate using a lookup table, this table must not be added + // explicitly to the constraint system. 
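+        // Illustrative example: fixed tables with ids [4, 5] chained with
+        // gate-implied tables with ids [5] yield the id stream [4, 5, 5],
+        // so `check_id_duplicates` reports a collision on 5; disjoint id
+        // sets such as [4] and [5] pass the check.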
+        let fixed_gate_joint_ids: Vec<i32> = fixed_lookup_tables
+            .iter()
+            .map(|lt| lt.id)
+            .chain(gate_lookup_tables.iter().map(|lt| lt.id))
+            .collect();
+        check_id_duplicates(
+            fixed_gate_joint_ids.iter(),
+            "duplicates between fixed given and fixed from-gate tables",
+        )?;
+
+        //~ 3. Concatenate the explicit fixed lookup tables with the ones (implicitly) used by gates.
+        let mut lookup_tables: Vec<_> = fixed_lookup_tables
             .into_iter()
-            .chain(lookup_tables)
+            .chain(gate_lookup_tables)
             .collect();

         let mut has_table_id_0 = false;
@@ -232,6 +262,13 @@ impl LookupConstraintSystem {
         // if we are using runtime tables
         let (runtime_table_offset, runtime_selector) =
             if let Some(runtime_tables) = &runtime_tables {
+                // Check duplicates in runtime table ids
+                let runtime_tables_ids: Vec<i32> =
+                    runtime_tables.iter().map(|rt| rt.id).collect();
+                check_id_duplicates(runtime_tables_ids.iter(), "runtime table duplicates")?;
+                // Runtime table IDs /may/ collide with lookup
+                // table IDs, so we intentionally do not check for such collisions here.
+
                 // save the offset of the end of the table
                 let mut runtime_table_offset = 0;
                 for table in &lookup_tables {
@@ -399,6 +436,15 @@ impl LookupConstraintSystem {
         }

         //~ 6. Pad the end of the concatenated table with the dummy value.
+        // By padding with 0, we constrain the table with ID 0 to
+        // have a zero entry.
+        // This is for the rows which do not have a lookup selector,
+        // see ../../../../book/src/kimchi/lookup.md.
+        // The zero entry row is contained in the built-in XOR table.
+        // An error is raised when creating the CS if a user-defined
+        // table is defined with ID 0 without a row containing zeroes.
+        // If no such table is used, we artificially add a dummy
+        // table with ID 0 and a row containing only zeroes.
         lookup_table
             .iter_mut()
             .for_each(|col| col.extend(repeat_n(F::zero(), max_num_entries - col.len())));
@@ -448,3 +494,105 @@ impl LookupConstraintSystem {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+
+    use super::{LookupError, LookupTable, RuntimeTableCfg};
+    use crate::{
+        circuits::constraints::ConstraintSystem, circuits::gate::CircuitGate,
+        circuits::lookup::tables::xor, circuits::polynomials::range_check, error::SetupError,
+    };
+    use mina_curves::pasta::Fp;
+
+    #[test]
+    fn test_colliding_table_ids() {
+        let (_, gates) = CircuitGate::<Fp>::create_multi_range_check(0);
+        let collision_id: i32 = 5;
+
+        let cs = ConstraintSystem::<Fp>::create(gates.clone())
+            .lookup(vec![range_check::gadget::lookup_table()])
+            .build();
+
+        assert!(
+            matches!(
+                cs,
+                Err(SetupError::LookupCreation(
+                    LookupError::LookupTableIdCollision { .. }
+                ))
+            ),
+            "LookupConstraintSystem::create(...) must fail due to range table passed twice"
+        );
+
+        let cs = ConstraintSystem::<Fp>::create(gates.clone())
+            .lookup(vec![xor::xor_table()])
+            .build();
+
+        assert!(
+            cs.is_ok(),
+            "LookupConstraintSystem::create(...) must succeed, no duplicates exist"
+        );
+
+        let cs = ConstraintSystem::<Fp>::create(gates.clone())
+            .lookup(vec![
+                LookupTable {
+                    id: collision_id,
+                    data: vec![vec![From::from(0); 16]],
+                },
+                LookupTable {
+                    id: collision_id,
+                    data: vec![vec![From::from(1); 16]],
+                },
+            ])
+            .build();
+
+        assert!(
+            matches!(
+                cs,
+                Err(SetupError::LookupCreation(
+                    LookupError::LookupTableIdCollision { .. }
+                ))
+            ),
+            "LookupConstraintSystem::create(...)
must fail, collision in fixed ids" + ); + + let cs = ConstraintSystem::::create(gates.clone()) + .runtime(Some(vec![ + RuntimeTableCfg { + id: collision_id, + first_column: vec![From::from(0); 16], + }, + RuntimeTableCfg { + id: collision_id, + first_column: vec![From::from(1); 16], + }, + ])) + .build(); + + assert!( + matches!( + cs, + Err(SetupError::LookupCreation( + LookupError::LookupTableIdCollision { .. } + )) + ), + "LookupConstraintSystem::create(...) must fail, collision in runtime ids" + ); + + let cs = ConstraintSystem::::create(gates.clone()) + .lookup(vec![LookupTable { + id: collision_id, + data: vec![vec![From::from(0); 16]], + }]) + .runtime(Some(vec![RuntimeTableCfg { + id: collision_id, + first_column: vec![From::from(1); 16], + }])) + .build(); + + assert!( + cs.is_ok(), + "LookupConstraintSystem::create(...) must not fail when there is a collision between runtime and lookup ids" + ); + } +} diff --git a/kimchi/src/circuits/lookup/lookups.rs b/kimchi/src/circuits/lookup/lookups.rs index b126e1be00..002b040828 100644 --- a/kimchi/src/circuits/lookup/lookups.rs +++ b/kimchi/src/circuits/lookup/lookups.rs @@ -323,7 +323,6 @@ pub type JointLookupSpec = JointLookup, LookupTableID>; pub type JointLookupValue = JointLookup; impl + From> JointLookupValue { - // TODO: Support multiple tables /// Evaluate the combined value of a joint-lookup. pub fn evaluate(&self, joint_combiner: &F, table_id_combiner: &F) -> F { combine_table_entry( diff --git a/kimchi/src/circuits/lookup/tables/mod.rs b/kimchi/src/circuits/lookup/tables/mod.rs index 0c049f3e59..cd183cb714 100644 --- a/kimchi/src/circuits/lookup/tables/mod.rs +++ b/kimchi/src/circuits/lookup/tables/mod.rs @@ -78,19 +78,15 @@ impl LookupTable where F: FftField, { - /// Return true if the table has an entry containing all zeros. + /// Return true if the table has an entry (row) containing all zeros. pub fn has_zero_entry(&self) -> bool { // reminder: a table is written as a list of columns, // not as a list of row entries. for row in 0..self.len() { - for col in &self.data { - if !col[row].is_zero() { - continue; - } + if self.data.iter().all(|col| col[row].is_zero()) { return true; } } - false } diff --git a/kimchi/src/error.rs b/kimchi/src/error.rs index 5e4a8817b7..8f2e8406a1 100644 --- a/kimchi/src/error.rs +++ b/kimchi/src/error.rs @@ -1,5 +1,6 @@ //! This module implements the [`ProverError`] type. 
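//! A sketch of how the new error variants below nest (assuming the
//! definitions introduced in this patch): a failed domain construction
//! surfaces as
//! `SetupError::DomainCreation(DomainCreationError::DomainConstructionFailed(..))`,
//! and a lookup table id collision as
//! `SetupError::LookupCreation(LookupError::LookupTableIdCollision { .. })`,
//! which is what the tests added in `lookup/index.rs` match on.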
+use crate::circuits::lookup::index::LookupError; // not sure about hierarchy
 use poly_commitment::error::CommitmentError;
 use thiserror::Error;

@@ -82,6 +83,16 @@ pub enum VerifyError {
     MissingCommitment(crate::circuits::expr::Column),
 }

+/// Errors that can arise when preparing the setup
+#[derive(Error, Debug, Clone)]
+pub enum DomainCreationError {
+    #[error("could not compute the size of domain for {0}")]
+    DomainSizeFailed(usize),
+
+    #[error("construction of domain {0} for size {1} failed")]
+    DomainConstructionFailed(String, usize),
+}
+
 /// Errors that can arise when preparing the setup
 #[derive(Error, Debug, Clone)]
 pub enum SetupError {
@@ -89,7 +100,10 @@ pub enum SetupError {
     ConstraintSystem(String),

     #[error("the domain could not be constructed: {0}")]
-    DomainCreation(&'static str),
+    DomainCreation(DomainCreationError),
+
+    #[error("the lookup constraint system cannot be constructed: {0}")]
+    LookupCreation(LookupError),
 }

 /// Errors that can arise when creating a verifier index
diff --git a/kimchi/src/linearization.rs b/kimchi/src/linearization.rs
index 4f85cb920c..566ef58216 100644
--- a/kimchi/src/linearization.rs
+++ b/kimchi/src/linearization.rs
@@ -38,7 +38,6 @@ use ark_ff::{FftField, PrimeField, SquareRootField, Zero};
 pub fn constraints_expr<F: PrimeField + SquareRootField>(
     feature_flags: Option<&FeatureFlags>,
     generic: bool,
-    zk_rows: usize,
 ) -> (Expr<ConstantExpr<F>>, Alphas<F>) {
     // register powers of alpha so that we don't reuse them across mutually inclusive constraints
     let mut powers_of_alpha = Alphas::<F>::default();
@@ -166,8 +165,7 @@ pub fn constraints_expr(
     if feature_flags.lookup_features.patterns != LookupPatterns::default() {
         let lookup_configuration =
             LookupConfiguration::new(LookupInfo::create(feature_flags.lookup_features));
-        let constraints =
-            lookup::constraints::constraints(&lookup_configuration, false, zk_rows);
+        let constraints = lookup::constraints::constraints(&lookup_configuration, false);

         // note: the number of constraints depends on the lookup configuration,
         // specifically the presence of runtime tables.
@@ -193,7 +191,7 @@ pub fn constraints_expr(
             joint_lookup_used: true,
         };
         let lookup_configuration = LookupConfiguration::new(LookupInfo::create(all_features));
-        let constraints = lookup::constraints::constraints(&lookup_configuration, true, zk_rows);
+        let constraints = lookup::constraints::constraints(&lookup_configuration, true);

         // note: the number of constraints depends on the lookup configuration,
         // specifically the presence of runtime tables.
@@ -224,7 +222,7 @@
     // flags.
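    // Why `zk_rows` could be dropped from these signatures (a sketch): row
    // positions inside the lookup constraints are now symbolic, e.g.
    //
    //     let final_lookup_row = RowOffset { zk_rows: true, offset: -1 };
    //
    // instead of the eagerly resolved
    //
    //     let final_lookup_row: i32 = -(zk_rows as i32) - 1;
    //
    // so the resulting expression no longer depends on the concrete number
    // of zero-knowledge rows at construction time.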
if cfg!(feature = "check_feature_flags") { if let Some(feature_flags) = feature_flags { - let (feature_flagged_expr, _) = constraints_expr(None, generic, zk_rows); + let (feature_flagged_expr, _) = constraints_expr(None, generic); let feature_flagged_expr = feature_flagged_expr.apply_feature_flags(feature_flags); assert_eq!(expr, feature_flagged_expr); } @@ -341,11 +339,10 @@ pub fn linearization_columns( pub fn expr_linearization( feature_flags: Option<&FeatureFlags>, generic: bool, - zk_rows: usize, ) -> (Linearization>>, Alphas) { let evaluated_cols = linearization_columns::(feature_flags); - let (expr, powers_of_alpha) = constraints_expr(feature_flags, generic, zk_rows); + let (expr, powers_of_alpha) = constraints_expr(feature_flags, generic); let linearization = expr .linearize(evaluated_cols) diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 14fe30d365..55a99561c3 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -260,7 +260,7 @@ where .interpolate(); //~ 1. Commit (non-hiding) to the negated public input polynomial. - let public_comm = index.srs.commit_non_hiding(&public_poly, num_chunks, None); + let public_comm = index.srs.commit_non_hiding(&public_poly, num_chunks); let public_comm = { index .srs @@ -393,7 +393,7 @@ where let runtime_table_comm = index .srs - .commit(&runtime_table_contribution, num_chunks, None, rng); + .commit(&runtime_table_contribution, num_chunks, rng); // absorb the commitment absorb_commitment(&mut fq_sponge, &runtime_table_comm.commitment); @@ -607,7 +607,7 @@ where let z_poly = index.perm_aggreg(&witness, &beta, &gamma, rng)?; //~ 1. Commit (hidding) to the permutation aggregation polynomial $z$. - let z_comm = index.srs.commit(&z_poly, num_chunks, None, rng); + let z_comm = index.srs.commit(&z_poly, num_chunks, rng); //~ 1. Absorb the permutation aggregation polynomial $z$ with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &z_comm.commitment); @@ -818,11 +818,7 @@ where // lookup { if let Some(lcs) = index.cs.lookup_constraint_system.as_ref() { - let constraints = lookup::constraints::constraints( - &lcs.configuration, - false, - index.cs.zk_rows as usize, - ); + let constraints = lookup::constraints::constraints(&lcs.configuration, false); let constraints_len = u32::try_from(constraints.len()) .expect("not expecting a large amount of constraints"); let lookup_alphas = @@ -870,7 +866,7 @@ where }; //~ 1. commit (hiding) to the quotient polynomial $t$ - let t_comm = { index.srs.commit("ient_poly, 7 * num_chunks, None, rng) }; + let t_comm = { index.srs.commit("ient_poly, 7 * num_chunks, rng) }; //~ 1. Absorb the the commitment of the quotient polynomial with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &t_comm.commitment); @@ -940,7 +936,7 @@ where //~~ * $s_i$ //~~ * $w_i$ //~~ * $z$ - //~~ * lookup (TODO) + //~~ * lookup (TODO, see [this issue](https://github.com/MinaProtocol/mina/issues/13886)) //~~ * generic selector //~~ * poseidon selector //~ @@ -1155,10 +1151,9 @@ where PolyComm { // blinding_f - Z_H(zeta) * blinding_t - unshifted: vec![ + elems: vec![ blinding_f - (zeta_to_domain_size - G::ScalarField::one()) * blinding_t, ], - shifted: None, } }; @@ -1192,7 +1187,7 @@ where .map(|RecursionChallenge { chals, comm }| { ( DensePolynomial::from_coefficients_vec(b_poly_coefficients(chals)), - comm.unshifted.len(), + comm.elems.len(), ) }) .collect::>(); @@ -1227,8 +1222,7 @@ where //~ (and evaluation proofs) in the protocol. //~ First, include the previous challenges, in case we are in a recursive prover. 
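    // Blinder conventions used below (a sketch of the two helpers that
    // follow): a `non_hiding` commitment opens with all-zero blinding
    // factors, while `fixed_hiding` uses the constant blinder one; both
    // produce a `PolyComm` with one blinding element per chunk.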
let non_hiding = |d1_size: usize| PolyComm { - unshifted: vec![G::ScalarField::zero(); d1_size], - shifted: None, + elems: vec![G::ScalarField::zero(); d1_size], }; let coefficients_form = DensePolynomialOrEvaluations::DensePolynomial; @@ -1236,12 +1230,11 @@ where let mut polynomials = polys .iter() - .map(|(p, d1_size)| (coefficients_form(p), None, non_hiding(*d1_size))) + .map(|(p, d1_size)| (coefficients_form(p), non_hiding(*d1_size))) .collect::>(); let fixed_hiding = |d1_size: usize| PolyComm { - unshifted: vec![G::ScalarField::one(); d1_size], - shifted: None, + elems: vec![G::ScalarField::one(); d1_size], }; //~ 1. Then, include: @@ -1252,48 +1245,38 @@ where //~~ * the poseidon selector //~~ * the 15 registers/witness columns //~~ * the 6 sigmas - polynomials.push(( - coefficients_form(&public_poly), - None, - fixed_hiding(num_chunks), - )); - polynomials.push((coefficients_form(&ft), None, blinding_ft)); - polynomials.push((coefficients_form(&z_poly), None, z_comm.blinders)); + polynomials.push((coefficients_form(&public_poly), fixed_hiding(num_chunks))); + polynomials.push((coefficients_form(&ft), blinding_ft)); + polynomials.push((coefficients_form(&z_poly), z_comm.blinders)); polynomials.push(( evaluations_form(&index.column_evaluations.generic_selector4), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.poseidon_selector8), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.complete_add_selector4), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.mul_selector8), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.emul_selector8), - None, fixed_hiding(num_chunks), )); polynomials.push(( evaluations_form(&index.column_evaluations.endomul_scalar_selector8), - None, fixed_hiding(num_chunks), )); polynomials.extend( witness_poly .iter() .zip(w_comm.iter()) - .map(|(w, c)| (coefficients_form(w), None, c.blinders.clone())) + .map(|(w, c)| (coefficients_form(w), c.blinders.clone())) .collect::>(), ); polynomials.extend( @@ -1301,13 +1284,13 @@ where .column_evaluations .coefficients8 .iter() - .map(|coefficientm| (evaluations_form(coefficientm), None, non_hiding(num_chunks))) + .map(|coefficientm| (evaluations_form(coefficientm), non_hiding(num_chunks))) .collect::>(), ); polynomials.extend( index.column_evaluations.permutation_coefficients8[0..PERMUTS - 1] .iter() - .map(|w| (evaluations_form(w), None, non_hiding(num_chunks))) + .map(|w| (evaluations_form(w), non_hiding(num_chunks))) .collect::>(), ); @@ -1317,7 +1300,6 @@ where { polynomials.push(( evaluations_form(range_check0_selector8), - None, non_hiding(num_chunks), )); } @@ -1326,7 +1308,6 @@ where { polynomials.push(( evaluations_form(range_check1_selector8), - None, non_hiding(num_chunks), )); } @@ -1337,7 +1318,6 @@ where { polynomials.push(( evaluations_form(foreign_field_add_selector8), - None, non_hiding(num_chunks), )); } @@ -1348,23 +1328,14 @@ where { polynomials.push(( evaluations_form(foreign_field_mul_selector8), - None, non_hiding(num_chunks), )); } if let Some(xor_selector8) = index.column_evaluations.xor_selector8.as_ref() { - polynomials.push(( - evaluations_form(xor_selector8), - None, - non_hiding(num_chunks), - )); + polynomials.push((evaluations_form(xor_selector8), non_hiding(num_chunks))); } if let Some(rot_selector8) = index.column_evaluations.rot_selector8.as_ref() { - polynomials.push(( - 
evaluations_form(rot_selector8), - None, - non_hiding(num_chunks), - )); + polynomials.push((evaluations_form(rot_selector8), non_hiding(num_chunks))); } //~~ * optionally, the runtime table @@ -1375,17 +1346,13 @@ where let sorted_comms = lookup_context.sorted_comms.as_ref().unwrap(); for (poly, comm) in sorted_poly.iter().zip(sorted_comms) { - polynomials.push((coefficients_form(poly), None, comm.blinders.clone())); + polynomials.push((coefficients_form(poly), comm.blinders.clone())); } //~~ * add the lookup aggreg polynomial let aggreg_poly = lookup_context.aggreg_coeffs.as_ref().unwrap(); let aggreg_comm = lookup_context.aggreg_comm.as_ref().unwrap(); - polynomials.push(( - coefficients_form(aggreg_poly), - None, - aggreg_comm.blinders.clone(), - )); + polynomials.push((coefficients_form(aggreg_poly), aggreg_comm.blinders.clone())); //~~ * add the combined table polynomial let table_blinding = { @@ -1414,28 +1381,23 @@ where if lcs.runtime_selector.is_some() { let runtime_comm = lookup_context.runtime_table_comm.as_ref().unwrap(); - let unshifted = runtime_comm + let elems = runtime_comm .blinders - .unshifted + .elems .iter() .map(|blinding| *joint_combiner * blinding + base_blinding) .collect(); - PolyComm { - unshifted, - shifted: None, - } + PolyComm { elems } } else { - PolyComm { - unshifted: vec![base_blinding; num_chunks], - shifted: None, - } + let elems = vec![base_blinding; num_chunks]; + PolyComm { elems } } }; let joint_lookup_table = lookup_context.joint_lookup_table.as_ref().unwrap(); - polynomials.push((coefficients_form(joint_lookup_table), None, table_blinding)); + polynomials.push((coefficients_form(joint_lookup_table), table_blinding)); //~~ * if present, add the runtime table polynomial if lcs.runtime_selector.is_some() { @@ -1444,7 +1406,6 @@ where polynomials.push(( coefficients_form(runtime_table), - None, runtime_table_comm.blinders.clone(), )); } @@ -1454,27 +1415,21 @@ where if let Some(runtime_lookup_table_selector) = lcs.runtime_selector.as_ref() { polynomials.push(( evaluations_form(runtime_lookup_table_selector), - None, non_hiding(1), )) } if let Some(xor_lookup_selector) = lcs.lookup_selectors.xor.as_ref() { - polynomials.push((evaluations_form(xor_lookup_selector), None, non_hiding(1))) + polynomials.push((evaluations_form(xor_lookup_selector), non_hiding(1))) } if let Some(lookup_gate_selector) = lcs.lookup_selectors.lookup.as_ref() { - polynomials.push((evaluations_form(lookup_gate_selector), None, non_hiding(1))) + polynomials.push((evaluations_form(lookup_gate_selector), non_hiding(1))) } if let Some(range_check_lookup_selector) = lcs.lookup_selectors.range_check.as_ref() { - polynomials.push(( - evaluations_form(range_check_lookup_selector), - None, - non_hiding(1), - )) + polynomials.push((evaluations_form(range_check_lookup_selector), non_hiding(1))) } if let Some(foreign_field_mul_lookup_selector) = lcs.lookup_selectors.ffmul.as_ref() { polynomials.push(( evaluations_form(foreign_field_mul_lookup_selector), - None, non_hiding(1), )) } diff --git a/kimchi/src/prover_index.rs b/kimchi/src/prover_index.rs index 44ee0f6266..523d583e18 100644 --- a/kimchi/src/prover_index.rs +++ b/kimchi/src/prover_index.rs @@ -69,8 +69,7 @@ where cs.endo = endo_q; // pre-compute the linearization - let (linearization, powers_of_alpha) = - expr_linearization(Some(&cs.feature_flags), true, cs.zk_rows as usize); + let (linearization, powers_of_alpha) = expr_linearization(Some(&cs.feature_flags), true); let evaluated_column_coefficients = 
cs.evaluated_column_coefficients(); diff --git a/kimchi/src/tests/foreign_field_add.rs b/kimchi/src/tests/foreign_field_add.rs index 24c1d5a8c1..760c7fa2d5 100644 --- a/kimchi/src/tests/foreign_field_add.rs +++ b/kimchi/src/tests/foreign_field_add.rs @@ -1490,7 +1490,6 @@ fn test_ffadd_finalization() { let index = { let cs = ConstraintSystem::create(gates.clone()) - .lookup(vec![range_check::gadget::lookup_table()]) .public(num_public_inputs) .build() .unwrap(); diff --git a/kimchi/src/tests/lookup.rs b/kimchi/src/tests/lookup.rs index a674eba78f..d92f62e24b 100644 --- a/kimchi/src/tests/lookup.rs +++ b/kimchi/src/tests/lookup.rs @@ -14,6 +14,7 @@ use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, sponge::{DefaultFqSponge, DefaultFrSponge}, }; +use rand::prelude::*; use rand::Rng; use std::array; @@ -22,10 +23,12 @@ type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; fn setup_lookup_proof(use_values_from_table: bool, num_lookups: usize, table_sizes: Vec) { - let lookup_table_values: Vec> = table_sizes + let mut lookup_table_values: Vec> = table_sizes .iter() .map(|size| (0..*size).map(|_| rand::random()).collect()) .collect(); + // Zero table must have a zero row + lookup_table_values[0][0] = From::from(0); let lookup_tables = lookup_table_values .iter() .enumerate() @@ -131,7 +134,9 @@ fn setup_successfull_runtime_table_test( runtime_tables: Vec>, lookups: Vec, ) { - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let nb_lookups = lookups.len(); // circuit @@ -194,13 +199,15 @@ fn setup_successfull_runtime_table_test( #[test] fn test_runtime_table() { let num = 5; - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let first_column = [8u32, 9, 8, 7, 1]; let len = first_column.len(); let mut runtime_tables_setup = vec![]; - for table_id in 0..num { + for table_id in 1..num + 1 { let cfg = RuntimeTableCfg { id: table_id, first_column: first_column.into_iter().map(Into::into).collect(), @@ -236,7 +243,7 @@ fn test_runtime_table() { for row in 0..20 { // the first register is the table id. We pick one random table. - lookup_cols[0][row] = (rng.gen_range(0..num) as u32).into(); + lookup_cols[0][row] = (rng.gen_range(1..num + 1) as u32).into(); // create queries into our runtime lookup table. 
// We will set [w1, w2], [w3, w4] and [w5, w6] to randon indexes and @@ -448,7 +455,9 @@ fn test_negative_test_runtime_table_prover_uses_undefined_id_in_index_and_witnes #[test] fn test_runtime_table_with_more_than_one_runtime_table_data_given_by_prover() { - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let first_column = [0, 1, 2, 3, 4]; let len = first_column.len(); @@ -551,7 +560,9 @@ fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_fixed_va #[test] fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_random_values() { - let mut rng = rand::thread_rng(); + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); let len = rng.gen_range(1usize..1000); let first_column: Vec = (0..len as i32).collect(); @@ -573,3 +584,147 @@ fn test_runtime_table_only_one_table_with_id_zero_with_non_zero_entries_random_v setup_successfull_runtime_table_test(vec![cfg], vec![runtime_table], lookups); } + +// This test verifies that if there is a table with ID 0, it contains a row with only zeroes. +// This is to enforce the constraint we have on the so-called "dummy value". +// FIXME: see https://github.com/o1-labs/proof-systems/issues/1460 +// We should test the error message, "expected" argument of the macro won't be +// allowed anymore in future release, see clippy output. +#[test] +#[should_panic] +fn test_lookup_with_a_table_with_id_zero_but_no_zero_entry() { + let max_len: u32 = 100u32; + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); + + // Non zero-length table + let len = 1u32 + rng.gen_range(0u32..max_len); + // Table id is 0 + let table_id: i32 = 0; + // Always include index 0 in the table. Maybe even a few. + let indices: Vec = (0..len) + .map(|i| { + if i == 0 { + 0u32 + } else { + rng.gen_range(0u32..max_len) + } + }) + .map(Into::into) + .collect(); + // But no zero values! + // So we'll get rows with zeroes that are not full-zero-rows. + let values: Vec = (0..len) + .map(|_| rng.gen_range(1u32..max_len)) + .map(Into::into) + .collect(); + let lookup_table = LookupTable { + id: table_id, + data: vec![indices, values], + }; + let lookup_tables = vec![lookup_table]; + let num_lookups = 20; + + // circuit gates + let gates = (0..num_lookups) + .map(|i| CircuitGate::new(GateType::Lookup, Wire::for_row(i), vec![])) + .collect(); + + // 0 everywhere, it should handle the case (0, 0, 0). We simulate a lot of + // lookups with (0, 0, 0). + let witness = array::from_fn(|_col| vec![Fp::zero(); num_lookups]); + + let _ = TestFramework::::default() + .gates(gates) + .witness(witness) + .lookup_tables(lookup_tables) + .setup(); +} + +#[test] +fn test_dummy_value_is_added_in_an_arbitraly_created_table_when_no_table_with_id_0() { + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); + let max_len: u32 = 100u32; + let max_table_id: i32 = 100; + + // No zero-length table + let len = rng.gen_range(1u32..max_len); + // No table of ID 0 + let table_id: i32 = rng.gen_range(1i32..max_table_id); + // No index 0 in the table. 
+ let indices: Vec = (0..len) + .map(|_| rng.gen_range(1u32..max_len)) + .map(Into::into) + .collect(); + // No zero value + let values: Vec = (0..len) + .map(|_| rng.gen_range(1u32..max_len)) + .map(Into::into) + .collect(); + let lookup_table = LookupTable { + id: table_id, + data: vec![indices, values], + }; + let lookup_tables = vec![lookup_table]; + let num_lookups = 20; + + // circuit gates + let gates = (0..num_lookups) + .map(|i| CircuitGate::new(GateType::Lookup, Wire::for_row(i), vec![])) + .collect(); + + // 0 everywhere, it should handle the case (0, 0, 0). We simulate a lot of + // lookups with (0, 0, 0). + let witness = array::from_fn(|_col| vec![Fp::zero(); num_lookups]); + + TestFramework::::default() + .gates(gates) + .witness(witness) + .lookup_tables(lookup_tables) + .setup() + .prove_and_verify::() + .unwrap(); +} + +#[test] +fn test_dummy_zero_entry_is_counted_while_computing_domain_size() { + let seed: [u8; 32] = thread_rng().gen(); + eprintln!("Seed: {:?}", seed); + let mut rng = StdRng::from_seed(seed); + + let power_of_2: u32 = rng.gen_range(3..16); + // 4 = zk_rows + 1 for the closing constraint on the polynomial. + let len = (1 << power_of_2) - 3 - 1; + // We want to create a table with an ID different than 0. + let table_id: i32 = rng.gen_range(1..1_000); + let idx: Vec = (1..(len + 1) as i32).map(Into::into).collect(); + let values: Vec = (1..(len + 1)) + .map(|_| UniformRand::rand(&mut rng)) + .collect(); + let lt = LookupTable { + id: table_id, + data: vec![idx, values], + }; + + // Dummy, used for the setup. Only the number of gates must be lower than + // the length of the table to avoid having a bigger circuit than the table + // size, and therefore use it as the main component for the domain size + // computation. 
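+    // Worked instance (illustrative): with power_of_2 = 3 we get
+    // len = 8 - 4 = 4. A domain of size 8 holds at most
+    // 8 - zk_rows - 1 = 4 fixed table rows, so the table alone fits
+    // exactly; since the table id is non-zero, the dummy all-zero row is
+    // added on top, requiring 5 rows and forcing the domain to the next
+    // power of two, 16 = 1 << (power_of_2 + 1).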
+ let num_lookups = rng.gen_range(2..len); + let gates = (0..num_lookups) + .map(|i| CircuitGate::new(GateType::Lookup, Wire::for_row(i), vec![])) + .collect(); + let witness = array::from_fn(|_col| vec![Fp::zero(); num_lookups]); + + let setup = TestFramework::::default() + .gates(gates) + .witness(witness) + .lookup_tables(vec![lt]) + .setup(); + let domain_size = setup.prover_index().cs.domain.d1.size; + // As the dummy entry has been added, we reached the next power of two + assert!(domain_size == (1 << (power_of_2 + 1))); +} diff --git a/kimchi/src/tests/range_check.rs b/kimchi/src/tests/range_check.rs index 27240db942..8e46962add 100644 --- a/kimchi/src/tests/range_check.rs +++ b/kimchi/src/tests/range_check.rs @@ -64,15 +64,7 @@ fn create_test_prover_index( CircuitGate::::create_multi_range_check(0) }; - new_index_for_test_with_lookups( - gates, - public_size, - 0, - vec![range_check::gadget::lookup_table()], - None, - false, - None, - ) + new_index_for_test_with_lookups(gates, public_size, 0, vec![], None, false, None) } #[test] diff --git a/kimchi/src/tests/recursion.rs b/kimchi/src/tests/recursion.rs index d7f028acb5..719318eb96 100644 --- a/kimchi/src/tests/recursion.rs +++ b/kimchi/src/tests/recursion.rs @@ -43,7 +43,7 @@ fn test_recursion() { let comm = { let coeffs = b_poly_coefficients(&chals); let b = DensePolynomial::from_coefficients_vec(coeffs); - index.srs.commit_non_hiding(&b, 1, None) + index.srs.commit_non_hiding(&b, 1) }; RecursionChallenge::new(chals, comm) }; diff --git a/kimchi/src/tests/rot.rs b/kimchi/src/tests/rot.rs index f88125cf9b..f9a1308b86 100644 --- a/kimchi/src/tests/rot.rs +++ b/kimchi/src/tests/rot.rs @@ -328,7 +328,6 @@ fn test_rot_finalization() { let index = { let cs = ConstraintSystem::create(gates.clone()) .public(num_public_inputs) - .lookup(vec![rot::lookup_table()]) .build() .unwrap(); let mut srs = SRS::::create(cs.domain.d1.size()); diff --git a/kimchi/src/tests/xor.rs b/kimchi/src/tests/xor.rs index 0344e0aea3..7ab28b4008 100644 --- a/kimchi/src/tests/xor.rs +++ b/kimchi/src/tests/xor.rs @@ -392,7 +392,6 @@ fn test_xor_finalization() { let index = { let cs = ConstraintSystem::create(gates.clone()) - .lookup(vec![xor::lookup_table()]) .public(num_inputs) .build() .unwrap(); diff --git a/kimchi/src/verifier.rs b/kimchi/src/verifier.rs index 0c9cd4aef3..62d9e5d43f 100644 --- a/kimchi/src/verifier.rs +++ b/kimchi/src/verifier.rs @@ -205,38 +205,47 @@ where } } - //~ 1. Sample $\beta$ with the Fq-Sponge. + // --- PlonK - Round 2 + //~ 1. Sample the first permutation challenge $\beta$ with the Fq-Sponge. let beta = fq_sponge.challenge(); - //~ 1. Sample $\gamma$ with the Fq-Sponge. + //~ 1. Sample the second permutation challenge $\gamma$ with the Fq-Sponge. let gamma = fq_sponge.challenge(); //~ 1. If using lookup, absorb the commitment to the aggregation lookup polynomial. - self.commitments.lookup.iter().for_each(|l| { - absorb_commitment(&mut fq_sponge, &l.aggreg); - }); + if index.lookup_index.is_some() { + // Should not fail, as the lookup index is present + let lookup_commits = self + .commitments + .lookup + .as_ref() + .ok_or(VerifyError::LookupCommitmentMissing)?; + absorb_commitment(&mut fq_sponge, &lookup_commits.aggreg); + } //~ 1. Absorb the commitment to the permutation trace with the Fq-Sponge. absorb_commitment(&mut fq_sponge, &self.commitments.z_comm); - //~ 1. Sample $\alpha'$ with the Fq-Sponge. + // --- PlonK - Round 3 + //~ 1. Sample the quotient challenge $\alpha'$ with the Fq-Sponge. 
let alpha_chal = ScalarChallenge(fq_sponge.challenge()); //~ 1. Derive $\alpha$ from $\alpha'$ using the endomorphism (TODO: details). let alpha = alpha_chal.to_field(endo_r); //~ 1. Enforce that the length of the $t$ commitment is of size 7. - if self.commitments.t_comm.unshifted.len() > chunk_size * 7 { + if self.commitments.t_comm.elems.len() > chunk_size * 7 { return Err(VerifyError::IncorrectCommitmentLength( "t", chunk_size * 7, - self.commitments.t_comm.unshifted.len(), + self.commitments.t_comm.elems.len(), )); } //~ 1. Absorb the commitment to the quotient polynomial $t$ into the argument. absorb_commitment(&mut fq_sponge, &self.commitments.t_comm); + // --- PlonK - Round 4 //~ 1. Sample $\zeta'$ with the Fq-Sponge. let zeta_chal = ScalarChallenge(fq_sponge.challenge()); @@ -453,10 +462,10 @@ where let ft_eval1 = vec![self.ft_eval1]; #[allow(clippy::type_complexity)] - let mut es: Vec<(Vec>, Option)> = - polys.iter().map(|(_, e)| (e.clone(), None)).collect(); - es.push((public_evals.to_vec(), None)); - es.push((vec![ft_eval0, ft_eval1], None)); + let mut es: Vec>> = + polys.iter().map(|(_, e)| e.clone()).collect(); + es.push(public_evals.to_vec()); + es.push(vec![ft_eval0, ft_eval1]); for col in [ Column::Z, Column::Index(GateType::Generic), @@ -551,19 +560,16 @@ where .into_iter() .flatten(), ) { - es.push(( - { - let evals = self - .evals - .get_column(col) - .ok_or(VerifyError::MissingEvaluation(col))?; - vec![evals.zeta.clone(), evals.zeta_omega.clone()] - }, - None, - )) + es.push({ + let evals = self + .evals + .get_column(col) + .ok_or(VerifyError::MissingEvaluation(col))?; + vec![evals.zeta.clone(), evals.zeta_omega.clone()] + }) } - combined_inner_product(&evaluation_points, &v, &u, &es, index.srs().max_poly_size()) + combined_inner_product(&v, &u, &es) }; let oracles = RandomOracles { @@ -795,10 +801,7 @@ where .expect("pre-computed committed lagrange bases not found"); let com: Vec<_> = lgr_comm.iter().take(verifier_index.public).collect(); if public_input.is_empty() { - PolyComm::new( - vec![verifier_index.srs().blinding_commitment(); chunk_size], - None, - ) + PolyComm::new(vec![verifier_index.srs().blinding_commitment(); chunk_size]) } else { let elm: Vec<_> = public_input.iter().map(|s| -*s).collect(); let public_comm = PolyComm::::multi_scalar_mul(&com, &elm); @@ -917,21 +920,18 @@ where evaluations.extend(polys.into_iter().map(|(c, e)| Evaluation { commitment: c, evaluations: e, - degree_bound: None, })); //~~ * public input commitment evaluations.push(Evaluation { commitment: public_comm, evaluations: public_evals.to_vec(), - degree_bound: None, }); //~~ * ft commitment (chunks of it) evaluations.push(Evaluation { commitment: ft_comm, evaluations: vec![vec![ft_eval0], vec![proof.ft_eval1]], - degree_bound: None, }); for col in [ @@ -1015,7 +1015,6 @@ where .ok_or(VerifyError::MissingCommitment(col))? .clone(), evaluations: vec![evals.zeta.clone(), evals.zeta_omega.clone()], - degree_bound: None, }); } @@ -1038,6 +1037,9 @@ where let joint_combiner = oracles .joint_combiner .expect("joint_combiner should be present if lookups are used"); + // The table ID is added as the last column of the vector. + // Therefore, the exponent for the combiner for the table ID is the + // width of the concatenated table, i.e. max_joint_size. 
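+            // Illustrative: with joint combiner j and w = max_joint_size,
+            // a row (v_0, ..., v_{w-1}) of a table with id t is combined as
+            //     v_0 + v_1 * j + ... + v_{w-1} * j^(w-1) + t * j^w,
+            // so the table id column uses the combiner j^w computed here.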
let table_id_combiner = joint_combiner .1 .pow([u64::from(li.lookup_info.max_joint_size)]); @@ -1057,7 +1059,6 @@ where evaluations.push(Evaluation { commitment: table_comm, evaluations: vec![lookup_table.zeta.clone(), lookup_table.zeta_omega.clone()], - degree_bound: None, }); // add evaluation of the runtime table polynomial @@ -1074,7 +1075,6 @@ where evaluations.push(Evaluation { commitment: runtime.clone(), evaluations: vec![runtime_eval.zeta, runtime_eval.zeta_omega], - degree_bound: None, }); } } @@ -1125,7 +1125,6 @@ where .ok_or(VerifyError::MissingCommitment(col))? .clone(), evaluations: vec![evals.zeta.clone(), evals.zeta_omega.clone()], - degree_bound: None, }); } diff --git a/kimchi/src/verifier_index.rs b/kimchi/src/verifier_index.rs index 1b0511d800..67ecaeace0 100644 --- a/kimchi/src/verifier_index.rs +++ b/kimchi/src/verifier_index.rs @@ -439,42 +439,42 @@ impl> VerifierIndex // Always present for comm in sigma_comm.iter() { - fq_sponge.absorb_g(&comm.unshifted); + fq_sponge.absorb_g(&comm.elems); } for comm in coefficients_comm.iter() { - fq_sponge.absorb_g(&comm.unshifted); + fq_sponge.absorb_g(&comm.elems); } - fq_sponge.absorb_g(&generic_comm.unshifted); - fq_sponge.absorb_g(&psm_comm.unshifted); - fq_sponge.absorb_g(&complete_add_comm.unshifted); - fq_sponge.absorb_g(&mul_comm.unshifted); - fq_sponge.absorb_g(&emul_comm.unshifted); - fq_sponge.absorb_g(&endomul_scalar_comm.unshifted); + fq_sponge.absorb_g(&generic_comm.elems); + fq_sponge.absorb_g(&psm_comm.elems); + fq_sponge.absorb_g(&complete_add_comm.elems); + fq_sponge.absorb_g(&mul_comm.elems); + fq_sponge.absorb_g(&emul_comm.elems); + fq_sponge.absorb_g(&endomul_scalar_comm.elems); // Optional gates if let Some(range_check0_comm) = range_check0_comm { - fq_sponge.absorb_g(&range_check0_comm.unshifted); + fq_sponge.absorb_g(&range_check0_comm.elems); } if let Some(range_check1_comm) = range_check1_comm { - fq_sponge.absorb_g(&range_check1_comm.unshifted); + fq_sponge.absorb_g(&range_check1_comm.elems); } if let Some(foreign_field_mul_comm) = foreign_field_mul_comm { - fq_sponge.absorb_g(&foreign_field_mul_comm.unshifted); + fq_sponge.absorb_g(&foreign_field_mul_comm.elems); } if let Some(foreign_field_add_comm) = foreign_field_add_comm { - fq_sponge.absorb_g(&foreign_field_add_comm.unshifted); + fq_sponge.absorb_g(&foreign_field_add_comm.elems); } if let Some(xor_comm) = xor_comm { - fq_sponge.absorb_g(&xor_comm.unshifted); + fq_sponge.absorb_g(&xor_comm.elems); } if let Some(rot_comm) = rot_comm { - fq_sponge.absorb_g(&rot_comm.unshifted); + fq_sponge.absorb_g(&rot_comm.elems); } // Lookup index; optional @@ -496,26 +496,26 @@ impl> VerifierIndex }) = lookup_index { for entry in lookup_table { - fq_sponge.absorb_g(&entry.unshifted); + fq_sponge.absorb_g(&entry.elems); } if let Some(table_ids) = table_ids { - fq_sponge.absorb_g(&table_ids.unshifted); + fq_sponge.absorb_g(&table_ids.elems); } if let Some(runtime_tables_selector) = runtime_tables_selector { - fq_sponge.absorb_g(&runtime_tables_selector.unshifted); + fq_sponge.absorb_g(&runtime_tables_selector.elems); } if let Some(xor) = xor { - fq_sponge.absorb_g(&xor.unshifted); + fq_sponge.absorb_g(&xor.elems); } if let Some(lookup) = lookup { - fq_sponge.absorb_g(&lookup.unshifted); + fq_sponge.absorb_g(&lookup.elems); } if let Some(range_check) = range_check { - fq_sponge.absorb_g(&range_check.unshifted); + fq_sponge.absorb_g(&range_check.elems); } if let Some(ffmul) = ffmul { - fq_sponge.absorb_g(&ffmul.unshifted); + fq_sponge.absorb_g(&ffmul.elems); } } 
fq_sponge.digest_fq() diff --git a/poly-commitment/src/chunked.rs b/poly-commitment/src/chunked.rs index 32cb5e1408..9c3ee5c294 100644 --- a/poly-commitment/src/chunked.rs +++ b/poly-commitment/src/chunked.rs @@ -9,21 +9,19 @@ where C: CommitmentCurve, { /// Multiplies each commitment chunk of f with powers of zeta^n - /// Note that it ignores the shifted part. // TODO(mimoo): better name for this function pub fn chunk_commitment(&self, zeta_n: C::ScalarField) -> Self { let mut res = C::Projective::zero(); // use Horner's to compute chunk[0] + z^n chunk[1] + z^2n chunk[2] + ... // as ( chunk[-1] * z^n + chunk[-2] ) * z^n + chunk[-3] // (https://en.wikipedia.org/wiki/Horner%27s_method) - for chunk in self.unshifted.iter().rev() { + for chunk in self.elems.iter().rev() { res *= zeta_n; res.add_assign_mixed(chunk); } PolyComm { - unshifted: vec![res.into_affine()], - shifted: self.shifted, + elems: vec![res.into_affine()], } } } @@ -33,14 +31,13 @@ where F: Field, { /// Multiplies each blinding chunk of f with powers of zeta^n - /// Note that it ignores the shifted part. // TODO(mimoo): better name for this function pub fn chunk_blinding(&self, zeta_n: F) -> F { let mut res = F::zero(); // use Horner's to compute chunk[0] + z^n chunk[1] + z^2n chunk[2] + ... // as ( chunk[-1] * z^n + chunk[-2] ) * z^n + chunk[-3] // (https://en.wikipedia.org/wiki/Horner%27s_method) - for chunk in self.unshifted.iter().rev() { + for chunk in self.elems.iter().rev() { res *= zeta_n; res += chunk } diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index 3d6cdf2411..bb2469b49f 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -39,9 +39,7 @@ use super::evaluation_proof::*; #[serde(bound = "C: CanonicalDeserialize + CanonicalSerialize")] pub struct PolyComm { #[serde_as(as = "Vec")] - pub unshifted: Vec, - #[serde_as(as = "Option")] - pub shifted: Option, + pub elems: Vec, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -54,8 +52,8 @@ where } impl PolyComm { - pub fn new(unshifted: Vec, shifted: Option) -> Self { - Self { unshifted, shifted } + pub fn new(elems: Vec) -> Self { + Self { elems } } } @@ -68,19 +66,18 @@ where F: FnMut(A) -> B, B: CanonicalDeserialize + CanonicalSerialize, { - let unshifted = self.unshifted.iter().map(|x| f(x.clone())).collect(); - let shifted = self.shifted.as_ref().map(|x| f(x.clone())); - PolyComm { unshifted, shifted } + let elems = self.elems.iter().map(|x| f(x.clone())).collect(); + PolyComm { elems } } - /// Returns the length of the unshifted commitment. + /// Returns the length of the commitment. pub fn len(&self) -> usize { - self.unshifted.len() + self.elems.len() } /// Returns `true` if the commitment is empty. 
pub fn is_empty(&self) -> bool { - self.unshifted.is_empty() && self.shifted.is_none() + self.elems.is_empty() } } @@ -90,21 +87,16 @@ impl PolyComm { &self, other: &PolyComm, ) -> Option> { - if self.unshifted.len() != other.unshifted.len() { + if self.elems.len() != other.elems.len() { return None; } - let unshifted = self - .unshifted + let elems = self + .elems .iter() - .zip(other.unshifted.iter()) + .zip(other.elems.iter()) .map(|(x, y)| (*x, *y)) .collect(); - let shifted = match (self.shifted, other.shifted) { - (Some(x), Some(y)) => Some((x, y)), - (None, None) => None, - (Some(_), None) | (None, Some(_)) => return None, - }; - Some(PolyComm { unshifted, shifted }) + Some(PolyComm { elems }) } } @@ -159,25 +151,20 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn add(self, other: &'a PolyComm) -> PolyComm { - let mut unshifted = vec![]; - let n1 = self.unshifted.len(); - let n2 = other.unshifted.len(); + let mut elems = vec![]; + let n1 = self.elems.len(); + let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.unshifted[i] + other.unshifted[i] + self.elems[i] + other.elems[i] } else if i < n1 { - self.unshifted[i] + self.elems[i] } else { - other.unshifted[i] + other.elems[i] }; - unshifted.push(pt); + elems.push(pt); } - let shifted = match (self.shifted, other.shifted) { - (None, _) => other.shifted, - (_, None) => self.shifted, - (Some(p1), Some(p2)) => Some(p1 + p2), - }; - PolyComm { unshifted, shifted } + PolyComm { elems } } } @@ -185,37 +172,27 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn sub(self, other: &'a PolyComm) -> PolyComm { - let mut unshifted = vec![]; - let n1 = self.unshifted.len(); - let n2 = other.unshifted.len(); + let mut elems = vec![]; + let n1 = self.elems.len(); + let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.unshifted[i] + (-other.unshifted[i]) + self.elems[i] + (-other.elems[i]) } else if i < n1 { - self.unshifted[i] + self.elems[i] } else { - other.unshifted[i] + other.elems[i] }; - unshifted.push(pt); + elems.push(pt); } - let shifted = match (self.shifted, other.shifted) { - (None, _) => other.shifted, - (_, None) => self.shifted, - (Some(p1), Some(p2)) => Some(p1 + (-p2)), - }; - PolyComm { unshifted, shifted } + PolyComm { elems } } } impl PolyComm { pub fn scale(&self, c: C::ScalarField) -> PolyComm { PolyComm { - unshifted: self - .unshifted - .iter() - .map(|g| g.mul(c).into_affine()) - .collect(), - shifted: self.shifted.map(|g| g.mul(c).into_affine()), + elems: self.elems.iter().map(|g| g.mul(c).into_affine()).collect(), } } @@ -229,41 +206,27 @@ impl PolyComm { assert_eq!(com.len(), elm.len()); if com.is_empty() || elm.is_empty() { - return Self::new(vec![C::zero()], None); + return Self::new(vec![C::zero()]); } let all_scalars: Vec<_> = elm.iter().map(|s| s.into_repr()).collect(); - let unshifted_size = Iterator::max(com.iter().map(|c| c.unshifted.len())).unwrap(); - let mut unshifted = Vec::with_capacity(unshifted_size); + let elems_size = Iterator::max(com.iter().map(|c| c.elems.len())).unwrap(); + let mut elems = Vec::with_capacity(elems_size); - for chunk in 0..unshifted_size { + for chunk in 0..elems_size { let (points, scalars): (Vec<_>, Vec<_>) = com .iter() .zip(&all_scalars) // get rid of scalars that don't have an associated chunk - .filter_map(|(com, scalar)| com.unshifted.get(chunk).map(|c| (c, scalar))) + .filter_map(|(com, 
scalar)| com.elems.get(chunk).map(|c| (c, scalar))) .unzip(); let chunk_msm = VariableBaseMSM::multi_scalar_mul::(&points, &scalars); - unshifted.push(chunk_msm.into_affine()); + elems.push(chunk_msm.into_affine()); } - let mut shifted_pairs = com - .iter() - .zip(all_scalars) - // get rid of commitments without a `shifted` part - .filter_map(|(c, s)| c.shifted.map(|c| (c, s))) - .peekable(); - - let shifted = if shifted_pairs.peek().is_none() { - None - } else { - let (points, scalars): (Vec<_>, Vec<_>) = shifted_pairs.unzip(); - Some(VariableBaseMSM::multi_scalar_mul(&points, &scalars).into_affine()) - }; - - Self::new(unshifted, shifted) + Self::new(elems) } } @@ -343,10 +306,7 @@ pub fn absorb_commitment< sponge: &mut EFqSponge, commitment: &PolyComm, ) { - sponge.absorb_g(&commitment.unshifted); - if let Some(shifted) = commitment.shifted.as_ref() { - sponge.absorb_g(&[shifted.clone()]); - } + sponge.absorb_g(&commitment.elems); } /// A useful trait extending AffineCurve for commitments. @@ -443,21 +403,17 @@ pub fn to_group(m: &G::Map, t: ::BaseField /// Computes the linearization of the evaluations of a (potentially split) polynomial. /// Each given `poly` is associated to a matrix where the rows represent the number of evaluated points, /// and the columns represent potential segments (if a polynomial was split in several parts). -/// Note that if one of the polynomial comes specified with a degree bound, -/// the evaluation for the last segment is potentially shifted to meet the proof. #[allow(clippy::type_complexity)] pub fn combined_inner_product( - evaluation_points: &[F], polyscale: &F, evalscale: &F, // TODO(mimoo): needs a type that can get you evaluations or segments - polys: &[(Vec>, Option)], - srs_length: usize, + polys: &[Vec>], ) -> F { let mut res = F::zero(); let mut xi_i = F::one(); - for (evals_tr, shifted) in polys.iter().filter(|(evals_tr, _)| !evals_tr[0].is_empty()) { + for evals_tr in polys.iter().filter(|evals_tr| !evals_tr[0].is_empty()) { // transpose the evaluations let evals = (0..evals_tr[0].len()) .map(|i| evals_tr.iter().map(|v| v[i]).collect::>()) @@ -470,23 +426,6 @@ pub fn combined_inner_product( res += &(xi_i * term); xi_i *= polyscale; } - - if let Some(m) = shifted { - // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j) - let last_evals = if *m >= evals.len() * srs_length { - vec![F::zero(); evaluation_points.len()] - } else { - evals[evals.len() - 1].clone() - }; - let shifted_evals: Vec<_> = evaluation_points - .iter() - .zip(&last_evals) - .map(|(elm, f_elm)| elm.pow([(srs_length - (*m) % srs_length) as u64]) * f_elm) - .collect(); - - res += &(xi_i * DensePolynomial::::eval_polynomial(&shifted_evals, *evalscale)); - xi_i *= polyscale; - } } res } @@ -501,9 +440,6 @@ where /// Contains an evaluation table pub evaluations: Vec>, - - /// optional degree bound - pub degree_bound: Option, } /// Contains the batch evaluation @@ -535,33 +471,17 @@ pub fn combine_commitments( ) { let mut xi_i = G::ScalarField::one(); - for Evaluation { - commitment, - degree_bound, - .. - } in evaluations + for Evaluation { commitment, .. 
} in evaluations
         .iter()
-        .filter(|x| !x.commitment.unshifted.is_empty())
+        .filter(|x| !x.commitment.elems.is_empty())
     {
         // iterating over the polynomial segments
-        for comm_ch in &commitment.unshifted {
+        for comm_ch in &commitment.elems {
             scalars.push(rand_base * xi_i);
             points.push(*comm_ch);
             xi_i *= polyscale;
         }
-
-        if let Some(_m) = degree_bound {
-            if let Some(comm_ch) = commitment.shifted {
-                if !comm_ch.is_zero() {
-                    // polyscale^i sum_j evalscale^j elm_j^{N - m} f(elm_j)
-                    scalars.push(rand_base * xi_i);
-                    points.push(comm_ch);
-
-                    xi_i *= polyscale;
-                }
-            }
-        }
     }
 }

@@ -579,13 +499,9 @@ pub fn combine_evaluations<G: CommitmentCurve>(
         vec![G::ScalarField::zero(); num_evals]
     };

-    for Evaluation {
-        evaluations,
-        degree_bound,
-        ..
-    } in evaluations
+    for Evaluation { evaluations, .. } in evaluations
         .iter()
-        .filter(|x| !x.commitment.unshifted.is_empty())
+        .filter(|x| !x.commitment.elems.is_empty())
     {
         // iterating over the polynomial segments
         for j in 0..evaluations[0].len() {
@@ -594,10 +510,6 @@ pub fn combine_evaluations<G: CommitmentCurve>(
             }
             xi_i *= polyscale;
         }
-
-        if let Some(_m) = degree_bound {
-            todo!("Misaligned chunked commitments are not supported")
-        }
     }

     acc
@@ -622,10 +534,9 @@ impl<G: CommitmentCurve> SRSTrait<G> for SRS<G> {
         &self,
         plnm: &DensePolynomial<G::ScalarField>,
         num_chunks: usize,
-        max: Option<usize>,
         rng: &mut (impl RngCore + CryptoRng),
     ) -> BlindedCommitment<G> {
-        self.mask(self.commit_non_hiding(plnm, num_chunks, max), rng)
+        self.mask(self.commit_non_hiding(plnm, num_chunks), rng)
     }

     /// Turns a non-hiding polynomial commitment into a hiding polynomial commitment. Transforms each given `<a, G>` into `(<a, G> + wH, w)` with a random `w` per commitment.
@@ -660,62 +571,34 @@
     /// This function commits a polynomial using the SRS' basis of size `n`.
     /// - `plnm`: polynomial to commit to with max size of sections
-    /// - `num_chunks`: the number of unshifted commitments to be included in the output polynomial commitment
-    /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound
-    /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`),
-    /// as well as an optional bounded commitment (if `max` is set).
-    /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`.
+    /// - `num_chunks`: the number of commitments to be included in the output polynomial commitment
+    /// The function returns an unbounded commitment vector
+    /// (which splits the commitment into several commitments of size at most `n`).
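+    /// For instance (illustrative): with an SRS of size `n = 16`, a
+    /// polynomial with 41 coefficients is committed in ceil(41 / 16) = 3
+    /// chunks, and requesting `num_chunks = 4` pads the result with a
+    /// fourth, zero commitment.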
fn commit_non_hiding( &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, ) -> PolyComm { let is_zero = plnm.is_zero(); - let basis_len = self.g.len(); - let coeffs_len = plnm.coeffs.len(); - let coeffs: Vec<_> = plnm.iter().map(|c| c.into_repr()).collect(); // chunk while commiting - let mut unshifted = vec![]; + let mut elems = vec![]; if is_zero { - unshifted.push(G::zero()); + elems.push(G::zero()); } else { coeffs.chunks(self.g.len()).for_each(|coeffs_chunk| { let chunk = VariableBaseMSM::multi_scalar_mul(&self.g, coeffs_chunk); - unshifted.push(chunk.into_affine()); + elems.push(chunk.into_affine()); }); } - for _ in unshifted.len()..num_chunks { - unshifted.push(G::zero()); + for _ in elems.len()..num_chunks { + elems.push(G::zero()); } - // committing only last chunk shifted to the right edge of SRS - let shifted = match max { - None => None, - Some(max) => { - let start = max - (max % basis_len); - if is_zero || start >= coeffs_len { - // polynomial is small, nothing was shifted - Some(G::zero()) - } else if max % basis_len == 0 { - // the number of chunks should tell the verifier everything they need to know - None - } else { - // we shift the last chunk to the right as proof of the degree bound - let shifted = VariableBaseMSM::multi_scalar_mul( - &self.g[basis_len - (max % basis_len)..], - &coeffs[start..], - ); - Some(shifted.into_affine()) - } - } - }; - - PolyComm:: { unshifted, shifted } + PolyComm:: { elems } } fn commit_evaluations_non_hiding( @@ -968,7 +851,7 @@ mod tests { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, num_chunks, None) + srs.commit_non_hiding(&p, num_chunks) }) .collect(); @@ -982,21 +865,24 @@ mod tests { } #[test] + // This tests with two chunks. fn test_chunked_lagrange_commitments() { let n = 64; + let divisor = 4; let domain = D::::new(n).unwrap(); - let mut srs = SRS::::create(n / 2); + let mut srs = SRS::::create(n / divisor); srs.add_lagrange_basis(domain); let num_chunks = domain.size() / srs.g.len(); + assert!(num_chunks == divisor); let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, num_chunks, None) + srs.commit_non_hiding(&p, num_chunks) }) .collect(); @@ -1010,6 +896,10 @@ mod tests { } #[test] + // TODO @volhovm I don't understand what this test does and + // whether it is worth leaving. + /// Same as test_chunked_lagrange_commitments, but with a slight + /// offset in the SRS fn test_offset_chunked_lagrange_commitments() { let n = 64; let domain = D::::new(n).unwrap(); @@ -1017,14 +907,16 @@ mod tests { let mut srs = SRS::::create(n / 2 + 1); srs.add_lagrange_basis(domain); + // Is this even taken into account?... 
let num_chunks = (domain.size() + srs.g.len() - 1) / srs.g.len(); + assert!(num_chunks == 2); let expected_lagrange_commitments: Vec<_> = (0..n) .map(|i| { let mut e = vec![Fp::zero(); n]; e[i] = Fp::one(); let p = Evaluations::>::from_vec_and_domain(e, domain).interpolate(); - srs.commit_non_hiding(&p, num_chunks, Some(64)) + srs.commit_non_hiding(&p, num_chunks) // this requires max = Some(64) }) .collect(); @@ -1048,10 +940,9 @@ mod tests { let srs = SRS::::create(20); let rng = &mut StdRng::from_seed([0u8; 32]); - // commit the two polynomials (and upperbound the second one) - let commitment = srs.commit(&poly1, 1, None, rng); - let upperbound = poly2.degree() + 1; - let bounded_commitment = srs.commit(&poly2, 1, Some(upperbound), rng); + // commit the two polynomials + let commitment1 = srs.commit(&poly1, 1, rng); + let commitment2 = srs.commit(&poly2, 1, rng); // create an aggregated opening proof let (u, v) = (Fp::rand(rng), Fp::rand(rng)); @@ -1061,18 +952,15 @@ mod tests { let polys: Vec<( DensePolynomialOrEvaluations<_, Radix2EvaluationDomain<_>>, - Option, PolyComm<_>, )> = vec![ ( DensePolynomialOrEvaluations::DensePolynomial(&poly1), - None, - commitment.blinders, + commitment1.blinders, ), ( DensePolynomialOrEvaluations::DensePolynomial(&poly2), - Some(upperbound), - bounded_commitment.blinders, + commitment2.blinders, ), ]; let elm = vec![Fp::rand(rng), Fp::rand(rng)]; @@ -1110,40 +998,21 @@ mod tests { let evaluations = vec![ Evaluation { - commitment: commitment.commitment, + commitment: commitment1.commitment, evaluations: poly1_chunked_evals, - degree_bound: None, }, Evaluation { - commitment: bounded_commitment.commitment, + commitment: commitment2.commitment, evaluations: poly2_chunked_evals, - degree_bound: Some(upperbound), }, ]; let combined_inner_product = { let es: Vec<_> = evaluations .iter() - .map( - |Evaluation { - commitment, - evaluations, - degree_bound, - }| { - let bound: Option = (|| { - let b = (*degree_bound)?; - let x = commitment.shifted?; - if x.is_zero() { - None - } else { - Some(b) - } - })(); - (evaluations.clone(), bound) - }, - ) + .map(|Evaluation { evaluations, .. 
}| evaluations.clone()) .collect(); - combined_inner_product(&elm, &v, &u, &es, srs.g.len()) + combined_inner_product(&v, &u, &es) }; // verify the proof @@ -1186,8 +1055,8 @@ pub mod caml { { fn from(polycomm: PolyComm) -> Self { Self { - unshifted: polycomm.unshifted.into_iter().map(Into::into).collect(), - shifted: polycomm.shifted.map(Into::into), + unshifted: polycomm.elems.into_iter().map(Into::into).collect(), + shifted: None, } } } @@ -1199,8 +1068,8 @@ pub mod caml { { fn from(polycomm: &'a PolyComm) -> Self { Self { - unshifted: polycomm.unshifted.iter().map(Into::into).collect(), - shifted: polycomm.shifted.as_ref().map(Into::into), + unshifted: polycomm.elems.iter().map(Into::into).collect(), + shifted: None, } } } @@ -1210,9 +1079,12 @@ pub mod caml { G: AffineCurve + From, { fn from(camlpolycomm: CamlPolyComm) -> PolyComm { + assert!( + camlpolycomm.shifted.is_none(), + "mina#14628: Shifted commitments are deprecated and must not be used" + ); PolyComm { - unshifted: camlpolycomm.unshifted.into_iter().map(Into::into).collect(), - shifted: camlpolycomm.shifted.map(Into::into), + elems: camlpolycomm.unshifted.into_iter().map(Into::into).collect(), } } } @@ -1222,9 +1094,13 @@ pub mod caml { G: AffineCurve + From<&'a CamlG> + From, { fn from(camlpolycomm: &'a CamlPolyComm) -> PolyComm { + assert!( + camlpolycomm.shifted.is_none(), + "mina#14628: Shifted commitments are deprecated and must not be used" + ); PolyComm { - unshifted: camlpolycomm.unshifted.iter().map(Into::into).collect(), - shifted: camlpolycomm.shifted.as_ref().map(Into::into), + //FIXME something with as_ref() + elems: camlpolycomm.unshifted.iter().map(Into::into).collect(), } } } diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 0b15615b66..6b2e9dcfc3 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -12,16 +12,11 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::iter::Iterator; -enum OptShiftedPolynomial
<P>
{ - Unshifted(P), - Shifted(P, usize), -} - // A formal sum of the form // `s_0 * p_0 + ... s_n * p_n` -// where each `s_i` is a scalar and each `p_i` is an optionally shifted polynomial. +// where each `s_i` is a scalar and each `p_i` is a polynomial. #[derive(Default)] -struct ScaledChunkedPolynomial(Vec<(F, OptShiftedPolynomial
<P>
)>); +struct ScaledChunkedPolynomial(Vec<(F, P)>); pub enum DensePolynomialOrEvaluations<'a, F: FftField, D: EvaluationDomain> { DensePolynomial(&'a DensePolynomial), @@ -29,13 +24,8 @@ pub enum DensePolynomialOrEvaluations<'a, F: FftField, D: EvaluationDomain> { } impl ScaledChunkedPolynomial { - fn add_unshifted(&mut self, scale: F, p: P) { - self.0.push((scale, OptShiftedPolynomial::Unshifted(p))) - } - - fn add_shifted(&mut self, scale: F, shift: usize, p: P) { - self.0 - .push((scale, OptShiftedPolynomial::Shifted(p, shift))) + fn add_poly(&mut self, scale: F, p: P) { + self.0.push((scale, p)) } } @@ -48,18 +38,8 @@ impl<'a, F: Field> ScaledChunkedPolynomial { .par_iter() .map(|(scale, segment)| { let scale = *scale; - match segment { - OptShiftedPolynomial::Unshifted(segment) => { - let v = segment.par_iter().map(|x| scale * *x).collect(); - DensePolynomial::from_coefficients_vec(v) - } - OptShiftedPolynomial::Shifted(segment, shift) => { - let mut v: Vec<_> = segment.par_iter().map(|x| scale * *x).collect(); - let mut res = vec![F::zero(); *shift]; - res.append(&mut v); - DensePolynomial::from_coefficients_vec(res) - } - } + let v = segment.par_iter().map(|x| scale * *x).collect(); + DensePolynomial::from_coefficients_vec(v) }) .collect(); @@ -85,7 +65,7 @@ pub fn combine_polys>( // If/when we change this, we can add more complicated code to handle different degrees. let degree = plnms .iter() - .fold(None, |acc, (p, _, _)| match p { + .fold(None, |acc, (p, _)| match p { DensePolynomialOrEvaluations::DensePolynomial(_) => acc, DensePolynomialOrEvaluations::Evaluations(_, d) => { if let Some(n) = acc { @@ -97,13 +77,12 @@ pub fn combine_polys>( .unwrap_or(0); vec![G::ScalarField::zero(); degree] }; - // let mut plnm_chunks: Vec<(G::ScalarField, OptShiftedPolynomial<_>)> = vec![]; let mut omega = G::ScalarField::zero(); let mut scale = G::ScalarField::one(); // iterating over polynomials in the batch - for (p_i, degree_bound, omegas) in plnms { + for (p_i, omegas) in plnms { match p_i { DensePolynomialOrEvaluations::Evaluations(evals_i, sub_domain) => { let stride = evals_i.evals.len() / sub_domain.size(); @@ -114,41 +93,23 @@ pub fn combine_polys>( .for_each(|(i, x)| { *x += scale * evals[i * stride]; }); - for j in 0..omegas.unshifted.len() { - omega += &(omegas.unshifted[j] * scale); + for j in 0..omegas.elems.len() { + omega += &(omegas.elems[j] * scale); scale *= &polyscale; } - // We assume here that we have no shifted segment. 
- // TODO: Remove shifted } DensePolynomialOrEvaluations::DensePolynomial(p_i) => { let mut offset = 0; // iterating over chunks of the polynomial - if let Some(m) = degree_bound { - assert!(p_i.coeffs.len() <= m + 1); - } else { - assert!(omegas.shifted.is_none()); - } - for j in 0..omegas.unshifted.len() { + for j in 0..omegas.elems.len() { let segment = &p_i.coeffs[std::cmp::min(offset, p_i.coeffs.len()) ..std::cmp::min(offset + srs_length, p_i.coeffs.len())]; - // always mixing in the unshifted segments - plnm.add_unshifted(scale, segment); + plnm.add_poly(scale, segment); - omega += &(omegas.unshifted[j] * scale); + omega += &(omegas.elems[j] * scale); scale *= &polyscale; offset += srs_length; - if let Some(m) = degree_bound { - if offset >= *m { - if offset > *m { - // mixing in the shifted segment since degree is bounded - plnm.add_shifted(scale, srs_length - m % srs_length, segment); - } - omega += &(omegas.shifted.unwrap() * scale); - scale *= &polyscale; - } - } } } } @@ -158,7 +119,11 @@ pub fn combine_polys>( if !plnm_evals_part.is_empty() { let n = plnm_evals_part.len(); let max_poly_size = srs_length; - let num_chunks = n / max_poly_size; + let num_chunks = if n == 0 { + 1 + } else { + n / max_poly_size + if n % max_poly_size == 0 { 0 } else { 1 } + }; plnm += &Evaluations::from_vec_and_domain(plnm_evals_part, D::new(n).unwrap()) .interpolate() .to_chunked_polynomial(num_chunks, max_poly_size) @@ -183,15 +148,11 @@ impl SRS { &self, group_map: &G::Map, // TODO(mimoo): create a type for that entry - plnms: &[( - DensePolynomialOrEvaluations, - Option, - PolyComm, - )], // vector of polynomial with optional degree bound and commitment randomness - elm: &[G::ScalarField], // vector of evaluation points - polyscale: G::ScalarField, // scaling factor for polynoms - evalscale: G::ScalarField, // scaling factor for evaluation point powers - mut sponge: EFqSponge, // sponge + plnms: PolynomialsToCombine, // vector of polynomial with commitment randomness + elm: &[G::ScalarField], // vector of evaluation points + polyscale: G::ScalarField, // scaling factor for polynoms + evalscale: G::ScalarField, // scaling factor for evaluation point powers + mut sponge: EFqSponge, // sponge rng: &mut RNG, ) -> OpeningProof where @@ -362,11 +323,7 @@ impl SRS { #[allow(clippy::many_single_char_names)] pub fn prover_polynomials_to_verifier_evaluations>( &self, - plnms: &[( - DensePolynomialOrEvaluations, - Option, - PolyComm, - )], // vector of polynomial with optional degree bound and commitment randomness + plnms: PolynomialsToCombine, elm: &[G::ScalarField], // vector of evaluation points ) -> Vec> where @@ -375,7 +332,7 @@ impl SRS { plnms .iter() .enumerate() - .map(|(i, (poly_or_evals, degree_bound, blinders))| { + .map(|(i, (poly_or_evals, blinders))| { let poly = match poly_or_evals { DensePolynomialOrEvaluations::DensePolynomial(poly) => (*poly).clone(), DensePolynomialOrEvaluations::Evaluations(evals, _) => { @@ -383,9 +340,8 @@ impl SRS { } }; let chunked_polynomial = - poly.to_chunked_polynomial(blinders.unshifted.len(), self.g.len()); - let chunked_commitment = - { self.commit_non_hiding(&poly, blinders.unshifted.len(), None) }; + poly.to_chunked_polynomial(blinders.elems.len(), self.g.len()); + let chunked_commitment = { self.commit_non_hiding(&poly, blinders.elems.len()) }; let masked_commitment = match self.mask_custom(chunked_commitment, blinders) { Ok(comm) => comm, Err(err) => panic!("Error at index {i}: {err}"), @@ -398,8 +354,6 @@ impl SRS { commitment: 
masked_commitment.commitment, evaluations: chunked_evals, - - degree_bound: *degree_bound, } }) .collect() @@ -433,11 +387,7 @@ impl< fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, - plnms: &[( - DensePolynomialOrEvaluations<::ScalarField, D>, - Option, - PolyComm<::ScalarField>, - )], // vector of polynomial with optional degree bound and commitment randomness + plnms: PolynomialsToCombine, elm: &[::ScalarField], // vector of evaluation points polyscale: ::ScalarField, // scaling factor for polynoms evalscale: ::ScalarField, // scaling factor for evaluation point powers diff --git a/poly-commitment/src/lib.rs b/poly-commitment/src/lib.rs index 4d7bac7913..fb7f7491ca 100644 --- a/poly-commitment/src/lib.rs +++ b/poly-commitment/src/lib.rs @@ -37,7 +37,6 @@ pub trait SRS { &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, rng: &mut (impl RngCore + CryptoRng), ) -> BlindedCommitment; @@ -60,15 +59,13 @@ pub trait SRS { /// This function commits a polynomial using the SRS' basis of size `n`. /// - `plnm`: polynomial to commit to with max size of sections - /// - `max`: maximal degree of the polynomial (not inclusive), if none, no degree bound - /// The function returns an unbounded commitment vector (which splits the commitment into several commitments of size at most `n`), - /// as well as an optional bounded commitment (if `max` is set). - /// Note that a maximum degree cannot (and doesn't need to) be enforced via a shift if `max` is a multiple of `n`. + /// - `num_chunks`: the number of commitments to be included in the output polynomial commitment + /// The function returns an unbounded commitment vector + /// (which splits the commitment into several commitments of size at most `n`). fn commit_non_hiding( &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, ) -> PolyComm; fn commit_evaluations_non_hiding( @@ -86,9 +83,9 @@ pub trait SRS { } #[allow(type_alias_bounds)] +/// Vector of triples (polynomial itself, degree bound, omegas). 
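+/// Note: with the degree bound gone, each entry is in fact a pair of
+/// (polynomial, commitment randomness). A minimal sketch of building one,
+/// assuming a polynomial `p` and the `blinders` returned by `commit`:
+///
+///     let plnms: PolynomialsToCombine<G, D> =
+///         &[(DensePolynomialOrEvaluations::DensePolynomial(&p), blinders)];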
type PolynomialsToCombine<'a, G: CommitmentCurve, D: EvaluationDomain> = &'a [( DensePolynomialOrEvaluations<'a, G::ScalarField, D>, - Option, PolyComm, )]; diff --git a/poly-commitment/src/pairing_proof.rs b/poly-commitment/src/pairing_proof.rs index 913cf15d0f..1a581e538b 100644 --- a/poly-commitment/src/pairing_proof.rs +++ b/poly-commitment/src/pairing_proof.rs @@ -1,5 +1,5 @@ use crate::commitment::*; -use crate::evaluation_proof::{combine_polys, DensePolynomialOrEvaluations}; +use crate::evaluation_proof::combine_polys; use crate::srs::SRS; use crate::{CommitmentError, PolynomialsToCombine, SRS as SRSTrait}; use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; @@ -94,11 +94,7 @@ impl< fn open::ScalarField>>( srs: &Self::SRS, _group_map: &::Map, - plnms: &[( - DensePolynomialOrEvaluations<::ScalarField, D>, - Option, - PolyComm<::ScalarField>, - )], // vector of polynomial with optional degree bound and commitment randomness + plnms: PolynomialsToCombine, elm: &[::ScalarField], // vector of evaluation points polyscale: ::ScalarField, // scaling factor for polynoms _evalscale: ::ScalarField, // scaling factor for evaluation point powers @@ -164,10 +160,9 @@ impl< &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, rng: &mut (impl RngCore + CryptoRng), ) -> BlindedCommitment { - self.full_srs.commit(plnm, num_chunks, max, rng) + self.full_srs.commit(plnm, num_chunks, rng) } fn mask_custom( @@ -190,9 +185,8 @@ impl< &self, plnm: &DensePolynomial, num_chunks: usize, - max: Option, ) -> PolyComm { - self.full_srs.commit_non_hiding(plnm, num_chunks, max) + self.full_srs.commit_non_hiding(plnm, num_chunks) } fn commit_evaluations_non_hiding( @@ -282,10 +276,7 @@ impl< quotient }; - let quotient = srs - .full_srs - .commit_non_hiding("ient_poly, 1, None) - .unshifted[0]; + let quotient = srs.full_srs.commit_non_hiding("ient_poly, 1).elems[0]; Some(PairingProof { quotient, @@ -317,12 +308,12 @@ impl< let blinding_commitment = srs.full_srs.h.mul(self.blinding); let divisor_commitment = srs .verifier_srs - .commit_non_hiding(&divisor_polynomial(elm), 1, None) - .unshifted[0]; + .commit_non_hiding(&divisor_polynomial(elm), 1) + .elems[0]; let eval_commitment = srs .full_srs - .commit_non_hiding(&eval_polynomial(elm, &evals), 1, None) - .unshifted[0] + .commit_non_hiding(&eval_polynomial(elm, &evals), 1) + .elems[0] .into_projective(); let numerator_commitment = { poly_commitment - eval_commitment - blinding_commitment }; @@ -380,18 +371,17 @@ mod tests { let comms: Vec<_> = polynomials .iter() - .map(|p| srs.full_srs.commit(p, 1, None, rng)) + .map(|p| srs.full_srs.commit(p, 1, rng)) .collect(); - let polynomials_and_blinders: Vec<(DensePolynomialOrEvaluations<_, D<_>>, _, _)> = - polynomials - .iter() - .zip(comms.iter()) - .map(|(p, comm)| { - let p = DensePolynomialOrEvaluations::DensePolynomial(p); - (p, None, comm.blinders.clone()) - }) - .collect(); + let polynomials_and_blinders: Vec<(DensePolynomialOrEvaluations<_, D<_>>, _)> = polynomials + .iter() + .zip(comms.iter()) + .map(|(p, comm)| { + let p = DensePolynomialOrEvaluations::DensePolynomial(p); + (p, comm.blinders.clone()) + }) + .collect(); let evaluation_points = vec![ScalarField::rand(rng), ScalarField::rand(rng)]; @@ -409,7 +399,6 @@ mod tests { Evaluation { commitment: commitment.commitment, evaluations, - degree_bound: None, } }) .collect(); diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs index 899b708224..355c420b66 100644 --- a/poly-commitment/src/srs.rs +++ 
b/poly-commitment/src/srs.rs @@ -178,11 +178,11 @@ impl SRS { // By computing each of these, and recollecting the terms as a vector of polynomial // commitments, we obtain a chunked commitment to the L_i polynomials. let srs_size = self.g.len(); - let num_unshifteds = (n + srs_size - 1) / srs_size; - let mut unshifted = Vec::with_capacity(num_unshifteds); + let num_elems = (n + srs_size - 1) / srs_size; + let mut elems = Vec::with_capacity(num_elems); // For each chunk - for i in 0..num_unshifteds { + for i in 0..num_elems { // Initialize the vector with zero curve points let mut lg: Vec<::Projective> = vec![::Projective::zero(); n]; @@ -195,36 +195,13 @@ impl SRS { // Apply the IFFT domain.ifft_in_place(&mut lg); ::Projective::batch_normalization(lg.as_mut_slice()); - // Append the 'partial Langrange polynomials' to the vector of unshifted chunks - unshifted.push(lg) + // Append the 'partial Langrange polynomials' to the vector of elems chunks + elems.push(lg) } - // If the srs size does not exactly divide the domain size - let shifted: Option::Projective>> = - if n < srs_size || num_unshifteds * srs_size == n { - None - } else { - // Initialize the vector to zero - let mut lg: Vec<::Projective> = - vec![::Projective::zero(); n]; - // Overwrite the terms corresponding to the final chunk with the SRS curve points - // shifted to the right - let start_offset = (num_unshifteds - 1) * srs_size; - let num_terms = n - start_offset; - let srs_start_offset = srs_size - num_terms; - for j in 0..num_terms { - lg[start_offset + j] = self.g[srs_start_offset + j].into_projective() - } - // Apply the IFFT - domain.ifft_in_place(&mut lg); - ::Projective::batch_normalization(lg.as_mut_slice()); - Some(lg) - }; - let chunked_commitments: Vec<_> = (0..n) .map(|i| PolyComm { - unshifted: unshifted.iter().map(|v| v[i].into_affine()).collect(), - shifted: shifted.as_ref().map(|v| v[i].into_affine()), + elems: elems.iter().map(|v| v[i].into_affine()).collect(), }) .collect(); self.lagrange_bases.insert(n, chunked_commitments); diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs index 570e8e8752..545a788fd8 100644 --- a/poly-commitment/src/tests/batch_15_wires.rs +++ b/poly-commitment/src/tests/batch_15_wires.rs @@ -30,8 +30,6 @@ where let size = 1 << 7; let srs = SRS::::create(size); - let num_chunks = 1; - let group_map = ::Map::setup(); let sponge = DefaultFqSponge::::new( @@ -62,6 +60,8 @@ where } }) .collect::>(); + + // TODO @volhovm remove? 
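+        // After this patch these bounds are never passed to `commit` or
+        // `open`; they only survive as the (ignored) third element of the
+        // `comm` tuples below, which is presumably why the TODO above
+        // suggests removing them.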
let bounds = a .iter() .enumerate() @@ -81,10 +81,19 @@ where let mut start = Instant::now(); let comm = (0..a.len()) .map(|i| { + let n = a[i].len(); + let num_chunks = if n == 0 { + 1 + } else { + n / srs.g.len() + if n % srs.g.len() == 0 { 0 } else { 1 } + }; ( - srs.commit(&a[i].clone(), num_chunks, bounds[i], rng), + srs.commit(&a[i].clone(), num_chunks, rng), x.iter() - .map(|xx| a[i].to_chunked_polynomial(1, size).evaluate_chunks(*xx)) + .map(|xx| { + a[i].to_chunked_polynomial(num_chunks, size) + .evaluate_chunks(*xx) + }) .collect::>(), bounds[i], ) @@ -96,12 +105,10 @@ where let polys: Vec<( DensePolynomialOrEvaluations<_, Radix2EvaluationDomain<_>>, _, - _, )> = (0..a.len()) .map(|i| { ( DensePolynomialOrEvaluations::DensePolynomial(&a[i]), - bounds[i], (comm[i].0).blinders.clone(), ) }) @@ -120,20 +127,9 @@ where let combined_inner_product = { let es: Vec<_> = comm .iter() - .map(|(commitment, evaluations, degree_bound)| { - let bound: Option = (|| { - let b = (*degree_bound)?; - let x = commitment.commitment.shifted?; - if x.is_zero() { - None - } else { - Some(b) - } - })(); - (evaluations.clone(), bound) - }) + .map(|(_, evaluations, _)| evaluations.clone()) .collect(); - combined_inner_product(&x, &polymask, &evalmask, &es, srs.g.len()) + combined_inner_product(&polymask, &evalmask, &es) }; ( @@ -161,7 +157,6 @@ where .map(|poly| Evaluation { commitment: (poly.0).commitment.clone(), evaluations: poly.1.clone(), - degree_bound: poly.2, }) .collect::>(), opening: &proof.5, diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs index dedcd0ad6e..38d57994ec 100644 --- a/poly-commitment/src/tests/commitment.rs +++ b/poly-commitment/src/tests/commitment.rs @@ -27,8 +27,6 @@ use std::time::{Duration, Instant}; pub struct Commitment { /// the commitment itself, potentially in chunks chunked_commitment: PolyComm, - /// an optional degree bound - bound: Option, } /// An evaluated commitment (given a number of evaluation points) @@ -76,7 +74,6 @@ impl AggregatedEvaluationProof { /// This function converts an aggregated evaluation proof into something the verify API understands pub fn verify_type( &self, - srs: &SRS, ) -> BatchEvaluationProof, OpeningProof> { let mut coms = vec![]; @@ -85,39 +82,15 @@ impl AggregatedEvaluationProof { coms.push(Evaluation { commitment: eval_com.commit.chunked_commitment.clone(), evaluations: eval_com.chunked_evals.clone(), - degree_bound: eval_com.commit.bound, }); } let combined_inner_product = { let es: Vec<_> = coms .iter() - .map( - |Evaluation { - commitment, - evaluations, - degree_bound, - }| { - let bound: Option = (|| { - let b = (*degree_bound)?; - let x = commitment.shifted?; - if x.is_zero() { - None - } else { - Some(b) - } - })(); - (evaluations.clone(), bound) - }, - ) + .map(|Evaluation { evaluations, .. }| evaluations.clone()) .collect(); - combined_inner_product( - &self.eval_points, - &self.polymask, - &self.evalmask, - &es, - srs.g.len(), - ) + combined_inner_product(&self.polymask, &self.evalmask, &es) }; BatchEvaluationProof { @@ -156,42 +129,40 @@ fn test_randomised(mut rng: &mut RNG) { // create 11 polynomials of random degree (of at most 500) // and commit to them let mut commitments = vec![]; - for i in 0..11 { + for _ in 0..11 { let len: usize = rng.gen(); let len = len % 500; + // TODO @volhovm maybe remove the second case. 
+ // every other polynomial is upperbounded let poly = if len == 0 { DensePolynomial::::zero() } else { DensePolynomial::::rand(len, &mut rng) }; - // every other polynomial is upperbounded - let bound = if i % 2 == 0 { - Some(poly.coeffs.len()) - } else { - None - }; - // create commitments for each polynomial, and evaluate each polynomial at the 7 random points let timer = Instant::now(); let BlindedCommitment { commitment: chunked_commitment, blinders: chunked_blinding, - } = srs.commit(&poly, num_chunks, bound, &mut rng); + } = srs.commit(&poly, num_chunks, &mut rng); time_commit += timer.elapsed(); let mut chunked_evals = vec![]; for point in eval_points.clone() { + let n = poly.len(); + let num_chunks = if n == 0 { + 1 + } else { + n / srs.g.len() + if n % srs.g.len() == 0 { 0 } else { 1 } + }; chunked_evals.push( - poly.to_chunked_polynomial(1, srs.g.len()) + poly.to_chunked_polynomial(num_chunks, srs.g.len()) .evaluate_chunks(point), ); } - let commit = Commitment { - chunked_commitment, - bound, - }; + let commit = Commitment { chunked_commitment }; let eval_commit = EvaluatedCommitment { commit, @@ -209,13 +180,11 @@ fn test_randomised(mut rng: &mut RNG) { #[allow(clippy::type_complexity)] let mut polynomials: Vec<( DensePolynomialOrEvaluations>, - Option, PolyComm<_>, )> = vec![]; for c in &commitments { polynomials.push(( DensePolynomialOrEvaluations::DensePolynomial(&c.poly), - c.eval_commit.commit.bound, c.chunked_blinding.clone(), )); } @@ -257,7 +226,7 @@ fn test_randomised(mut rng: &mut RNG) { let timer = Instant::now(); // batch verify all the proofs - let mut batch: Vec<_> = proofs.iter().map(|p| p.verify_type(&srs)).collect(); + let mut batch: Vec<_> = proofs.iter().map(|p| p.verify_type()).collect(); assert!(srs.verify::, _>(&group_map, &mut batch, &mut rng)); // TODO: move to bench diff --git a/utils/src/dense_polynomial.rs b/utils/src/dense_polynomial.rs index 72560f1057..895e227fe0 100644 --- a/utils/src/dense_polynomial.rs +++ b/utils/src/dense_polynomial.rs @@ -57,6 +57,10 @@ impl ExtendedDensePolynomial for DensePolynomial { chunk_polys.push(DensePolynomial::from_coefficients_vec(vec![])); } + // Ensuring that the number of chunks is the one requested, following + // trait documentation + assert_eq!(chunk_polys.len(), num_chunks); + ChunkedPolynomial { polys: chunk_polys, size: chunk_size, From e9da8cdd16ba8a98010c4a5d7ce8c9ece5e99cbb Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Thu, 2 May 2024 13:49:24 -0300 Subject: [PATCH 056/178] Vendored depedencies --- .cargo/config.toml | 5 +++++ .github/workflows/benches.yml | 2 ++ .github/workflows/rust.yml | 2 ++ .gitmodules | 3 +++ Cargo.lock | 2 ++ proof-systems-vendors | 1 + 6 files changed, 15 insertions(+) create mode 100644 .cargo/config.toml create mode 100644 .gitmodules create mode 160000 proof-systems-vendors diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000000..862be18186 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,5 @@ +[source.crates-io] +replace-with = "vendored-sources" + +[source.vendored-sources] +directory = "proof-systems-vendors" \ No newline at end of file diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index c837a0c8ee..0e67965382 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -21,6 +21,8 @@ jobs: - name: Checkout PR uses: actions/checkout@v2 + with: + submodules: true # - name: Run iai bench # run: | diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 
6990d5a779..737eb1a40f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -25,6 +25,8 @@ jobs: - name: Checkout PR uses: actions/checkout@v3 + with: + submodules: true - name: Set up cargo/rust uses: actions-rs/toolchain@v1 diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..f99f90b3c4 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "proof-systems-vendors"] + path = proof-systems-vendors + url = git@github.com:o1-labs/proof-systems-vendors.git diff --git a/Cargo.lock b/Cargo.lock index 41cd44de59..0abd25bf58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1289,6 +1289,8 @@ name = "mina-book" version = "0.1.0" dependencies = [ "cargo-spec", + "plist", + "time", ] [[package]] diff --git a/proof-systems-vendors b/proof-systems-vendors new file mode 160000 index 0000000000..b180613995 --- /dev/null +++ b/proof-systems-vendors @@ -0,0 +1 @@ +Subproject commit b18061399504db1ab2b6d5f28d121597273ef507 From c96e9c650d8356ca8a086d1a1d389f60a4dee172 Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Thu, 2 May 2024 20:15:58 +0200 Subject: [PATCH 057/178] CI/MISC: delete trailing whitespaces --- .github/workflows/benches.yml | 4 ++-- .github/workflows/stale.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index 0e67965382..7ae6e08d28 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -22,7 +22,7 @@ jobs: - name: Checkout PR uses: actions/checkout@v2 with: - submodules: true + submodules: true # - name: Run iai bench # run: | @@ -47,7 +47,7 @@ jobs: const criterion_bench = fs.readFileSync("criterion_bench", {encoding:'utf8', flag:'r'}); // form message - const message = `Hello there👋 + const message = `Hello there👋 Here are some benchmark results using [criterion](https://bheisler.github.io/criterion.rs/). Keep in mind that since this runs in CI, it is not really accurate (as it depends on the host load)

${criterion_bench}
`; diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index c535845cea..6dd1fdf361 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,7 +12,7 @@ on: # │ │ ┌───────────── day of the month (1 - 31) # │ │ │ ┌───────────── month (1 - 12) # │ │ │ │ ┌───────────── day of the week (0 - 6) -# │ │ │ │ │ +# │ │ │ │ │ # │ │ │ │ │ # │ │ │ │ │ - cron: '19 7 * * *' From 8f4a566994d663d478362588c0fd071b1d996195 Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Thu, 2 May 2024 20:16:18 +0200 Subject: [PATCH 058/178] CI: add --frozen --- .github/workflows/rust.yml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 737eb1a40f..3c91e85eaf 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -1,3 +1,5 @@ +# We do use --frozen to be sure that we do not access the network + name: CI on: @@ -83,7 +85,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --all-features --tests --all-targets -- -D warnings + args: --all-features --tests --all-targets --frozen -- -D warnings - name: Run Clippy (beta) uses: actions-rs/clippy-check@v1 @@ -91,7 +93,7 @@ jobs: with: name: Clippy (beta) token: ${{ secrets.GITHUB_TOKEN }} - args: --all-features --all-targets -- -W clippy::all + args: --all-features --all-targets --frozen -- -W clippy::all # # Build @@ -101,7 +103,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: build - args: --all-targets --all-features + args: --all-targets --all-features --frozen # # Tests @@ -115,10 +117,10 @@ jobs: uses: actions-rs/cargo@v1 with: command: nextest - args: run --all-features --release + args: run --all-features --release --frozen - name: Doc tests uses: actions-rs/cargo@v1 with: command: test - args: --all-features --release --doc + args: --all-features --release --doc --frozen From 4921c68939479b6d3a8ff176638eb99d68c18962 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Thu, 2 May 2024 18:38:29 -0300 Subject: [PATCH 059/178] Using https instead of ssh to make CI happy --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index f99f90b3c4..3b6a5edc6a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "proof-systems-vendors"] path = proof-systems-vendors - url = git@github.com:o1-labs/proof-systems-vendors.git + url = https://github.com/o1-labs/proof-systems-vendors.git From 87f21f7567e008ec18748cceed41bbc7e1dc21b3 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Wed, 8 May 2024 11:43:42 -0700 Subject: [PATCH 060/178] Use the --offline parameter to conform with the way we handle vendored depdendencies in Mina CI --- .github/workflows/rust.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 3c91e85eaf..42b5c1f618 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -1,4 +1,4 @@ -# We do use --frozen to be sure that we do not access the network +# We use --offline to be sure that we do not access the network name: CI @@ -85,7 +85,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --all-features --tests --all-targets --frozen -- -D warnings + args: --all-features --tests --all-targets --offline -- -D warnings - name: Run Clippy (beta) uses: actions-rs/clippy-check@v1 @@ -93,7 +93,7 @@ jobs: with: name: Clippy (beta) token: ${{ secrets.GITHUB_TOKEN }} - args: --all-features --all-targets --frozen -- -W 
clippy::all + args: --all-features --all-targets --offline -- -W clippy::all # # Build @@ -103,7 +103,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: build - args: --all-targets --all-features --frozen + args: --all-targets --all-features --offline # # Tests @@ -117,10 +117,10 @@ jobs: uses: actions-rs/cargo@v1 with: command: nextest - args: run --all-features --release --frozen + args: run --all-features --release --offline - name: Doc tests uses: actions-rs/cargo@v1 with: command: test - args: --all-features --release --doc --frozen + args: --all-features --release --doc --offline From c9ae9c574ff2dfbaff18b56f6dbbae5710ab3646 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Thu, 9 May 2024 16:09:24 -0700 Subject: [PATCH 061/178] Fixed unsolved CI conflicts --- .github/workflows/rust.yml | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2181247d8e..0330c70416 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -92,7 +92,6 @@ jobs: cargo fmt -- --check - name: Lint (clippy) -<<<<<<< HEAD uses: actions-rs/cargo@v1 with: command: clippy @@ -105,27 +104,16 @@ jobs: name: Clippy (beta) token: ${{ secrets.GITHUB_TOKEN }} args: --all-features --all-targets --offline -- -W clippy::all -======= - run: | - eval $(opam env) - cargo clippy --all-features --all-targets --tests -- -W clippy::all -D warnings ->>>>>>> 5c79cb386b54490442638939c57f4509af873a73 # # Build # - name: Ensure that everything builds -<<<<<<< HEAD uses: actions-rs/cargo@v1 with: command: build args: --all-targets --all-features --offline -======= - run: | - eval $(opam env) - cargo build --release --all-targets --all-features ->>>>>>> 5c79cb386b54490442638939c57f4509af873a73 # # Tests @@ -139,7 +127,6 @@ jobs: cargo install cargo-nextest@=0.9.67 --locked - name: Test with latest nextest release (faster than cargo test) -<<<<<<< HEAD uses: actions-rs/cargo@v1 with: command: nextest @@ -150,13 +137,3 @@ jobs: with: command: test args: --all-features --release --doc --offline -======= - run: | - eval $(opam env) - cargo nextest run --all-features --release - - - name: Doc tests - run: | - eval $(opam env) - cargo test --all-features --release --doc ->>>>>>> 5c79cb386b54490442638939c57f4509af873a73 From e2f506ad52556b34fb4af98e093d849400b89583 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Thu, 9 May 2024 16:22:55 -0700 Subject: [PATCH 062/178] Format fix --- kimchi/src/circuits/constraints.rs | 2 +- kimchi/src/lib.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 9d318414da..4f957384a4 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -8,7 +8,7 @@ use crate::{ lookup::{ index::LookupConstraintSystem, lookups::{LookupFeatures, LookupPatterns}, - tables::{GateLookupTables, LookupTable} + tables::{GateLookupTables, LookupTable}, }, polynomial::{WitnessEvals, WitnessOverDomains, WitnessShifts}, polynomials::permutation::Shifts, diff --git a/kimchi/src/lib.rs b/kimchi/src/lib.rs index 193c315b1e..6a1e004c57 100644 --- a/kimchi/src/lib.rs +++ b/kimchi/src/lib.rs @@ -27,6 +27,5 @@ pub mod snarky; pub mod verifier; pub mod verifier_index; - #[cfg(test)] mod tests; From 661c47d17239ce059eeb614acd86cb57a4c74fd2 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Thu, 9 May 2024 17:28:02 -0700 Subject: [PATCH 063/178] Changed ocaml install strategy to make ocaml-sys happy --- 
.github/workflows/rust.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 0330c70416..dcaf05cfff 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -48,12 +48,10 @@ jobs: # overwriting default rust-toolchain echo ${{ matrix.rust_toolchain_version }} > rust-toolchain - - name: Setup OCaml ${{ matrix.ocaml_version }} - uses: ocaml/setup-ocaml@v2 - with: - ocaml-compiler: ${{ matrix.ocaml_version }} - # https://github.com/ocaml/setup-ocaml/issues/211#issuecomment-1058882386 - # disable-cache: true + - name: Setup OCaml (because of ocaml-gen) + run: | + sudo apt update + sudo apt install -y ocaml - name: Install markdownlint run: | From 8a29fcbabd9a05b6af512365dff39798db47c180 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Thu, 9 May 2024 17:39:21 -0700 Subject: [PATCH 064/178] Removed unused opam commands and updated nextest installation source --- .github/workflows/rust.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index dcaf05cfff..42d1937a59 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -63,7 +63,6 @@ jobs: - name: Install cargo-spec for specifications run: | - eval $(opam env) cargo install --locked cargo-spec - name: Build the kimchi specification @@ -86,7 +85,6 @@ jobs: - name: Enforce formating run: | - eval $(opam env) cargo fmt -- --check - name: Lint (clippy) @@ -119,10 +117,7 @@ jobs: # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions - name: Install latest nextest release - run: | - eval $(opam env) - # FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72. - cargo install cargo-nextest@=0.9.67 --locked + uses: taiki-e/install-action@nextest - name: Test with latest nextest release (faster than cargo test) uses: actions-rs/cargo@v1 From 42b370894b019b74aa40aa36a60a9052d091ad32 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Wed, 15 May 2024 17:09:21 -0300 Subject: [PATCH 065/178] Updated vendored depedencies --- proof-systems-vendors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proof-systems-vendors b/proof-systems-vendors index b180613995..db65478184 160000 --- a/proof-systems-vendors +++ b/proof-systems-vendors @@ -1 +1 @@ -Subproject commit b18061399504db1ab2b6d5f28d121597273ef507 +Subproject commit db654781846a51f0b2cd5086b2e81e5f5a6cf990 From 627020494cdc372ec87c8c7b0c7a4194d8411d64 Mon Sep 17 00:00:00 2001 From: Joseandro Luiz Date: Fri, 17 May 2024 13:41:55 -0300 Subject: [PATCH 066/178] Removed vendored changes that were not part of crates --- proof-systems-vendors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proof-systems-vendors b/proof-systems-vendors index db65478184..2db2631fbb 160000 --- a/proof-systems-vendors +++ b/proof-systems-vendors @@ -1 +1 @@ -Subproject commit db654781846a51f0b2cd5086b2e81e5f5a6cf990 +Subproject commit 2db2631fbb47fb24ce111ffa4cc4240c438ca993 From 7cd2acf570af4ef5897372a71f454b8584fd4562 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 20 Aug 2024 13:22:11 +0200 Subject: [PATCH 067/178] Add serde_as regression test for pasta --- Cargo.lock | 1 + utils/Cargo.toml | 1 + utils/src/serialization.rs | 58 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 60 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index cba1364190..a7575b7ad9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1518,6 +1518,7 @@ dependencies = [ "rand", 
"rand_core", "rayon", + "rmp-serde", "secp256k1", "serde", "serde_with", diff --git a/utils/Cargo.toml b/utils/Cargo.toml index a40ccba348..9c06c56768 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -22,6 +22,7 @@ hex = { version = "0.4", features = ["serde"] } num-bigint = { version = "0.4.3", features = ["rand"]} num-integer = "0.1.45" num-traits = "0.2" +rmp-serde = "1.1.1" sha2 = "0.10.2" thiserror = "1.0.30" rand = "0.8.0" diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index f746f982e0..c0470473a4 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -87,3 +87,61 @@ where T::deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) } } + +#[cfg(test)] +mod tests { + + use ark_ec::AffineCurve; + use ark_serialize::Write; + use mina_curves::pasta::{Pallas, Vesta}; + use serde::{Deserialize, Serialize}; + use serde_with::serde_as; + use std::io::BufReader; + + #[test] + pub fn serde_as_regression_pasta() { + #[serde_as] + #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] + struct TestStruct { + #[serde_as(as = "crate::serialization::SerdeAs")] + pallas: Pallas, + #[serde_as(as = "crate::serialization::SerdeAs")] + vesta: Vesta, + } + + let data_expected = TestStruct { + pallas: Pallas::prime_subgroup_generator(), + vesta: Vesta::prime_subgroup_generator(), + }; + + // reference serialized value + let buf_expected: Vec = vec![ + 146, 196, 33, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 196, 33, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + + let mut buf_written: Vec = vec![0; buf_expected.len()]; + + let srs_bytes = rmp_serde::to_vec(&data_expected).unwrap(); + (buf_written.as_mut_slice()) + .write_all(&srs_bytes) + .expect("failed to write file"); + (buf_written.as_mut_slice()) + .flush() + .expect("failed to flush file"); + + assert!( + buf_written == buf_expected, + "Serialized (written) representation {buf_written:?} does not match the expected one {buf_expected:?}" + ); + + let reader = BufReader::new(buf_expected.as_slice()); + let data_read: TestStruct = rmp_serde::from_read(reader).unwrap(); + + assert!( + data_read == data_expected, + "Deserialized value {data_read:?} does not match the expected one {data_expected:?}" + ); + } +} From d149d2b3b766f4886658fb937316c4812b10a421 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Thu, 22 Aug 2024 12:45:22 +0200 Subject: [PATCH 068/178] Address review comments --- utils/src/serialization.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index c0470473a4..72178ca575 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -123,9 +123,10 @@ mod tests { let mut buf_written: Vec = vec![0; buf_expected.len()]; - let srs_bytes = rmp_serde::to_vec(&data_expected).unwrap(); + let serialized_bytes = + rmp_serde::to_vec(&data_expected).expect("TestStruct could not be serialized"); (buf_written.as_mut_slice()) - .write_all(&srs_bytes) + .write_all(&serialized_bytes) .expect("failed to write file"); (buf_written.as_mut_slice()) .flush() @@ -137,7 +138,8 @@ mod tests { ); let reader = BufReader::new(buf_expected.as_slice()); - let data_read: TestStruct = rmp_serde::from_read(reader).unwrap(); + let data_read: TestStruct = + rmp_serde::from_read(reader).expect("Could not deseralize TestStruct"); assert!( data_read == data_expected, From 
34aa8579355b0e0166df023d12fa2a5743fcc6ed Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Thu, 22 Aug 2024 12:46:57 +0200 Subject: [PATCH 069/178] Remove must_use annotation from setup_with_custom_srs --- kimchi/src/tests/framework.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/kimchi/src/tests/framework.rs b/kimchi/src/tests/framework.rs index b0b79dbdbd..0788befe83 100644 --- a/kimchi/src/tests/framework.rs +++ b/kimchi/src/tests/framework.rs @@ -122,7 +122,6 @@ where } /// creates the indexes - #[must_use] pub(crate) fn setup_with_custom_srs, usize) -> OpeningProof::SRS>( mut self, get_srs: F, From 03393bb0622a3bd3e383859ae18465ac643b974c Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Thu, 22 Aug 2024 13:12:16 +0200 Subject: [PATCH 070/178] Fix setup_with_custom_srs warning --- kimchi/src/tests/framework.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kimchi/src/tests/framework.rs b/kimchi/src/tests/framework.rs index 0788befe83..9309b71581 100644 --- a/kimchi/src/tests/framework.rs +++ b/kimchi/src/tests/framework.rs @@ -121,7 +121,10 @@ where self } + // Re allow(dead_code): this method is used in tests; without the annotation it warns unnecessarily. /// creates the indexes + #[must_use] + #[allow(dead_code)] pub(crate) fn setup_with_custom_srs, usize) -> OpeningProof::SRS>( mut self, get_srs: F, From fbf57bd0067a50b19f620948720d2871e818deb3 Mon Sep 17 00:00:00 2001 From: Chiro Hiro Date: Wed, 4 Oct 2023 14:39:04 +0700 Subject: [PATCH 071/178] Update arkworks to 0.4.2 for pallas and vesta --- Cargo.lock | 274 ++++++++++++++++++++++++------ curves/Cargo.toml | 11 +- curves/src/pasta/curves/pallas.rs | 91 +++++----- curves/src/pasta/curves/tests.rs | 2 +- curves/src/pasta/curves/vesta.rs | 83 ++++----- curves/src/pasta/fields/fft.rs | 69 ++++++++ curves/src/pasta/fields/fp.rs | 28 +-- curves/src/pasta/fields/fq.rs | 31 ++-- curves/src/pasta/fields/mod.rs | 2 + proof-systems-vendors | 2 +- 10 files changed, 424 insertions(+), 169 deletions(-) create mode 100644 curves/src/pasta/fields/fft.rs diff --git a/Cargo.lock b/Cargo.lock index a7575b7ad9..ec31979a02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,6 +28,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "1.0.2" @@ -48,14 +59,22 @@ dependencies = [ [[package]] name = "ark-algebra-test-templates" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eef0b339ebc113d9bd3fb7cd666baf2cfe4e1024e0fac23e072d46598bbd0cd" +checksum = "400bd3a79c741b1832f1416d4373ae077ef82ca14a8b4cee1248a2f11c8b9172" dependencies = [ - "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "hex", + "num-bigint", + "num-integer", + "num-traits", + "serde", + "serde_derive", + "serde_json", + "sha2", ] [[package]] @@ -64,9 +83,9 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea691771ebbb28aea556c044e2e5c5227398d840cee0c34d4d20fa8eb2689e8c" dependencies = [ - "ark-ec", - "ark-ff", - "ark-std", + "ark-ec 0.3.0", + "ark-ff 0.3.0", + "ark-std 0.3.0", ] [[package]] @@ -75,31 +94,70 @@ version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dea978406c4b1ca13c2db2373b05cc55429c3575b8b21f1b9ee859aa5b03dd42" dependencies = [ - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", "derivative", "num-traits", "rayon", "zeroize", ] +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", + "itertools", + "num-traits", + "rayon", + "zeroize", +] + [[package]] name = "ark-ff" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" dependencies = [ - "ark-ff-asm", - "ark-ff-macros", - "ark-serialize", - "ark-std", + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", "derivative", "num-bigint", "num-traits", "paste", "rayon", - "rustc_version", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools", + "num-bigint", + "num-traits", + "paste", + "rayon", + "rustc_version 0.4.0", "zeroize", ] @@ -113,6 +171,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote 1.0.29", + "syn 1.0.109", +] + [[package]] name = "ark-ff-macros" version = "0.3.0" @@ -125,31 +193,69 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2 1.0.64", + "quote 1.0.29", + "syn 1.0.109", +] + [[package]] name = "ark-poly" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" dependencies = [ - "ark-ff", - "ark-serialize", - "ark-std", + "ark-ff 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", "derivative", "hashbrown 0.11.2", "rayon", ] +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", +] + [[package]] name = "ark-serialize" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" dependencies = [ - "ark-serialize-derive", - "ark-std", + "ark-serialize-derive 0.3.0", + "ark-std 0.3.0", "digest 0.9.0", ] +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive 0.4.2", + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + [[package]] name = "ark-serialize-derive" version = "0.3.0" @@ -161,6 +267,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2 1.0.64", + "quote 1.0.29", + "syn 1.0.109", +] + [[package]] name = "ark-std" version = "0.3.0" @@ -172,6 +289,28 @@ dependencies = [ "rayon", ] +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", + "rayon", +] + +[[package]] +name = "ark-test-curves" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83c22c2469f93dfcace9a98baabb7af1bc0c40de82c07cffbc0deba4acf41a90" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-std 0.4.0", +] + [[package]] name = "askama" version = "0.11.1" @@ -758,8 +897,8 @@ dependencies = [ name = "export_test_vectors" version = "0.1.0" dependencies = [ - "ark-ff", - "ark-serialize", + "ark-ff 0.3.0", + "ark-serialize 0.3.0", "hex", "mina-curves", "mina-poseidon", @@ -889,8 +1028,8 @@ checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" name = "groupmap" version = "0.1.0" dependencies = [ - "ark-ec", - "ark-ff", + "ark-ec 0.3.0", + "ark-ff 0.3.0", "mina-curves", "rand", ] @@ -907,7 +1046,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -916,7 +1055,16 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.3", ] [[package]] @@ -1101,10 +1249,10 @@ name = "kimchi" version = "0.1.0" dependencies = [ "ark-bn254", - "ark-ec", - "ark-ff", - "ark-poly", - "ark-serialize", + "ark-ec 0.3.0", + "ark-ff 0.3.0", + "ark-poly 0.3.0", + "ark-serialize 0.3.0", "blake2", "colored", "criterion", @@ -1146,8 +1294,8 @@ dependencies = [ name = "kimchi-visu" version = "0.1.0" dependencies = [ - "ark-ec", - "ark-ff", + "ark-ec 0.3.0", + "ark-ff 0.3.0", "kimchi", "mina-curves", "mina-poseidon", @@ -1292,9 +1440,10 @@ name = "mina-curves" version = "0.1.0" dependencies = [ "ark-algebra-test-templates", - "ark-ec", - "ark-ff", - "ark-std", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-std 0.4.0", + "ark-test-curves", "rand", ] @@ -1302,7 +1451,7 @@ dependencies = [ name = "mina-hasher" version = "0.1.0" dependencies = [ - "ark-ff", + "ark-ff 0.3.0", "bitvec", "mina-curves", "mina-poseidon", @@ -1315,10 +1464,10 @@ dependencies = [ name = "mina-poseidon" version = "0.1.0" dependencies = [ - "ark-ec", - "ark-ff", - "ark-poly", - "ark-serialize", + "ark-ec 0.3.0", + "ark-ff 0.3.0", + "ark-poly 0.3.0", + "ark-serialize 0.3.0", "hex", "mina-curves", 
"o1-utils", @@ -1336,8 +1485,8 @@ dependencies = [ name = "mina-signer" version = "0.1.0" dependencies = [ - "ark-ec", - "ark-ff", + "ark-ec 0.3.0", + "ark-ff 0.3.0", "bitvec", "blake2", "bs58", @@ -1505,10 +1654,10 @@ dependencies = [ name = "o1-utils" version = "0.1.0" dependencies = [ - "ark-ec", - "ark-ff", - "ark-poly", - "ark-serialize", + "ark-ec 0.3.0", + "ark-ff 0.3.0", + "ark-poly 0.3.0", + "ark-serialize 0.3.0", "bcs", "hex", "mina-curves", @@ -1767,10 +1916,10 @@ name = "poly-commitment" version = "0.1.0" dependencies = [ "ark-bn254", - "ark-ec", - "ark-ff", - "ark-poly", - "ark-serialize", + "ark-ec 0.3.0", + "ark-ff 0.3.0", + "ark-poly 0.3.0", + "ark-serialize 0.3.0", "blake2", "colored", "groupmap", @@ -2063,7 +2212,16 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" dependencies = [ - "semver", + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.19", ] [[package]] @@ -2165,6 +2323,12 @@ dependencies = [ "semver-parser", ] +[[package]] +name = "semver" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" + [[package]] name = "semver-parser" version = "0.10.2" @@ -2553,8 +2717,8 @@ dependencies = [ name = "turshi" version = "0.1.0" dependencies = [ - "ark-ec", - "ark-ff", + "ark-ec 0.3.0", + "ark-ff 0.3.0", "hex", "mina-curves", "o1-utils", diff --git a/curves/Cargo.toml b/curves/Cargo.toml index 4bbf45f798..c35d499cbe 100644 --- a/curves/Cargo.toml +++ b/curves/Cargo.toml @@ -10,10 +10,11 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ec = { version = "0.3.0", features = ["parallel"] } -ark-ff = { version = "0.3.0", features = ["parallel", "asm"] } +ark-ec = { version = "0.4.2", features = ["parallel"] } +ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } [dev-dependencies] -rand = { version = "0.8.0", default-features = false } -ark-algebra-test-templates = "0.3.0" -ark-std = "0.3.0" +rand = { version = "0.8.5", default-features = false } +ark-test-curves = "0.4.2" +ark-algebra-test-templates = "0.4.2" +ark-std = "0.4.0" diff --git a/curves/src/pasta/curves/pallas.rs b/curves/src/pasta/curves/pallas.rs index 790251b55d..39813e13bb 100644 --- a/curves/src/pasta/curves/pallas.rs +++ b/curves/src/pasta/curves/pallas.rs @@ -1,70 +1,75 @@ use crate::pasta::*; use ark_ec::{ - models::short_weierstrass_jacobian::{GroupAffine, GroupProjective}, - ModelParameters, SWModelParameters, + models::short_weierstrass::{Affine, Projective, SWCurveConfig}, + CurveConfig, }; -use ark_ff::{field_new, Zero}; +use ark_ff::{MontFp, Zero}; + +/// G_GENERATOR_X = +/// 1 +pub const G_GENERATOR_X: Fp = MontFp!("1"); + +/// G1_GENERATOR_Y = +/// 12418654782883325593414442427049395787963493412651469444558597405572177144507 +pub const G_GENERATOR_Y: Fp = + MontFp!("12418654782883325593414442427049395787963493412651469444558597405572177144507"); #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct PallasParameters; -impl ModelParameters for PallasParameters { +impl CurveConfig for PallasParameters { type BaseField = Fp; - type ScalarField = Fq; -} -pub type Pallas = GroupAffine; -pub type ProjectivePallas = GroupProjective; - -impl 
SWModelParameters for PallasParameters { - /// COEFF_A = 0 - const COEFF_A: Fp = field_new!(Fp, "0"); - - /// COEFF_B = 5 - const COEFF_B: Fp = field_new!(Fp, "5"); + type ScalarField = Fq; /// COFACTOR = 1 const COFACTOR: &'static [u64] = &[0x1]; /// COFACTOR_INV = 1 - const COFACTOR_INV: Fq = field_new!(Fq, "1"); + const COFACTOR_INV: Fq = MontFp!("1"); +} + +pub type Pallas = Affine; + +pub type ProjectivePallas = Projective; + +impl SWCurveConfig for PallasParameters { + const COEFF_A: Self::BaseField = MontFp!("0"); - /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - (G_GENERATOR_X, G_GENERATOR_Y); + const COEFF_B: Self::BaseField = MontFp!("5"); + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); +} + +impl PallasParameters { #[inline(always)] - fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { - Self::BaseField::zero() + pub fn mul_by_a( + _: &::BaseField, + ) -> ::BaseField { + ::BaseField::zero() } } -/// G_GENERATOR_X = -/// 1 -pub const G_GENERATOR_X: Fp = field_new!(Fp, "1"); - -/// G1_GENERATOR_Y = -/// 12418654782883325593414442427049395787963493412651469444558597405572177144507 -pub const G_GENERATOR_Y: Fp = field_new!( - Fp, - "12418654782883325593414442427049395787963493412651469444558597405572177144507" -); - /// legacy curve, a copy of the normal curve to support legacy sponge params #[derive(Copy, Clone, Default, PartialEq, Eq)] pub struct LegacyPallasParameters; -impl ModelParameters for LegacyPallasParameters { - type BaseField = ::BaseField; - type ScalarField = ::ScalarField; +impl CurveConfig for LegacyPallasParameters { + type BaseField = ::BaseField; + + type ScalarField = ::ScalarField; + + const COFACTOR: &'static [u64] = ::COFACTOR; + + const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; } -impl SWModelParameters for LegacyPallasParameters { - const COEFF_A: Self::BaseField = ::COEFF_A; - const COEFF_B: Self::BaseField = ::COEFF_B; - const COFACTOR: &'static [u64] = ::COFACTOR; - const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - ::AFFINE_GENERATOR_COEFFS; + +impl SWCurveConfig for LegacyPallasParameters { + const COEFF_A: Self::BaseField = ::COEFF_A; + + const COEFF_B: Self::BaseField = ::COEFF_B; + + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); } -pub type LegacyPallas = GroupAffine; +pub type LegacyPallas = Affine; diff --git a/curves/src/pasta/curves/tests.rs b/curves/src/pasta/curves/tests.rs index 3e22f00ced..079f219ce0 100644 --- a/curves/src/pasta/curves/tests.rs +++ b/curves/src/pasta/curves/tests.rs @@ -1,5 +1,5 @@ use ark_algebra_test_templates::{curves::*, groups::*}; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_std::test_rng; use rand::Rng; diff --git a/curves/src/pasta/curves/vesta.rs b/curves/src/pasta/curves/vesta.rs index 2a8b5002e5..7a587e9f1d 100644 --- a/curves/src/pasta/curves/vesta.rs +++ b/curves/src/pasta/curves/vesta.rs @@ -1,70 +1,71 @@ use crate::pasta::*; use ark_ec::{ - models::short_weierstrass_jacobian::{GroupAffine, GroupProjective}, - ModelParameters, SWModelParameters, + models::short_weierstrass::{Affine, Projective, SWCurveConfig}, + CurveConfig, }; -use ark_ff::{field_new, Zero}; +use ark_ff::{MontFp, Zero}; + +/// G_GENERATOR_X = +/// 1 +pub const G_GENERATOR_X: Fq = MontFp!("1"); + +/// G1_GENERATOR_Y = +/// 
11426906929455361843568202299992114520848200991084027513389447476559454104162 +pub const G_GENERATOR_Y: Fq = + MontFp!("11426906929455361843568202299992114520848200991084027513389447476559454104162"); #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct VestaParameters; -impl ModelParameters for VestaParameters { +impl CurveConfig for VestaParameters { type BaseField = Fq; type ScalarField = Fp; + + /// COFACTOR = 1 + const COFACTOR: &'static [u64] = &[0x1]; + + /// COFACTOR_INV = 1 + const COFACTOR_INV: Fp = MontFp!("1"); } -pub type Vesta = GroupAffine; -pub type ProjectiveVesta = GroupProjective; +pub type Vesta = Affine; +pub type ProjectiveVesta = Projective; -impl SWModelParameters for VestaParameters { +impl SWCurveConfig for VestaParameters { /// COEFF_A = 0 - const COEFF_A: Fq = field_new!(Fq, "0"); + const COEFF_A: Fq = MontFp!("0"); /// COEFF_B = 5 - const COEFF_B: Fq = field_new!(Fq, "5"); - - /// COFACTOR = 1 - const COFACTOR: &'static [u64] = &[0x1]; - - /// COFACTOR_INV = 1 - const COFACTOR_INV: Fp = field_new!(Fp, "1"); + const COEFF_B: Fq = MontFp!("5"); /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - (G_GENERATOR_X, G_GENERATOR_Y); + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); +} +impl VestaParameters { #[inline(always)] - fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { - Self::BaseField::zero() + pub fn mul_by_a( + _: &::BaseField, + ) -> ::BaseField { + ::BaseField::zero() } } -/// G_GENERATOR_X = -/// 1 -pub const G_GENERATOR_X: Fq = field_new!(Fq, "1"); - -/// G1_GENERATOR_Y = -/// 11426906929455361843568202299992114520848200991084027513389447476559454104162 -pub const G_GENERATOR_Y: Fq = field_new!( - Fq, - "11426906929455361843568202299992114520848200991084027513389447476559454104162" -); - /// legacy curve, a copy of the normal curve to support legacy sponge params #[derive(Copy, Clone, Default, PartialEq, Eq)] pub struct LegacyVestaParameters; -impl ModelParameters for LegacyVestaParameters { - type BaseField = ::BaseField; - type ScalarField = ::ScalarField; +impl CurveConfig for LegacyVestaParameters { + type BaseField = ::BaseField; + type ScalarField = ::ScalarField; + const COFACTOR: &'static [u64] = ::COFACTOR; + const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; } -impl SWModelParameters for LegacyVestaParameters { - const COEFF_A: Self::BaseField = ::COEFF_A; - const COEFF_B: Self::BaseField = ::COEFF_B; - const COFACTOR: &'static [u64] = ::COFACTOR; - const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - ::AFFINE_GENERATOR_COEFFS; + +impl SWCurveConfig for LegacyVestaParameters { + const COEFF_A: Self::BaseField = ::COEFF_A; + const COEFF_B: Self::BaseField = ::COEFF_B; + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); } -pub type LegacyVesta = GroupAffine; +pub type LegacyVesta = Affine; diff --git a/curves/src/pasta/fields/fft.rs b/curves/src/pasta/fields/fft.rs new file mode 100644 index 0000000000..023615fb0a --- /dev/null +++ b/curves/src/pasta/fields/fft.rs @@ -0,0 +1,69 @@ +use ark_ff::biginteger::BigInteger; + +/// A trait that defines parameters for a field that can be used for FFTs. +pub trait FftParameters: 'static + Send + Sync + Sized { + type BigInt: BigInteger; + + /// Let `N` be the size of the multiplicative group defined by the field. + /// Then `TWO_ADICITY` is the two-adicity of `N`, i.e. 
the integer `s` + /// such that `N = 2^s * t` for some odd integer `t`. + const TWO_ADICITY: u32; + + /// 2^s root of unity computed by GENERATOR^t + const TWO_ADIC_ROOT_OF_UNITY: Self::BigInt; + + /// An integer `b` such that there exists a multiplicative subgroup + /// of size `b^k` for some integer `k`. + const SMALL_SUBGROUP_BASE: Option = None; + + /// The integer `k` such that there exists a multiplicative subgroup + /// of size `Self::SMALL_SUBGROUP_BASE^k`. + const SMALL_SUBGROUP_BASE_ADICITY: Option = None; + + /// GENERATOR^((MODULUS-1) / (2^s * + /// SMALL_SUBGROUP_BASE^SMALL_SUBGROUP_BASE_ADICITY)) Used for mixed-radix FFT. + const LARGE_SUBGROUP_ROOT_OF_UNITY: Option = None; +} + +/// A trait that defines parameters for a prime field. +pub trait FpParameters: FftParameters { + /// The modulus of the field. + const MODULUS: Self::BigInt; + + /// The number of bits needed to represent the `Self::MODULUS`. + const MODULUS_BITS: u32; + + /// The number of bits that must be shaved from the beginning of + /// the representation when randomly sampling. + const REPR_SHAVE_BITS: u32; + + /// Let `M` be the power of 2^64 nearest to `Self::MODULUS_BITS`. Then + /// `R = M % Self::MODULUS`. + const R: Self::BigInt; + + /// R2 = R^2 % Self::MODULUS + const R2: Self::BigInt; + + /// INV = -MODULUS^{-1} mod 2^64 + const INV: u64; + + /// A multiplicative generator of the field. + /// `Self::GENERATOR` is an element having multiplicative order + /// `Self::MODULUS - 1`. + const GENERATOR: Self::BigInt; + + /// The number of bits that can be reliably stored. + /// (Should equal `SELF::MODULUS_BITS - 1`) + const CAPACITY: u32; + + /// t for 2^s * t = MODULUS - 1, and t coprime to 2. + const T: Self::BigInt; + + /// (t - 1) / 2 + const T_MINUS_ONE_DIV_TWO: Self::BigInt; + + /// (Self::MODULUS - 1) / 2 + const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt; +} + +pub trait Fp256Parameters {} diff --git a/curves/src/pasta/fields/fp.rs b/curves/src/pasta/fields/fp.rs index 8560087ade..9b4e120da9 100644 --- a/curves/src/pasta/fields/fp.rs +++ b/curves/src/pasta/fields/fp.rs @@ -1,6 +1,12 @@ -use ark_ff::{biginteger::BigInteger256 as BigInteger, FftParameters, Fp256, Fp256Parameters}; +use super::fft::{FftParameters, Fp256Parameters}; +use ark_ff::fields::{MontBackend, MontConfig}; +use ark_ff::{biginteger::BigInteger256 as BigInteger, Fp256}; -pub type Fp = Fp256; +#[derive(MontConfig)] +#[modulus = "28948022309329048855892746252171976963363056481941560715954676764349967630337"] +#[generator = "5"] +pub struct FqConfig; +pub type Fp = Fp256>; pub struct FpParameters; @@ -12,35 +18,35 @@ impl FftParameters for FpParameters { const TWO_ADICITY: u32 = 32; #[rustfmt::skip] - const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger::new([ 0xa28db849bad6dbf0, 0x9083cd03d3b539df, 0xfba6b9ca9dc8448e, 0x3ec928747b89c6da ]); } -impl ark_ff::FpParameters for FpParameters { +impl super::fft::FpParameters for FpParameters { // 28948022309329048855892746252171976963363056481941560715954676764349967630337 - const MODULUS: BigInteger = BigInteger([ + const MODULUS: BigInteger = BigInteger::new([ 0x992d30ed00000001, 0x224698fc094cf91b, 0x0, 0x4000000000000000, ]); - const R: BigInteger = BigInteger([ + const R: BigInteger = BigInteger::new([ 0x34786d38fffffffd, 0x992c350be41914ad, 0xffffffffffffffff, 0x3fffffffffffffff, ]); - const R2: BigInteger = BigInteger([ + const R2: BigInteger = BigInteger::new([ 0x8c78ecb30000000f, 0xd7d30dbd8b0de0e7, 0x7797a99bc3c95d18, 
0x96d41af7b9cb714, ]); - const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger::new([ 0xcc96987680000000, 0x11234c7e04a67c8d, 0x0, @@ -48,13 +54,13 @@ impl ark_ff::FpParameters for FpParameters { ]); // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T - const T: BigInteger = BigInteger([0x94cf91b992d30ed, 0x224698fc, 0x0, 0x40000000]); + const T: BigInteger = BigInteger::new([0x94cf91b992d30ed, 0x224698fc, 0x0, 0x40000000]); const T_MINUS_ONE_DIV_TWO: BigInteger = - BigInteger([0x4a67c8dcc969876, 0x11234c7e, 0x0, 0x20000000]); + BigInteger::new([0x4a67c8dcc969876, 0x11234c7e, 0x0, 0x20000000]); // GENERATOR = 5 - const GENERATOR: BigInteger = BigInteger([ + const GENERATOR: BigInteger = BigInteger::new([ 0xa1a55e68ffffffed, 0x74c2a54b4f4982f3, 0xfffffffffffffffd, diff --git a/curves/src/pasta/fields/fq.rs b/curves/src/pasta/fields/fq.rs index 59a0ced05b..b623705750 100644 --- a/curves/src/pasta/fields/fq.rs +++ b/curves/src/pasta/fields/fq.rs @@ -1,46 +1,53 @@ -use ark_ff::{ - biginteger::BigInteger256 as BigInteger, FftParameters, Fp256, Fp256Parameters, FpParameters, -}; +use super::fft::{FftParameters, Fp256Parameters, FpParameters}; +use ark_ff::{biginteger::BigInteger256 as BigInteger, Fp256}; pub struct FqParameters; -pub type Fq = Fp256; +use ark_ff::fields::{MontBackend, MontConfig}; + +#[derive(MontConfig)] +#[modulus = "28948022309329048855892746252171976963363056481941647379679742748393362948097"] +#[generator = "5"] +pub struct FrConfig; +pub type Fq = Fp256>; impl Fp256Parameters for FqParameters {} + impl FftParameters for FqParameters { type BigInt = BigInteger; const TWO_ADICITY: u32 = 32; #[rustfmt::skip] - const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger::new([ 0x218077428c9942de, 0xcc49578921b60494, 0xac2e5d27b2efbee2, 0xb79fa897f2db056 ]); } + impl FpParameters for FqParameters { // 28948022309329048855892746252171976963363056481941647379679742748393362948097 - const MODULUS: BigInteger = BigInteger([ + const MODULUS: BigInteger = BigInteger::new([ 0x8c46eb2100000001, 0x224698fc0994a8dd, 0x0, 0x4000000000000000, ]); - const R: BigInteger = BigInteger([ + const R: BigInteger = BigInteger::new([ 0x5b2b3e9cfffffffd, 0x992c350be3420567, 0xffffffffffffffff, 0x3fffffffffffffff, ]); - const R2: BigInteger = BigInteger([ + const R2: BigInteger = BigInteger::new([ 0xfc9678ff0000000f, 0x67bb433d891a16e3, 0x7fae231004ccf590, 0x96d41af7ccfdaa9, ]); - const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger::new([ 0xc623759080000000, 0x11234c7e04ca546e, 0x0, @@ -49,13 +56,13 @@ impl FpParameters for FqParameters { // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T - const T: BigInteger = BigInteger([0x994a8dd8c46eb21, 0x224698fc, 0x0, 0x40000000]); + const T: BigInteger = BigInteger::new([0x994a8dd8c46eb21, 0x224698fc, 0x0, 0x40000000]); const T_MINUS_ONE_DIV_TWO: BigInteger = - BigInteger([0x4ca546ec6237590, 0x11234c7e, 0x0, 0x20000000]); + BigInteger::new([0x4ca546ec6237590, 0x11234c7e, 0x0, 0x20000000]); // GENERATOR = 5 - const GENERATOR: BigInteger = BigInteger([ + const GENERATOR: BigInteger = BigInteger::new([ 0x96bc8c8cffffffed, 0x74c2a54b49f7778e, 0xfffffffffffffffd, diff --git a/curves/src/pasta/fields/mod.rs b/curves/src/pasta/fields/mod.rs index 5c5f93a2ea..fcaff2e7a9 100644 --- a/curves/src/pasta/fields/mod.rs +++ b/curves/src/pasta/fields/mod.rs @@ -4,5 +4,7 @@ pub use 
self::fp::*; pub mod fq; pub use self::fq::*; +pub mod fft; + #[cfg(test)] mod tests; diff --git a/proof-systems-vendors b/proof-systems-vendors index 2db2631fbb..02ba9dc230 160000 --- a/proof-systems-vendors +++ b/proof-systems-vendors @@ -1 +1 @@ -Subproject commit 2db2631fbb47fb24ce111ffa4cc4240c438ca993 +Subproject commit 02ba9dc2305d76cd4882e6be9a88aa0bc684fd4d From 7e5083fc6684b747ce56aaa1773bed3021b6ca2f Mon Sep 17 00:00:00 2001 From: Chiro Hiro Date: Wed, 15 Nov 2023 16:19:33 +0300 Subject: [PATCH 072/178] Update test cases to new version of test suite 0.4.2 --- Cargo.lock | 1 + curves/Cargo.toml | 1 + curves/src/pasta/curves/tests.rs | 32 +++++--------------------------- curves/src/pasta/fields/tests.rs | 27 ++++----------------------- 4 files changed, 11 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec31979a02..13d55d1ac9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1442,6 +1442,7 @@ dependencies = [ "ark-algebra-test-templates", "ark-ec 0.4.2", "ark-ff 0.4.2", + "ark-serialize 0.4.2", "ark-std 0.4.0", "ark-test-curves", "rand", diff --git a/curves/Cargo.toml b/curves/Cargo.toml index c35d499cbe..1015d8e47d 100644 --- a/curves/Cargo.toml +++ b/curves/Cargo.toml @@ -17,4 +17,5 @@ ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } rand = { version = "0.8.5", default-features = false } ark-test-curves = "0.4.2" ark-algebra-test-templates = "0.4.2" +ark-serialize="0.4.2" ark-std = "0.4.0" diff --git a/curves/src/pasta/curves/tests.rs b/curves/src/pasta/curves/tests.rs index 079f219ce0..9f9d3cc002 100644 --- a/curves/src/pasta/curves/tests.rs +++ b/curves/src/pasta/curves/tests.rs @@ -1,28 +1,6 @@ -use ark_algebra_test_templates::{curves::*, groups::*}; -use ark_ec::AffineRepr; -use ark_std::test_rng; -use rand::Rng; +use crate::pasta::ProjectivePallas; +use crate::pasta::ProjectiveVesta; +use ark_algebra_test_templates::*; -use super::pallas; - -#[test] -fn test_pallas_projective_curve() { - curve_tests::(); - - sw_tests::(); -} - -#[test] -fn test_pallas_projective_group() { - let mut rng = test_rng(); - let a: pallas::ProjectivePallas = rng.gen(); - let b: pallas::ProjectivePallas = rng.gen(); - group_test(a, b); -} - -#[test] -fn test_pallas_generator() { - let generator = pallas::Pallas::prime_subgroup_generator(); - assert!(generator.is_on_curve()); - assert!(generator.is_in_correct_subgroup_assuming_on_curve()); -} +test_group!(g1; ProjectivePallas; sw); +test_group!(g2; ProjectiveVesta; sw); diff --git a/curves/src/pasta/fields/tests.rs b/curves/src/pasta/fields/tests.rs index 38d1c93982..0489cfc4cf 100644 --- a/curves/src/pasta/fields/tests.rs +++ b/curves/src/pasta/fields/tests.rs @@ -1,24 +1,5 @@ -use crate::pasta::*; -use ark_algebra_test_templates::fields::{field_test, primefield_test, sqrt_field_test}; -use ark_std::test_rng; -use rand::Rng; +use crate::pasta::fields::{Fp as Fr, Fq}; +use ark_algebra_test_templates::*; -#[test] -fn test_fp() { - let mut rng = test_rng(); - let a: Fp = rng.gen(); - let b: Fp = rng.gen(); - field_test(a, b); - sqrt_field_test(a); - primefield_test::(); -} - -#[test] -fn test_fq() { - let mut rng = test_rng(); - let a: Fq = rng.gen(); - let b: Fq = rng.gen(); - field_test(a, b); - sqrt_field_test(a); - primefield_test::(); -} +test_field!(fq; Fq; mont_prime_field); +test_field!(fr; Fr; mont_prime_field); From 202ec81411c41671b54d8e2ff595135130de57a2 Mon Sep 17 00:00:00 2001 From: Chiro Hiro Date: Wed, 6 Dec 2023 11:24:28 +0700 Subject: [PATCH 073/178] Upgrade utils to arkworks 0.4.2 --- Cargo.lock | 9 
+++++---- utils/Cargo.toml | 11 ++++++----- utils/src/chunked_polynomial.rs | 2 +- utils/src/dense_polynomial.rs | 6 +++--- utils/src/field_helpers.rs | 27 ++++++++++++++++----------- utils/src/foreign_field.rs | 4 ++-- utils/src/serialization.rs | 8 ++++---- 7 files changed, 37 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13d55d1ac9..d7e16842eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,6 +231,7 @@ dependencies = [ "ark-std 0.4.0", "derivative", "hashbrown 0.13.2", + "rayon", ] [[package]] @@ -1655,10 +1656,10 @@ dependencies = [ name = "o1-utils" version = "0.1.0" dependencies = [ - "ark-ec 0.3.0", - "ark-ff 0.3.0", - "ark-poly 0.3.0", - "ark-serialize 0.3.0", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", "bcs", "hex", "mina-curves", diff --git a/utils/Cargo.toml b/utils/Cargo.toml index 9c06c56768..b5af2aff8d 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -10,10 +10,10 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ec = { version = "0.3.0", features = [ "parallel" ] } -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } -ark-poly = { version = "0.3.0", features = [ "parallel" ] } -ark-serialize = "0.3.0" +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-poly = { version = "0.4.2", features = [ "parallel" ] } +ark-serialize = "0.4.2" bcs = "0.1.3" rayon = "1.3.0" serde = "1.0.130" @@ -27,9 +27,10 @@ sha2 = "0.10.2" thiserror = "1.0.30" rand = "0.8.0" rand_core = "0.6.3" +mina-curves = { path = "../curves", version = "0.1.0" } [dev-dependencies] -ark-ec = { version = "0.3.0", features = [ "parallel" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } mina-curves = { path = "../curves", version = "0.1.0" } num-bigint = { version = "0.4.3", features = ["rand"] } secp256k1 = "0.24.2" diff --git a/utils/src/chunked_polynomial.rs b/utils/src/chunked_polynomial.rs index 6f79de09ea..45433df93b 100644 --- a/utils/src/chunked_polynomial.rs +++ b/utils/src/chunked_polynomial.rs @@ -55,7 +55,7 @@ mod tests { use super::*; use ark_ff::One; - use ark_poly::{univariate::DensePolynomial, UVPolynomial}; + use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use mina_curves::pasta::Fp; #[test] diff --git a/utils/src/dense_polynomial.rs b/utils/src/dense_polynomial.rs index 895e227fe0..2c7859d5d4 100644 --- a/utils/src/dense_polynomial.rs +++ b/utils/src/dense_polynomial.rs @@ -1,7 +1,7 @@ //! This adds a few utility functions for the [DensePolynomial] arkworks type. use ark_ff::Field; -use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use rayon::prelude::*; use crate::chunked_polynomial::ChunkedPolynomial; @@ -32,7 +32,7 @@ impl ExtendedDensePolynomial for DensePolynomial { result .coeffs .par_iter_mut() - .for_each(|coeff| *coeff *= &elm); + .for_each(|coeff: &mut F| *coeff *= &elm); result } @@ -76,7 +76,7 @@ impl ExtendedDensePolynomial for DensePolynomial { mod tests { use super::*; use ark_ff::One; - use ark_poly::{univariate::DensePolynomial, UVPolynomial}; + use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use mina_curves::pasta::Fp; #[test] diff --git a/utils/src/field_helpers.rs b/utils/src/field_helpers.rs index ed6a91b8c7..484f2e2340 100644 --- a/utils/src/field_helpers.rs +++ b/utils/src/field_helpers.rs @@ -1,6 +1,6 @@ //! Useful helper methods to extend [ark_ff::Field]. 
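// --- Editorial aside (not part of the patch): the hunks below apply the
// ark-ff 0.3 -> 0.4 renames mechanically. A minimal self-contained sketch of
// the new spellings, assuming ark-ff/ark-serialize 0.4.2; `demo_roundtrip`
// is a hypothetical helper, not code from this repository. ---
use ark_ff::PrimeField;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

fn demo_roundtrip<F: PrimeField>(x: F) -> F {
    // 0.3: `F::size_in_bits()` and `F::Params::MODULUS`
    let _bit_size: u32 = F::MODULUS_BIT_SIZE;
    let _modulus = F::MODULUS;
    // 0.3: `x.into_repr()` / `F::from_repr(r)`
    let _same = F::from_bigint(x.into_bigint()).expect("canonical");
    // 0.3: `x.serialize(..)` / `F::deserialize(..)`
    let mut bytes = Vec::new();
    x.serialize_uncompressed(&mut bytes).expect("serialize");
    F::deserialize_uncompressed(&mut &bytes[..]).expect("deserialize")
}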
-use ark_ff::{BigInteger, Field, FpParameters, PrimeField}; +use ark_ff::{BigInteger, Field, PrimeField}; use num_bigint::{BigUint, RandBigInt}; use rand::rngs::StdRng; use std::ops::Neg; @@ -114,7 +114,7 @@ pub trait FieldHelpers { where F: PrimeField, { - F::size_in_bits() / 8 + (F::size_in_bits() % 8 != 0) as usize + (F::MODULUS_BIT_SIZE / 8) as usize + (F::MODULUS_BIT_SIZE % 8 != 0) as usize } /// Get the modulus as `BigUint` @@ -122,18 +122,19 @@ pub trait FieldHelpers { where F: PrimeField, { - BigUint::from_bytes_le(&F::Params::MODULUS.to_bytes_le()) + BigUint::from_bytes_le(&F::MODULUS.to_bytes_le()) } } impl FieldHelpers for F { fn from_bytes(bytes: &[u8]) -> Result { - F::deserialize(&mut &*bytes).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &*bytes).map_err(|_| FieldHelpersError::DeserializeBytes) } fn from_hex(hex: &str) -> Result { let bytes: Vec = hex::decode(hex).map_err(|_| FieldHelpersError::DecodeHex)?; - F::deserialize(&mut &bytes[..]).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &bytes[..]) + .map_err(|_| FieldHelpersError::DeserializeBytes) } fn from_bits(bits: &[bool]) -> Result { @@ -145,12 +146,13 @@ impl FieldHelpers for F { bytes }); - F::deserialize(&mut &bytes[..]).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &bytes[..]) + .map_err(|_| FieldHelpersError::DeserializeBytes) } fn to_bytes(&self) -> Vec { let mut bytes: Vec = vec![]; - self.serialize(&mut bytes) + self.serialize_uncompressed(&mut bytes) .expect("Failed to serialize field"); bytes @@ -201,12 +203,12 @@ pub fn i32_to_field + Neg>(i: i32) -> F { mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::One; use mina_curves::pasta::Pallas as CurvePoint; /// Base field element type - pub type BaseField = ::BaseField; + pub type BaseField = ::BaseField; #[test] fn field_hex() { @@ -298,7 +300,10 @@ mod tests { .is_ok()); assert_eq!( - BaseField::from_bits(&vec![true; BaseField::size_in_bits()]), + BaseField::from_bits(&vec![ + true; + ::MODULUS_BIT_SIZE as usize + ]), Err(FieldHelpersError::DeserializeBytes) ); @@ -328,7 +333,7 @@ mod tests { let field_zero = BaseField::from(0u32); assert_eq!( - BigUint::from_bytes_be(&field_zero.into_repr().to_bytes_be()), + BigUint::from_bytes_be(&field_zero.0.to_bytes_be()), BigUint::from_bytes_be(&be_zero_32bytes) ); diff --git a/utils/src/foreign_field.rs b/utils/src/foreign_field.rs index f11d191038..14a7c66eb2 100644 --- a/utils/src/foreign_field.rs +++ b/utils/src/foreign_field.rs @@ -390,14 +390,14 @@ fn biguint_to_limbs(x: &BigUint, limb_bits: usize) -> Vec { mod tests { use super::*; use crate::field_helpers::FieldHelpers; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::One; use mina_curves::pasta::Pallas as CurvePoint; use num_bigint::RandBigInt; use rand::{rngs::StdRng, SeedableRng}; /// Base field element type - pub type BaseField = ::BaseField; + pub type BaseField = ::BaseField; const RNG_SEED: [u8; 32] = [ 12, 31, 143, 75, 29, 255, 206, 26, 67, 193, 86, 160, 1, 90, 131, 221, 86, 168, 4, 95, 50, diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index 72178ca575..352a1484d2 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -23,7 +23,7 @@ pub mod ser { S: serde::Serializer, { let mut bytes = vec![]; - val.serialize(&mut bytes) + val.serialize_uncompressed(&mut bytes) .map_err(serde::ser::Error::custom)?; Bytes::serialize_as(&bytes, serializer) @@ 
-37,7 +37,7 @@ pub mod ser { D: serde::Deserializer<'de>, { let bytes: Vec = Bytes::deserialize_as(deserializer)?; - T::deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_uncompressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } @@ -60,7 +60,7 @@ where S: serde::Serializer, { let mut bytes = vec![]; - val.serialize(&mut bytes) + val.serialize_uncompressed(&mut bytes) .map_err(serde::ser::Error::custom)?; if serializer.is_human_readable() { @@ -84,7 +84,7 @@ where } else { Bytes::deserialize_as(deserializer)? }; - T::deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_uncompressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } From b2c55cdc9866c08dd2319ea86c52dcbe23e85be2 Mon Sep 17 00:00:00 2001 From: Chiro Hiro Date: Wed, 6 Dec 2023 11:55:02 +0700 Subject: [PATCH 074/178] Upgrade poseidon to arkworks 0.4.2 --- Cargo.lock | 8 +++---- poseidon/Cargo.toml | 9 ++++---- poseidon/src/poseidon.rs | 3 ++- poseidon/src/sponge.rs | 47 +++++++++++++++++++--------------------- 4 files changed, 33 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7e16842eb..ef9feb5b6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1466,10 +1466,10 @@ dependencies = [ name = "mina-poseidon" version = "0.1.0" dependencies = [ - "ark-ec 0.3.0", - "ark-ff 0.3.0", - "ark-poly 0.3.0", - "ark-serialize 0.3.0", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", "hex", "mina-curves", "o1-utils", diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index 7a122051e4..9256552934 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -13,9 +13,10 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.3.0", features = [ "parallel" ] } -ark-poly = { version = "0.3.0", features = [ "parallel" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-poly = { version = "0.4.2", features = [ "parallel" ] } +ark-serialize = { version = "0.4.2", features = ["derive"]} rand = "0.8.0" rayon = "1" serde = { version = "1.0", features = ["derive"] } @@ -32,7 +33,7 @@ ocaml-gen = { version = "0.1.5", optional = true } [dev-dependencies] serde_json = "1.0" hex = "0.4" -ark-serialize = "0.3.0" +ark-serialize = "0.4.2" [features] default = [] diff --git a/poseidon/src/poseidon.rs b/poseidon/src/poseidon.rs index ff06022910..6a2d5c51b5 100644 --- a/poseidon/src/poseidon.rs +++ b/poseidon/src/poseidon.rs @@ -3,6 +3,7 @@ use crate::constants::SpongeConstants; use crate::permutation::{full_round, poseidon_block_cipher}; use ark_ff::Field; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -34,7 +35,7 @@ pub enum SpongeState { #[serde_as] #[derive(Clone, Serialize, Deserialize, Default, Debug)] -pub struct ArithmeticSpongeParams { +pub struct ArithmeticSpongeParams { #[serde_as(as = "Vec>")] pub round_constants: Vec>, #[serde_as(as = "Vec>")] diff --git a/poseidon/src/sponge.rs b/poseidon/src/sponge.rs index ff7f00a412..5fd68a2ce0 100644 --- a/poseidon/src/sponge.rs +++ b/poseidon/src/sponge.rs @@ -1,7 +1,7 @@ use crate::constants::SpongeConstants; use crate::poseidon::{ArithmeticSponge, ArithmeticSpongeParams, Sponge}; -use ark_ec::{short_weierstrass_jacobian::GroupAffine, SWModelParameters}; -use ark_ff::{BigInteger, Field, FpParameters, One, PrimeField, Zero}; +use 
ark_ec::models::short_weierstrass::{Affine, SWCurveConfig}; +use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; pub use crate::FqSponge; @@ -17,9 +17,7 @@ pub struct ScalarChallenge(pub F); pub fn endo_coefficient() -> F { let p_minus_1_over_3 = (F::zero() - F::one()) / F::from(3u64); - let t = F::multiplicative_generator(); - - t.pow(p_minus_1_over_3.into_repr().as_ref()) + F::GENERATOR.pow(p_minus_1_over_3.into_bigint().as_ref()) } fn get_bit(limbs_lsb: &[u64], i: u64) -> u64 { @@ -30,7 +28,7 @@ fn get_bit(limbs_lsb: &[u64], i: u64) -> u64 { impl ScalarChallenge { pub fn to_field_with_length(&self, length_in_bits: usize, endo_coeff: &F) -> F { - let rep = self.0.into_repr(); + let rep = self.0.into_bigint(); let r = rep.as_ref(); let mut a: F = 2_u64.into(); @@ -63,7 +61,7 @@ impl ScalarChallenge { } #[derive(Clone)] -pub struct DefaultFqSponge { +pub struct DefaultFqSponge { pub sponge: ArithmeticSponge, pub last_squeezed: Vec, } @@ -74,10 +72,10 @@ pub struct DefaultFrSponge { } fn pack(limbs_lsb: &[u64]) -> B { - let mut res: B = 0.into(); + let mut res: B = 0u64.into(); for &x in limbs_lsb.iter().rev() { res.muln(64); - res.add_nocarry(&x.into()); + res.add_with_carry(&x.into()); } res } @@ -88,10 +86,9 @@ impl DefaultFrSponge { let last_squeezed = self.last_squeezed.clone(); let (limbs, remaining) = last_squeezed.split_at(num_limbs); self.last_squeezed = remaining.to_vec(); - Fr::from_repr(pack::(limbs)) - .expect("internal representation was not a valid field element") + Fr::from(pack::(limbs)) } else { - let x = self.sponge.squeeze().into_repr(); + let x = self.sponge.squeeze().into_bigint(); self.last_squeezed .extend(&x.as_ref()[0..HIGH_ENTROPY_LIMBS]); self.squeeze(num_limbs) @@ -99,7 +96,7 @@ impl DefaultFrSponge { } } -impl DefaultFqSponge +impl DefaultFqSponge where P::BaseField: PrimeField, ::BigInt: Into<::BigInt>, @@ -111,7 +108,7 @@ where self.last_squeezed = remaining.to_vec(); limbs.to_vec() } else { - let x = self.sponge.squeeze().into_repr(); + let x = self.sponge.squeeze().into_bigint(); self.last_squeezed .extend(&x.as_ref()[0..HIGH_ENTROPY_LIMBS]); self.squeeze_limbs(num_limbs) @@ -124,13 +121,13 @@ where } pub fn squeeze(&mut self, num_limbs: usize) -> P::ScalarField { - P::ScalarField::from_repr(pack(&self.squeeze_limbs(num_limbs))) + P::ScalarField::from_bigint(pack(&self.squeeze_limbs(num_limbs))) .expect("internal representation was not a valid field element") } } -impl - FqSponge, P::ScalarField> for DefaultFqSponge +impl FqSponge, P::ScalarField> + for DefaultFqSponge where P::BaseField: PrimeField, ::BigInt: Into<::BigInt>, @@ -143,7 +140,7 @@ where } } - fn absorb_g(&mut self, g: &[GroupAffine
<P>
]) { + fn absorb_g(&mut self, g: &[Affine
<P>
], v1: &[SWJAffine
<P>
], @@ -117,7 +118,7 @@ fn batch_add_assign_no_branch( } /// Given arrays of curve points `v0` and `v1` do `v0[i] += v1[i]` for each i. -pub fn batch_add_assign( +pub fn batch_add_assign( denominators: &mut [P::BaseField], v0: &mut [SWJAffine
<P>
], v1: &[SWJAffine
<P>
], @@ -168,7 +169,7 @@ pub fn batch_add_assign( }); } -fn affine_window_combine_base( +fn affine_window_combine_base( g1: &[SWJAffine
<P>
], g2: &[SWJAffine
<P>
], x1: P::ScalarField, @@ -190,8 +191,8 @@ fn affine_window_combine_base( }; assert!(g1g2.len() == g1.len()); - let windows1 = BitIteratorBE::new(x1.into_repr()).tuples(); - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows1 = BitIteratorBE::new(x1.into_bigint()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); let mut points = vec![SWJAffine::
<P>
::zero(); g1.len()]; @@ -275,11 +276,11 @@ fn affine_window_combine_base( points } -fn batch_endo_in_place(endo_coeff: P::BaseField, ps: &mut [SWJAffine
<P>
]) { +fn batch_endo_in_place(endo_coeff: P::BaseField, ps: &mut [SWJAffine
<P>
]) { ps.par_iter_mut().for_each(|p| p.x *= endo_coeff); } -fn batch_negate_in_place(ps: &mut [SWJAffine
<P>
]) { +fn batch_negate_in_place(ps: &mut [SWJAffine
<P>
]) { ps.par_iter_mut().for_each(|p| { p.y = -p.y; }); @@ -287,7 +288,7 @@ fn batch_negate_in_place(ps: &mut [SWJAffine
<P>
]) { /// Uses a batch version of Algorithm 1 of https://eprint.iacr.org/2019/1021.pdf (on page 19) to /// compute `g1 + g2.scale(chal.to_field(endo_coeff))` -fn affine_window_combine_one_endo_base( +fn affine_window_combine_one_endo_base( endo_coeff: P::BaseField, g1: &[SWJAffine
<P>
], g2: &[SWJAffine
<P>
], @@ -304,7 +305,7 @@ fn affine_window_combine_one_endo_base( (limbs_lsb[limb as usize] >> j) & 1 } - let rep = chal.0.into_repr(); + let rep = chal.0.into_bigint(); let r = rep.as_ref(); let mut denominators = vec![P::BaseField::zero(); g1.len()]; @@ -340,7 +341,7 @@ fn affine_window_combine_one_endo_base( } /// Double an array of curve points in-place. -fn batch_double_in_place( +fn batch_double_in_place( denominators: &mut Vec, points: &mut [SWJAffine
<P>
], ) { @@ -366,12 +367,12 @@ fn batch_double_in_place( }); } -fn affine_window_combine_one_base( +fn affine_window_combine_one_base( g1: &[SWJAffine
<P>
], g2: &[SWJAffine
<P>
], x2: P::ScalarField, ) -> Vec> { - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); let mut points = vec![SWJAffine::
<P>
::zero(); g1.len()]; @@ -412,7 +413,7 @@ fn affine_window_combine_one_base( points } -pub fn affine_window_combine( +pub fn affine_window_combine( g1: &[SWJAffine
<P>
], g2: &[SWJAffine
<P>
], x1: P::ScalarField, @@ -431,7 +432,7 @@ pub fn affine_window_combine( /// `g1[i] + g2[i].scale(chal.to_field(endo_coeff))` /// /// Internally, it uses the curve endomorphism to speed up this operation. -pub fn affine_window_combine_one_endo( +pub fn affine_window_combine_one_endo( endo_coeff: P::BaseField, g1: &[SWJAffine
<P>
], g2: &[SWJAffine
<P>
], @@ -445,7 +446,7 @@ pub fn affine_window_combine_one_endo( .collect(); v.concat() } -pub fn affine_window_combine_one( +pub fn affine_window_combine_one( g1: &[SWJAffine
<P>
], g2: &[SWJAffine
<P>
], x2: P::ScalarField, @@ -459,24 +460,23 @@ pub fn affine_window_combine_one( v.concat() } -pub fn window_combine( +pub fn window_combine( g_lo: &[G], g_hi: &[G], x_lo: G::ScalarField, x_hi: G::ScalarField, ) -> Vec { - let mut g_proj: Vec = { + let mut g_proj: Vec = { let pairs: Vec<_> = g_lo.iter().zip(g_hi).collect(); pairs .into_par_iter() .map(|(lo, hi)| window_shamir::(x_lo, *lo, x_hi, *hi)) .collect() }; - G::Projective::batch_normalization(g_proj.as_mut_slice()); - g_proj.par_iter().map(|g| g.into_affine()).collect() + G::Group::normalize_batch(g_proj.as_mut_slice()) } -pub fn affine_shamir_window_table( +pub fn affine_shamir_window_table( denominators: &mut [P::BaseField], g1: &[SWJAffine
<P>
], g2: &[SWJAffine
<P>
], @@ -555,7 +555,7 @@ pub fn affine_shamir_window_table( res } -pub fn affine_shamir_window_table_one( +pub fn affine_shamir_window_table_one( denominators: &mut [P::BaseField], g1: &[SWJAffine
<P>
], ) -> [Vec>; 3] { @@ -585,118 +585,113 @@ pub fn affine_shamir_window_table_one( res } -fn window_shamir( - x1: G::ScalarField, - g1: G, - x2: G::ScalarField, - g2: G, -) -> G::Projective { +fn window_shamir(x1: G::ScalarField, g1: G, x2: G::ScalarField, g2: G) -> G::Group { let [_g00_00, g01_00, g10_00, g11_00, g00_01, g01_01, g10_01, g11_01, g00_10, g01_10, g10_10, g11_10, g00_11, g01_11, g10_11, g11_11] = shamir_window_table(g1, g2); - let windows1 = BitIteratorBE::new(x1.into_repr()).tuples(); - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows1 = BitIteratorBE::new(x1.into_bigint()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); - let mut res = G::Projective::zero(); + let mut res = G::Group::zero(); for ((hi_1, lo_1), (hi_2, lo_2)) in windows1.zip(windows2) { res.double_in_place(); res.double_in_place(); match ((hi_1, lo_1), (hi_2, lo_2)) { ((false, false), (false, false)) => (), - ((false, true), (false, false)) => res.add_assign_mixed(&g01_00), - ((true, false), (false, false)) => res.add_assign_mixed(&g10_00), - ((true, true), (false, false)) => res.add_assign_mixed(&g11_00), - - ((false, false), (false, true)) => res.add_assign_mixed(&g00_01), - ((false, true), (false, true)) => res.add_assign_mixed(&g01_01), - ((true, false), (false, true)) => res.add_assign_mixed(&g10_01), - ((true, true), (false, true)) => res.add_assign_mixed(&g11_01), - - ((false, false), (true, false)) => res.add_assign_mixed(&g00_10), - ((false, true), (true, false)) => res.add_assign_mixed(&g01_10), - ((true, false), (true, false)) => res.add_assign_mixed(&g10_10), - ((true, true), (true, false)) => res.add_assign_mixed(&g11_10), - - ((false, false), (true, true)) => res.add_assign_mixed(&g00_11), - ((false, true), (true, true)) => res.add_assign_mixed(&g01_11), - ((true, false), (true, true)) => res.add_assign_mixed(&g10_11), - ((true, true), (true, true)) => res.add_assign_mixed(&g11_11), + ((false, true), (false, false)) => res.add_assign(&g01_00), + ((true, false), (false, false)) => res.add_assign(&g10_00), + ((true, true), (false, false)) => res.add_assign(&g11_00), + + ((false, false), (false, true)) => res.add_assign(&g00_01), + ((false, true), (false, true)) => res.add_assign(&g01_01), + ((true, false), (false, true)) => res.add_assign(&g10_01), + ((true, true), (false, true)) => res.add_assign(&g11_01), + + ((false, false), (true, false)) => res.add_assign(&g00_10), + ((false, true), (true, false)) => res.add_assign(&g01_10), + ((true, false), (true, false)) => res.add_assign(&g10_10), + ((true, true), (true, false)) => res.add_assign(&g11_10), + + ((false, false), (true, true)) => res.add_assign(&g00_11), + ((false, true), (true, true)) => res.add_assign(&g01_11), + ((true, false), (true, true)) => res.add_assign(&g10_11), + ((true, true), (true, true)) => res.add_assign(&g11_11), } } res } -pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { - let g00_00 = G::prime_subgroup_generator().into_projective(); - let g01_00 = g1.into_projective(); +pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { + let g00_00 = G::generator().into_group(); + let g01_00 = g1.into_group(); let g10_00 = { let mut g = g01_00; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_00 = { let mut g = g10_00; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; - let g00_01 = g2.into_projective(); + let g00_01 = g2.into_group(); let g01_01 = { let mut g = g00_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_01 = { let mut g = g01_01; - 
g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_01 = { let mut g = g10_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g00_10 = { let mut g = g00_01; - g.add_assign_mixed(&g2); + g.add_assign(&g2); g }; let g01_10 = { let mut g = g00_10; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_10 = { let mut g = g01_10; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_10 = { let mut g = g10_10; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g00_11 = { let mut g = g00_10; - g.add_assign_mixed(&g2); + g.add_assign(&g2); g }; let g01_11 = { let mut g = g00_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_11 = { let mut g = g01_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_11 = { let mut g = g10_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; @@ -704,8 +699,7 @@ pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { g00_00, g01_00, g10_00, g11_00, g00_01, g01_01, g10_01, g11_01, g00_10, g01_10, g10_10, g11_10, g00_11, g01_11, g10_11, g11_11, ]; - G::Projective::batch_normalization(v.as_mut_slice()); - let v: Vec<_> = v.iter().map(|x| x.into_affine()).collect(); + let v: Vec<_> = G::Group::normalize_batch(v.as_mut_slice()); [ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7], v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15], diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index bb2469b49f..35459e4f5d 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -10,17 +10,15 @@ use crate::srs::endos; use crate::SRS as SRSTrait; use crate::{error::CommitmentError, srs::SRS}; use ark_ec::{ - models::short_weierstrass_jacobian::GroupAffine as SWJAffine, msm::VariableBaseMSM, - AffineCurve, ProjectiveCurve, SWModelParameters, -}; -use ark_ff::{ - BigInteger, Field, FpParameters, One, PrimeField, SquareRootField, UniformRand, Zero, + models::short_weierstrass::Affine as SWJAffine, short_weierstrass::SWCurveConfig, AffineRepr, + CurveGroup, VariableBaseMSM, }; +use ark_ff::{BigInteger, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use core::ops::{Add, Sub}; +use core::ops::{Add, AddAssign, Sub}; use groupmap::{BWParameters, GroupMap}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; use o1_utils::math; @@ -130,16 +128,16 @@ impl PolyComm { /// ``` /// /// in the other case. 
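// --- Editorial aside (not part of the patch): the `shift_scalar` hunk below
// leans on another 0.4 move — the modulus constants migrate from the removed
// `FpParameters`/`Params` indirection onto `PrimeField` itself. A small
// sketch under that assumption; `two_pow_modulus_bits` is a hypothetical
// helper, not code from this repository. ---
use ark_ff::{BigInteger, PrimeField};

fn two_pow_modulus_bits<F: PrimeField>() -> F {
    // 0.3: `F::Params::MODULUS.to_bits_le()` and `F::Params::MODULUS_BITS`
    let _modulus_bits: Vec<bool> = F::MODULUS.to_bits_le();
    F::from(2u64).pow([F::MODULUS_BIT_SIZE as u64])
}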
-pub fn shift_scalar(x: G::ScalarField) -> G::ScalarField +pub fn shift_scalar(x: G::ScalarField) -> G::ScalarField where G::BaseField: PrimeField, { - let n1 = ::Params::MODULUS; + let n1 = ::MODULUS; let n2 = ::BigInt::from_bits_le( - &::Params::MODULUS.to_bits_le()[..], + &::MODULUS.to_bits_le()[..], ); let two: G::ScalarField = (2u64).into(); - let two_pow = two.pow([::Params::MODULUS_BITS as u64]); + let two_pow = two.pow([::MODULUS_BIT_SIZE as u64]); if n1 < n2 { (x - (two_pow + G::ScalarField::one())) / two } else { @@ -147,7 +145,7 @@ where } } -impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { +impl<'a, 'b, C: AffineRepr> Add<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn add(self, other: &'a PolyComm) -> PolyComm { @@ -156,7 +154,7 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.elems[i] + other.elems[i] + (self.elems[i] + other.elems[i]).into_affine() } else if i < n1 { self.elems[i] } else { @@ -168,7 +166,7 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { } } -impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { +impl<'a, 'b, C: AffineRepr + Sub> Sub<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn sub(self, other: &'a PolyComm) -> PolyComm { @@ -177,7 +175,7 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.elems[i] + (-other.elems[i]) + (self.elems[i] - other.elems[i]).into_affine() } else if i < n1 { self.elems[i] } else { @@ -189,7 +187,7 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { } } -impl PolyComm { +impl PolyComm { pub fn scale(&self, c: C::ScalarField) -> PolyComm { PolyComm { elems: self.elems.iter().map(|g| g.mul(c).into_affine()).collect(), @@ -209,7 +207,7 @@ impl PolyComm { return Self::new(vec![C::zero()]); } - let all_scalars: Vec<_> = elm.iter().map(|s| s.into_repr()).collect(); + let all_scalars: Vec<_> = elm.iter().map(|s| s.into_bigint()).collect(); let elems_size = Iterator::max(com.iter().map(|c| c.elems.len())).unwrap(); let mut elems = Vec::with_capacity(elems_size); @@ -222,10 +220,9 @@ impl PolyComm { .filter_map(|(com, scalar)| com.elems.get(chunk).map(|c| (c, scalar))) .unzip(); - let chunk_msm = VariableBaseMSM::multi_scalar_mul::(&points, &scalars); + let chunk_msm = C::Group::msm_bigint(&points, &scalars); elems.push(chunk_msm.into_affine()); } - Self::new(elems) } } @@ -279,41 +276,31 @@ pub fn pows(d: usize, x: F) -> Vec { res } -pub fn squeeze_prechallenge>( +pub fn squeeze_prechallenge>( sponge: &mut EFqSponge, ) -> ScalarChallenge { ScalarChallenge(sponge.challenge()) } -pub fn squeeze_challenge< - Fq: Field, - G, - Fr: PrimeField + SquareRootField, - EFqSponge: FqSponge, ->( +pub fn squeeze_challenge>( endo_r: &Fr, sponge: &mut EFqSponge, ) -> Fr { squeeze_prechallenge(sponge).to_field(endo_r) } -pub fn absorb_commitment< - Fq: Field, - G: Clone, - Fr: PrimeField + SquareRootField, - EFqSponge: FqSponge, ->( +pub fn absorb_commitment>( sponge: &mut EFqSponge, commitment: &PolyComm, ) { sponge.absorb_g(&commitment.elems); } -/// A useful trait extending AffineCurve for commitments. -/// Unfortunately, we can't specify that `AffineCurve`, +/// A useful trait extending AffineRepr for commitments. 
+/// Unfortunately, we can't specify that `AffineRepr`, /// so usage of this traits must manually bind `G::BaseField: PrimeField`. -pub trait CommitmentCurve: AffineCurve { - type Params: SWModelParameters; +pub trait CommitmentCurve: AffineRepr { + type Params: SWCurveConfig; type Map: GroupMap; fn to_coordinates(&self) -> Option<(Self::BaseField, Self::BaseField)>; @@ -350,7 +337,7 @@ pub trait EndoCurve: CommitmentCurve { } } -impl CommitmentCurve for SWJAffine
<P>
{ +impl CommitmentCurve for SWJAffine
<P>
{ type Params = P; type Map = BWParameters
<P>
; @@ -363,14 +350,11 @@ impl CommitmentCurve for SWJAffine
<P>
{ } fn of_coordinates(x: P::BaseField, y: P::BaseField) -> SWJAffine
<P>
{ - SWJAffine::
<P>
::new(x, y, false) + SWJAffine::
<P>
::new_unchecked(x, y) } } -impl EndoCurve for SWJAffine
<P>
-where - P::BaseField: PrimeField, -{ +impl EndoCurve for SWJAffine
<P>
{ fn combine_one(g1: &[Self], g2: &[Self], x2: Self::ScalarField) -> Vec { crate::combine::affine_window_combine_one(g1, g2, x2) } @@ -395,7 +379,7 @@ where } } -pub fn to_group(m: &G::Map, t: ::BaseField) -> G { +pub fn to_group(m: &G::Map, t: ::BaseField) -> G { let (x, y) = m.to_group(t); G::of_coordinates(x, y) } @@ -433,7 +417,7 @@ pub fn combined_inner_product( /// Contains the evaluation of a polynomial commitment at a set of points. pub struct Evaluation where - G: AffineCurve, + G: AffineRepr, { /// The commitment of the polynomial being evaluated pub commitment: PolyComm, @@ -446,7 +430,7 @@ where // TODO: I think we should really change this name to something more correct pub struct BatchEvaluationProof<'a, G, EFqSponge, OpeningProof> where - G: AffineCurve, + G: AffineRepr, EFqSponge: FqSponge, { pub sponge: EFqSponge, @@ -560,7 +544,7 @@ impl SRSTrait for SRS { .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))? .map(|(g, b)| { let mut g_masked = self.h.mul(b); - g_masked.add_assign_mixed(&g); + g_masked.add_assign(&g); g_masked.into_affine() }); Ok(BlindedCommitment { @@ -581,7 +565,7 @@ impl SRSTrait for SRS { ) -> PolyComm { let is_zero = plnm.is_zero(); - let coeffs: Vec<_> = plnm.iter().map(|c| c.into_repr()).collect(); + let coeffs: Vec<_> = plnm.iter().map(|c| c.into_bigint()).collect(); // chunk while commiting let mut elems = vec![]; @@ -589,7 +573,7 @@ impl SRSTrait for SRS { elems.push(G::zero()); } else { coeffs.chunks(self.g.len()).for_each(|coeffs_chunk| { - let chunk = VariableBaseMSM::multi_scalar_mul(&self.g, coeffs_chunk); + let chunk = G::Group::msm_bigint(&self.g, coeffs_chunk); elems.push(chunk.into_affine()); }); } @@ -807,8 +791,8 @@ impl SRS { } // verify the equation - let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); - VariableBaseMSM::multi_scalar_mul(&points, &scalars) == G::Projective::zero() + let scalars: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect(); + G::Group::msm_bigint(&points, &scalars) == G::Group::zero() } } @@ -829,7 +813,7 @@ mod tests { use super::*; use crate::srs::SRS; - use ark_poly::{Polynomial, Radix2EvaluationDomain, UVPolynomial}; + use ark_poly::{DenseUVPolynomial, Polynomial, Radix2EvaluationDomain}; use mina_curves::pasta::{Fp, Vesta as VestaG}; use mina_poseidon::constants::PlonkSpongeConstantsKimchi as SC; use mina_poseidon::sponge::DefaultFqSponge; @@ -1050,12 +1034,16 @@ pub mod caml { impl From> for CamlPolyComm where - G: AffineCurve, + G: AffineRepr, CamlG: From, { fn from(polycomm: PolyComm) -> Self { Self { - unshifted: polycomm.elems.into_iter().map(Into::into).collect(), + unshifted: polycomm + .elems + .into_iter() + .map(Into::::into) + .collect(), shifted: None, } } @@ -1063,12 +1051,12 @@ pub mod caml { impl<'a, G, CamlG> From<&'a PolyComm> for CamlPolyComm where - G: AffineCurve, + G: AffineRepr, CamlG: From + From<&'a G>, { fn from(polycomm: &'a PolyComm) -> Self { Self { - unshifted: polycomm.elems.iter().map(Into::into).collect(), + unshifted: polycomm.elems.iter().map(Into::::into).collect(), shifted: None, } } @@ -1076,7 +1064,7 @@ pub mod caml { impl From> for PolyComm where - G: AffineCurve + From, + G: AffineRepr + From, { fn from(camlpolycomm: CamlPolyComm) -> PolyComm { assert!( @@ -1084,14 +1072,18 @@ pub mod caml { "mina#14628: Shifted commitments are deprecated and must not be used" ); PolyComm { - elems: camlpolycomm.unshifted.into_iter().map(Into::into).collect(), + elems: camlpolycomm + .unshifted + .into_iter() + .map(Into::::into) + 
.collect(), } } } impl<'a, G, CamlG> From<&'a CamlPolyComm> for PolyComm where - G: AffineCurve + From<&'a CamlG> + From, + G: AffineRepr + From<&'a CamlG> + From, { fn from(camlpolycomm: &'a CamlPolyComm) -> PolyComm { assert!( @@ -1119,7 +1111,7 @@ pub mod caml { impl From> for CamlOpeningProof where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -1128,19 +1120,19 @@ pub mod caml { lr: opening_proof .lr .into_iter() - .map(|(g1, g2)| (g1.into(), g2.into())) + .map(|(g1, g2)| (From::from(g1), From::from(g2))) .collect(), - delta: opening_proof.delta.into(), - z1: opening_proof.z1.into(), - z2: opening_proof.z2.into(), - sg: opening_proof.sg.into(), + delta: From::from(opening_proof.delta), + z1: From::from(opening_proof.z1), + z2: From::from(opening_proof.z2), + sg: From::from(opening_proof.sg), } } } impl From> for OpeningProof where - G: AffineCurve, + G: AffineRepr, CamlG: Into, CamlF: Into, { diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 6b2e9dcfc3..4c1b80d7e0 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -1,8 +1,8 @@ use crate::{commitment::*, srs::endos}; use crate::{srs::SRS, PolynomialsToCombine, SRS as _}; -use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use ark_poly::{EvaluationDomain, Evaluations}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; use o1_utils::{math, ExtendedDensePolynomial}; @@ -224,22 +224,22 @@ impl SRS { let rand_l = ::rand(rng); let rand_r = ::rand(rng); - let l = VariableBaseMSM::multi_scalar_mul( + let l = G::Group::msm_bigint( &[&g[0..n], &[self.h, u]].concat(), &[&a[n..], &[rand_l, inner_prod(a_hi, b_lo)]] .concat() .iter() - .map(|x| x.into_repr()) + .map(|x| x.into_bigint()) .collect::>(), ) .into_affine(); - let r = VariableBaseMSM::multi_scalar_mul( + let r = G::Group::msm_bigint( &[&g[n..], &[self.h, u]].concat(), &[&a[0..n], &[rand_r, inner_prod(a_lo, b_hi)]] .concat() .iter() - .map(|x| x.into_repr()) + .map(|x| x.into_bigint()) .collect::>(), ) .into_affine(); @@ -298,9 +298,8 @@ impl SRS { let d = ::rand(rng); let r_delta = ::rand(rng); - let delta = ((g0.into_projective() + (u.mul(b0))).into_affine().mul(d) - + self.h.mul(r_delta)) - .into_affine(); + let delta = ((g0.into_group() + (u.mul(b0))).into_affine().mul(d) + self.h.mul(r_delta)) + .into_affine(); sponge.absorb_g(&[delta]); let c = ScalarChallenge(sponge.challenge()).to_field(&endo_r); @@ -363,7 +362,7 @@ impl SRS { #[serde_as] #[derive(Clone, Debug, Serialize, Deserialize, Default)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct OpeningProof { +pub struct OpeningProof { /// vector of rounds of L & R commitments #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")] pub lr: Vec<(G, G)>, @@ -377,26 +376,24 @@ pub struct OpeningProof { pub sg: G, } -impl< - BaseField: PrimeField, - G: AffineCurve + CommitmentCurve + EndoCurve, - > crate::OpenProof for OpeningProof +impl + CommitmentCurve + EndoCurve> + crate::OpenProof for OpeningProof { type SRS = SRS; - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, plnms: PolynomialsToCombine, - elm: &[::ScalarField], 
// vector of evaluation points - polyscale: ::ScalarField, // scaling factor for polynoms - evalscale: ::ScalarField, // scaling factor for evaluation point powers - sponge: EFqSponge, // sponge + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers + sponge: EFqSponge, // sponge rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng) @@ -409,7 +406,7 @@ impl< rng: &mut RNG, ) -> bool where - EFqSponge: FqSponge, + EFqSponge: FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { srs.verify(group_map, batch, rng) @@ -421,7 +418,7 @@ pub struct Challenges { pub chal_inv: Vec, } -impl OpeningProof { +impl OpeningProof { pub fn prechallenges>( &self, sponge: &mut EFqSponge, diff --git a/poly-commitment/src/lib.rs b/poly-commitment/src/lib.rs index fb7f7491ca..9fa1873ec4 100644 --- a/poly-commitment/src/lib.rs +++ b/poly-commitment/src/lib.rs @@ -14,7 +14,7 @@ pub use commitment::PolyComm; use crate::commitment::{BatchEvaluationProof, BlindedCommitment, CommitmentCurve}; use crate::error::CommitmentError; use crate::evaluation_proof::DensePolynomialOrEvaluations; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::UniformRand; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, @@ -93,19 +93,19 @@ pub trait OpenProof: Sized { type SRS: SRS; #[allow(clippy::too_many_arguments)] - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness - elm: &[::ScalarField], // vector of evaluation points - polyscale: ::ScalarField, // scaling factor for polynoms - evalscale: ::ScalarField, // scaling factor for evaluation point powers + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers sponge: EFqSponge, // sponge rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng; fn verify( diff --git a/poly-commitment/src/pairing_proof.rs b/poly-commitment/src/pairing_proof.rs index 1a581e538b..55599369a0 100644 --- a/poly-commitment/src/pairing_proof.rs +++ b/poly-commitment/src/pairing_proof.rs @@ -2,11 +2,11 @@ use crate::commitment::*; use crate::evaluation_proof::combine_polys; use crate::srs::SRS; use crate::{CommitmentError, PolynomialsToCombine, SRS as SRSTrait}; -use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; +use ark_ec::{pairing::Pairing, AffineRepr, VariableBaseMSM}; use ark_ff::{PrimeField, Zero}; use ark_poly::{ univariate::{DenseOrSparsePolynomial, DensePolynomial}, - EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, UVPolynomial, + DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, }; use mina_poseidon::FqSponge; use rand_core::{CryptoRng, RngCore}; @@ -18,23 +18,23 @@ use serde_with::serde_as; #[serde( bound = "Pair::G1Affine: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize" )] -pub struct PairingProof { +pub struct PairingProof { 
#[serde_as(as = "o1_utils::serialization::SerdeAs")] pub quotient: Pair::G1Affine, #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub blinding: ::ScalarField, + pub blinding: ::ScalarField, } -impl Default for PairingProof { +impl Default for PairingProof { fn default() -> Self { Self { - quotient: Pair::G1Affine::prime_subgroup_generator(), - blinding: ::ScalarField::zero(), + quotient: Pair::G1Affine::generator(), + blinding: ::ScalarField::zero(), } } } -impl Clone for PairingProof { +impl Clone for PairingProof { fn clone(&self) -> Self { Self { quotient: self.quotient, @@ -44,12 +44,12 @@ impl Clone for PairingProof { } #[derive(Debug, Serialize, Deserialize)] -pub struct PairingSRS { +pub struct PairingSRS { pub full_srs: SRS, pub verifier_srs: SRS, } -impl Default for PairingSRS { +impl Default for PairingSRS { fn default() -> Self { Self { full_srs: SRS::default(), @@ -58,7 +58,7 @@ impl Default for PairingSRS { } } -impl Clone for PairingSRS { +impl Clone for PairingSRS { fn clone(&self) -> Self { Self { full_srs: self.full_srs.clone(), @@ -71,7 +71,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > PairingSRS { pub fn create(x: F, n: usize) -> Self { @@ -86,24 +86,24 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > crate::OpenProof for PairingProof { type SRS = PairingSRS; - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, _group_map: &::Map, plnms: PolynomialsToCombine, - elm: &[::ScalarField], // vector of evaluation points - polyscale: ::ScalarField, // scaling factor for polynoms - _evalscale: ::ScalarField, // scaling factor for evaluation point powers - _sponge: EFqSponge, // sponge + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + _evalscale: ::ScalarField, // scaling factor for evaluation point powers + _sponge: EFqSponge, // sponge _rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { PairingProof::create(srs, plnms, elm, polyscale).unwrap() @@ -141,7 +141,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > SRSTrait for PairingSRS { fn max_poly_size(&self) -> usize { @@ -250,7 +250,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > PairingProof { pub fn create>( @@ -283,6 +283,7 @@ impl< blinding: blinding_factor, }) } + pub fn verify( &self, srs: &PairingSRS, // SRS @@ -290,7 +291,7 @@ impl< polyscale: G::ScalarField, // scaling factor for polynoms elm: &[G::ScalarField], // vector of evaluation points ) -> bool { - let poly_commitment = { + let poly_commitment: G::Group = { let mut scalars: Vec = Vec::new(); let mut points = Vec::new(); combine_commitments( @@ -300,9 +301,9 @@ impl< polyscale, F::one(), /* TODO: This is inefficient */ ); - let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); + let scalars: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect(); - VariableBaseMSM::multi_scalar_mul(&points, &scalars) + G::Group::msm_bigint(&points, &scalars) }; let evals = combine_evaluations(evaluations, polyscale); let blinding_commitment = srs.full_srs.h.mul(self.blinding); @@ -314,13 +315,12 @@ impl< .full_srs .commit_non_hiding(&eval_polynomial(elm, &evals), 1) .elems[0] - .into_projective(); - let 
numerator_commitment = { poly_commitment - eval_commitment - blinding_commitment }; + .into_group(); + let numerator_commitment_proj: ::Group = + { poly_commitment - eval_commitment - blinding_commitment }; + let numerator_commitment_affine: Pair::G1Affine = From::from(numerator_commitment_proj); - let numerator = Pair::pairing( - numerator_commitment, - Pair::G2Affine::prime_subgroup_generator(), - ); + let numerator = Pair::pairing(numerator_commitment_affine, Pair::G2Affine::generator()); let scaled_quotient = Pair::pairing(self.quotient, divisor_commitment); numerator == scaled_quotient } @@ -334,12 +334,12 @@ mod tests { use crate::srs::SRS; use crate::SRS as _; use ark_bn254::Fr as ScalarField; - use ark_bn254::{G1Affine as G1, G2Affine as G2, Parameters}; + use ark_bn254::{Config, G1Affine as G1, G2Affine as G2}; use ark_ec::bn::Bn; use ark_ff::UniformRand; use ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Polynomial, Radix2EvaluationDomain as D, - UVPolynomial, + univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Polynomial, + Radix2EvaluationDomain as D, }; use rand::{rngs::StdRng, SeedableRng}; @@ -405,7 +405,7 @@ mod tests { let polyscale = ScalarField::rand(rng); - let pairing_proof = PairingProof::>::create( + let pairing_proof = PairingProof::>::create( &srs, polynomials_and_blinders.as_slice(), &evaluation_points, diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs index 355c420b66..c80b9429c7 100644 --- a/poly-commitment/src/srs.rs +++ b/poly-commitment/src/srs.rs @@ -2,7 +2,7 @@ use crate::commitment::CommitmentCurve; use crate::PolyComm; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -48,10 +48,10 @@ where let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient(); let endo_r = { let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient(); - let t = G::prime_subgroup_generator(); + let t = G::generator(); let (x, y) = t.to_coordinates().unwrap(); let phi_t = G::of_coordinates(x * endo_q, y); - if t.mul(potential_endo_r) == phi_t.into_projective() { + if t.mul(potential_endo_r) == phi_t.into_group() { potential_endo_r } else { potential_endo_r * potential_endo_r @@ -81,11 +81,12 @@ where let n = <::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits); - let t = <::BasePrimeField as PrimeField>::from_repr(n) + let t = <::BasePrimeField as PrimeField>::from_bigint(n) .expect("packing code has a bug"); base_fields.push(t) } let t = G::BaseField::from_base_prime_field_elems(&base_fields).unwrap(); + let (x, y) = map.to_group(t); G::of_coordinates(x, y) } @@ -184,24 +185,22 @@ impl SRS { // For each chunk for i in 0..num_elems { // Initialize the vector with zero curve points - let mut lg: Vec<::Projective> = - vec![::Projective::zero(); n]; + let mut lg: Vec<::Group> = vec![::Group::zero(); n]; // Overwrite the terms corresponding to that chunk with the SRS curve points let start_offset = i * srs_size; let num_terms = min((i + 1) * srs_size, n) - start_offset; for j in 0..num_terms { - lg[start_offset + j] = self.g[j].into_projective() + lg[start_offset + j] = self.g[j].into_group() } // Apply the IFFT domain.ifft_in_place(&mut lg); - ::Projective::batch_normalization(lg.as_mut_slice()); // Append the 'partial Langrange polynomials' to the vector of elems chunks - 
elems.push(lg) + elems.push(::Group::normalize_batch(lg.as_mut_slice())); } let chunked_commitments: Vec<_> = (0..n) .map(|i| PolyComm { - elems: elems.iter().map(|v| v[i].into_affine()).collect(), + elems: elems.iter().map(|v| v[i]).collect(), }) .collect(); self.lagrange_bases.insert(n, chunked_commitments); @@ -214,7 +213,7 @@ impl SRS { let mut x_pow = G::ScalarField::one(); let g: Vec<_> = (0..depth) .map(|_| { - let res = G::prime_subgroup_generator().mul(x_pow); + let res = G::generator().mul(x_pow); x_pow *= x; res.into_affine() }) diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs index 545a788fd8..488074e2f3 100644 --- a/poly-commitment/src/tests/batch_15_wires.rs +++ b/poly-commitment/src/tests/batch_15_wires.rs @@ -8,7 +8,7 @@ use crate::{ SRS as _, }; use ark_ff::{UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Radix2EvaluationDomain}; use colored::Colorize; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs index 38d57994ec..3c96fb8c54 100644 --- a/poly-commitment/src/tests/commitment.rs +++ b/poly-commitment/src/tests/commitment.rs @@ -8,7 +8,7 @@ use crate::{ SRS as _, }; use ark_ff::{UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Radix2EvaluationDomain}; use colored::Colorize; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; diff --git a/proof-systems-vendors b/proof-systems-vendors index 02ba9dc230..dfed44c3cb 160000 --- a/proof-systems-vendors +++ b/proof-systems-vendors @@ -1 +1 @@ -Subproject commit 02ba9dc2305d76cd4882e6be9a88aa0bc684fd4d +Subproject commit dfed44c3cb43543b8166fc2f16dac5bd091e971b From 8262e8549c8012757aa497a27eacb90a5536b0ca Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 26 Dec 2023 22:11:43 +0000 Subject: [PATCH 081/178] Upgrade arkworks for `kimchi` and other libraries/tools --- .github/workflows/rust.yml | 2 +- Cargo.lock | 283 +---- book/src/specs/kimchi.md | 8 +- circuit-construction/Cargo.toml | 47 + circuit-construction/src/constants.rs | 44 + circuit-construction/src/lib.rs | 33 + circuit-construction/src/prover.rs | 136 +++ .../src/tests/example_proof.rs | 103 ++ circuit-construction/src/writer.rs | 1007 +++++++++++++++++ kimchi/Cargo.toml | 10 +- kimchi/src/circuits/constraints.rs | 15 +- .../circuits/domain_constant_evaluation.rs | 2 +- kimchi/src/circuits/expr.rs | 4 +- kimchi/src/circuits/gate.rs | 23 +- kimchi/src/circuits/lookup/index.rs | 4 +- kimchi/src/circuits/polynomials/and.rs | 4 +- .../circuits/polynomials/endomul_scalar.rs | 6 +- .../polynomials/foreign_field_add/gadget.rs | 4 +- .../polynomials/foreign_field_mul/gadget.rs | 4 +- .../src/circuits/polynomials/permutation.rs | 6 +- kimchi/src/circuits/polynomials/poseidon.rs | 4 +- .../polynomials/range_check/gadget.rs | 4 +- kimchi/src/circuits/polynomials/rot.rs | 4 +- kimchi/src/circuits/polynomials/turshi.rs | 4 +- kimchi/src/circuits/polynomials/xor.rs | 4 +- kimchi/src/circuits/wires.rs | 20 - kimchi/src/circuits/witness/mod.rs | 4 +- kimchi/src/curve.rs | 40 +- kimchi/src/linearization.rs | 8 +- kimchi/src/proof.rs | 16 +- kimchi/src/prover.rs | 18 +- kimchi/src/prover_index.rs | 8 +- kimchi/src/tests/and.rs | 6 +- 
kimchi/src/tests/ec.rs | 27 +- kimchi/src/tests/endomul.rs | 28 +- kimchi/src/tests/endomul_scalar.rs | 4 +- kimchi/src/tests/foreign_field_add.rs | 14 +- kimchi/src/tests/foreign_field_mul.rs | 6 +- kimchi/src/tests/generic.rs | 4 +- kimchi/src/tests/keccak.rs | 4 +- kimchi/src/tests/not.rs | 6 +- kimchi/src/tests/range_check.rs | 4 +- kimchi/src/tests/recursion.rs | 2 +- kimchi/src/tests/rot.rs | 4 +- kimchi/src/tests/serde.rs | 6 +- kimchi/src/tests/varbasemul.rs | 19 +- kimchi/src/tests/xor.rs | 4 +- kimchi/src/verifier.rs | 4 +- poly-commitment/src/commitment.rs | 2 +- proof-systems-vendors | 2 +- signer/src/schnorr.rs | 2 +- tools/kimchi-visu/Cargo.toml | 4 +- 52 files changed, 1599 insertions(+), 432 deletions(-) create mode 100644 circuit-construction/Cargo.toml create mode 100644 circuit-construction/src/constants.rs create mode 100644 circuit-construction/src/lib.rs create mode 100644 circuit-construction/src/prover.rs create mode 100644 circuit-construction/src/tests/example_proof.rs create mode 100644 circuit-construction/src/writer.rs diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 42d1937a59..b4581fab8f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -32,7 +32,7 @@ jobs: - name: Checkout PR uses: actions/checkout@v4.1.1 with: - submodules: true + submodules: true # as action-rs does not seem to be maintained anymore, building from # scratch the environment using rustup diff --git a/Cargo.lock b/Cargo.lock index bd6e73cab4..1a1ab03646 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -63,10 +63,10 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "400bd3a79c741b1832f1416d4373ae077ef82ca14a8b4cee1248a2f11c8b9172" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", "hex", "num-bigint", "num-integer", @@ -77,41 +77,15 @@ dependencies = [ "sha2", ] -[[package]] -name = "ark-bn254" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea691771ebbb28aea556c044e2e5c5227398d840cee0c34d4d20fa8eb2689e8c" -dependencies = [ - "ark-ec 0.3.0", - "ark-ff 0.3.0", - "ark-std 0.3.0", -] - [[package]] name = "ark-bn254" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-std 0.4.0", -] - -[[package]] -name = "ark-ec" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea978406c4b1ca13c2db2373b05cc55429c3575b8b21f1b9ee859aa5b03dd42" -dependencies = [ - "ark-ff 0.3.0", - "ark-serialize 0.3.0", - "ark-std 0.3.0", - "derivative", - "num-traits", - "rayon", - "zeroize", + "ark-ec", + "ark-ff", + "ark-std", ] [[package]] @@ -120,10 +94,10 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" dependencies = [ - "ark-ff 0.4.2", - "ark-poly 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", "derivative", "hashbrown 0.13.2", "itertools", @@ -132,56 +106,27 @@ dependencies = [ "zeroize", ] -[[package]] -name = "ark-ff" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" 
-dependencies = [ - "ark-ff-asm 0.3.0", - "ark-ff-macros 0.3.0", - "ark-serialize 0.3.0", - "ark-std 0.3.0", - "derivative", - "num-bigint", - "num-traits", - "paste", - "rayon", - "rustc_version 0.3.3", - "zeroize", -] - [[package]] name = "ark-ff" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" dependencies = [ - "ark-ff-asm 0.4.2", - "ark-ff-macros 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", "derivative", - "digest 0.10.7", + "digest", "itertools", "num-bigint", "num-traits", "paste", "rayon", - "rustc_version 0.4.0", + "rustc_version", "zeroize", ] -[[package]] -name = "ark-ff-asm" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" -dependencies = [ - "quote 1.0.29", - "syn 1.0.109", -] - [[package]] name = "ark-ff-asm" version = "0.4.2" @@ -192,18 +137,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "ark-ff-macros" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" -dependencies = [ - "num-bigint", - "num-traits", - "quote 1.0.29", - "syn 1.0.109", -] - [[package]] name = "ark-ff-macros" version = "0.4.2" @@ -217,68 +150,32 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "ark-poly" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" -dependencies = [ - "ark-ff 0.3.0", - "ark-serialize 0.3.0", - "ark-std 0.3.0", - "derivative", - "hashbrown 0.11.2", - "rayon", -] - [[package]] name = "ark-poly" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" dependencies = [ - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", + "ark-ff", + "ark-serialize", + "ark-std", "derivative", "hashbrown 0.13.2", "rayon", ] -[[package]] -name = "ark-serialize" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" -dependencies = [ - "ark-serialize-derive 0.3.0", - "ark-std 0.3.0", - "digest 0.9.0", -] - [[package]] name = "ark-serialize" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ - "ark-serialize-derive 0.4.2", - "ark-std 0.4.0", - "digest 0.10.7", + "ark-serialize-derive", + "ark-std", + "digest", "num-bigint", ] -[[package]] -name = "ark-serialize-derive" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" -dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 1.0.109", -] - [[package]] name = "ark-serialize-derive" version = "0.4.2" @@ -290,17 +187,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "ark-std" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" -dependencies = [ - "num-traits", - "rand", - "rayon", -] - [[package]] name = "ark-std" version = "0.4.0" @@ 
-318,9 +204,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83c22c2469f93dfcace9a98baabb7af1bc0c40de82c07cffbc0deba4acf41a90" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-std 0.4.0", + "ark-ec", + "ark-ff", + "ark-std", ] [[package]] @@ -473,7 +359,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -846,15 +732,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.7" @@ -909,8 +786,8 @@ dependencies = [ name = "export_test_vectors" version = "0.1.0" dependencies = [ - "ark-ff 0.4.2", - "ark-serialize 0.4.2", + "ark-ff", + "ark-serialize", "hex", "mina-curves", "mina-poseidon", @@ -1040,8 +917,8 @@ checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" name = "groupmap" version = "0.1.0" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", + "ark-ec", + "ark-ff", "mina-curves", "rand", ] @@ -1052,15 +929,6 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash 0.7.6", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -1260,11 +1128,11 @@ dependencies = [ name = "kimchi" version = "0.1.0" dependencies = [ - "ark-bn254 0.3.0", - "ark-ec 0.3.0", - "ark-ff 0.3.0", - "ark-poly 0.3.0", - "ark-serialize 0.3.0", + "ark-bn254", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", "blake2", "colored", "criterion", @@ -1306,8 +1174,8 @@ dependencies = [ name = "kimchi-visu" version = "0.1.0" dependencies = [ - "ark-ec 0.3.0", - "ark-ff 0.3.0", + "ark-ec", + "ark-ff", "kimchi", "mina-curves", "mina-poseidon", @@ -1452,10 +1320,10 @@ name = "mina-curves" version = "0.1.0" dependencies = [ "ark-algebra-test-templates", - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", "ark-test-curves", "rand", ] @@ -1464,7 +1332,7 @@ dependencies = [ name = "mina-hasher" version = "0.1.0" dependencies = [ - "ark-ff 0.4.2", + "ark-ff", "bitvec", "mina-curves", "mina-poseidon", @@ -1477,10 +1345,10 @@ dependencies = [ name = "mina-poseidon" version = "0.1.0" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-poly 0.4.2", - "ark-serialize 0.4.2", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", "hex", "mina-curves", "o1-utils", @@ -1498,8 +1366,8 @@ dependencies = [ name = "mina-signer" version = "0.1.0" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", + "ark-ec", + "ark-ff", "bitvec", "blake2", "bs58", @@ -1667,10 +1535,10 @@ dependencies = [ name = "o1-utils" version = "0.1.0" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-poly 0.4.2", - "ark-serialize 0.4.2", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", "bcs", "hex", "mina-curves", @@ -1928,11 +1796,11 @@ dependencies = [ name = "poly-commitment" version = "0.1.0" 
dependencies = [ - "ark-bn254 0.4.0", - "ark-ec 0.4.2", - "ark-ff 0.4.2", - "ark-poly 0.4.2", - "ark-serialize 0.4.2", + "ark-bn254", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", "blake2", "colored", "groupmap", @@ -2219,22 +2087,13 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" -[[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" -dependencies = [ - "semver 0.11.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.19", + "semver", ] [[package]] @@ -2327,30 +2186,12 @@ dependencies = [ "cc", ] -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "serde" version = "1.0.171" @@ -2422,7 +2263,7 @@ checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] @@ -2730,8 +2571,8 @@ dependencies = [ name = "turshi" version = "0.1.0" dependencies = [ - "ark-ec 0.4.2", - "ark-ff 0.4.2", + "ark-ec", + "ark-ff", "hex", "mina-curves", "o1-utils", diff --git a/book/src/specs/kimchi.md b/book/src/specs/kimchi.md index 91852cdbb0..20ea9094df 100644 --- a/book/src/specs/kimchi.md +++ b/book/src/specs/kimchi.md @@ -2029,7 +2029,7 @@ pub struct ProofEvaluations { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct LookupCommitments { +pub struct LookupCommitments { /// Commitments to the sorted lookup table polynomial (may have chunks) pub sorted: Vec>, /// Commitment to the lookup aggregation polynomial @@ -2042,7 +2042,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: [PolyComm; COLUMNS], /// The commitment to the permutation polynomial @@ -2057,7 +2057,7 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof pub commitments: ProverCommitments, @@ -2085,7 +2085,7 @@ pub struct ProverProof { #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] pub struct RecursionChallenge 
 where
-    G: AffineCurve,
+    G: AffineRepr,
 {
     /// Vector of scalar field elements
     #[serde_as(as = "Vec")]
diff --git a/circuit-construction/Cargo.toml b/circuit-construction/Cargo.toml
new file mode 100644
index 0000000000..3e60cb706c
--- /dev/null
+++ b/circuit-construction/Cargo.toml
@@ -0,0 +1,47 @@
+[package]
+name = "circuit-construction"
+version = "0.1.0"
+description = "A simple circuit writer for kimchi"
+repository = "https://github.com/o1-labs/proof-systems"
+edition = "2021"
+license = "Apache-2.0"
+homepage = "https://o1-labs.github.io/proof-systems/"
+documentation = "https://o1-labs.github.io/proof-systems/rustdoc/"
+readme = "../README.md"
+
+[lib]
+path = "src/lib.rs"
+bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options)
+
+[dependencies]
+ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] }
+ark-ec = { version = "0.4.2", features = [ "parallel" ] }
+ark-poly = { version = "0.4.2", features = [ "parallel" ] }
+ark-serialize = "0.4.2"
+blake2 = "0.10.0"
+num-derive = "0.3"
+num-traits = "0.2"
+itertools = "0.10.3"
+rand = "0.8.0"
+rand_core = "0.6.3"
+rayon = "1.5.0"
+rmp-serde = "1.0.0"
+serde = "1.0.130"
+serde_with = "1.10.0"
+thiserror = "1.0.30"
+
+poly-commitment = { path = "../poly-commitment", version = "0.1.0" }
+groupmap = { path = "../groupmap", version = "0.1.0" }
+mina-curves = { path = "../curves", version = "0.1.0" }
+o1-utils = { path = "../utils", version = "0.1.0" }
+mina-poseidon = { path = "../poseidon", version = "0.1.0" }
+kimchi = { path = "../kimchi", version = "0.1.0" }
+
+[dev-dependencies]
+proptest = "1.0.0"
+proptest-derive = "0.3.0"
+colored = "2.0.0"
+
+# benchmarks
+criterion = "0.3"
+iai = "0.1"
diff --git a/circuit-construction/src/constants.rs b/circuit-construction/src/constants.rs
new file mode 100644
index 0000000000..803f18a9c0
--- /dev/null
+++ b/circuit-construction/src/constants.rs
@@ -0,0 +1,44 @@
+use ark_ec::AffineRepr;
+use ark_ff::Field;
+use kimchi::curve::KimchiCurve;
+use mina_curves::pasta::{Fp, Fq, Pallas as PallasAffine, Vesta as VestaAffine};
+use mina_poseidon::poseidon::ArithmeticSpongeParams;
+use poly_commitment::{commitment::CommitmentCurve, srs::endos};
+
+/// The type of possible constants in the circuit
+#[derive(Clone)]
+pub struct Constants {
+    pub poseidon: &'static ArithmeticSpongeParams,
+    pub endo: F,
+    pub base: (F, F),
+}
+
+/// Constants for the base field of Pallas
+///
+/// # Panics
+///
+/// Will panic if `PallasAffine::generator()` returns None.
+pub fn fp_constants() -> Constants {
+    let (endo_q, _endo_r) = endos::();
+    let base = PallasAffine::generator().to_coordinates().unwrap();
+    Constants {
+        poseidon: VestaAffine::sponge_params(),
+        endo: endo_q,
+        base,
+    }
+}
+
+/// Constants for the base field of Vesta
+///
+/// # Panics
+///
+/// Will panic if `VestaAffine::generator()` returns None.
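The two constructors in this new module are mirror images: each pairs one Pasta curve's generator coordinates and endo coefficient with the Poseidon sponge parameters of the other curve, because the two curves share fields crosswise (Vesta's scalar field is Pallas's base field, and vice versa). A hypothetical usage sketch, assuming the crate builds under the name `circuit_construction`:

```rust
use circuit_construction::constants::fp_constants;

fn main() {
    let constants = fp_constants();
    let (x, y) = constants.base; // Pallas generator coordinates
    println!("generator = ({x}, {y})");
    println!("endo coefficient = {}", constants.endo);
}
```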
+pub fn fq_constants() -> Constants { + let (endo_q, _endo_r) = endos::(); + let base = VestaAffine::generator().to_coordinates().unwrap(); + Constants { + poseidon: PallasAffine::sponge_params(), + endo: endo_q, + base, + } +} diff --git a/circuit-construction/src/lib.rs b/circuit-construction/src/lib.rs new file mode 100644 index 0000000000..27618ed679 --- /dev/null +++ b/circuit-construction/src/lib.rs @@ -0,0 +1,33 @@ +#![doc = include_str!("../../README.md")] + +/// Definition of possible constants in circuits +pub mod constants; +/// This contains the prover functions, ranging from curves definitions to prover index and proof generation +pub mod prover; +/// This is the actual writer with all of the available functions to set up a circuit and its corresponding constraint system +pub mod writer; + +#[cfg(test)] +mod tests; + +/// This contains the Kimchi dependencies being used +pub mod prologue { + pub use super::constants::{fp_constants, fq_constants, Constants}; + pub use super::prover::{generate_prover_index, prove, CoordinateCurve}; + pub use super::writer::{Cs, Var}; + pub use ark_ec::{AffineRepr, CurveGroup}; + pub use ark_ff::{FftField, PrimeField, UniformRand}; + pub use ark_poly::{EvaluationDomain, Radix2EvaluationDomain}; + pub use groupmap::GroupMap; + pub use kimchi::verifier::verify; + pub use mina_curves::pasta::{ + Fp, Pallas as PallasAffine, Vesta as VestaAffine, VestaParameters, + }; + pub use mina_poseidon::{ + constants::*, + poseidon::{ArithmeticSponge, Sponge}, + sponge::{DefaultFqSponge, DefaultFrSponge}, + }; + pub use poly_commitment::{commitment::CommitmentCurve, srs::SRS}; + pub use std::sync::Arc; +} diff --git a/circuit-construction/src/prover.rs b/circuit-construction/src/prover.rs new file mode 100644 index 0000000000..2841c8dfbd --- /dev/null +++ b/circuit-construction/src/prover.rs @@ -0,0 +1,136 @@ +use crate::writer::{Cs, GateSpec, System, Var, WitnessGenerator}; +use ark_ec::AffineRepr; +use ark_ff::{One, PrimeField, Zero}; +use kimchi::{ + circuits::{constraints::ConstraintSystem, gate::GateType, wires::COLUMNS}, + curve::KimchiCurve, + plonk_sponge::FrSponge, + proof::ProverProof, + prover_index::ProverIndex, +}; +use mina_poseidon::FqSponge; +use poly_commitment::{ + commitment::{CommitmentCurve, PolyComm}, + srs::{endos, SRS}, +}; +use std::array; + +/// Given an index, a group map, custom blinders for the witness, a public input vector, and a circuit `main`, it creates a proof. +/// +/// # Panics +/// +/// Will panic if recursive proof creation returns `ProverError`. 
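`prove` below runs the caller's `main` closure a second time, now against a `WitnessGenerator`, after `generate_prover_index` has already run it against a `System`. The design's point is that circuit shape and witness always come from one source of truth. A self-contained toy of that two-pass pattern; none of these names come from the crate, only the shape is:

```rust
trait Builder {
    fn var(&mut self, value: Option<u64>) -> usize;
}

/// Compilation pass: allocates variable indices, ignores values.
struct Compiler {
    n: usize,
}

/// Proving pass: records a concrete value for every variable.
struct Witness {
    values: Vec<u64>,
}

impl Builder for Compiler {
    fn var(&mut self, _value: Option<u64>) -> usize {
        let i = self.n;
        self.n += 1;
        i
    }
}

impl Builder for Witness {
    fn var(&mut self, value: Option<u64>) -> usize {
        self.values.push(value.expect("witness mode needs a value"));
        self.values.len() - 1
    }
}

// The same circuit body serves both passes.
fn circuit<B: Builder>(b: &mut B, secret: Option<u64>) {
    let _x = b.var(secret);
    let _y = b.var(secret.map(|s| s * s));
}

fn main() {
    let mut c = Compiler { n: 0 };
    circuit(&mut c, None); // compile: no values needed
    let mut w = Witness { values: vec![] };
    circuit(&mut w, Some(7)); // prove: values filled in
    assert_eq!(c.n, w.values.len());
}
```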
+pub fn prove( + index: &ProverIndex, + group_map: &G::Map, + blinders: Option<[Option; COLUMNS]>, + public_input: &[G::ScalarField], + mut main: H, +) -> ProverProof +where + H: FnMut(&mut WitnessGenerator, Vec>), + G::BaseField: PrimeField, + G: KimchiCurve, + EFqSponge: Clone + FqSponge, + EFrSponge: FrSponge, +{ + // create the witness generator + let mut gen: WitnessGenerator = WitnessGenerator::new(public_input); + + // run the witness generation + let public_vars = public_input + .iter() + .map(|x| Var { + index: 0, + value: Some(*x), + }) + .collect(); + main(&mut gen, public_vars); + + // get the witness columns + gen.curr_gate_count(); + let columns = gen.columns(); + + // custom blinders for the witness commitment + let blinders: [Option>; COLUMNS] = match blinders { + None => array::from_fn(|_| None), + Some(bs) => array::from_fn(|i| { + bs[i].map(|b| PolyComm { + unshifted: vec![b], + shifted: None, + }) + }), + }; + + // create the proof + ProverProof::create_recursive::( + group_map, + columns, + &[], + index, + vec![], + Some(blinders), + ) + .unwrap() +} + +/// Creates the prover index on input an `srs`, used `constants`, parameters for Poseidon, number of public inputs, and a specific circuit +/// +/// # Panics +/// +/// Will panic if `constraint_system` is not built with `public` input. +pub fn generate_prover_index( + srs: std::sync::Arc>, + public: usize, + main: Circuit, +) -> ProverIndex +where + Circuit: FnOnce(&mut System, Vec>), + Curve: KimchiCurve, +{ + let mut system: System = System::default(); + let z = Curve::ScalarField::zero(); + + // create public input variables + let public_input_row = vec![Curve::ScalarField::one(), z, z, z, z, z, z, z, z, z]; + let public_input: Vec<_> = (0..public) + .map(|_| { + let v = system.var(|| panic!("fail")); + + system.gate(GateSpec { + typ: GateType::Generic, + row: vec![Some(v)], + coeffs: public_input_row.clone(), + }); + v + }) + .collect(); + + main(&mut system, public_input); + + let gates = system.gates(); + + // Other base field = self scalar field + let (endo_q, _endo_r) = endos::(); + //let (endo_q, _endo_r) = Curve::endos(); + + let constraint_system = ConstraintSystem::::create(gates) + .public(public) + .build() + // TODO: return a Result instead of panicking + .expect("couldn't construct constraint system"); + + ProverIndex::::create(constraint_system, endo_q, srs) +} + +/// Handling coordinates in an affine curve +pub trait CoordinateCurve: AffineRepr { + /// Returns the coordinates in the curve as two points of the base field + fn to_coords(&self) -> Option<(Self::BaseField, Self::BaseField)>; +} + +impl CoordinateCurve for G { + fn to_coords(&self) -> Option<(Self::BaseField, Self::BaseField)> { + CommitmentCurve::to_coordinates(self) + } +} diff --git a/circuit-construction/src/tests/example_proof.rs b/circuit-construction/src/tests/example_proof.rs new file mode 100644 index 0000000000..54fa787bb2 --- /dev/null +++ b/circuit-construction/src/tests/example_proof.rs @@ -0,0 +1,103 @@ +use crate::prologue::*; +use kimchi::curve::KimchiCurve; +use std::ops::Mul; + +type SpongeQ = DefaultFqSponge; +type SpongeR = DefaultFrSponge; + +pub struct Witness { + pub s: G::ScalarField, + pub preimage: G::BaseField, +} + +// Prove knowledge of discrete log and poseidon preimage of a hash +pub fn circuit< + F: PrimeField + FftField, + G: AffineRepr + CoordinateCurve, + Sys: Cs, +>( + constants: &Constants, + // The witness + witness: Option<&Witness>, + sys: &mut Sys, + public_input: Vec>, +) { + let zero = 
sys.constant(F::zero()); + + let constant_curve_pt = |sys: &mut Sys, (x, y)| { + let x = sys.constant(x); + let y = sys.constant(y); + (x, y) + }; + + let base = constant_curve_pt(sys, G::generator().to_coords().unwrap()); + let scalar = sys.scalar(G::ScalarField::MODULUS_BIT_SIZE as usize, || { + witness.as_ref().unwrap().s + }); + let actual = sys.scalar_mul(zero, base, scalar); + + let preimage = sys.var(|| witness.as_ref().unwrap().preimage); + let actual_hash = sys.poseidon(constants, vec![preimage, zero, zero])[0]; + + sys.assert_eq(actual.0, public_input[0]); + sys.assert_eq(actual.1, public_input[1]); + sys.assert_eq(actual_hash, public_input[2]); +} + +const PUBLIC_INPUT_LENGTH: usize = 3; + +#[test] +fn test_example_circuit() { + use mina_curves::pasta::Pallas; + use mina_curves::pasta::Vesta; + // create SRS + let srs = { + let mut srs = SRS::::create(1 << 7); // 2^7 = 128 + srs.add_lagrange_basis(Radix2EvaluationDomain::new(srs.g.len()).unwrap()); + Arc::new(srs) + }; + + let proof_system_constants = fp_constants(); + + // generate circuit and index + let prover_index = generate_prover_index::<_, _>(srs, PUBLIC_INPUT_LENGTH, |sys, p| { + circuit::<_, Pallas, _>(&proof_system_constants, None, sys, p) + }); + + let group_map = ::Map::setup(); + + let mut rng = rand::thread_rng(); + + // create witness + let private_key = ::ScalarField::rand(&mut rng); + let preimage = ::BaseField::rand(&mut rng); + + let witness = Witness { + s: private_key, + preimage, + }; + + // create public input + let public_key = Pallas::generator().mul(private_key).into_affine(); + let hash = { + let mut s: ArithmeticSponge<_, PlonkSpongeConstantsKimchi> = + ArithmeticSponge::new(Vesta::sponge_params()); + s.absorb(&[preimage]); + s.squeeze() + }; + + // generate proof + let public_input = vec![public_key.x, public_key.y, hash]; + let proof = prove::( + &prover_index, + &group_map, + None, + &public_input, + |sys, p| circuit::(&proof_system_constants, Some(&witness), sys, p), + ); + + // verify proof + let verifier_index = prover_index.verifier_index(); + + verify::<_, SpongeQ, SpongeR>(&group_map, &verifier_index, &proof, &public_input).unwrap(); +} diff --git a/circuit-construction/src/writer.rs b/circuit-construction/src/writer.rs new file mode 100644 index 0000000000..0caec23c1c --- /dev/null +++ b/circuit-construction/src/writer.rs @@ -0,0 +1,1007 @@ +use ark_ff::{BigInteger, FftField, PrimeField}; +use kimchi::circuits::{ + gate::{CircuitGate, GateType}, + polynomials::generic::{ + DOUBLE_GENERIC_COEFFS, DOUBLE_GENERIC_REGISTERS, GENERIC_COEFFS, GENERIC_REGISTERS, + }, + wires::{Wire, COLUMNS}, +}; +use mina_poseidon::{ + constants::{PlonkSpongeConstantsKimchi, SpongeConstants}, + permutation::full_round, +}; +use std::array; +use std::collections::HashMap; + +use crate::constants::Constants; + +/// A variable in our circuit. +/// Variables are assigned with an index to differentiate from each other. +/// Optionally, they can eventually take as value a field element. +#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)] +pub struct Var { + pub index: usize, + pub value: Option, +} + +impl Var { + /// Returns the value inside a variable [Var]. + /// + /// # Panics + /// + /// Will panic if it is `None`. + pub fn val(&self) -> F { + self.value.unwrap() + } +} + +/// A variable that corresponds to scalar that is shifted by a certain amount. +pub struct ShiftedScalar(Var); + +/// Specifies a gate within a circuit. 
+/// A gate will have a type,
+/// will refer to a row of variables,
+/// and will have an associated vector of coefficients.
+pub struct GateSpec {
+    pub typ: GateType,
+    pub row: Vec>>,
+    pub coeffs: Vec,
+}
+
+impl GateSpec {
+    pub fn get_var_val_or(&self, col: usize, default: F) -> F {
+        match self.row.get(col) {
+            Some(Some(var)) => var.val(),
+            _ => default,
+        }
+    }
+
+    pub fn get_var_idx(&self, col: usize) -> Option {
+        match self.row.get(col) {
+            Some(Some(var)) => Some(var.index),
+            _ => None,
+        }
+    }
+}
+
+/// A set of gates within the circuit.
+/// It carries the index for the next available variable,
+/// and the vector of [`GateSpec`] created so far.
+/// It also keeps track of the queue of generic gates and cached constants.
+#[derive(Default)]
+pub struct System {
+    pub next_variable: usize,
+    pub generic_gate_queue: Vec>,
+    // pub equivalence_classes: HashMap>,
+    pub gates: Vec>,
+    pub cached_constants: HashMap>,
+}
+
+/// Carries a vector of rows corresponding to the witness, a queue of generic gates, and stores the cached constants
+#[derive(Default)]
+pub struct WitnessGenerator
+where
+    F: PrimeField,
+{
+    pub generic_gate_queue: Vec>,
+    pub rows: Vec>,
+    pub cached_constants: HashMap>,
+}
+
+impl WitnessGenerator
+where
+    F: PrimeField,
+{
+    /// Given a list of public inputs, creates the witness generator.
+    pub fn new(public_inputs: &[F]) -> Self {
+        let mut gen = Self::default();
+
+        for input in public_inputs {
+            let row = array::from_fn(|i| if i == 0 { *input } else { F::zero() });
+            gen.rows.push(row);
+        }
+
+        gen
+    }
+}
+
+/// A row is an array of [COLUMNS] elements
+type Row = [V; COLUMNS];
+
+/// This trait includes all the operations that can be executed
+/// by the elements in the circuits.
+/// It allows for different behaviours depending on the struct
+/// for which it is implemented.
+/// In particular, the circuit mode and the witness generation mode.
+pub trait Cs {
+    /// In cases where you want to create a free variable in the circuit,
+    /// as in the variable is not constrained _yet_
+    /// and can be anything that the prover wants.
+    /// For example, division can be implemented as:
+    ///
+    /// ```ignore
+    /// let a = sys.constant(5u32.into());
+    /// let b = sys.constant(10u32.into());
+    /// let c = sys.var(|| {
+    ///     b.value * a.value.inverse().unwrap()
+    /// });
+    /// sys.assert_eq(a * c, b);
+    /// ```
+    ///
+    fn var(&mut self, g: G) -> Var
+    where
+        G: FnOnce() -> F;
+
+    /// Returns the number of gates that the current [Self] contains.
+    fn curr_gate_count(&self) -> usize;
+
+    /// Returns a variable containing a field element as value that is
+    /// computed as the equivalent `BigInteger` number returned by
+    /// function `g`, only if the length is a multiple of 4.
+    fn endo_scalar(&mut self, length: usize, g: G) -> Var
+    where
+        G: FnOnce() -> N,
+    {
+        assert_eq!(length % 4, 0);
+
+        self.var(|| {
+            let y = g();
+            let bits = y.to_bits_le();
+            F::from_bigint(F::BigInt::from_bits_le(&bits)).unwrap()
+        })
+    }
+
+    /// This function creates a [`ShiftedScalar`] variable from a field element that is
+    /// returned by function `g()`, and a length that should be a multiple of 5.
+    fn scalar(&mut self, length: usize, g: G) -> ShiftedScalar
+    where
+        G: FnOnce() -> Fr,
+    {
+        assert_eq!(length % 5, 0);
+
+        let v = self.var(|| {
+            // TODO: No need to recompute this each time.
+ let two = Fr::from(2u64); + let shift = Fr::one() + two.pow([length as u64]); + + let x = g(); + // x = 2 y + shift + // y = (x - shift) / 2 + // TODO: Could cache value of 1/2 to avoid division + let y = (x - shift) / two; + let bits = y.into_bigint().to_bits_le(); + F::from_bigint(F::BigInt::from_bits_le(&bits)).unwrap() + }); + ShiftedScalar(v) + } + + /// In circuit mode, adds a gate to the circuit. + /// In witness generation mode, adds the corresponding row to the witness. + fn gate(&mut self, g: GateSpec); + + /// Creates a `Generic` gate that constrains if two variables are equal. + /// This is done by setting `x1` in the left wire and `x2` in the right wire + /// with left coefficient `1` and right coefficient `-1`, so that `x1 - x2 = 0`. + // TODO: Optimize to use permutation argument. + fn assert_eq(&mut self, x1: Var, x2: Var) { + // | 0 | 1 | 2 | ... + // | x1 | x2 | 0 | ... + let vars = [Some(x1), Some(x2), None]; + + // constrain `x1 - x2 = 0` + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = F::one(); + coeffs[1] = -F::one(); + + self.generic(coeffs, vars); + } + + /// Checks if a constant `x` is already in the cached constants of `self` and returns it. + /// Otherwise, it creates a variable for it and caches it. + fn cached_constants(&mut self, x: F) -> Var; + + /// Creates a `Generic` gate to include a constant in the circuit, and returns the variable containing it. + /// It sets the left wire to be the variable containing the constant `x` and the rest to zero. + /// Then the left coefficient is set to one and the coefficient for constants is set to `-x`. + /// This way, the constraint `1 * x - x = 0` holds. + fn constant(&mut self, x: F) -> Var { + let v = self.cached_constants(x); + + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = F::one(); + coeffs[GENERIC_REGISTERS + 1] = -x; + + let vars = [Some(v), None, None]; + + self.generic(coeffs, vars); + + v + } + + /// Stores a generic gate until it can combine two of them + /// into a double generic gate. + fn generic_queue(&mut self, gate: GateSpec) -> Option>; + + /// Adds a generic gate. + /// + /// Warning: this assumes that some finalization occurs to flush + /// any queued generic gate. + fn generic(&mut self, coeffs: [F; GENERIC_COEFFS], vars: [Option>; GENERIC_REGISTERS]) { + let gate = GateSpec { + typ: GateType::Generic, + row: vars.to_vec(), + coeffs: coeffs.to_vec(), + }; + // we queue the single generic gate until we have two of them + if let Some(double_generic_gate) = self.generic_queue(gate) { + self.gate(double_generic_gate); + } + } + + /// Creates a `Generic` gate to constrain that a variable `v` is scaled by an `x` amount and returns it. + /// First, it creates a new variable with a scaled value (meaning, the value in `v` times `x`). + /// Then, it creates a row that sets the left wire to be `v` and the right wire to be the scaled variable. + /// Finally, it sets the left coefficient to `x` and the right coefficient to `-1`. + /// That way, the constraint `x * v - 1 * xv = 0` is created. + fn scale(&mut self, x: F, v: Var) -> Var { + let xv = self.var(|| v.val() * x); + + let vars = [Some(v), Some(xv), None]; + + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = x; + coeffs[1] = -F::one(); + + self.generic(coeffs, vars); + + xv + } + + /// Performs curve point addition. + /// It creates the corresponding `CompleteAdd` gate for the points `(x1, y1)` and `(x2,y2)` + /// and returns the third point resulting from the addition as a tuple of variables. 
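Before the gate-level version below, it may help to see the bare affine group law that `add_group` witnesses: a slope, then the new x, then the new y. A sketch over any `ark_ff` field, covering only the chord case `x1 != x2`; the gate itself handles doubling and the point at infinity via the `same_x`, `inf`, and `inf_z` witnesses:

```rust
use ark_ff::Field;

// (x3, y3) = (x1, y1) + (x2, y2) on a short Weierstrass curve, x1 != x2.
fn add_affine<F: Field>(x1: F, y1: F, x2: F, y2: F) -> (F, F) {
    let s = (y2 - y1) / (x2 - x1); // slope of the chord
    let x3 = s.square() - x1 - x2; // x3 = s^2 - x1 - x2
    let y3 = s * (x1 - x3) - y1;   // y3 = s * (x1 - x3) - y1
    (x3, y3)
}
```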
+    fn add_group(
+        &mut self,
+        zero: Var,
+        (x1, y1): (Var, Var),
+        (x2, y2): (Var, Var),
+    ) -> (Var, Var) {
+        let mut same_x_bool = false;
+        let same_x = self.var(|| {
+            let same_x = x1.val() == x2.val();
+            same_x_bool = same_x;
+            F::from(u64::from(same_x))
+        });
+
+        let inf = zero;
+        let x21_inv = self.var(|| {
+            if x1.val() == x2.val() {
+                F::zero()
+            } else {
+                (x2.val() - x1.val()).inverse().unwrap()
+            }
+        });
+
+        let s = self.var(|| {
+            if same_x_bool {
+                let x1_squared = x1.val().square();
+                (x1_squared.double() + x1_squared).div(y1.val().double())
+            } else {
+                (y2.val() - y1.val()) * x21_inv.val()
+            }
+        });
+
+        let inf_z = self.var(|| {
+            if y1.val() == y2.val() {
+                F::zero()
+            } else if same_x_bool {
+                (y2.val() - y1.val()).inverse().unwrap()
+            } else {
+                F::zero()
+            }
+        });
+
+        let x3 = self.var(|| s.val().square() - (x1.val() + x2.val()));
+
+        let y3 = self.var(|| s.val() * (x1.val() - x3.val()) - y1.val());
+
+        self.gate(GateSpec {
+            typ: GateType::CompleteAdd,
+            row: vec![
+                Some(x1),
+                Some(y1),
+                Some(x2),
+                Some(y2),
+                Some(x3),
+                Some(y3),
+                Some(inf),
+                Some(same_x),
+                Some(s),
+                Some(inf_z),
+                Some(x21_inv),
+            ],
+            coeffs: vec![],
+        });
+        (x3, y3)
+    }
+
+    /// Doubles one curve point `(x1, y1)`, using internally the `add_group()` function.
+    /// It creates a `CompleteAdd` gate for this point addition (with itself).
+    /// Returns a tuple of variables corresponding to the doubled point.
+    fn double(&mut self, zero: Var, (x1, y1): (Var, Var)) -> (Var, Var) {
+        self.add_group(zero, (x1, y1), (x1, y1))
+    }
+
+    /// Creates a `CompleteAdd` gate that checks whether a third point `(x3, y3)` is the addition
+    /// of the two first points `(x1, y1)` and `(x2, y2)`.
+    /// The difference between this function and `add_group()` is that in `assert_add_group` the
+    /// third point is given, whereas in the other one it is computed with the formula.
+    fn assert_add_group(
+        &mut self,
+        zero: Var,
+        (x1, y1): (Var, Var),
+        (x2, y2): (Var, Var),
+        (x3, y3): (Var, Var),
+    ) {
+        let mut same_x_bool = false;
+        let same_x = self.var(|| {
+            let same_x = x1.val() == x2.val();
+            same_x_bool = same_x;
+            F::from(u64::from(same_x))
+        });
+
+        let inf = zero;
+        let x21_inv = self.var(|| {
+            if x1.val() == x2.val() {
+                F::zero()
+            } else {
+                (x2.val() - x1.val()).inverse().unwrap()
+            }
+        });
+
+        let s = self.var(|| {
+            if same_x_bool {
+                let x1_squared = x1.val().square();
+                (x1_squared.double() + x1_squared).div(y1.val().double())
+            } else {
+                (y2.val() - y1.val()) * x21_inv.val()
+            }
+        });
+
+        let inf_z = self.var(|| {
+            if y1.val() == y2.val() {
+                F::zero()
+            } else if same_x_bool {
+                (y2.val() - y1.val()).inverse().unwrap()
+            } else {
+                F::zero()
+            }
+        });
+
+        self.gate(GateSpec {
+            typ: GateType::CompleteAdd,
+            row: vec![
+                Some(x1),
+                Some(y1),
+                Some(x2),
+                Some(y2),
+                Some(x3),
+                Some(y3),
+                Some(inf),
+                Some(same_x),
+                Some(s),
+                Some(inf_z),
+                Some(x21_inv),
+            ],
+            coeffs: vec![],
+        });
+    }
+
+    /// This function is used to include conditionals in circuits.
+    /// It creates three `Generic` gates to simulate the logic of the conditional.
+    /// It receives as input:
+    /// - `b`: the branch
+    /// - `t`: the value for the true branch
+    /// - `f`: the value for the false branch
+    /// And simulates the following equation: `res = b * ( t - f ) + f`
+    /// ( when the condition is false, `res = f` )
+    /// ( when the condition is true, `res = t` )
+    /// This is constrained using three `Generic` gates
+    /// 1. Constrain `delta = t - f`
+    /// 2. Constrain `res1 = b * delta`
+    /// 3. Constrain `res = res1 + f`
+    /// For (1):
+    /// - Creates a row with left wire `t`, right wire `f`, and output wire `delta`
+    /// - Assigns `1` to the left coefficient, `-1` to the right coefficient, and `-1` to the output coefficient.
+    /// - That way, it creates a first gate constraining: `1 * t - 1 * f - delta = 0`
+    /// For (2):
+    /// - Creates a row with left wire `b`, right wire `delta`, and output wire `res1`.
+    /// - Assigns `-1` to the output coefficient, and `1` to the multiplication coefficient.
+    /// - That way, it creates a second gate constraining: `-1 * res1 + 1 * b * delta = 0`
+    /// For (3):
+    /// - Creates a row with left wire `res1`, right wire `f`, and output wire `res`.
+    /// - Assigns `1` to the left coefficient, `1` to the right coefficient, and `-1` to the output coefficient.
+    /// - That way, it creates a third gate constraining: `1 * res1 + 1 * f - 1 * res = 0`
+    fn cond_select(&mut self, b: Var, t: Var, f: Var) -> Var {
+        // Could be more efficient. Currently uses three constraints :(
+        // delta = t - f
+        // res1 = b * delta
+        // res = res1 + f
+
+        let delta = self.var(|| t.val() - f.val());
+        let res1 = self.var(|| b.val() * delta.val());
+        let res = self.var(|| f.val() + res1.val());
+
+        let row1 = [Some(t), Some(f), Some(delta)];
+        let mut c1 = [F::zero(); GENERIC_COEFFS];
+        c1[0] = F::one();
+        c1[1] = -F::one();
+        c1[2] = -F::one();
+
+        self.generic(c1, row1);
+
+        let row2 = [Some(b), Some(delta), Some(res1)];
+
+        let mut c2 = [F::zero(); GENERIC_COEFFS];
+        c2[0] = F::zero();
+        c2[1] = F::zero();
+        c2[2] = -F::one();
+        c2[3] = F::one();
+
+        self.generic(c2, row2);
+
+        let row3 = [Some(res1), Some(f), Some(res)];
+        let mut c3 = [F::zero(); GENERIC_COEFFS];
+        c3[0] = F::one();
+        c3[1] = F::one();
+        c3[2] = -F::one();
+
+        self.generic(c3, row3);
+
+        res
+    }
+
+    /// Performs a scalar multiplication between a [`ShiftedScalar`] and a point `(xt, yt)`.
+    /// This function creates 51 pairs of rows.
+    fn scalar_mul(
+        &mut self,
+        zero: Var,
+        (xt, yt): (Var, Var),
+        scalar: ShiftedScalar,
+    ) -> (Var, Var) {
+        let num_bits = 255;
+        let num_row_pairs = num_bits / 5;
+        let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![]);
+
+        let acc0 = self.add_group(zero, (xt, yt), (xt, yt));
+
+        let _ = self.var(|| {
+            witness = array::from_fn(|_| vec![F::zero(); 2 * num_row_pairs]);
+            // Creates a vector of bits from the value inside the scalar, with the most significant bit upfront
+            let bits_msb: Vec = scalar
+                .0
+                .val()
+                .into_bigint()
+                .to_bits_le()
+                .iter()
+                .take(num_bits)
+                .copied()
+                .rev()
+                .collect();
+            // Creates a witness for the VarBaseMul gate.
+            kimchi::circuits::polynomials::varbasemul::witness(
+                &mut witness,
+                0,
+                (xt.val(), yt.val()),
+                &bits_msb,
+                (acc0.0.val(), acc0.1.val()),
+            );
+            F::zero()
+        });
+
+        // For each of the pairs, it generates a VarBaseMul and a Zero gate.
+ let mut res = None; + for i in 0..num_row_pairs { + let mut row1: [_; COLUMNS] = array::from_fn(|j| self.var(|| witness[j][2 * i])); + let row2: [_; COLUMNS] = array::from_fn(|j| self.var(|| witness[j][2 * i + 1])); + + row1[0] = xt; + row1[1] = yt; + if i == 0 { + row1[2] = acc0.0; + row1[3] = acc0.1; + row1[4] = zero; + } + if i == num_row_pairs - 1 { + row1[5] = scalar.0; + res = Some((row2[0], row2[1])); + } + + self.gate(GateSpec { + row: row1.into_iter().map(Some).collect(), + typ: GateType::VarBaseMul, + coeffs: vec![], + }); + + self.gate(GateSpec { + row: row2.into_iter().map(Some).collect(), + typ: GateType::Zero, + coeffs: vec![], + }); + } + + res.unwrap() + } + + /// Creates an endoscalar multiplication gadget with `length_in_bits/4 + 1` gates. + /// For each row, it adds one `EndoMul` gate. The gadget is finalized with a `Zero` gate. + /// + /// | row | `GateType` | + /// | --- | ---------- | + /// | i | `EndoMul` | + /// | i+1 | `EndoMul` | + /// | ... | ... | + /// | r | `EndoMul` | + /// | r+1 | `Zero` | + /// + fn endo( + &mut self, + zero: Var, + constants: &Constants, + (xt, yt): (Var, Var), + scalar: Var, + length_in_bits: usize, + ) -> (Var, Var) { + let bits_per_row = 4; + let rows = length_in_bits / 4; + assert_eq!(0, length_in_bits % 4); + + let mut bits_ = vec![]; + let bits: Vec<_> = (0..length_in_bits) + .map(|i| { + self.var(|| { + if bits_.is_empty() { + bits_ = scalar + .val() + .into_bigint() + .to_bits_le() + .iter() + .take(length_in_bits) + .copied() + .rev() + .collect(); + } + F::from(u64::from(bits_[i])) + }) + }) + .collect(); + + let one = F::one(); + + let endo = constants.endo; + let mut acc = { + let phip = (self.scale(endo, xt), yt); + let phip_p = self.add_group(zero, phip, (xt, yt)); + self.double(zero, phip_p) + }; + + let mut n_acc = zero; + + // TODO: Could be more efficient + for i in 0..rows { + let b1 = bits[i * bits_per_row]; + let b2 = bits[i * bits_per_row + 1]; + let b3 = bits[i * bits_per_row + 2]; + let b4 = bits[i * bits_per_row + 3]; + + let (xp, yp) = acc; + + let xq1 = self.var(|| (one + (endo - one) * b1.val()) * xt.val()); + let yq1 = self.var(|| (b2.val().double() - one) * yt.val()); + + let s1 = self.var(|| (yq1.val() - yp.val()) / (xq1.val() - xp.val())); + let s1_squared = self.var(|| s1.val().square()); + // (2*xp – s1^2 + xq) * ((xp – xr) * s1 + yr + yp) = (xp – xr) * 2*yp + // => 2 yp / (2*xp – s1^2 + xq) = s1 + (yr + yp) / (xp – xr) + // => 2 yp / (2*xp – s1^2 + xq) - s1 = (yr + yp) / (xp – xr) + // + // s2 := 2 yp / (2*xp – s1^2 + xq) - s1 + // + // (yr + yp)^2 = (xp – xr)^2 * (s1^2 – xq1 + xr) + // => (s1^2 – xq1 + xr) = (yr + yp)^2 / (xp – xr)^2 + // + // => xr = s2^2 - s1^2 + xq + // => yr = s2 * (xp - xr) - yp + let s2 = self.var(|| { + yp.val().double() / (xp.val().double() + xq1.val() - s1_squared.val()) - s1.val() + }); + + // (xr, yr) + let xr = self.var(|| xq1.val() + s2.val().square() - s1_squared.val()); + let yr = self.var(|| (xp.val() - xr.val()) * s2.val() - yp.val()); + + let xq2 = self.var(|| (one + (endo - one) * b3.val()) * xt.val()); + let yq2 = self.var(|| (b4.val().double() - one) * yt.val()); + let s3 = self.var(|| (yq2.val() - yr.val()) / (xq2.val() - xr.val())); + let s3_squared = self.var(|| s3.val().square()); + let s4 = self.var(|| { + yr.val().double() / (xr.val().double() + xq2.val() - s3_squared.val()) - s3.val() + }); + + let xs = self.var(|| xq2.val() + s4.val().square() - s3_squared.val()); + let ys = self.var(|| (xr.val() - xs.val()) * s4.val() - yr.val()); + + self.gate(GateSpec { 
+ typ: GateType::EndoMul, + row: vec![ + Some(xt), + Some(yt), + None, + None, + Some(xp), + Some(yp), + Some(n_acc), + Some(xr), + Some(yr), + Some(s1), + Some(s3), + Some(b1), + Some(b2), + Some(b3), + Some(b4), + ], + coeffs: vec![], + }); + + acc = (xs, ys); + + n_acc = self.var(|| { + let mut n_acc = n_acc.val(); + n_acc.double_in_place(); + n_acc += b1.val(); + n_acc.double_in_place(); + n_acc += b2.val(); + n_acc.double_in_place(); + n_acc += b3.val(); + n_acc.double_in_place(); + n_acc += b4.val(); + n_acc + }); + } + + // TODO: use a generic gate with zero coeffs + self.gate(GateSpec { + typ: GateType::Zero, + row: vec![ + None, + None, + None, + None, + Some(acc.0), + Some(acc.1), + Some(scalar), + None, + None, + None, + None, + None, + None, + None, + None, + ], + coeffs: vec![], + }); + acc + } + + /// Checks that a string of bits (with LSB first) correspond to the value inside variable `x`. + /// It splits the bitstring across rows, where each row takes care of 8 crumbs of 2 bits each. + /// + fn assert_pack(&mut self, zero: Var, x: Var, bits_lsb: &[Var]) { + let crumbs_per_row = 8; + let bits_per_row = 2 * crumbs_per_row; + assert_eq!(bits_lsb.len() % bits_per_row, 0); + let num_rows = bits_lsb.len() / bits_per_row; + + // Reverse string of bits to have MSB first in the vector + let bits_msb: Vec<_> = bits_lsb.iter().rev().collect(); + + let mut a = self.var(|| F::from(2u64)); + let mut b = self.var(|| F::from(2u64)); + let mut n = zero; + + let one = F::one(); + let neg_one = -one; + + // For each of the chunks, get the corresponding bits + for (i, row_bits) in bits_msb[..].chunks(bits_per_row).enumerate() { + let mut row: [Var; COLUMNS] = array::from_fn(|_| self.var(|| F::zero())); + row[0] = n; + row[2] = a; + row[3] = b; + + // For this row, get crumbs of 2 bits each + for (j, crumb_bits) in row_bits.chunks(2).enumerate() { + // Remember the MSB of each crumb is in the 0 index + let b0 = crumb_bits[1]; // less valued + let b1 = crumb_bits[0]; // more valued + + // Value of the 2-bit crumb in MSB + let crumb = self.var(|| b0.val() + b1.val().double()); + // Stores the 8 of them in positions [6..13] of the row + row[6 + j] = crumb; + + a = self.var(|| { + let x = a.val().double(); + if b1.val().is_zero() { + x + } else { + x + if b0.val().is_one() { one } else { neg_one } + } + }); + + b = self.var(|| { + let x = b.val().double(); + if b1.val().is_zero() { + x + if b0.val().is_one() { one } else { neg_one } + } else { + x + } + }); + + // Accumulated chunk value + n = self.var(|| n.val().double().double() + crumb.val()); + } + + // In final row, this is the input value, otherwise the accumulated value + row[1] = if i == num_rows - 1 { x } else { n }; + row[4] = a; + row[5] = b; + + row[14] = self.var(|| F::zero()); + } + } + + /// Creates a Poseidon gadget for given constants and a given input. + /// It generates a number of `Poseidon` gates followed by a final `Zero` gate. 
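The gadget below mirrors, in gate form, what the sponge does natively: repeated `full_round` calls over a width-3 state, five rounds per `Poseidon` row. A sketch of the out-of-circuit computation through the same `full_round` entry point, assuming `Vesta::sponge_params()` and the `PERM_ROUNDS_FULL` constant as they are used elsewhere in this patch:

```rust
use kimchi::curve::KimchiCurve;
use mina_curves::pasta::{Fp, Vesta};
use mina_poseidon::{
    constants::{PlonkSpongeConstantsKimchi, SpongeConstants},
    permutation::full_round,
};

fn main() {
    // Same parameters the gadget uses for an Fp-side circuit.
    let params = Vesta::sponge_params();
    let mut state = vec![Fp::from(1u64), Fp::from(2u64), Fp::from(3u64)];
    // One `full_round` per round index; each gate row witnesses five of these.
    for round in 0..PlonkSpongeConstantsKimchi::PERM_ROUNDS_FULL {
        full_round::<Fp, PlonkSpongeConstantsKimchi>(params, &mut state, round);
    }
    println!("permuted state: {:?}", state);
}
```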
+ fn poseidon(&mut self, constants: &Constants, input: Vec>) -> Vec> { + use kimchi::circuits::polynomials::poseidon::{POS_ROWS_PER_HASH, ROUNDS_PER_ROW}; + + let params = constants.poseidon; + let rc = ¶ms.round_constants; + let width = PlonkSpongeConstantsKimchi::SPONGE_WIDTH; + + let mut states = vec![input]; + + for row in 0..POS_ROWS_PER_HASH { + let offset = row * ROUNDS_PER_ROW; + + for i in 0..ROUNDS_PER_ROW { + let mut s: Option> = None; + states.push( + (0..3) + .map(|col| { + self.var(|| { + match &s { + Some(s) => s[col], + None => { + // Do one full round on the previous value + let mut acc = states[states.len() - 1] + .iter() + .map(|x| x.val()) + .collect(); + full_round::( + params, + &mut acc, + offset + i, + ); + let res = acc[col]; + s = Some(acc); + res + } + } + }) + }) + .collect(), + ); + } + + self.gate(GateSpec { + typ: kimchi::circuits::gate::GateType::Poseidon, + coeffs: (0..COLUMNS) + .map(|i| rc[offset + (i / width)][i % width]) + .collect(), + row: vec![ + Some(states[offset][0]), + Some(states[offset][1]), + Some(states[offset][2]), + Some(states[offset + 4][0]), + Some(states[offset + 4][1]), + Some(states[offset + 4][2]), + Some(states[offset + 1][0]), + Some(states[offset + 1][1]), + Some(states[offset + 1][2]), + Some(states[offset + 2][0]), + Some(states[offset + 2][1]), + Some(states[offset + 2][2]), + Some(states[offset + 3][0]), + Some(states[offset + 3][1]), + Some(states[offset + 3][2]), + ], + }); + } + + let final_state = &states[states.len() - 1]; + let final_row = vec![ + Some(final_state[0]), + Some(final_state[1]), + Some(final_state[2]), + ]; + self.gate(GateSpec { + typ: kimchi::circuits::gate::GateType::Zero, + coeffs: vec![], + row: final_row, + }); + + states.pop().unwrap() + } +} + +impl Cs for WitnessGenerator { + /// Creates a variable with value given by a function `g` with index `0` + fn var(&mut self, g: G) -> Var + where + G: FnOnce() -> F, + { + Var { + index: 0, + value: Some(g()), + } + } + + /// Returns the number of rows. + fn curr_gate_count(&self) -> usize { + self.rows.len() + } + + /// Pushes a new row corresponding to the values in the row of gate `g`. + fn gate(&mut self, g: GateSpec) { + assert!(g.row.len() <= COLUMNS); + + let row: [F; COLUMNS] = array::from_fn(|col| g.get_var_val_or(col, F::zero())); + self.rows.push(row); + } + + fn generic_queue(&mut self, gate: GateSpec) -> Option> { + if let Some(mut other) = self.generic_gate_queue.pop() { + other.row.extend(&gate.row); + assert_eq!(other.row.len(), DOUBLE_GENERIC_REGISTERS); + Some(other) + } else { + self.generic_gate_queue.push(gate); + None + } + } + + fn cached_constants(&mut self, x: F) -> Var { + match self.cached_constants.get(&x) { + Some(var) => *var, + None => { + let var = self.var(|| x); + self.cached_constants.insert(x, var); + var + } + } + } +} + +impl WitnessGenerator { + /// Returns the columns of the witness. 
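The `generic_queue` implementations above buffer single generic gates so that two of them can share one double-generic row, halving the row count. The bookkeeping reduces to a one-slot queue; a toy version with illustrative types only (the real queue fuses `GateSpec` rows and coefficients):

```rust
struct PairQueue<T> {
    pending: Option<T>,
}

impl<T> PairQueue<T> {
    fn push(&mut self, item: T) -> Option<(T, T)> {
        match self.pending.take() {
            // A gate was already waiting: emit both as one fused row.
            Some(first) => Some((first, item)),
            // Nothing waiting: buffer this one and emit nothing yet.
            None => {
                self.pending = Some(item);
                None
            }
        }
    }
}

fn main() {
    let mut q = PairQueue { pending: None };
    assert!(q.push("generic-a").is_none());
    assert_eq!(q.push("generic-b"), Some(("generic-a", "generic-b")));
}
```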
+    pub fn columns(&mut self) -> [Vec; COLUMNS] {
+        // flush any queued generic gate
+        if let Some(gate) = self.generic_gate_queue.pop() {
+            self.gate(gate);
+        }
+
+        // transpose
+        array::from_fn(|col| self.rows.iter().map(|row| row[col]).collect())
+    }
+}
+
+impl Cs for System {
+    fn var(&mut self, _: V) -> Var {
+        let v = self.next_variable;
+        self.next_variable += 1;
+        Var {
+            index: v,
+            value: None,
+        }
+    }
+
+    /// Outputs the number of gates in the circuit
+    fn curr_gate_count(&self) -> usize {
+        self.gates.len()
+    }
+
+    fn gate(&mut self, g: GateSpec) {
+        self.gates.push(g);
+    }
+
+    fn generic_queue(&mut self, gate: GateSpec) -> Option> {
+        if let Some(mut other) = self.generic_gate_queue.pop() {
+            other.row.extend(&gate.row);
+            assert_eq!(other.row.len(), DOUBLE_GENERIC_REGISTERS);
+            other.coeffs.extend(&gate.coeffs);
+            assert_eq!(other.coeffs.len(), DOUBLE_GENERIC_COEFFS);
+            Some(other)
+        } else {
+            self.generic_gate_queue.push(gate);
+            None
+        }
+    }
+
+    fn cached_constants(&mut self, x: F) -> Var {
+        match self.cached_constants.get(&x) {
+            Some(var) => *var,
+            None => {
+                let var = self.var(|| x);
+                self.cached_constants.insert(x, var);
+                var
+            }
+        }
+    }
+}
+
+impl System {
+    /// Compiles our intermediate representation into a circuit.
+    ///
+    /// # Panics
+    ///
+    /// Will not panic: by construction every variable in `first_cell` also
+    /// appears in `most_recent_cell`, so the permutation can always be closed.
+    pub fn gates(&mut self) -> Vec> {
+        let mut first_cell: HashMap = HashMap::new();
+        let mut most_recent_cell: HashMap = HashMap::new();
+        let mut gates = vec![];
+
+        // flush any queued generic gate
+        if let Some(gate) = self.generic_gate_queue.pop() {
+            self.gate(gate);
+        }
+
+        // convert GateSpec into CircuitGate
+        for (row, gate) in self.gates.iter().enumerate() {
+            // while tracking the wiring
+            let wires = array::from_fn(|col| {
+                let curr = Wire { row, col };
+
+                if let Some(index) = gate.get_var_idx(col) {
+                    // wire this cell to the previous one
+                    match most_recent_cell.insert(index, curr) {
+                        Some(w) => w,
+                        // unless it is the first cell,
+                        // in which case we just save it for the very end
+                        // (to complete the cycle)
+                        None => {
+                            first_cell.insert(index, curr);
+                            curr
+                        }
+                    }
+                } else {
+                    // if no var to be found, it's a cell wired to itself
+                    curr
+                }
+            });
+
+            let g = CircuitGate::new(gate.typ, wires, gate.coeffs.clone());
+            gates.push(g);
+        }
+
+        // finish the permutation cycle
+        for (var, first) in &first_cell {
+            let last = *most_recent_cell.get(var).unwrap();
+            gates[first.row].wires[first.col] = last;
+        }
+
+        gates
+    }
+}
diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml
index 67fc1e3c00..fec0d61716 100644
--- a/kimchi/Cargo.toml
+++ b/kimchi/Cargo.toml
@@ -14,11 +14,11 @@ path = "src/lib.rs"
 bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options)
 
 [dependencies]
-ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] }
-ark-ec = { version = "0.3.0", features = [ "parallel" ] }
-ark-poly = { version = "0.3.0", features = [ "parallel" ] }
-ark-serialize = "0.3.0"
-ark-bn254 = { version = "0.3.0", optional = true }
+ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] }
+ark-ec = { version = "0.4.2", features = [ "parallel" ] }
+ark-poly = { version = "0.4.2", features = [ "parallel" ] }
+ark-serialize = "0.4.2"
+ark-bn254 = { version = "0.4.0", optional = true }
 blake2 = "0.10.0"
 num-bigint = { version = "0.4.3", features = ["rand", "serde"]}
 num-derive = "0.3"
diff --git a/kimchi/src/circuits/constraints.rs 
b/kimchi/src/circuits/constraints.rs index 4f957384a4..454ce1cab0 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -18,7 +18,7 @@ use crate::{ error::{DomainCreationError, SetupError}, prover_index::ProverIndex, }; -use ark_ff::{PrimeField, SquareRootField, Zero}; +use ark_ff::{PrimeField, Zero}; use ark_poly::{ univariate::DensePolynomial as DP, EvaluationDomain, Evaluations as E, Radix2EvaluationDomain as D, @@ -303,11 +303,8 @@ impl ConstraintSystem { } } -impl< - F: PrimeField + SquareRootField, - G: KimchiCurve, - OpeningProof: OpenProof, - > ProverIndex +impl, OpeningProof: OpenProof> + ProverIndex { /// This function verifies the consistency of the wire /// assignments (witness) against the constraints @@ -361,7 +358,7 @@ impl< } } -impl ConstraintSystem { +impl ConstraintSystem { /// evaluate witness polynomials over domains pub fn evaluate(&self, w: &[DP; COLUMNS], z: &DP) -> WitnessOverDomains { // compute shifted witness polynomials @@ -689,7 +686,7 @@ impl FeatureFlags { } } -impl Builder { +impl Builder { /// Set up the number of public inputs. /// If not invoked, it equals `0` by default. pub fn public(mut self, public: usize) -> Self { @@ -942,7 +939,7 @@ pub mod tests { use super::*; use mina_curves::pasta::Fp; - impl ConstraintSystem { + impl ConstraintSystem { pub fn for_testing(gates: Vec>) -> Self { let public = 0; // not sure if theres a smarter way instead of the double unwrap, but should be fine in the test diff --git a/kimchi/src/circuits/domain_constant_evaluation.rs b/kimchi/src/circuits/domain_constant_evaluation.rs index 6659f42d31..8a835c372c 100644 --- a/kimchi/src/circuits/domain_constant_evaluation.rs +++ b/kimchi/src/circuits/domain_constant_evaluation.rs @@ -2,8 +2,8 @@ use crate::circuits::domains::EvaluationDomains; use ark_ff::FftField; +use ark_poly::DenseUVPolynomial; use ark_poly::EvaluationDomain; -use ark_poly::UVPolynomial; use ark_poly::{univariate::DensePolynomial as DP, Evaluations as E, Radix2EvaluationDomain as D}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; diff --git a/kimchi/src/circuits/expr.rs b/kimchi/src/circuits/expr.rs index f331d96b1b..4f8d11267b 100644 --- a/kimchi/src/circuits/expr.rs +++ b/kimchi/src/circuits/expr.rs @@ -2428,7 +2428,7 @@ where JointCombiner => "joint_combiner".to_string(), EndoCoefficient => "endo_coefficient".to_string(), Mds { row, col } => format!("mds({row}, {col})"), - Literal(x) => format!("field(\"0x{}\")", x.into_repr()), + Literal(x) => format!("field(\"0x{}\")", x.into_bigint()), Pow(x, n) => match x.as_ref() { Alpha => format!("alpha_pow({n})"), x => format!("pow({}, {n})", x.ocaml()), @@ -2448,7 +2448,7 @@ where JointCombiner => "joint\\_combiner".to_string(), EndoCoefficient => "endo\\_coefficient".to_string(), Mds { row, col } => format!("mds({row}, {col})"), - Literal(x) => format!("\\mathbb{{F}}({})", x.into_repr().into()), + Literal(x) => format!("\\mathbb{{F}}({})", x.into_bigint().into()), Pow(x, n) => match x.as_ref() { Alpha => format!("\\alpha^{{{n}}}"), x => format!("{}^{n}", x.ocaml()), diff --git a/kimchi/src/circuits/gate.rs b/kimchi/src/circuits/gate.rs index 61be85900d..a03481b04c 100644 --- a/kimchi/src/circuits/gate.rs +++ b/kimchi/src/circuits/gate.rs @@ -13,13 +13,11 @@ use crate::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use ark_ff::{bytes::ToBytes, PrimeField, SquareRootField}; -use num_traits::cast::ToPrimitive; +use ark_ff::PrimeField; use o1_utils::hasher::CryptoDigest; use poly_commitment::OpenProof; 
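A rename worth keeping in mind through the remaining hunks: arkworks 0.4 calls the integer-representation methods `into_bigint` and `from_bigint` where 0.3 said `into_repr` and `from_repr`. A round-trip sketch on the Pasta field `Fp`:

```rust
use ark_ff::PrimeField;
use mina_curves::pasta::Fp;

fn main() {
    let x = Fp::from(42u64);
    let n = x.into_bigint();             // 0.3 spelling: x.into_repr()
    let y = Fp::from_bigint(n).unwrap(); // 0.3 spelling: Fp::from_repr(n)
    assert_eq!(x, y);
}
```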
use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::io::{Result as IoResult, Write}; use thiserror::Error; use super::{ @@ -165,24 +163,7 @@ where } } -impl ToBytes for CircuitGate { - #[inline] - fn write(&self, mut w: W) -> IoResult<()> { - let typ: u8 = ToPrimitive::to_u8(&self.typ).unwrap(); - typ.write(&mut w)?; - for i in 0..COLUMNS { - self.wires[i].write(&mut w)?; - } - - (self.coeffs.len() as u8).write(&mut w)?; - for x in &self.coeffs { - x.write(&mut w)?; - } - Ok(()) - } -} - -impl CircuitGate { +impl CircuitGate { /// this function creates "empty" circuit gate pub fn zero(wires: GateWires) -> Self { CircuitGate::new(GateType::Zero, wires, vec![]) diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index e4e70a3a7b..1cb8d26cde 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -8,7 +8,7 @@ use crate::circuits::{ tables::LookupTable, }, }; -use ark_ff::{FftField, PrimeField, SquareRootField}; +use ark_ff::{FftField, PrimeField}; use ark_poly::{ univariate::DensePolynomial as DP, EvaluationDomain, Evaluations as E, Radix2EvaluationDomain as D, @@ -194,7 +194,7 @@ pub struct LookupConstraintSystem { pub configuration: LookupConfiguration, } -impl LookupConstraintSystem { +impl LookupConstraintSystem { /// Create the `LookupConstraintSystem`. /// /// # Errors diff --git a/kimchi/src/circuits/polynomials/and.rs b/kimchi/src/circuits/polynomials/and.rs index e49da51a3d..8debb0992d 100644 --- a/kimchi/src/circuits/polynomials/and.rs +++ b/kimchi/src/circuits/polynomials/and.rs @@ -15,7 +15,7 @@ use crate::circuits::{ polynomial::COLUMNS, wires::Wire, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers, Two}; @@ -58,7 +58,7 @@ use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers, Tw //~ * the `xor` in `a x b = xor` is connected to the `xor` in `2 \cdot and = sum - xor` //~ * the `sum` in `a + b = sum` is connected to the `sum` in `2 \cdot and = sum - xor` -impl CircuitGate { +impl CircuitGate { /// Extends an AND gadget for `bytes` length. 
/// The full operation being performed is the following: /// `a AND b = 1/2 * (a + b - (a XOR b))` diff --git a/kimchi/src/circuits/polynomials/endomul_scalar.rs b/kimchi/src/circuits/polynomials/endomul_scalar.rs index 701ce892bf..0b52ec3efe 100644 --- a/kimchi/src/circuits/polynomials/endomul_scalar.rs +++ b/kimchi/src/circuits/polynomials/endomul_scalar.rs @@ -228,7 +228,7 @@ pub fn gen_witness( let bits_per_row = 2 * crumbs_per_row; assert_eq!(num_bits % bits_per_row, 0); - let bits_lsb: Vec<_> = BitIteratorLE::new(scalar.into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(scalar.into_bigint()) .take(num_bits) .collect(); let bits_msb: Vec<_> = bits_lsb.iter().rev().collect(); @@ -339,7 +339,7 @@ mod tests { let f1 = c_func; let f2 = |x: F| -> F { - let bits_le = x.into_repr().to_bits_le(); + let bits_le = x.into_bigint().to_bits_le(); let b0 = bits_le[0]; let b1 = bits_le[1]; @@ -371,7 +371,7 @@ mod tests { let f1 = d_func; let f2 = |x: F| -> F { - let bits_le = x.into_repr().to_bits_le(); + let bits_le = x.into_bigint().to_bits_le(); let b0 = bits_le[0]; let b1 = bits_le[1]; diff --git a/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs b/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs index 30fc6926ff..96af81e165 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs @@ -1,6 +1,6 @@ //! This module obtains the gates of a foreign field addition circuit. -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::foreign_field::BigUintForeignFieldHelpers; @@ -11,7 +11,7 @@ use crate::circuits::{ use super::witness::FFOps; -impl CircuitGate { +impl CircuitGate { /// Create foreign field addition gate chain without range checks (needs to wire the range check for result bound manually) /// - Inputs /// - starting row diff --git a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs index 104ea2db51..41afcc39e0 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs @@ -1,6 +1,6 @@ //! This module obtains the gates of a foreign field addition circuit. 
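Note: the AND gadget documented above relies on the identity `a AND b = 1/2 * (a + b - (a XOR b))`, which is just `a + b = (a XOR b) + 2 * (a AND b)` rearranged (XOR is the carry-less sum, AND marks the carry positions). A quick integer sanity check:

    fn and_xor_identity(a: u64, b: u64) {
        // a + b = (a XOR b) + 2 * (a AND b); keep a, b small enough that a + b does not overflow
        assert_eq!(2 * (a & b), (a + b) - (a ^ b));
    }
    // e.g. and_xor_identity(0b1100, 0b1010): 2 * 0b1000 == 0b10110 - 0b0110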
-use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::foreign_field::{BigUintForeignFieldHelpers, ForeignFieldHelpers}; @@ -24,7 +24,7 @@ use super::circuitgates::ForeignFieldMul; /// Number of gates in this gadget pub const GATE_COUNT: usize = 1; -impl CircuitGate { +impl CircuitGate { /// Create foreign field multiplication gate /// Inputs the starting row /// Outputs tuple (next_row, circuit_gates) where diff --git a/kimchi/src/circuits/polynomials/permutation.rs b/kimchi/src/circuits/polynomials/permutation.rs index f94133c08e..b80ebfa6b5 100644 --- a/kimchi/src/circuits/polynomials/permutation.rs +++ b/kimchi/src/circuits/polynomials/permutation.rs @@ -49,12 +49,12 @@ use crate::{ proof::{PointEvaluations, ProofEvaluations}, prover_index::ProverIndex, }; -use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; +use ark_ff::{FftField, PrimeField, Zero}; use ark_poly::{ univariate::{DenseOrSparsePolynomial, DensePolynomial}, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, }; -use ark_poly::{Polynomial, UVPolynomial}; +use ark_poly::{DenseUVPolynomial, Polynomial}; use blake2::{Blake2b512, Digest}; use o1_utils::{ExtendedDensePolynomial, ExtendedEvaluations}; use poly_commitment::OpenProof; @@ -135,7 +135,7 @@ pub struct Shifts { impl Shifts where - F: FftField + SquareRootField, + F: FftField, { /// Generates the shifts for a given domain pub fn new(domain: &D) -> Self { diff --git a/kimchi/src/circuits/polynomials/poseidon.rs b/kimchi/src/circuits/polynomials/poseidon.rs index c587c506e8..587346d953 100644 --- a/kimchi/src/circuits/polynomials/poseidon.rs +++ b/kimchi/src/circuits/polynomials/poseidon.rs @@ -35,7 +35,7 @@ use crate::{ }, curve::KimchiCurve, }; -use ark_ff::{Field, PrimeField, SquareRootField}; +use ark_ff::{Field, PrimeField}; use mina_poseidon::{ constants::{PlonkSpongeConstantsKimchi, SpongeConstants}, poseidon::{sbox, ArithmeticSponge, ArithmeticSpongeParams, Sponge}, @@ -77,7 +77,7 @@ pub const fn round_to_cols(i: usize) -> Range { start..(start + SPONGE_WIDTH) } -impl CircuitGate { +impl CircuitGate { pub fn create_poseidon( wires: GateWires, // Coefficients are passed in in the logical order diff --git a/kimchi/src/circuits/polynomials/range_check/gadget.rs b/kimchi/src/circuits/polynomials/range_check/gadget.rs index f8d3d6e696..5815488441 100644 --- a/kimchi/src/circuits/polynomials/range_check/gadget.rs +++ b/kimchi/src/circuits/polynomials/range_check/gadget.rs @@ -1,6 +1,6 @@ //! Range check gate -use ark_ff::{FftField, PrimeField, SquareRootField}; +use ark_ff::{FftField, PrimeField}; use crate::{ alphas::Alphas, @@ -20,7 +20,7 @@ use super::circuitgates::{RangeCheck0, RangeCheck1}; pub const GATE_COUNT: usize = 2; -impl CircuitGate { +impl CircuitGate { /// Create range check gate for constraining three 88-bit values. 
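Note: a pattern repeated throughout these hunks is the removal of `SquareRootField`. In ark-ff 0.4 that trait is gone and square roots live on `Field` itself, so bounds shrink from `PrimeField + SquareRootField` to plain `PrimeField`. Sketch:

    use ark_ff::Field;

    // 0.3 signature: fn has_root<F: Field + SquareRootField>(x: F) -> bool
    fn has_root<F: Field>(x: F) -> bool {
        x.sqrt().is_some() // sqrt (and legendre) merged into Field in 0.4
    }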
/// Inputs the starting row /// Outputs tuple (`next_row`, `circuit_gates`) where diff --git a/kimchi/src/circuits/polynomials/rot.rs b/kimchi/src/circuits/polynomials/rot.rs index 80c4022d41..4ed7089997 100644 --- a/kimchi/src/circuits/polynomials/rot.rs +++ b/kimchi/src/circuits/polynomials/rot.rs @@ -19,7 +19,7 @@ use crate::{ }, variable_map, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use std::{array, marker::PhantomData}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -28,7 +28,7 @@ pub enum RotMode { Right, } -impl CircuitGate { +impl CircuitGate { /// Creates a Rot64 gadget to rotate a word /// It will need: /// - 1 Generic gate to constrain to zero the top 2 limbs of the shifted witness of the rotation diff --git a/kimchi/src/circuits/polynomials/turshi.rs b/kimchi/src/circuits/polynomials/turshi.rs index da51465f92..5dacc6eb52 100644 --- a/kimchi/src/circuits/polynomials/turshi.rs +++ b/kimchi/src/circuits/polynomials/turshi.rs @@ -90,7 +90,7 @@ use crate::{ curve::KimchiCurve, proof::ProofEvaluations, }; -use ark_ff::{FftField, Field, PrimeField, SquareRootField}; +use ark_ff::{FftField, Field, PrimeField}; use rand::{prelude::StdRng, SeedableRng}; use std::array; use std::marker::PhantomData; @@ -104,7 +104,7 @@ pub const CIRCUIT_GATE_COUNT: usize = 4; // GATE-RELATED -impl CircuitGate { +impl CircuitGate { /// This function creates a `CairoClaim` gate pub fn create_cairo_claim(wires: GateWires) -> Self { CircuitGate::new(GateType::CairoClaim, wires, vec![]) diff --git a/kimchi/src/circuits/polynomials/xor.rs b/kimchi/src/circuits/polynomials/xor.rs index ea5fbc2cbd..564b7d25c8 100644 --- a/kimchi/src/circuits/polynomials/xor.rs +++ b/kimchi/src/circuits/polynomials/xor.rs @@ -16,14 +16,14 @@ use crate::{ }, variable_map, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers}; use std::{array, marker::PhantomData}; use super::generic::GenericGateSpec; -impl CircuitGate { +impl CircuitGate { /// Extends a XOR gadget for `bits` length to a circuit /// Includes: /// - num_xors Xor16 gates diff --git a/kimchi/src/circuits/wires.rs b/kimchi/src/circuits/wires.rs index 1ab40d4b83..31c8761911 100644 --- a/kimchi/src/circuits/wires.rs +++ b/kimchi/src/circuits/wires.rs @@ -1,9 +1,7 @@ //! This module implements Plonk circuit gate wires primitive. -use ark_ff::bytes::{FromBytes, ToBytes}; use serde::{Deserialize, Serialize}; use std::array; -use std::io::{Read, Result as IoResult, Write}; /// Number of registers pub const COLUMNS: usize = 15; @@ -65,24 +63,6 @@ impl Wirable for GateWires { } } -impl ToBytes for Wire { - #[inline] - fn write(&self, mut w: W) -> IoResult<()> { - (self.row as u32).write(&mut w)?; - (self.col as u32).write(&mut w)?; - Ok(()) - } -} - -impl FromBytes for Wire { - #[inline] - fn read(mut r: R) -> IoResult { - let row = u32::read(&mut r)? as usize; - let col = u32::read(&mut r)? 
as usize; - Ok(Wire { row, col }) - } -} - #[cfg(feature = "ocaml_types")] pub mod caml { use super::*; diff --git a/kimchi/src/circuits/witness/mod.rs b/kimchi/src/circuits/witness/mod.rs index a85a932db9..75271215f4 100644 --- a/kimchi/src/circuits/witness/mod.rs +++ b/kimchi/src/circuits/witness/mod.rs @@ -67,10 +67,10 @@ mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use mina_curves::pasta::Pallas; - type PallasField = ::BaseField; + type PallasField = ::BaseField; #[test] fn zero_layout() { diff --git a/kimchi/src/curve.rs b/kimchi/src/curve.rs index 57790b10f7..db2aa340a2 100644 --- a/kimchi/src/curve.rs +++ b/kimchi/src/curve.rs @@ -1,7 +1,7 @@ //! This module contains a useful trait for recursion: [KimchiCurve], //! which defines how a pair of curves interact. -use ark_ec::{short_weierstrass_jacobian::GroupAffine, AffineCurve, ModelParameters}; +use ark_ec::{short_weierstrass::Affine, AffineRepr, CurveConfig}; use mina_curves::pasta::curves::{ pallas::{LegacyPallasParameters, PallasParameters}, vesta::{LegacyVestaParameters, VestaParameters}, @@ -37,28 +37,28 @@ pub trait KimchiCurve: CommitmentCurve + EndoCurve { } fn vesta_endos() -> &'static ( - ::BaseField, - ::ScalarField, + ::BaseField, + ::ScalarField, ) { static VESTA_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); &VESTA_ENDOS } fn pallas_endos() -> &'static ( - ::BaseField, - ::ScalarField, + ::BaseField, + ::ScalarField, ) { static PALLAS_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); &PALLAS_ENDOS } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "vesta"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -78,13 +78,13 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "pallas"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -104,7 +104,7 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } @@ -114,7 +114,7 @@ impl KimchiCurve for GroupAffine { // Legacy curves // -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "legacy_vesta"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -134,13 +134,13 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "legacy_pallas"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -160,7 +160,7 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } @@ -170,7 +170,7 @@ impl KimchiCurve for GroupAffine { use mina_poseidon::dummy_values::kimchi_dummy; #[cfg(feature = 
"bn254")] -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "bn254"; fn sponge_params() -> &'static ArithmeticSpongeParams { diff --git a/kimchi/src/linearization.rs b/kimchi/src/linearization.rs index 566ef58216..6ca517240b 100644 --- a/kimchi/src/linearization.rs +++ b/kimchi/src/linearization.rs @@ -28,14 +28,14 @@ use crate::circuits::{ gate::GateType, wires::COLUMNS, }; -use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; +use ark_ff::{FftField, PrimeField, Zero}; /// Get the expresion of constraints. /// /// # Panics /// /// Will panic if `generic_gate` is not associate with `alpha^0`. -pub fn constraints_expr( +pub fn constraints_expr( feature_flags: Option<&FeatureFlags>, generic: bool, ) -> (Expr>, Alphas) { @@ -234,7 +234,7 @@ pub fn constraints_expr( /// Adds the polynomials that are evaluated as part of the proof /// for the linearization to work. -pub fn linearization_columns( +pub fn linearization_columns( feature_flags: Option<&FeatureFlags>, ) -> std::collections::HashSet { let mut h = std::collections::HashSet::new(); @@ -336,7 +336,7 @@ pub fn linearization_columns( /// # Panics /// /// Will panic if the `linearization` process fails. -pub fn expr_linearization( +pub fn expr_linearization( feature_flags: Option<&FeatureFlags>, generic: bool, ) -> (Linearization>>, Alphas) { diff --git a/kimchi/src/proof.rs b/kimchi/src/proof.rs index 5829178786..75468abe8c 100644 --- a/kimchi/src/proof.rs +++ b/kimchi/src/proof.rs @@ -6,7 +6,7 @@ use crate::circuits::{ lookup::lookups::LookupPattern, wires::{COLUMNS, PERMUTS}, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{FftField, One, Zero}; use ark_poly::univariate::DensePolynomial; use o1_utils::ExtendedDensePolynomial; @@ -108,7 +108,7 @@ pub struct ProofEvaluations { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct LookupCommitments { +pub struct LookupCommitments { /// Commitments to the sorted lookup table polynomial (may have chunks) pub sorted: Vec>, /// Commitment to the lookup aggregation polynomial @@ -121,7 +121,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: [PolyComm; COLUMNS], /// The commitment to the permutation polynomial @@ -136,7 +136,7 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof pub commitments: ProverCommitments, @@ -164,7 +164,7 @@ pub struct ProverProof { #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] pub struct RecursionChallenge where - G: AffineCurve, + G: AffineRepr, { /// Vector of scalar field elements #[serde_as(as = "Vec")] @@ -345,7 +345,7 @@ impl ProofEvaluations { } } -impl RecursionChallenge { +impl RecursionChallenge { pub fn new(chals: Vec, comm: PolyComm) -> RecursionChallenge { RecursionChallenge { chals, comm } } @@ -505,7 +505,7 @@ pub mod caml { impl From> for CamlRecursionChallenge where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: 
From, { @@ -519,7 +519,7 @@ pub mod caml { impl From> for RecursionChallenge where - G: AffineCurve + From, + G: AffineRepr + From, G::ScalarField: From, { fn from(caml_ch: CamlRecursionChallenge) -> RecursionChallenge { diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 55a99561c3..dca688dd3d 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -35,8 +35,8 @@ use crate::{ }; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, - Radix2EvaluationDomain as D, UVPolynomial, + univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, + Radix2EvaluationDomain as D, }; use itertools::Itertools; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; @@ -1500,7 +1500,7 @@ internal_tracing::decl_traces!(internal_traces; pub mod caml { use super::*; use crate::proof::caml::{CamlProofEvaluations, CamlRecursionChallenge}; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use poly_commitment::{ commitment::caml::{CamlOpeningProof, CamlPolyComm}, evaluation_proof::OpeningProof, @@ -1588,7 +1588,7 @@ pub mod caml { impl From> for CamlLookupCommitments where - G: AffineCurve, + G: AffineRepr, CamlPolyComm: From>, { fn from( @@ -1608,7 +1608,7 @@ pub mod caml { impl From> for LookupCommitments where - G: AffineCurve, + G: AffineRepr, PolyComm: From>, { fn from( @@ -1632,7 +1632,7 @@ pub mod caml { impl From> for CamlProverCommitments where - G: AffineCurve, + G: AffineRepr, CamlPolyComm: From>, { fn from(prover_comm: ProverCommitments) -> Self { @@ -1665,7 +1665,7 @@ pub mod caml { impl From> for ProverCommitments where - G: AffineCurve, + G: AffineRepr, PolyComm: From>, { fn from(caml_prover_comm: CamlProverCommitments) -> ProverCommitments { @@ -1718,7 +1718,7 @@ pub mod caml { impl From<(ProverProof>, Vec)> for CamlProofWithPublic where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -1742,7 +1742,7 @@ pub mod caml { for (ProverProof>, Vec) where CamlF: Clone, - G: AffineCurve + From, + G: AffineRepr + From, G::ScalarField: From, { fn from( diff --git a/kimchi/src/prover_index.rs b/kimchi/src/prover_index.rs index 523d583e18..ab7dc81b65 100644 --- a/kimchi/src/prover_index.rs +++ b/kimchi/src/prover_index.rs @@ -140,7 +140,7 @@ pub mod testing { }, precomputed_srs, }; - use ark_ff::{PrimeField, SquareRootField}; + use ark_ff::PrimeField; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; use poly_commitment::{evaluation_proof::OpeningProof, srs::SRS, OpenProof}; @@ -161,7 +161,7 @@ pub mod testing { ) -> ProverIndex where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { // not sure if theres a smarter way instead of the double unwrap, but should be fine in the test let cs = ConstraintSystem::::create(gates) @@ -198,7 +198,7 @@ pub mod testing { ) -> ProverIndex> where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { new_index_for_test_with_lookups_and_custom_srs( gates, @@ -230,7 +230,7 @@ pub mod testing { ) -> ProverIndex> where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { new_index_for_test_with_lookups::(gates, public, 0, vec![], None, false, None) } diff --git a/kimchi/src/tests/and.rs b/kimchi/src/tests/and.rs index e344af6da4..1b76f43c2c 100644 --- a/kimchi/src/tests/and.rs +++ b/kimchi/src/tests/and.rs @@ -10,7 +10,7 @@ use crate::{ 
plonk_sponge::FrSponge, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{One, PrimeField, Zero}; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; use mina_poseidon::{ @@ -24,8 +24,8 @@ use rand::{rngs::StdRng, SeedableRng}; use super::framework::TestFramework; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/ec.rs b/kimchi/src/tests/ec.rs index 9deec7a32a..2a3daee005 100644 --- a/kimchi/src/tests/ec.rs +++ b/kimchi/src/tests/ec.rs @@ -2,8 +2,8 @@ use crate::circuits::{ gate::{CircuitGate, GateType}, wires::*, }; -use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; +use ark_ec::{AffineRepr, CurveGroup}; +use ark_ff::{Field, One, UniformRand, Zero}; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, @@ -11,6 +11,7 @@ use mina_poseidon::{ }; use rand::{rngs::StdRng, SeedableRng}; use std::array; +use std::ops::Mul; use super::framework::TestFramework; @@ -40,31 +41,29 @@ fn ec_test() { let rng = &mut StdRng::from_seed([0; 32]); let ps = { - let p = Other::prime_subgroup_generator() - .into_projective() - .mul(::ScalarField::rand(rng).into_repr()) - .into_affine(); + let p = Other::generator() + .into_group() + .mul(::ScalarField::rand(rng)); let mut res = vec![]; let mut acc = p; for _ in 0..num_additions { res.push(acc); - acc = acc + p; + acc += p; } - res + ::Group::normalize_batch(&res) }; let qs = { - let q = Other::prime_subgroup_generator() - .into_projective() - .mul(::ScalarField::rand(rng).into_repr()) - .into_affine(); + let q = Other::generator() + .into_group() + .mul(::ScalarField::rand(rng)); let mut res = vec![]; let mut acc = q; for _ in 0..num_additions { res.push(acc); - acc = acc + q; + acc += q; } - res + ::Group::normalize_batch(&res) }; for &p in ps.iter().take(num_doubles) { diff --git a/kimchi/src/tests/endomul.rs b/kimchi/src/tests/endomul.rs index 5a4fa08246..4682848627 100644 --- a/kimchi/src/tests/endomul.rs +++ b/kimchi/src/tests/endomul.rs @@ -4,7 +4,7 @@ use crate::circuits::{ wires::*, }; use crate::tests::framework::TestFramework; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, BitIteratorLE, Field, One, PrimeField, UniformRand, Zero}; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; use mina_poseidon::{ @@ -14,6 +14,7 @@ use mina_poseidon::{ use poly_commitment::srs::endos; use rand::{rngs::StdRng, SeedableRng}; use std::array; +use std::ops::Mul; type SpongeParams = PlonkSpongeConstantsKimchi; type BaseSponge = DefaultFqSponge; @@ -56,20 +57,20 @@ fn endomul_test() { // let start = Instant::now(); for i in 0..num_scalars { - let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_bigint()) .take(num_bits) .collect(); - let x = ::ScalarField::from_repr( + let x = ::ScalarField::from_bigint( ::BigInt::from_bits_le(&bits_lsb[..]), ) .unwrap(); let x_scalar = ScalarChallenge(x).to_field(&endo_r); - let base = Other::prime_subgroup_generator(); - // let g = Other::prime_subgroup_generator().into_projective(); + let base = Other::generator(); + // let g = 
Other::generator().into_group(); let acc0 = { - let t = Other::new(endo_q * base.x, base.y, false); + let t = Other::new_unchecked(endo_q * base.x, base.y); let p = t + base; let acc = p + p; (acc.x, acc.y) @@ -87,27 +88,24 @@ fn endomul_test() { ); let expected = { - let t = Other::prime_subgroup_generator(); - let mut acc = Other::new(acc0.0, acc0.1, false); + let t = Other::generator(); + let mut acc = Other::new_unchecked(acc0.0, acc0.1).into_group(); for i in (0..(num_bits / 2)).rev() { let b2i = F::from(bits_lsb[2 * i] as u64); let b2i1 = F::from(bits_lsb[2 * i + 1] as u64); let xq = (F::one() + ((endo_q - F::one()) * b2i1)) * t.x; let yq = (b2i.double() - F::one()) * t.y; - acc = acc + (acc + Other::new(xq, yq, false)); + acc = acc + (acc + Other::new_unchecked(xq, yq)); } - acc + acc.into_affine() }; assert_eq!( expected, - Other::prime_subgroup_generator() - .into_projective() - .mul(x_scalar.into_repr()) - .into_affine() + Other::generator().into_group().mul(x_scalar).into_affine() ); assert_eq!((expected.x, expected.y), res.acc); - assert_eq!(x.into_repr(), res.n.into_repr()); + assert_eq!(x.into_bigint(), res.n.into_bigint()); } TestFramework::::default() diff --git a/kimchi/src/tests/endomul_scalar.rs b/kimchi/src/tests/endomul_scalar.rs index f39c0bf236..886a7daefa 100644 --- a/kimchi/src/tests/endomul_scalar.rs +++ b/kimchi/src/tests/endomul_scalar.rs @@ -52,10 +52,10 @@ fn endomul_scalar_test() { //let start = Instant::now(); for _ in 0..num_scalars { let x = { - let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_bigint()) .take(num_bits) .collect(); - F::from_repr(::BigInt::from_bits_le(&bits_lsb[..])).unwrap() + F::from_bigint(::BigInt::from_bits_le(&bits_lsb[..])).unwrap() }; assert_eq!( diff --git a/kimchi/src/tests/foreign_field_add.rs b/kimchi/src/tests/foreign_field_add.rs index 760c7fa2d5..8aea3daf13 100644 --- a/kimchi/src/tests/foreign_field_add.rs +++ b/kimchi/src/tests/foreign_field_add.rs @@ -13,8 +13,8 @@ use crate::circuits::{ }; use crate::curve::KimchiCurve; use crate::prover_index::ProverIndex; -use ark_ec::AffineCurve; -use ark_ff::{One, PrimeField, SquareRootField, Zero}; +use ark_ec::AffineRepr; +use ark_ff::{One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; use mina_poseidon::{ @@ -34,8 +34,8 @@ use poly_commitment::{ use rand::{rngs::StdRng, Rng, SeedableRng}; use std::array; use std::sync::Arc; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; @@ -150,7 +150,7 @@ static NULL_CARRY_BOTH: &[u8] = &[ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0xD2, ]; -impl CircuitGate { +impl CircuitGate { /// Check if a given circuit gate is a given foreign field operation pub fn check_ffadd_sign(&self, sign: FFOps) -> Result<(), String> { if self.typ != GateType::ForeignFieldAdd { @@ -179,7 +179,7 @@ impl CircuitGate { // Outputs tuple (next_row, circuit_gates) where // next_row - next row after this gate // circuit_gates - vector of circuit gates comprising this gate -fn short_circuit( +fn short_circuit( opcodes: &[FFOps], foreign_field_modulus: &BigUint, ) -> (usize, Vec>) { @@ -212,7 +212,7 @@ fn short_circuit( // Outputs tuple (next_row, circuit_gates) where // next_row - next row after this gate // circuit_gates - vector of circuit gates comprising this gate -fn 
full_circuit( +fn full_circuit( opcodes: &[FFOps], foreign_field_modulus: &BigUint, ) -> (usize, Vec>) { diff --git a/kimchi/src/tests/foreign_field_mul.rs b/kimchi/src/tests/foreign_field_mul.rs index 95272e594c..951fbf4953 100644 --- a/kimchi/src/tests/foreign_field_mul.rs +++ b/kimchi/src/tests/foreign_field_mul.rs @@ -9,7 +9,7 @@ use crate::{ plonk_sponge::FrSponge, tests::framework::TestFramework, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, PrimeField, Zero}; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; use num_bigint::BigUint; @@ -27,8 +27,8 @@ use mina_poseidon::{ use num_bigint::RandBigInt; use rand::{rngs::StdRng, SeedableRng}; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; diff --git a/kimchi/src/tests/generic.rs b/kimchi/src/tests/generic.rs index f9efc83341..2ff3b72da5 100644 --- a/kimchi/src/tests/generic.rs +++ b/kimchi/src/tests/generic.rs @@ -92,7 +92,7 @@ fn test_generic_gate_pub_empty() { fn test_generic_gate_pairing() { type Fp = ark_bn254::Fr; type SpongeParams = PlonkSpongeConstantsKimchi; - type BaseSponge = DefaultFqSponge; + type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; use ark_ff::UniformRand; @@ -110,7 +110,7 @@ fn test_generic_gate_pairing() { // create and verify proof based on the witness >, + poly_commitment::pairing_proof::PairingProof>, > as Default>::default() .gates(gates) .witness(witness) diff --git a/kimchi/src/tests/keccak.rs b/kimchi/src/tests/keccak.rs index b39ecc4f84..a29e13c40b 100644 --- a/kimchi/src/tests/keccak.rs +++ b/kimchi/src/tests/keccak.rs @@ -6,12 +6,12 @@ use crate::circuits::{ polynomials::keccak::{self, ROT_TAB}, wires::Wire, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use mina_curves::pasta::{Fp, Pallas, Vesta}; use rand::Rng; //use super::framework::TestFramework; -type PallasField = ::BaseField; +type PallasField = ::BaseField; fn create_test_constraint_system() -> ConstraintSystem { let (mut next_row, mut gates) = { CircuitGate::::create_keccak(0) }; diff --git a/kimchi/src/tests/not.rs b/kimchi/src/tests/not.rs index 42cd0705f7..794a41bb22 100644 --- a/kimchi/src/tests/not.rs +++ b/kimchi/src/tests/not.rs @@ -14,7 +14,7 @@ use crate::{ }; use super::framework::TestFramework; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; use mina_poseidon::{ @@ -26,8 +26,8 @@ use o1_utils::{BigUintHelpers, BitwiseOps, FieldHelpers, RandomField}; use poly_commitment::evaluation_proof::OpeningProof; use rand::{rngs::StdRng, SeedableRng}; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/range_check.rs b/kimchi/src/tests/range_check.rs index 8e46962add..6a93128883 100644 --- a/kimchi/src/tests/range_check.rs +++ b/kimchi/src/tests/range_check.rs @@ -13,7 +13,7 @@ use crate::{ prover_index::testing::new_index_for_test_with_lookups, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; @@ -47,7 +47,7 @@ use 
super::framework::TestFramework; type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; -type PallasField = ::BaseField; +type PallasField = ::BaseField; const RNG_SEED: [u8; 32] = [ 22, 4, 34, 75, 29, 255, 0, 126, 237, 19, 86, 160, 1, 90, 131, 221, 186, 168, 40, 59, 0, 4, 9, diff --git a/kimchi/src/tests/recursion.rs b/kimchi/src/tests/recursion.rs index 719318eb96..8a812c330f 100644 --- a/kimchi/src/tests/recursion.rs +++ b/kimchi/src/tests/recursion.rs @@ -4,7 +4,7 @@ use crate::circuits::wires::COLUMNS; use crate::proof::RecursionChallenge; use ark_ff::{UniformRand, Zero}; use ark_poly::univariate::DensePolynomial; -use ark_poly::UVPolynomial; +use ark_poly::DenseUVPolynomial; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, diff --git a/kimchi/src/tests/rot.rs b/kimchi/src/tests/rot.rs index f9a1308b86..fc5be93697 100644 --- a/kimchi/src/tests/rot.rs +++ b/kimchi/src/tests/rot.rs @@ -16,7 +16,7 @@ use crate::{ plonk_sponge::FrSponge, prover_index::ProverIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; @@ -32,7 +32,7 @@ use poly_commitment::{ }; use rand::{rngs::StdRng, Rng, SeedableRng}; -type PallasField = ::BaseField; +type PallasField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/serde.rs b/kimchi/src/tests/serde.rs index 43883066d9..7b3eeefb37 100644 --- a/kimchi/src/tests/serde.rs +++ b/kimchi/src/tests/serde.rs @@ -9,7 +9,7 @@ use crate::{ verifier::verify, verifier_index::VerifierIndex, }; -use ark_ec::short_weierstrass_jacobian::GroupAffine; +use ark_ec::short_weierstrass::Affine; use ark_ff::Zero; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; @@ -73,11 +73,11 @@ mod tests { .unwrap(); // deserialize the verifier index - let mut verifier_index_deserialize: VerifierIndex, _> = + let mut verifier_index_deserialize: VerifierIndex, _> = serde_json::from_str(&verifier_index_serialize).unwrap(); // add srs with lagrange bases - let mut srs = SRS::>::create(verifier_index.max_poly_size); + let mut srs = SRS::>::create(verifier_index.max_poly_size); srs.add_lagrange_basis(verifier_index.domain); verifier_index_deserialize.powers_of_alpha = index.powers_of_alpha; verifier_index_deserialize.linearization = index.linearization; diff --git a/kimchi/src/tests/varbasemul.rs b/kimchi/src/tests/varbasemul.rs index 91eed12b78..79fa241acc 100644 --- a/kimchi/src/tests/varbasemul.rs +++ b/kimchi/src/tests/varbasemul.rs @@ -4,7 +4,7 @@ use crate::circuits::{ wires::*, }; use crate::tests::framework::TestFramework; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, BitIteratorLE, Field, One, PrimeField, UniformRand, Zero}; use colored::Colorize; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; @@ -14,6 +14,7 @@ use mina_poseidon::{ }; use rand::{rngs::StdRng, SeedableRng}; use std::array; +use std::ops::Mul; use std::time::Instant; type SpongeParams = PlonkSpongeConstantsKimchi; @@ -22,7 +23,7 @@ type ScalarSponge = DefaultFrSponge; #[test] fn varbase_mul_test() { - let num_bits = F::size_in_bits(); + let num_bits = F::MODULUS_BIT_SIZE as usize; let chunks = num_bits / 5; let num_scalars = 10; @@ -54,14 +55,14 @@ 
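Note: two more 0.4 moves visible in the test diffs here: `short_weierstrass_jacobian::GroupAffine<P>` is renamed to `short_weierstrass::Affine<P>`, and the field bit size becomes the `MODULUS_BIT_SIZE` associated constant instead of the `size_in_bits()` method. Sketch, assuming the pasta parameter types:

    use ark_ec::{short_weierstrass::Affine, AffineRepr}; // 0.3: short_weierstrass_jacobian::GroupAffine
    use ark_ff::PrimeField;
    use mina_curves::pasta::{Fp, VestaParameters};

    type VestaAffine = Affine<VestaParameters>; // the pattern used in tests/serde.rs

    fn demo() {
        let _bits = Fp::MODULUS_BIT_SIZE as usize; // 0.3: Fp::size_in_bits()
        let _gen = VestaAffine::generator();
    }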
fn varbase_mul_test() { let start = Instant::now(); for i in 0..num_scalars { let x = F::rand(rng); - let bits_lsb: Vec<_> = BitIteratorLE::new(x.into_repr()).take(num_bits).collect(); - let x_ = ::ScalarField::from_repr( + let bits_lsb: Vec<_> = BitIteratorLE::new(x.into_bigint()).take(num_bits).collect(); + let x_ = ::ScalarField::from_bigint( ::BigInt::from_bits_le(&bits_lsb[..]), ) .unwrap(); - let base = Other::prime_subgroup_generator(); - let g = Other::prime_subgroup_generator().into_projective(); + let base = Other::generator(); + let g = Other::generator().into_group(); let acc = (g + g).into_affine(); let acc = (acc.x, acc.y); @@ -75,12 +76,12 @@ fn varbase_mul_test() { acc, ); - let shift = ::ScalarField::from(2).pow([(bits_msb.len()) as u64]); + let shift = ::ScalarField::from(2).pow([(bits_msb.len()) as u64]); let expected = g - .mul((::ScalarField::one() + shift + x_.double()).into_repr()) + .mul(&(::ScalarField::one() + shift + x_.double())) .into_affine(); - assert_eq!(x_.into_repr(), res.n.into_repr()); + assert_eq!(x_.into_bigint(), res.n.into_bigint()); assert_eq!((expected.x, expected.y), res.acc); } println!( diff --git a/kimchi/src/tests/xor.rs b/kimchi/src/tests/xor.rs index 7ab28b4008..e6b271dede 100644 --- a/kimchi/src/tests/xor.rs +++ b/kimchi/src/tests/xor.rs @@ -11,7 +11,7 @@ use crate::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; @@ -29,7 +29,7 @@ use rand::{rngs::StdRng, SeedableRng}; use super::framework::TestFramework; -type PallasField = ::BaseField; +type PallasField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/verifier.rs b/kimchi/src/verifier.rs index 62d9e5d43f..30d2c77f80 100644 --- a/kimchi/src/verifier.rs +++ b/kimchi/src/verifier.rs @@ -18,7 +18,7 @@ use crate::{ proof::{PointEvaluations, ProofEvaluations, ProverProof, RecursionChallenge}, verifier_index::VerifierIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Polynomial}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; @@ -747,7 +747,7 @@ where fn to_batch<'a, G, EFqSponge, EFrSponge, OpeningProof: OpenProof>( verifier_index: &VerifierIndex, proof: &'a ProverProof, - public_input: &'a [::ScalarField], + public_input: &'a [::ScalarField], ) -> Result> where G: KimchiCurve, diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index 35459e4f5d..97f8dde32d 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -299,7 +299,7 @@ pub fn absorb_commitment`, /// so usage of this traits must manually bind `G::BaseField: PrimeField`. 
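Note: the `CommitmentCurve` change just below adds an explicit subtraction bound. In ark 0.4, arithmetic on affine points returns projective group elements, so a trait that needs affine-output subtraction must require it itself. A reduced sketch, assuming the added bound is `Sub<Output = Self>` (the real trait also carries its `Params` and `Map` items):

    use ark_ec::AffineRepr;
    use std::ops::Sub;

    trait AffineWithSub: AffineRepr + Sub<Output = Self> {}

    fn diff<G: AffineWithSub>(a: G, b: G) -> G {
        a - b // affine output, which plain AffineRepr no longer guarantees
    }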
-pub trait CommitmentCurve: AffineRepr { +pub trait CommitmentCurve: AffineRepr + Sub { type Params: SWCurveConfig; type Map: GroupMap; diff --git a/proof-systems-vendors b/proof-systems-vendors index dfed44c3cb..782304f933 160000 --- a/proof-systems-vendors +++ b/proof-systems-vendors @@ -1 +1 @@ -Subproject commit dfed44c3cb43543b8166fc2f16dac5bd091e971b +Subproject commit 782304f9337249282065e2fc96ef1d8657e93e52 diff --git a/signer/src/schnorr.rs b/signer/src/schnorr.rs index 1774d89918..ed780121ac 100644 --- a/signer/src/schnorr.rs +++ b/signer/src/schnorr.rs @@ -5,7 +5,7 @@ //! Details: use ark_ec::{ - AffineRepr, // for prime_subgroup_generator() + AffineRepr, // for generator() CurveGroup, }; use ark_ff::{ diff --git a/tools/kimchi-visu/Cargo.toml b/tools/kimchi-visu/Cargo.toml index bc4b3ac227..89c5ec76ed 100644 --- a/tools/kimchi-visu/Cargo.toml +++ b/tools/kimchi-visu/Cargo.toml @@ -13,8 +13,8 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ec = "0.3.0" -ark-ff = "0.3.0" +ark-ec = "0.4.2" +ark-ff = "0.4.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" serde_with = "1.10.0" From dbf1a8e75cdb3317530228afa37a632d91fcf5bc Mon Sep 17 00:00:00 2001 From: mrmr1993 Date: Tue, 26 Dec 2023 22:16:56 +0000 Subject: [PATCH 082/178] Fixup compilation errors in OCaml conversion helpers --- poly-commitment/src/commitment.rs | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index 97f8dde32d..cc2b3b07ab 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -1039,11 +1039,7 @@ pub mod caml { { fn from(polycomm: PolyComm) -> Self { Self { - unshifted: polycomm - .elems - .into_iter() - .map(Into::::into) - .collect(), + unshifted: polycomm.elems.into_iter().map(CamlG::from).collect(), shifted: None, } } @@ -1120,12 +1116,12 @@ pub mod caml { lr: opening_proof .lr .into_iter() - .map(|(g1, g2)| (From::from(g1), From::from(g2))) + .map(|(g1, g2)| (CamlG::from(g1), CamlG::from(g2))) .collect(), - delta: From::from(opening_proof.delta), - z1: From::from(opening_proof.z1), - z2: From::from(opening_proof.z2), - sg: From::from(opening_proof.sg), + delta: CamlG::from(opening_proof.delta), + z1: opening_proof.z1.into(), + z2: opening_proof.z2.into(), + sg: CamlG::from(opening_proof.sg), } } } From d8baa81ff4f55a6f8c4628a0f9022bb8afe67ce9 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 20 Aug 2024 13:34:10 +0200 Subject: [PATCH 083/178] Adjust serde_as regression test to 0.4.2 --- utils/src/serialization.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index 352a1484d2..09dddcc92a 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -91,9 +91,10 @@ where #[cfg(test)] mod tests { - use ark_ec::AffineCurve; + use ark_ec::short_weierstrass::SWCurveConfig; use ark_serialize::Write; use mina_curves::pasta::{Pallas, Vesta}; + use mina_curves::pasta::{PallasParameters, VestaParameters}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::io::BufReader; @@ -110,8 +111,8 @@ mod tests { } let data_expected = TestStruct { - pallas: Pallas::prime_subgroup_generator(), - vesta: Vesta::prime_subgroup_generator(), + pallas: PallasParameters::GENERATOR, + vesta: VestaParameters::GENERATOR, }; // reference serialized value From 2f8b4f5e9e4342a2e781b857d57d232533aecda2 Mon Sep 17 00:00:00 2001 From: Mikhail 
Volkhov Date: Tue, 20 Aug 2024 14:15:48 +0200 Subject: [PATCH 084/178] Use compressed serialization --- utils/src/serialization.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index 09dddcc92a..82ca15c8da 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -23,7 +23,7 @@ pub mod ser { S: serde::Serializer, { let mut bytes = vec![]; - val.serialize_uncompressed(&mut bytes) + val.serialize_compressed(&mut bytes) .map_err(serde::ser::Error::custom)?; Bytes::serialize_as(&bytes, serializer) @@ -37,7 +37,7 @@ pub mod ser { D: serde::Deserializer<'de>, { let bytes: Vec = Bytes::deserialize_as(deserializer)?; - T::deserialize_uncompressed(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_compressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } @@ -60,7 +60,7 @@ where S: serde::Serializer, { let mut bytes = vec![]; - val.serialize_uncompressed(&mut bytes) + val.serialize_compressed(&mut bytes) .map_err(serde::ser::Error::custom)?; if serializer.is_human_readable() { @@ -84,7 +84,7 @@ where } else { Bytes::deserialize_as(deserializer)? }; - T::deserialize_uncompressed(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_compressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } From 18233d05f208db69a6549c173ccc430398e56558 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 20 Aug 2024 17:18:42 +0200 Subject: [PATCH 085/178] Fix erroneous implicit Affine->Proj conversions --- kimchi/src/tests/ec.rs | 12 +++++++----- kimchi/src/tests/endomul.rs | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/kimchi/src/tests/ec.rs b/kimchi/src/tests/ec.rs index 2a3daee005..d669c6486f 100644 --- a/kimchi/src/tests/ec.rs +++ b/kimchi/src/tests/ec.rs @@ -40,7 +40,7 @@ fn ec_test() { let rng = &mut StdRng::from_seed([0; 32]); - let ps = { + let ps: Vec = { let p = Other::generator() .into_group() .mul(::ScalarField::rand(rng)); @@ -53,7 +53,7 @@ fn ec_test() { ::Group::normalize_batch(&res) }; - let qs = { + let qs: Vec = { let q = Other::generator() .into_group() .mul(::ScalarField::rand(rng)); @@ -67,7 +67,7 @@ fn ec_test() { }; for &p in ps.iter().take(num_doubles) { - let p2 = p + p; + let p2: Other = (p + p).into(); let (x1, y1) = (p.x, p.y); let x1_squared = x1.square(); // 2 * s * y1 = 3 * x1^2 @@ -95,11 +95,12 @@ fn ec_test() { let p = ps[i]; let q = qs[i]; - let pq = p + q; + let pq: Other = (p + q).into(); let (x1, y1) = (p.x, p.y); let (x2, y2) = (q.x, q.y); // (x2 - x1) * s = y2 - y1 let s = (y2 - y1) / (x2 - x1); + witness[0].push(x1); witness[1].push(y1); witness[2].push(x2); @@ -121,11 +122,12 @@ fn ec_test() { for &p in ps.iter().take(num_infs) { let q = -p; - let p2 = p + p; + let p2: Other = (p + p).into(); let (x1, y1) = (p.x, p.y); let x1_squared = x1.square(); // 2 * s * y1 = -3 * x1^2 let s = (x1_squared.double() + x1_squared) / y1.double(); + witness[0].push(p.x); witness[1].push(p.y); witness[2].push(q.x); diff --git a/kimchi/src/tests/endomul.rs b/kimchi/src/tests/endomul.rs index 4682848627..e34ef14452 100644 --- a/kimchi/src/tests/endomul.rs +++ b/kimchi/src/tests/endomul.rs @@ -72,7 +72,7 @@ fn endomul_test() { let acc0 = { let t = Other::new_unchecked(endo_q * base.x, base.y); let p = t + base; - let acc = p + p; + let acc: Other = (p + p).into(); (acc.x, acc.y) }; From 2c77135be319410a7ce63852f73d3e0f17fe0c97 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 20 Aug 2024 17:47:23 +0200 Subject: [PATCH 
086/178] Fix from_address bug --- signer/src/pubkey.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/signer/src/pubkey.rs b/signer/src/pubkey.rs index e2cfe9277e..d02f67b09f 100644 --- a/signer/src/pubkey.rs +++ b/signer/src/pubkey.rs @@ -166,7 +166,7 @@ impl PubKey { let mut pt = CurvePoint::get_point_from_x_unchecked(x, y_parity).ok_or(PubKeyError::XCoordinate)?; - if pt.y.0.is_even() == y_parity { + if pt.y.into_bigint().is_even() == y_parity { pt.y = pt.y.neg(); } @@ -200,7 +200,7 @@ impl PubKey { /// Serialize public key into corresponding Mina address pub fn into_address(&self) -> String { let point = self.point(); - into_address(&point.x, point.y.0.is_odd()) + into_address(&point.x, point.y.into_bigint().is_odd()) } /// Deserialize public key into bytes From 6866340531e593516a9b88b03bc10185d0000560 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Mon, 26 Aug 2024 15:22:56 +0200 Subject: [PATCH 087/178] Fix ocaml printing: use hex instead of integer --- kimchi/src/circuits/expr.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kimchi/src/circuits/expr.rs b/kimchi/src/circuits/expr.rs index 4f8d11267b..14d3b33b37 100644 --- a/kimchi/src/circuits/expr.rs +++ b/kimchi/src/circuits/expr.rs @@ -2428,7 +2428,10 @@ where JointCombiner => "joint_combiner".to_string(), EndoCoefficient => "endo_coefficient".to_string(), Mds { row, col } => format!("mds({row}, {col})"), - Literal(x) => format!("field(\"0x{}\")", x.into_bigint()), + Literal(x) => format!( + "field(\"{:#066X}\")", + Into::::into(x.into_bigint()) + ), Pow(x, n) => match x.as_ref() { Alpha => format!("alpha_pow({n})"), x => format!("pow({}, {n})", x.ocaml()), From 31a21e71b4b1dbe5933d054a7254b6d78a1dad0b Mon Sep 17 00:00:00 2001 From: ember arlynx Date: Wed, 31 Jan 2024 21:16:10 -0500 Subject: [PATCH 088/178] Use workspace deps (develop/compatible) --- Cargo.lock | 1236 +++++++++++++++++++--------------- Cargo.toml | 71 ++ curves/Cargo.toml | 14 +- groupmap/Cargo.toml | 8 +- hasher/Cargo.toml | 14 +- internal-tracing/Cargo.toml | 8 +- kimchi/Cargo.toml | 98 +-- poly-commitment/Cargo.toml | 48 +- poseidon/Cargo.toml | 31 +- signer/Cargo.toml | 25 +- tools/kimchi-visu/Cargo.toml | 22 +- turshi/Cargo.toml | 10 +- utils/Cargo.toml | 43 +- 13 files changed, 924 insertions(+), 704 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a1ab03646..00f8cfdd84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -19,35 +19,46 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ - "getrandom", + "cfg-if 1.0.0", "once_cell", "version_check", + "zerocopy", ] [[package]] -name = "ahash" -version = "0.8.3" +name = "aho-corasick" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = 
"b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ - "cfg-if 1.0.0", - "once_cell", - "version_check", + "memchr", ] [[package]] -name = "aho-corasick" -version = "1.0.2" +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ - "memchr", + "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "ansi_term" version = "0.12.1" @@ -57,6 +68,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "anstyle" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" + [[package]] name = "ark-algebra-test-templates" version = "0.4.2" @@ -100,7 +117,7 @@ dependencies = [ "ark-std", "derivative", "hashbrown 0.13.2", - "itertools", + "itertools 0.10.5", "num-traits", "rayon", "zeroize", @@ -118,7 +135,7 @@ dependencies = [ "ark-std", "derivative", "digest", - "itertools", + "itertools 0.10.5", "num-bigint", "num-traits", "paste", @@ -133,7 +150,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.29", + "quote", "syn 1.0.109", ] @@ -145,8 +162,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -182,8 +199,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -227,7 +244,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87bf87e6e8b47264efa9bde63d6225c6276a52e05e91bf37eaa8afd0032d6b71" dependencies = [ "askama_shared", - "proc-macro2 1.0.64", + "proc-macro2", "syn 1.0.109", ] @@ -250,8 +267,8 @@ dependencies = [ "nom", "num-traits", "percent-encoding", - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "serde", "syn 1.0.109", "toml", @@ -276,9 +293,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -291,15 +308,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.2" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] 
name = "bcs" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bd3ffe8b19a604421a5d461d4a70346223e535903fbc3067138bddbebddcf77" +checksum = "85b6598a2f5d564fb7855dc6b06fd1c38cff5a72bd8b863a4d021938497b440a" dependencies = [ "serde", "thiserror", @@ -337,9 +354,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitvec" @@ -373,21 +390,24 @@ dependencies = [ [[package]] name = "bs58" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "tinyvec", +] [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "cargo-spec" @@ -414,9 +434,12 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "cfg-if" @@ -430,6 +453,46 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "num-traits", + "serde", + "windows-targets 0.52.0", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "clap" version = "2.34.0" @@ -454,14 +517,33 @@ dependencies = [ "atty", "bitflags 1.3.2", "clap_derive", - "clap_lex", - "indexmap", + "clap_lex 0.2.4", + "indexmap 1.9.3", 
"once_cell", "strsim 0.10.0", "termcolor", "textwrap 0.16.0", ] +[[package]] +name = "clap" +version = "4.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +dependencies = [ + "anstyle", + "clap_lex 0.6.0", +] + [[package]] name = "clap_derive" version = "3.2.25" @@ -470,8 +552,8 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -484,15 +566,20 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "clap_lex" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" + [[package]] name = "colored" -version = "2.0.4" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" dependencies = [ - "is-terminal", "lazy_static", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -517,23 +604,21 @@ dependencies = [ [[package]] name = "const-random" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368a7a772ead6ce7e1de82bfb04c485f3db8ec744f72925af5735e29a22cc18e" +checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" dependencies = [ "const-random-macro", - "proc-macro-hack", ] [[package]] name = "const-random-macro" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ "getrandom", "once_cell", - "proc-macro-hack", "tiny-keccak", ] @@ -546,11 +631,17 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -566,24 +657,24 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ - "atty", + "anes", "cast", - "clap 2.34.0", + "ciborium", + "clap 4.4.18", "criterion-plot", - "csv", - "itertools", - "lazy_static", + "is-terminal", + "itertools 0.10.5", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", "regex", "serde", - "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -592,56 +683,38 @@ 
dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils", + "itertools 0.10.5", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if 1.0.0", "crossbeam-utils", - "memoffset", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -659,27 +732,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "csv" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" -dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" -dependencies = [ - "memchr", -] - [[package]] name = "cty" version = "0.2.2" @@ -692,8 +744,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.20.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc5d6b04b3fd0ba9926f945895de7d806260a2d7431ba82e7edaecb043c4c6b8" +dependencies = [ + "darling_core 0.20.5", + "darling_macro 0.20.5", ] [[package]] @@ -704,31 +766,66 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "strsim 0.10.0", "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.20.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04e48a959bcd5c761246f5d090ebc2fbf7b9cd527a492b07a67510c108f1e7e3" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 2.0.48", +] + [[package]] name = "darling_macro" version = "0.13.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", - "quote 1.0.29", + "darling_core 0.13.4", + "quote", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.20.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1545d67a2149e1d93b7e5c7752dce5a7426eb5d1357ddcfd89336b94444f77" +dependencies = [ + "darling_core 0.20.5", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + [[package]] name = "derivative" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -751,9 +848,9 @@ checksum = "d102f1a462fdcdddce88d6d46c06c074a2d2749b262230333726b06c52bb7585" [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "entities" @@ -762,24 +859,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5320ae4c3782150d900b79807611a59a99fc9a1d61d686faafc24b93fc8d7ca" [[package]] -name = "errno" -version = "0.3.1" +name = "equivalent" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", -] +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "errno" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "cc", "libc", + "windows-sys 0.52.0", ] [[package]] @@ -795,7 +887,7 @@ dependencies = [ "rand", "serde", "serde_json", - "serde_with", + "serde_with 1.14.0", ] [[package]] @@ -810,30 +902,27 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "filetime" -version = "0.2.21" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" +checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.16", - "windows-sys", + "redox_syscall", + "windows-sys 0.52.0", ] [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = 
"46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", @@ -898,9 +987,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if 1.0.0", "libc", @@ -909,9 +998,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.3" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "groupmap" @@ -925,18 +1014,19 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +dependencies = [ + "cfg-if 1.0.0", + "crunchy", +] [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.6", -] [[package]] name = "hashbrown" @@ -944,9 +1034,15 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.3", + "ahash", ] +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" + [[package]] name = "heck" version = "0.4.1" @@ -964,9 +1060,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" [[package]] name = "hex" @@ -977,15 +1073,6 @@ dependencies = [ "serde", ] -[[package]] -name = "home" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" -dependencies = [ - "windows-sys", -] - [[package]] name = "humansize" version = "1.1.1" @@ -998,6 +1085,29 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" +[[package]] +name = "iana-time-zone" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1012,6 +1122,18 @@ checksum = 
"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +dependencies = [ + "equivalent", + "hashbrown 0.14.3", + "serde", ] [[package]] @@ -1034,15 +1156,6 @@ dependencies = [ "libc", ] -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "internal-tracing" version = "0.1.0" @@ -1053,17 +1166,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.2", - "libc", - "windows-sys", -] - [[package]] name = "iovec" version = "0.1.4" @@ -1075,13 +1177,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ - "hermit-abi 0.3.2", - "rustix 0.38.3", - "windows-sys", + "hermit-abi 0.3.4", + "rustix", + "windows-sys 0.52.0", ] [[package]] @@ -1099,17 +1201,26 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" dependencies = [ "wasm-bindgen", ] @@ -1141,7 +1252,7 @@ dependencies = [ "hex", "iai", "internal-tracing", - "itertools", + "itertools 0.12.1", "mina-curves", "mina-poseidon", "num-bigint", @@ -1162,7 +1273,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "serde_with", + "serde_with 3.6.0", "strum", "strum_macros", "thiserror", @@ -1183,7 +1294,7 @@ dependencies = [ "poly-commitment", "serde", "serde_json", - "serde_with", + "serde_with 3.6.0", "tinytemplate", ] @@ -1201,15 +1312,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" 
+checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "line-wrap" @@ -1228,36 +1339,21 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.8" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" -version = "2.5.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "miette" @@ -1285,8 +1381,8 @@ version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b5bc45b761bcf1b5e6e6c4128cd93b84c218721a8d9b894aa0aff4ed180174c" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -1359,7 +1455,7 @@ dependencies = [ "rayon", "serde", "serde_json", - "serde_with", + "serde_with 3.6.0", ] [[package]] @@ -1479,9 +1575,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", @@ -1492,13 +1588,13 @@ dependencies = [ [[package]] name = "num-derive" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 1.0.109", + "proc-macro2", + "quote", + "syn 2.0.48", ] [[package]] @@ -1513,24 +1609,14 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi 0.3.2", - "libc", -] - [[package]] name = "o1-utils" version = "0.1.0" @@ -1551,16 +1637,16 @@ dependencies = [ "rmp-serde", "secp256k1", "serde", - 
"serde_with", + "serde_with 3.6.0", "sha2", "thiserror", ] [[package]] name = "object" -version = "0.31.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -1592,8 +1678,8 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b40aa99a001268b85eb18414ecd190dc21fceaeaf81214ca28233b6feb25a998" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", "synstructure", ] @@ -1617,8 +1703,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1894efdef5c9d83d17932c5f5db16d16eb5c8ae1a625ce44d9d1715e85d9d8dc" dependencies = [ "convert_case", - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -1644,9 +1730,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "onig" @@ -1678,9 +1764,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "os_str_bytes" -version = "6.5.1" +version = "6.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" [[package]] name = "owo-colors" @@ -1690,31 +1776,32 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "paste" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73935e4d55e2abf7f130186537b19e7a4abc886a0252380b59248af473a3fc9" +checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" dependencies = [ + "memchr", "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef623c9bbfa0eedf5a0efba11a5ee83209c326653ca31ff019bec3a95bfff2b" +checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" dependencies = [ "pest", "pest_generator", @@ -1722,22 +1809,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e8cba4ec22bada7fc55ffe51e2deb6a0e0db2d0b7ab0b103acc80d2510c190" +checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 
2.0.25", + "proc-macro2", + "quote", + "syn 2.0.48", ] [[package]] name = "pest_meta" -version = "2.7.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01f71cb40bd8bb94232df14b946909e14660e33fc05db3e50ae2a82d7ea0ca0" +checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" dependencies = [ "once_cell", "pest", @@ -1746,18 +1833,18 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" [[package]] name = "plist" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdc0001cfea3db57a2e24bc0d818e9e20e554b5f97fabb9bc231dc240269ae06" +checksum = "9a4a0cfc5fb21a09dc6af4bf834cf10d4a32fccd9e2ea468c4b1751a097487aa" dependencies = [ "base64", - "indexmap", + "indexmap 1.9.3", "line-wrap", "quick-xml", "serde", @@ -1804,7 +1891,7 @@ dependencies = [ "blake2", "colored", "groupmap", - "itertools", + "itertools 0.12.1", "mina-curves", "mina-poseidon", "o1-utils", @@ -1817,10 +1904,16 @@ dependencies = [ "rayon", "rmp-serde", "serde", - "serde_with", + "serde_with 3.6.0", "thiserror", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -1834,8 +1927,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", "version_check", ] @@ -1846,50 +1939,35 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid 0.1.0", -] - [[package]] name = "proc-macro2" -version = "1.0.64" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", - "bitflags 1.3.2", - "byteorder", + "bit-vec", + "bitflags 2.4.2", "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.6.29", + "regex-syntax 0.8.2", "rusty-fork", 
"tempfile", "unarray", @@ -1897,13 +1975,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90b46295382dc76166cb7cf2bb4a97952464e4b7ed5a43e6cd34e1fec3349ddc" +checksum = "9cf16337405ca084e9c78985114633b6827711d22b9e6ef6c6c0d665eb3f0b6e" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.44", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -1914,29 +1992,20 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b9228215d82c7b61490fec1de287136b5de6f5700f6e58ea9ad61a7964ca51" +checksum = "eff6510e86862b57b210fd8cbe8ed3f0d7d600b9c2863cd4549a2e033c66e956" dependencies = [ "memchr", ] [[package]] name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - -[[package]] -name = "quote" -version = "1.0.29" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ - "proc-macro2 1.0.64", + "proc-macro2", ] [[package]] @@ -1986,9 +2055,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" dependencies = [ "either", "rayon-core", @@ -1996,55 +2065,44 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "regex" -version = "1.9.1" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax 0.7.3", + "regex-syntax 0.8.2", ] [[package]] name = "regex-automata" -version = "0.3.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +checksum = 
"5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.3", + "regex-syntax 0.8.2", ] [[package]] @@ -2055,15 +2113,15 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "rmp" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44519172358fd6d58656c86ab8e7fbc9e1490c3e8f14d35ed78ca0dd07403c9f" +checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" dependencies = [ "byteorder", "num-traits", @@ -2072,9 +2130,9 @@ dependencies = [ [[package]] name = "rmp-serde" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5b13be192e0220b8afb7222aa5813cb62cc269ebb5cac346ca6487681d2913e" +checksum = "bffea85eea980d8a74453e5d02a8d93028f3c34725de143085a844ebe953258a" dependencies = [ "byteorder", "rmp", @@ -2089,45 +2147,31 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.37.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys", -] - -[[package]] -name = "rustix" -version = "0.38.3" +version = "0.38.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4" +checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.2", "errno", "libc", - "linux-raw-sys 0.4.3", - "windows-sys", + "linux-raw-sys", + "windows-sys 0.52.0", ] [[package]] name = "rustversion" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rusty-fork" @@ -2143,9 +2187,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "safemem" @@ -2162,71 +2206,55 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - [[package]] name = "secp256k1" -version = "0.24.3" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" +checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" dependencies = [ "cc", ] [[package]] name = "semver" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.171" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" -version = "1.0.171" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 2.0.25", + "proc-macro2", + "quote", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.100" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", @@ -2240,7 +2268,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" dependencies = [ "serde", - "serde_with_macros", + "serde_with_macros 1.5.2", +] + +[[package]] +name = "serde_with" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b0ed1662c5a68664f45b76d18deb0e234aff37207086803165c961eb695e981" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.2.2", + "serde", + "serde_json", + "serde_with_macros 3.6.0", + "time", ] [[package]] @@ -2249,17 +2294,29 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", - "proc-macro2 1.0.64", - "quote 1.0.29", + "darling 0.13.4", + "proc-macro2", + "quote", "syn 1.0.109", ] +[[package]] +name = "serde_with_macros" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "568577ff0ef47b879f736cd66740e022f3672788cdf002a05a4e609ea5a6fb15" +dependencies = [ + "darling 0.20.5", + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -2274,18 +2331,18 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smawk" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043" +checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "static_assertions" @@ -2307,21 +2364,21 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.24.1" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" [[package]] name = "strum_macros" -version = "0.24.3" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" dependencies = [ "heck", - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "rustversion", - "syn 1.0.109", + "syn 2.0.48", ] [[package]] @@ -2358,36 +2415,25 @@ dependencies = [ "atty", ] -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid 0.1.0", -] - [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.25" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "unicode-ident", ] @@ -2397,10 +2443,10 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", + "proc-macro2", + "quote", "syn 1.0.109", - "unicode-xid 0.2.4", + "unicode-xid", ] [[package]] @@ -2434,23 +2480,22 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.6.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies 
= [ - "autocfg", "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.3.5", - "rustix 0.37.23", - "windows-sys", + "redox_syscall", + "rustix", + "windows-sys 0.52.0", ] [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -2493,31 +2538,33 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.43" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.43" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 2.0.25", + "proc-macro2", + "quote", + "syn 2.0.48", ] [[package]] name = "time" -version = "0.3.23" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ + "deranged", "itoa", + "powerfmt", "serde", "time-core", "time-macros", @@ -2525,15 +2572,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -2557,13 +2604,28 @@ dependencies = [ "serde_json", ] +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "toml" version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "indexmap", + "indexmap 1.9.3", "serde", ] @@ -2586,9 +2648,9 @@ checksum = "a9b2228007eba4120145f785df0f6c92ea538f5a3635a612ecf4e334c8c1446d" [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" 
@@ -2604,28 +2666,24 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ "version_check", ] [[package]] name = "unicode-ident" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-linebreak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5faade31a542b8b35855fff6e8def199853b2da8da256da52f52f1316ee3137" -dependencies = [ - "hashbrown 0.12.3", - "regex", -] +checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-segmentation" @@ -2635,15 +2693,9 @@ checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "unicode-xid" -version = "0.1.0" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -2680,9 +2732,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", @@ -2696,9 +2748,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -2706,53 +2758,53 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 2.0.25", + "proc-macro2", + "quote", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" dependencies = [ - "quote 1.0.29", + "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" 
+version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 2.0.25", + "proc-macro2", + "quote", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" dependencies = [ "js-sys", "wasm-bindgen", @@ -2788,9 +2840,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi 0.3.9", ] @@ -2801,71 +2853,146 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + 
"windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "ws2_32-sys" @@ -2888,12 +3015,9 @@ dependencies = [ [[package]] name = "xdg" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688597db5a750e9cad4511cb94729a078e274308099a0382b5b8203bbc767fee" -dependencies = [ - "home", -] +checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" [[package]] name = "yaml-rust" @@ -2904,11 +3028,31 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -2919,7 +3063,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.64", - "quote 1.0.29", - "syn 2.0.25", + "proc-macro2", + "quote", + "syn 2.0.48", ] diff --git a/Cargo.toml b/Cargo.toml index 3d0d91c9cb..c7c997d03b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,77 @@ members = [ ] resolver = "2" +[workspace.dependencies] +ark-algebra-test-templates = "0.4.2" +ark-bn254 = { version = "0.4.0" } +ark-ec = { version = "0.4.2", features = ["parallel"] } +ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } +ark-poly = { version = "0.4.2", features = ["parallel"] } +ark-serialize = "0.4.2" +ark-std = "0.4.0" +ark-test-curves = "0.4.2" +base64 = "0.21.5" +bcs = "0.1.3" +bitvec = "1.0.0" +blake2 = "0.10.0" +bs58 = "0.5.0" +clap = "4.4.6" +colored = "2.0.0" +command-fds = "0.3" +convert_case = "0.6.0" +criterion = "0.5" +elf = "0.7.2" +env_logger = "0.11.1" +hex = { version = "0.4", features = ["serde"] } +iai = "0.1" +itertools = "0.12.1" +libc = "0.2.62" +libflate = "2" +log = "0.4.20" +num-bigint = { version = "0.4.3", features = ["rand", "serde"] } +num-derive = "0.4" +num-integer = "0.1.45" +num-traits = "0.2" +ocaml = { version = "0.22.2" } +ocaml-gen = { version = "0.1.5" } +once_cell = "1.10.0" +os_pipe = { version = "1.1.4", features = ["io_safety"] } +proc-macro2 = "1.0.43" +proptest = "1.0.0" +proptest-derive = "0.4.0" +quote = "1.0.21" +rand = { version = "0.8.5", features = ["std_rng"] } +rand_chacha = { version = "0.3.0" } +rand_core = { version = "0.6.3" } +rayon = "1.5.0" +regex = "1.10.2" +rmp-serde = "1.1.1" +secp256k1 = "0.28.2" +serde = { version = "1.0.130", features = ["derive"] } +serde_json = "1.0.79" +serde_with = "3.6.0" +sha2 = "0.10.2" +strum = "0.26.1" +strum_macros = "0.26.1" +syn = { version = "1.0.109", features = ["full"] } +thiserror = "1.0.30" +tinytemplate = "1.1" +wasm-bindgen = "=0.2.90" + +groupmap = { path = "./groupmap", version = "0.1.0" } +internal-tracing = { 
path = "./internal-tracing", version = "0.1.0" } +kimchi = { path = "./kimchi", version = "0.1.0", features = ["bn254"] } +kimchi-visu = { path = "./tools/kimchi-visu", version = "0.1.0" } +mina-curves = { path = "./curves", version = "0.1.0" } +mina-hasher = { path = "./hasher", version = "0.1.0" } +mina-poseidon = { path = "./poseidon", version = "0.1.0" } +o1-utils = { path = "./utils", version = "0.1.0" } +optimism = { path = "./optimism", version = "0.1.0" } +poly-commitment = { path = "./poly-commitment", version = "0.1.0" } +signer = { path = "./signer", version = "0.1.0" } +turshi = { path = "./turshi", version = "0.1.0" } +utils = { path = "./utils", version = "0.1.0" } + [profile.release] lto = true panic = 'abort' diff --git a/curves/Cargo.toml b/curves/Cargo.toml index 1015d8e47d..2116b66c86 100644 --- a/curves/Cargo.toml +++ b/curves/Cargo.toml @@ -10,12 +10,12 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ec = { version = "0.4.2", features = ["parallel"] } -ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } +ark-ec.workspace = true +ark-ff.workspace = true [dev-dependencies] -rand = { version = "0.8.5", default-features = false } -ark-test-curves = "0.4.2" -ark-algebra-test-templates = "0.4.2" -ark-serialize="0.4.2" -ark-std = "0.4.0" +rand.workspace = true +ark-test-curves.workspace = true +ark-algebra-test-templates.workspace = true +ark-serialize.workspace = true +ark-std.workspace = true diff --git a/groupmap/Cargo.toml b/groupmap/Cargo.toml index d639095573..4178d0fbbf 100644 --- a/groupmap/Cargo.toml +++ b/groupmap/Cargo.toml @@ -13,9 +13,9 @@ license = "Apache-2.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -rand = "0.8.4" +ark-ff.workspace = true +ark-ec.workspace = true +rand.workspace = true [dev-dependencies] -mina-curves = { path = "../curves", version = "0.1.0" } +mina-curves.workspace = true \ No newline at end of file diff --git a/hasher/Cargo.toml b/hasher/Cargo.toml index 4ce6257214..fa715d46bd 100644 --- a/hasher/Cargo.toml +++ b/hasher/Cargo.toml @@ -13,14 +13,14 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -mina-poseidon = { path = "../poseidon", version = "0.1.0" } -mina-curves = { path = "../curves", version = "0.1.0" } -o1-utils = { path = "../utils", version = "0.1.0" } +mina-poseidon.workspace = true +mina-curves.workspace = true +o1-utils.workspace = true -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ff.workspace = true -bitvec = "1.0.0" -serde = { version = "1.0", features = ["derive"] } +bitvec.workspace = true +serde.workspace = true [dev-dependencies] -serde_json = { version = "1.0" } +serde_json.workspace = true \ No newline at end of file diff --git a/internal-tracing/Cargo.toml b/internal-tracing/Cargo.toml index b0539c7394..5d8355386b 100644 --- a/internal-tracing/Cargo.toml +++ b/internal-tracing/Cargo.toml @@ -5,10 +5,10 @@ edition = "2021" license = "Apache-2.0" [dependencies] -serde = { version = "*", features = ["derive"], optional = true } -serde_json = { version = "*", optional = true } -ocaml = { version = "0.22.2", optional = true } -ocaml-gen = { version = "0.1.5", optional = true } +serde = { workspace = true, features = ["derive"], optional = true } +serde_json = { workspace = true, optional = true } +ocaml = { workspace = true, optional = true } +ocaml-gen = { workspace 
= true, optional = true } [features] enabled = [ "serde", "serde_json" ] diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml index fec0d61716..bf3ab6c136 100644 --- a/kimchi/Cargo.toml +++ b/kimchi/Cargo.toml @@ -11,60 +11,62 @@ license = "Apache-2.0" [lib] path = "src/lib.rs" -bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options) +bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options) [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -ark-poly = { version = "0.4.2", features = [ "parallel" ] } -ark-serialize = "0.4.2" -ark-bn254 = { version = "0.4.0", optional = true } -blake2 = "0.10.0" -num-bigint = { version = "0.4.3", features = ["rand", "serde"]} -num-derive = "0.3" -num-integer = "0.1.45" -num-traits = "0.2" -itertools = "0.10.3" -rand = { version = "0.8.0", features = ["std_rng"] } -rand_core = "0.6.3" -rayon = "1.5.0" -rmp-serde = "1.1.1" -serde = "1.0.130" -serde_with = "1.10.0" -thiserror = "1.0.30" -once_cell = "1.10.0" -hex = "0.4" -strum = "0.24.0" -strum_macros = "0.24.0" +ark-ff.workspace = true +ark-ec.workspace = true +ark-poly.workspace = true +ark-serialize.workspace = true +ark-bn254 = { workspace = true, optional = true } +blake2.workspace = true +num-bigint.workspace = true +num-derive.workspace = true +num-integer.workspace = true +num-traits.workspace = true +itertools.workspace = true +rand = { workspace = true, features = ["std_rng"] } +rand_core.workspace = true +rayon.workspace = true +rmp-serde.workspace = true +serde.workspace = true +serde_with.workspace = true +thiserror.workspace = true +once_cell.workspace = true +hex.workspace = true +strum.workspace = true +strum_macros.workspace = true + # TODO: audit this disjoint-set = "0.0.2" -turshi = { path = "../turshi", version = "0.1.0" } -poly-commitment = { path = "../poly-commitment", version = "0.1.0" } -groupmap = { path = "../groupmap", version = "0.1.0" } -mina-curves = { path = "../curves", version = "0.1.0" } -o1-utils = { path = "../utils", version = "0.1.0" } -mina-poseidon = { path = "../poseidon", version = "0.1.0" } -ocaml = { version = "0.22.2", optional = true } -ocaml-gen = { version = "0.1.5", optional = true } +turshi.workspace = true +poly-commitment.workspace = true +groupmap.workspace = true +mina-curves.workspace = true +o1-utils.workspace = true +mina-poseidon.workspace = true + +ocaml = { workspace = true, optional = true } +ocaml-gen = { workspace = true, optional = true } -wasm-bindgen = { version = "=0.2.87", optional = true } +wasm-bindgen = { workspace = true, optional = true } -internal-tracing = { path = "../internal-tracing", version = "0.1.0" } +internal-tracing.workspace = true [dev-dependencies] -proptest = "1.0.0" -proptest-derive = "0.3.0" -colored = "2.0.0" -serde_json = { version = "1.0" } -num-bigint = { version = "0.4.3", features = ["rand"] } -secp256k1 = "0.24.2" +proptest.workspace = true +proptest-derive.workspace = true +colored.workspace = true +serde_json.workspace = true +num-bigint.workspace = true +secp256k1.workspace = true # benchmarks -criterion = "0.3" -iai = "0.1" +criterion.workspace = true +iai.workspace = true [[bench]] name = "proof_criterion" @@ -80,8 +82,14 @@ harness = false [features] default = [] -internal_tracing = [ 
"internal-tracing/enabled" ] -ocaml_types = [ "ocaml", "ocaml-gen", "poly-commitment/ocaml_types", "mina-poseidon/ocaml_types", "internal-tracing/ocaml_types" ] -bn254 = [ "ark-bn254" ] -wasm_types = [ "wasm-bindgen" ] +internal_tracing = ["internal-tracing/enabled"] +ocaml_types = [ + "ocaml", + "ocaml-gen", + "poly-commitment/ocaml_types", + "mina-poseidon/ocaml_types", + "internal-tracing/ocaml_types", +] +bn254 = ["ark-bn254"] +wasm_types = ["wasm-bindgen"] check_feature_flags = [] diff --git a/poly-commitment/Cargo.toml b/poly-commitment/Cargo.toml index fbd169ae2e..3fc226584e 100644 --- a/poly-commitment/Cargo.toml +++ b/poly-commitment/Cargo.toml @@ -10,34 +10,34 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -ark-poly = { version = "0.4.2", features = [ "parallel" ] } -ark-serialize = "0.4.2" +ark-ff.workspace = true +ark-ec.workspace = true +ark-poly.workspace = true +ark-serialize.workspace = true -blake2 = "0.10.0" -itertools = "0.10.3" -once_cell = "1.10.0" -rand = "0.8.0" -rand_core = { version = "0.6.0" } -rayon = { version = "1" } -rmp-serde = "1.1.1" -serde = "1.0.130" -serde_with = "1.10.0" -thiserror = "1.0.31" +blake2.workspace = true +itertools.workspace = true +once_cell.workspace = true +rand.workspace = true +rand_core.workspace = true +rayon.workspace = true +rmp-serde.workspace = true +serde.workspace = true +serde_with.workspace = true +thiserror.workspace = true -groupmap = { path = "../groupmap", version = "0.1.0" } -mina-curves = { path = "../curves", version = "0.1.0" } -o1-utils = { path = "../utils", version = "0.1.0" } -mina-poseidon = { path = "../poseidon", version = "0.1.0" } +groupmap.workspace = true +mina-curves.workspace = true +o1-utils.workspace = true +mina-poseidon.workspace = true -ocaml = { version = "0.22.2", optional = true } -ocaml-gen = { version = "0.1.5", optional = true } +ocaml = { workspace = true, optional = true } +ocaml-gen = { workspace = true, optional = true } [dev-dependencies] -colored = "2.0.0" -rand_chacha = { version = "0.3.0" } -ark-bn254 = { version = "0.4.0" } +colored.workspace = true +rand_chacha.workspace = true +ark-bn254.workspace = true [features] -ocaml_types = [ "ocaml", "ocaml-gen" ] +ocaml_types = ["ocaml", "ocaml-gen"] diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index 9256552934..a2157e04f6 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -13,27 +13,26 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -ark-poly = { version = "0.4.2", features = [ "parallel" ] } -ark-serialize = { version = "0.4.2", features = ["derive"]} -rand = "0.8.0" -rayon = "1" -serde = { version = "1.0", features = ["derive"] } -serde_with = "1.10.0" -once_cell = "1.10.0" +ark-ff.workspace = true +ark-ec.workspace = true +ark-poly.workspace = true +ark-serialize.workspace = true +rand.workspace = true +rayon.workspace = true +serde.workspace = true +serde_with.workspace = true +once_cell.workspace = true -mina-curves = { path = "../curves", version = "0.1.0" } -o1-utils = { path = "../utils", version = "0.1.0" } +mina-curves.workspace = true +o1-utils.workspace = true # for ocaml -ocaml = { version = "0.22.2", optional = true } -ocaml-gen = { version = "0.1.5", optional = true } +ocaml = { workspace = true, optional = true } +ocaml-gen = { 
workspace = true, optional = true } [dev-dependencies] -serde_json = "1.0" -hex = "0.4" -ark-serialize = "0.4.2" +serde_json.workspace = true +hex.workspace = true [features] default = [] diff --git a/signer/Cargo.toml b/signer/Cargo.toml index 059d830a28..db3dc3e985 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -13,18 +13,17 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -mina-curves = { path = "../curves", version = "0.1.0" } -mina-hasher = { path = "../hasher", version = "0.1.0" } -o1-utils = { path = "../utils", version = "0.1.0" } +mina-curves.workspace = true +mina-hasher.workspace = true +o1-utils.workspace = true -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } - -rand = "0.8.0" -blake2 = "0.10.0" -hex = "0.4" -bitvec = "1.0.0" -sha2 = "0.10.0" -bs58 = "0.4.0" -thiserror = "1.0.30" +ark-ec.workspace = true +ark-ff.workspace = true +rand.workspace = true +blake2.workspace = true +hex.workspace = true +bitvec.workspace = true +sha2.workspace = true +bs58.workspace = true +thiserror.workspace = true diff --git a/tools/kimchi-visu/Cargo.toml b/tools/kimchi-visu/Cargo.toml index 89c5ec76ed..01982d3794 100644 --- a/tools/kimchi-visu/Cargo.toml +++ b/tools/kimchi-visu/Cargo.toml @@ -13,15 +13,15 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ec = "0.4.2" -ark-ff = "0.4.2" -serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" -serde_with = "1.10.0" -tinytemplate = "1.1" +ark-ec.workspace = true +ark-ff.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true +tinytemplate.workspace = true -mina-curves = { path = "../../curves", version = "0.1.0" } -kimchi = { path = "../../kimchi", version = "0.1.0" } -o1-utils = { path = "../../utils", version = "0.1.0" } -mina-poseidon = { path = "../../poseidon", version = "0.1.0" } -poly-commitment = { path = "../../poly-commitment", version = "0.1.0" } +mina-curves.workspace = true +kimchi.workspace = true +o1-utils.workspace = true +mina-poseidon.workspace = true +poly-commitment.workspace = true \ No newline at end of file diff --git a/turshi/Cargo.toml b/turshi/Cargo.toml index d725b6e759..64bbebf2d6 100644 --- a/turshi/Cargo.toml +++ b/turshi/Cargo.toml @@ -13,12 +13,12 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -hex = "0.4" +ark-ff.workspace = true +hex.workspace = true -o1-utils = { path = "../utils", version = "0.1.0" } +o1-utils.workspace = true [dev-dependencies] -ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-ec.workspace = true -mina-curves = { path = "../curves", version = "0.1.0" } +mina-curves.workspace = true \ No newline at end of file diff --git a/utils/Cargo.toml b/utils/Cargo.toml index b5af2aff8d..d43f01e5ca 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -10,27 +10,26 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -ark-poly = { version = "0.4.2", features = [ "parallel" ] } -ark-serialize = "0.4.2" -bcs = "0.1.3" -rayon = "1.3.0" -serde = "1.0.130" -serde_with = "1.10.0" -hex = { version = "0.4", features = ["serde"] } -num-bigint = { version = "0.4.3", features = ["rand"]} -num-integer = "0.1.45" -num-traits = "0.2" -rmp-serde = "1.1.1" -sha2 = "0.10.2" -thiserror = "1.0.30" -rand = "0.8.0" -rand_core = 
"0.6.3" -mina-curves = { path = "../curves", version = "0.1.0" } +ark-ec.workspace = true +ark-ff.workspace = true +ark-poly.workspace = true +ark-serialize.workspace = true +bcs.workspace = true +rayon.workspace = true +serde.workspace = true +serde_with.workspace = true +hex.workspace = true +num-bigint.workspace = true +num-integer.workspace = true +num-traits.workspace = true +rmp-serde.workspace = true +sha2.workspace = true +thiserror.workspace = true +rand.workspace = true +rand_core.workspace = true +mina-curves.workspace = true [dev-dependencies] -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -mina-curves = { path = "../curves", version = "0.1.0" } -num-bigint = { version = "0.4.3", features = ["rand"] } -secp256k1 = "0.24.2" +ark-ec.workspace = true +num-bigint.workspace = true +secp256k1.workspace = true From ea044a61a1796c7e8274848e5a87e59734434b33 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 17 Sep 2024 15:16:50 +0100 Subject: [PATCH 089/178] Downgrade plist --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00f8cfdd84..429532881d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1839,9 +1839,9 @@ checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" [[package]] name = "plist" -version = "1.5.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a4a0cfc5fb21a09dc6af4bf834cf10d4a32fccd9e2ea468c4b1751a097487aa" +checksum = "bdc0001cfea3db57a2e24bc0d818e9e20e554b5f97fabb9bc231dc240269ae06" dependencies = [ "base64", "indexmap 1.9.3", @@ -1992,9 +1992,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.30.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff6510e86862b57b210fd8cbe8ed3f0d7d600b9c2863cd4549a2e033c66e956" +checksum = "81b9228215d82c7b61490fec1de287136b5de6f5700f6e58ea9ad61a7964ca51" dependencies = [ "memchr", ] From 42c7fcd76014c3affb922531ae54e705bf628c79 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 17 Sep 2024 15:48:07 +0100 Subject: [PATCH 090/178] Allow time=0.3.31 --- book/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/Cargo.toml b/book/Cargo.toml index 2a0d222bc7..0e13d0a0f1 100644 --- a/book/Cargo.toml +++ b/book/Cargo.toml @@ -11,5 +11,5 @@ license = "Apache-2.0" [build-dependencies] cargo-spec = { version = "0.5.0" } -time = { version = "~0.3.23" } # This crate is a known bad-actor for breaking rust version support. +time = { version = "~0.3.31" } # This crate is a known bad-actor for breaking rust version support. plist = { version = "~1.5.0" } # This crate improperly constrains its bad-actor dependency (`time`). 
From 57c7ba4fedafebd12c339bf5a06d46763e573f32 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 17 Sep 2024 16:24:59 +0100 Subject: [PATCH 091/178] Update circuit-construction --- circuit-construction/Cargo.toml | 52 ++++++++++++++++----------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/circuit-construction/Cargo.toml b/circuit-construction/Cargo.toml index 3e60cb706c..2141e692de 100644 --- a/circuit-construction/Cargo.toml +++ b/circuit-construction/Cargo.toml @@ -14,34 +14,34 @@ path = "src/lib.rs" bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options) [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.4.2", features = [ "parallel" ] } -ark-poly = { version = "0.4.2", features = [ "parallel" ] } -ark-serialize = "0.4.2" -blake2 = "0.10.0" -num-derive = "0.3" -num-traits = "0.2" -itertools = "0.10.3" -rand = "0.8.0" -rand_core = "0.6.3" -rayon = "1.5.0" -rmp-serde = "1.0.0" -serde = "1.0.130" -serde_with = "1.10.0" -thiserror = "1.0.30" +ark-ff.workspace = true +ark-ec.workspace = true +ark-poly.workspace = true +ark-serialize.workspace = true +blake2.workspace = true +num-derive.workspace = true +num-traits.workspace = true +itertools.workspace = true +rand.workspace = true +rand_core.workspace = true +rayon.workspace = true +rmp-serde.workspace = true +serde.workspace = true +serde_with.workspace = true +thiserror.workspace = true -poly-commitment = { path = "../poly-commitment", version = "0.1.0" } -groupmap = { path = "../groupmap", version = "0.1.0" } -mina-curves = { path = "../curves", version = "0.1.0" } -o1-utils = { path = "../utils", version = "0.1.0" } -mina-poseidon = { path = "../poseidon", version = "0.1.0" } -kimchi = { path = "../kimchi", version = "0.1.0" } +poly-commitment.workspace = true +groupmap.workspace = true +mina-curves.workspace = true +o1-utils.workspace = true +mina-poseidon.workspace = true +kimchi.workspace = true [dev-dependencies] -proptest = "1.0.0" -proptest-derive = "0.3.0" -colored = "2.0.0" +proptest.workspace = true +proptest-derive.workspace = true +colored.workspace = true # benchmarks -criterion = "0.3" -iai = "0.1" +criterion.workspace = true +iai.workspace = true From dce148ae7be41142795b6f072a71615f4be85171 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 17 Sep 2024 16:26:42 +0100 Subject: [PATCH 092/178] Update poseidon/export_test_vectors --- poseidon/export_test_vectors/Cargo.toml | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/poseidon/export_test_vectors/Cargo.toml b/poseidon/export_test_vectors/Cargo.toml index 80baaa21f8..0c5ead5902 100644 --- a/poseidon/export_test_vectors/Cargo.toml +++ b/poseidon/export_test_vectors/Cargo.toml @@ -10,15 +10,14 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } -num-bigint = { version = "0.4.0" } -serde_json = { version = "1.0" } -hex = { version = "0.4" } -ark-serialize = { version = "0.4.2" } -rand = "0.8.0" -serde = { version = "1.0", features = ["derive"] } -serde_with = "1.10.0" - -mina-curves = { path = "../../curves", version = "0.1.0" } -mina-poseidon = { path = "../../poseidon", version = "0.1.0" } +ark-ff.workspace = true +num-bigint.workspace = true +serde_json.workspace = true +hex.workspace = true +ark-serialize.workspace = true +rand.workspace = true 
+serde.workspace = true +serde_with.workspace = true +mina-curves.workspace = true +mina-poseidon.workspace = true From 07f344db38f2faf66ae8b69e936ccb62e6d10e9e Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 17 Sep 2024 16:27:41 +0100 Subject: [PATCH 093/178] Update cargo lock --- Cargo.lock | 79 ++++++++---------------------------------------------- 1 file changed, 11 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 429532881d..11336c75b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -738,38 +738,14 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" -[[package]] -name = "darling" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" -dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - [[package]] name = "darling" version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5d6b04b3fd0ba9926f945895de7d806260a2d7431ba82e7edaecb043c4c6b8" dependencies = [ - "darling_core 0.20.5", - "darling_macro 0.20.5", -] - -[[package]] -name = "darling_core" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", + "darling_core", + "darling_macro", ] [[package]] @@ -786,24 +762,13 @@ dependencies = [ "syn 2.0.48", ] -[[package]] -name = "darling_macro" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" -dependencies = [ - "darling_core 0.13.4", - "quote", - "syn 1.0.109", -] - [[package]] name = "darling_macro" version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d1545d67a2149e1d93b7e5c7752dce5a7426eb5d1357ddcfd89336b94444f77" dependencies = [ - "darling_core 0.20.5", + "darling_core", "quote", "syn 2.0.48", ] @@ -887,7 +852,7 @@ dependencies = [ "rand", "serde", "serde_json", - "serde_with 1.14.0", + "serde_with", ] [[package]] @@ -1273,7 +1238,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", "strum", "strum_macros", "thiserror", @@ -1294,7 +1259,7 @@ dependencies = [ "poly-commitment", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", "tinytemplate", ] @@ -1455,7 +1420,7 @@ dependencies = [ "rayon", "serde", "serde_json", - "serde_with 3.6.0", + "serde_with", ] [[package]] @@ -1637,7 +1602,7 @@ dependencies = [ "rmp-serde", "secp256k1", "serde", - "serde_with 3.6.0", + "serde_with", "sha2", "thiserror", ] @@ -1904,7 +1869,7 @@ dependencies = [ "rayon", "rmp-serde", "serde", - "serde_with 3.6.0", + "serde_with", "thiserror", ] @@ -2261,16 +2226,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros 1.5.2", -] - [[package]] name = "serde_with" version = "3.6.0" @@ -2284,29 +2239,17 @@ dependencies = [ "indexmap 2.2.2", "serde", "serde_json", - "serde_with_macros 3.6.0", + "serde_with_macros", "time", ] -[[package]] -name = "serde_with_macros" 
-version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" -dependencies = [ - "darling 0.13.4", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "serde_with_macros" version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "568577ff0ef47b879f736cd66740e022f3672788cdf002a05a4e609ea5a6fb15" dependencies = [ - "darling 0.20.5", + "darling", "proc-macro2", "quote", "syn 2.0.48", From 37fa5fbb7866c7155b0c1495052b0c50f8e90491 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 17 Sep 2024 16:41:45 +0100 Subject: [PATCH 094/178] Downgrate itertools to 0.10.5 --- Cargo.lock | 21 ++++++--------------- Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11336c75b5..8aae95b222 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -117,7 +117,7 @@ dependencies = [ "ark-std", "derivative", "hashbrown 0.13.2", - "itertools 0.10.5", + "itertools", "num-traits", "rayon", "zeroize", @@ -135,7 +135,7 @@ dependencies = [ "ark-std", "derivative", "digest", - "itertools 0.10.5", + "itertools", "num-bigint", "num-traits", "paste", @@ -667,7 +667,7 @@ dependencies = [ "clap 4.4.18", "criterion-plot", "is-terminal", - "itertools 0.10.5", + "itertools", "num-traits", "once_cell", "oorandom", @@ -688,7 +688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools 0.10.5", + "itertools", ] [[package]] @@ -1166,15 +1166,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - [[package]] name = "itoa" version = "1.0.10" @@ -1217,7 +1208,7 @@ dependencies = [ "hex", "iai", "internal-tracing", - "itertools 0.12.1", + "itertools", "mina-curves", "mina-poseidon", "num-bigint", @@ -1856,7 +1847,7 @@ dependencies = [ "blake2", "colored", "groupmap", - "itertools 0.12.1", + "itertools", "mina-curves", "mina-poseidon", "o1-utils", diff --git a/Cargo.toml b/Cargo.toml index c7c997d03b..fc4717ea7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,7 @@ elf = "0.7.2" env_logger = "0.11.1" hex = { version = "0.4", features = ["serde"] } iai = "0.1" -itertools = "0.12.1" +itertools = "0.10.5" libc = "0.2.62" libflate = "2" log = "0.4.20" From bb9412aa18d504f5011e1dd3eeaaa5c5efb53f23 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Tue, 17 Sep 2024 18:36:01 +0100 Subject: [PATCH 095/178] Downgrade criterion --- Cargo.lock | 142 +++++++++++++++++------------------------------------ Cargo.toml | 2 +- 2 files changed, 45 insertions(+), 99 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8aae95b222..76c502a81d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -53,12 +53,6 @@ dependencies = [ "libc", ] -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" - [[package]] name = "ansi_term" version = "0.12.1" @@ -68,12 +62,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "anstyle" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" - [[package]] name = "ark-algebra-test-templates" version = "0.4.2" @@ -280,7 +268,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi 0.1.19", + "hermit-abi", "libc", "winapi 0.3.9", ] @@ -466,33 +454,6 @@ dependencies = [ "windows-targets 0.52.0", ] -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -dependencies = [ - "ciborium-io", - "half", -] - [[package]] name = "clap" version = "2.34.0" @@ -517,7 +478,7 @@ dependencies = [ "atty", "bitflags 1.3.2", "clap_derive", - "clap_lex 0.2.4", + "clap_lex", "indexmap 1.9.3", "once_cell", "strsim 0.10.0", @@ -525,25 +486,6 @@ dependencies = [ "textwrap 0.16.0", ] -[[package]] -name = "clap" -version = "4.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" -dependencies = [ - "clap_builder", -] - -[[package]] -name = "clap_builder" -version = "4.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" -dependencies = [ - "anstyle", - "clap_lex 0.6.0", -] - [[package]] name = "clap_derive" version = "3.2.25" @@ -566,12 +508,6 @@ dependencies = [ "os_str_bytes", ] -[[package]] -name = "clap_lex" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" - [[package]] name = "colored" version = "2.1.0" @@ -657,24 +593,24 @@ dependencies = [ [[package]] name = "criterion" -version = "0.5.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ - "anes", + "atty", "cast", - "ciborium", - "clap 4.4.18", + "clap 2.34.0", "criterion-plot", - "is-terminal", + "csv", "itertools", + "lazy_static", "num-traits", - "once_cell", "oorandom", "plotters", "rayon", "regex", "serde", + "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -683,9 +619,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.5.0" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", @@ -732,6 +668,27 @@ dependencies = [ "typenum", ] +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies 
= [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + [[package]] name = "cty" version = "0.2.2" @@ -979,13 +936,9 @@ dependencies = [ [[package]] name = "half" -version = "2.3.1" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" -dependencies = [ - "cfg-if 1.0.0", - "crunchy", -] +checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "hashbrown" @@ -1023,12 +976,6 @@ dependencies = [ "libc", ] -[[package]] -name = "hermit-abi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" - [[package]] name = "hex" version = "0.4.3" @@ -1140,17 +1087,6 @@ dependencies = [ "libc", ] -[[package]] -name = "is-terminal" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" -dependencies = [ - "hermit-abi 0.3.4", - "rustix", - "windows-sys 0.52.0", -] - [[package]] name = "is_ci" version = "1.1.1" @@ -2195,6 +2131,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.196" diff --git a/Cargo.toml b/Cargo.toml index fc4717ea7e..5bc3d507bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ clap = "4.4.6" colored = "2.0.0" command-fds = "0.3" convert_case = "0.6.0" -criterion = "0.5" +criterion = "0.3.6" elf = "0.7.2" env_logger = "0.11.1" hex = { version = "0.4", features = ["serde"] } From e1c6f819a56c38f036a383f4e5b8d7ffe4fcb6bf Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 28 Aug 2024 15:49:15 +0300 Subject: [PATCH 096/178] Add the test coverage data gathering and reports generation. 
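The coverage flow wired up below is LLVM source-based coverage: tests are compiled with `-Cinstrument-coverage`, every test process dumps a `.profraw` profile, and grcov aggregates those profiles against the test binaries into HTML/lcov reports for Codecov. A sketch of the manual equivalent of the Makefile targets added in this patch (paths mirror the Makefile variables):

```shell
# Build and run the tests with coverage instrumentation enabled.
export CARGO_INCREMENTAL=0
export RUSTFLAGS='-Cinstrument-coverage'
export LLVM_PROFILE_FILE="$PWD/target/profraw/cargo-test-%p-%m.profraw"
cargo nextest run --all-features --release

# Aggregate the raw profiles into an lcov report for upload.
grcov ./target/profraw --binary-path ./target/release/deps/ -s . \
  -t lcov --branch --ignore-not-existing --ignore "**/tests/**" \
  -o ./target/coverage/lcov.info
```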
--- .github/actions/codecov-shared/action.yml | 28 +++++ .github/workflows/rust.yml | 132 +++++++++++++++------- CONTRIBUTING.md | 35 ++++-- Makefile | 99 ++++++++++++++++ README.md | 63 ++++++++--- 5 files changed, 291 insertions(+), 66 deletions(-) create mode 100644 .github/actions/codecov-shared/action.yml create mode 100644 Makefile diff --git a/.github/actions/codecov-shared/action.yml b/.github/actions/codecov-shared/action.yml new file mode 100644 index 0000000000..460cbae69a --- /dev/null +++ b/.github/actions/codecov-shared/action.yml @@ -0,0 +1,28 @@ +name: "Shared Codecov reporting steps" +description: "Shared Codecov reporting steps" +inputs: + files: + description: "Files to upload to Codecov" + required: true + default: "./target/coverage/lcov" + flags: + description: "Flags to pass to Codecov" + required: false + default: "unittests" + name: + description: "The report name" + required: false + default: "proof-systems" +runs: + using: "composite" + steps: + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: ${{ inputs.files }} + flags: ${{ inputs.flags }} + name: ${{ inputs.name }} + verbose: true + handle_no_reports_found: true + fail_ci_if_error: false diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index b4581fab8f..f200d984a6 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -15,6 +15,67 @@ env: CARGO_TERM_COLOR: always jobs: + run_mdbook: + name: Building MDBook + runs-on: ubuntu-latest + strategy: + matrix: + rust_toolchain_version: ["1.72"] + steps: + - name: Checkout PR + uses: actions/checkout@v4.1.1 + + - name: Setup Rust toolchain ${{ matrix.rust_toolchain_version }} + run: | + curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ + https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init + chmod +x ./rustup-init + ./rustup-init -y --default-toolchain "${{ matrix.rust_toolchain_version }}" --profile default + rm ./rustup-init + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + # overwriting default rust-toolchain + echo ${{ matrix.rust_toolchain_version }} > rust-toolchain + + - name: Build the mdbook + run: | + cd book + make deps + make build + + run_formatting: + name: Formatting + runs-on: ubuntu-latest + strategy: + matrix: + rust_toolchain_version: ["nightly"] + steps: + - name: Checkout PR + uses: actions/checkout@v4.1.1 + + - name: Setup Rust toolchain ${{ matrix.rust_toolchain_version }} + run: | + curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ + https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init + chmod +x ./rustup-init + ./rustup-init -y --default-toolchain "${{ matrix.rust_toolchain_version }}" --profile default + rm ./rustup-init + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + # overwriting default rust-toolchain + echo ${{ matrix.rust_toolchain_version }} > rust-toolchain + + - name: Run cargo fmt + run: | + make format + + # We run only one of the matrix options on the toffee hetzner-1, + # and also only in this configuration we enable heavy tests. 
+ run-checks-setup: + runs-on: ubuntu-latest + outputs: + runners: '{"1.71":"ubuntu-latest", "1.72": "ubuntu-latest", "1.73": "ubuntu-latest", "1.74": "hetzner-1"}' + steps: + - run: echo no-op + run_checks: strategy: matrix: @@ -37,16 +98,15 @@ jobs: # as action-rs does not seem to be maintained anymore, building from # scratch the environment using rustup - name: Setup Rust toolchain ${{ matrix.rust_toolchain_version }} - run: - | - curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ - https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init - chmod +x ./rustup-init - ./rustup-init -y --default-toolchain "${{ matrix.rust_toolchain_version }}" --profile default - rm ./rustup-init - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - # overwriting default rust-toolchain - echo ${{ matrix.rust_toolchain_version }} > rust-toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ + https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init + chmod +x ./rustup-init + ./rustup-init -y --default-toolchain "${{ matrix.rust_toolchain_version }}" --profile default + rm ./rustup-init + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + # overwriting default rust-toolchain + echo ${{ matrix.rust_toolchain_version }} > rust-toolchain - name: Setup OCaml (because of ocaml-gen) run: | @@ -83,47 +143,43 @@ jobs: # Coding guidelines # - - name: Enforce formating - run: | - cargo fmt -- --check - - name: Lint (clippy) - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all-features --tests --all-targets --offline -- -D warnings - - - name: Run Clippy (beta) - uses: actions-rs/clippy-check@v1 - continue-on-error: true - with: - name: Clippy (beta) - token: ${{ secrets.GITHUB_TOKEN }} - args: --all-features --all-targets --offline -- -W clippy::all + run: | + eval $(opam env) + make lint # # Build # - name: Ensure that everything builds - uses: actions-rs/cargo@v1 - with: - command: build - args: --all-targets --all-features --offline + run: | + eval $(opam env) + make all # # Tests # - # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions - - name: Install latest nextest release - uses: taiki-e/install-action@nextest + - name: Install test dependencies + run: | + make install-test-deps + + - name: Run non-heavy tests + run: | + eval $(opam env) + make nextest-with-coverage + - name: Use shared Codecov reporting steps + uses: ./.github/actions/codecov-shared - - name: Test with latest nextest release (faster than cargo test) - uses: actions-rs/cargo@v1 - with: - command: nextest - args: run --all-features --release --offline + - name: Run heavy tests + if: ${{ matrix.rust_toolchain_version == '1.74' }} + run: | + eval $(opam env) + make nextest-heavy-with-coverage + - name: Use shared Codecov reporting steps + if: ${{ matrix.rust_toolchain_version == '1.74' }} + uses: ./.github/actions/codecov-shared - name: Doc tests uses: actions-rs/cargo@v1 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ed881acc75..93f062b120 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,9 +4,9 @@ Here's all you need to know to start contributing to kimchi. ## Navigating the project -* [The following video](https://www.youtube.com/watch?v=WUP54nqVedc) goes over the project organization. -* The [Mina book](https://o1-labs.github.io/proof-systems/) contains specifications, rust documentation, RFCs, and explainers on the different aspects of the system. 
-* The [Discussion page](https://github.com/o1-labs/proof-systems/discussions) can be used to start discussions or ask questions. +- [The following video](https://www.youtube.com/watch?v=WUP54nqVedc) goes over the project organization. +- The [Mina book](https://o1-labs.github.io/proof-systems/) contains specifications, rust documentation, RFCs, and explainers on the different aspects of the system. +- The [Discussion page](https://github.com/o1-labs/proof-systems/discussions) can be used to start discussions or ask questions. ## Finding a task @@ -14,21 +14,32 @@ We have a list of easy task to start contributing. [Start over there](https://gi ## Setting up the project +Run + +```shell +git submodule init +git submodule update +``` + +to get the version of Optimism the zkVM has been developed for. + ### Mac & Linux -* Follow these instructions to install OCaml: https://ocaml.org/docs/install.html -* Follow these instructions to install Rust: https://rustup.rs/ +- Follow these instructions to install OCaml: +- Follow these instructions to install Rust: ### Windows Development Windows development can be done using [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/install). -* Install and open WSL -* Within WSL, install OCaml using your distro's package manager. For example: `apt install opam` -* Within WSL, navigate to the project directory and run `cargo test`. If there are no failures then everything is set up correctly. + +- Install and open WSL +- Within WSL, install OCaml using your distro's package manager. For example: `apt install opam` +- Within WSL, navigate to the project directory and run `cargo test`. If there are no failures then everything is set up correctly. ## Development To run tests: + ```bash cargo test --all-features --release ``` @@ -36,6 +47,7 @@ cargo test --all-features --release Takes about 5-8 minutes on a MacBook Pro (2019, 8-Core Intel Core i9, 32GB RAM). Without `--release`, more than an hour. To scan for lints: + ```bash cargo clippy --all-features --tests --all-targets -- -D warnings ``` @@ -43,6 +55,7 @@ cargo clippy --all-features --tests --all-targets -- -D warnings Note: cargo can automatically fix some lints. To do so, add `--fix` to the above command (as the first parameter). Finally, to check formatting: + ```bash cargo fmt ``` @@ -54,14 +67,14 @@ These are enforced by GitHub PR checks, so be sure to have any errors produced b Generally, proof-systems intends to be synchronized with the mina repository (see their [README-branching.md](https://github.com/MinaProtocol/mina/blob/develop/README-branching.md)), and so its branching policy is quite similar. However several important (some, temporary) distinctions exist: - `compatible`: - - Compatible with `rampup` in `mina`. - - Mina's `compatible`, similarly to mina's `master`, does not have `proof-systems`. + - Compatible with `rampup` in `mina`. + - Mina's `compatible`, similarly to mina's `master`, does not have `proof-systems`. - `berkley`: future hardfork release, will be going out to berkeley. - This is where hotfixes go. - `develop`: matches mina's `develop`, soft fork-compatibility. - Also used by `mina/o1js-main` and `o1js/main`. - `master`: future feature work development, containing breaking changes. Anything that does not need to be released alongside mina. - - Note that `mina`'s `master` does not depend on `proof-systems` at all. + - Note that `mina`'s `master` does not depend on `proof-systems` at all. - `izmir`: next hardfork release after berkeley. 
- In the future: - `master`/`develop` will reverse roles and become something like gitflow. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..6a3694c04c --- /dev/null +++ b/Makefile @@ -0,0 +1,99 @@ +# Variables +COVERAGE_ENV = CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE=$(shell pwd)/target/profraw/cargo-test-%p-%m.profraw + +# Install test dependencies +install-test-deps: + @echo "" + @echo "Installing the test dependencies." + @echo "" + rustup component add llvm-tools-preview + # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions + # FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72. + cargo install cargo-nextest@=0.9.67 --locked + cargo install grcov + @echo "" + @echo "Test dependencies installed." + @echo "" + +# Default target +all: clean release + +# Clean the project +clean: + cargo clean + +# Build the project +build: + cargo build --all-targets --all-features + +# Build the project in release mode +release: + cargo build --release --all-targets --all-features + +# Test the project with non-heavy tests and using native cargo test runner +test: + cargo test --all-features --release $(CARGO_EXTRA_ARGS) -- --nocapture --skip heavy $(BIN_EXTRA_ARGS) + +test-with-coverage: + $(COVERAGE_ENV) CARGO_EXTRA_ARGS="$(CARGO_EXTRA_ARGS)" BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) test + $(MAKE) generate-test-coverage-report + +# Test the project with heavy tests and using native cargo test runner +test-heavy: + cargo test --all-features --release $(CARGO_EXTRA_ARGS) -- --nocapture heavy $(BIN_EXTRA_ARGS) + +test-heavy-with-coverage: + $(COVERAGE_ENV) CARGO_EXTRA_ARGS="$(CARGO_EXTRA_ARGS)" BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) test-heavy + $(MAKE) generate-test-coverage-report + +# Test the project with all tests and using native cargo test runner +test-all: + cargo test --all-features --release $(CARGO_EXTRA_ARGS) -- --nocapture $(BIN_EXTRA_ARGS) + +test-all-with-coverage: + $(COVERAGE_ENV) CARGO_EXTRA_ARGS="$(CARGO_EXTRA_ARGS)" BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) test-all + $(MAKE) generate-test-coverage-report + +# Test the project with non-heavy tests and using nextest test runner +nextest: + cargo nextest run --all-features --release --profile ci -E "not test(heavy)" $(BIN_EXTRA_ARGS) + +nextest-with-coverage: + $(COVERAGE_ENV) BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) nextest + $(MAKE) generate-test-coverage-report + +# Test the project with heavy tests and using nextest test runner +nextest-heavy: + cargo nextest run --all-features --release --profile ci -E "test(heavy)" $(BIN_EXTRA_ARGS) + +nextest-heavy-with-coverage: + $(COVERAGE_ENV) BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) nextest-heavy + $(MAKE) generate-test-coverage-report + +# Test the project with all tests and using nextest test runner +nextest-all: + cargo nextest run --all-features --release --profile ci $(BIN_EXTRA_ARGS) + +nextest-all-with-coverage: + $(COVERAGE_ENV) BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) nextest-all + $(MAKE) generate-test-coverage-report + +# Format the code +format: + cargo fmt -- --check + +# Lint the code +lint: + cargo clippy --all-features --all-targets --tests -- -W clippy::all -D warnings + +generate-test-coverage-report: + @echo "" + @echo "Generating the test coverage report." + @echo "" + mkdir -p ./target/coverage + grcov ./target/profraw --binary-path ./target/release/deps/ -s . 
-t html,lcov --branch --ignore-not-existing --ignore "**/tests/**" -o ./target/coverage + @echo "" + @echo "Test coverage report is available at: ./target/coverage/html" + @echo "" + +.PHONY: install-test-deps all clean build release test test-with-coverage test-heavy test-heavy-with-coverage test-all test-all-with-coverage nextest nextest-with-coverage nextest-heavy nextest-heavy-with-coverage nextest-all nextest-all-with-coverage format lint generate-test-coverage-report diff --git a/README.md b/README.md index a2ce1f6e90..94145083f2 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,9 @@ +# Kimchi + +[![codecov](https://codecov.io/gh/o1-labs/proof-systems/graph/badge.svg?token=pl6W1FDfV0)](https://codecov.io/gh/o1-labs/proof-systems) [![CI](https://github.com/o1-labs/proof-systems/actions/workflows/rust.yml/badge.svg)](https://github.com/o1-labs/proof-systems/actions/workflows/rust.yml) [![dependency status](https://deps.rs/repo/github/o1-labs/proof-systems/status.svg?style=flat-square)](https://deps.rs/repo/github/o1-labs/proof-systems) -# Kimchi - This repository contains **kimchi**, a general-purpose zero-knowledge proof system for proving the correct execution of programs. You can read more about this project on the [Kimchi book](https://o1-labs.github.io/proof-systems), or for a lighter introduction in this [blogpost](https://minaprotocol.com/blog/kimchi-the-latest-update-to-minas-proof-system). @@ -11,7 +12,7 @@ You can read more about this project on the [Kimchi book](https://o1-labs.github ## User Warning -This project comes as is. We provide no guarantee of stability or support, as the crates closely follow the needs of the [Mina]([https://](https://github.com/minaprotocol/mina)) project. +This project comes as is. We provide no guarantee of stability or support, as the crates closely follow the needs of the [Mina](<[https://](https://github.com/minaprotocol/mina)>) project. If you use this project in a production environment, it is your responsibility to perform a security audit to ensure that the software meets your requirements. @@ -19,7 +20,7 @@ If you use this project in a production environment, it is your responsibility t At the time of this writing: -**Proving time** +### Proving time | number of gates | seconds | | :-------------: | :-----: | @@ -27,14 +28,14 @@ At the time of this writing: | 2^15 | 3.3s | | 2^16 | 6.3s | -**Verification time** +### Verification time | number of gates | seconds | | :-------------: | :-----: | | 2^15 | 0.1s | | 2^16 | 0.1s | -**Proof size** +### Proof size | number of gates | bytes | | :-------------: | :---: | @@ -45,18 +46,46 @@ At the time of this writing: The project is organized in the following way: -* [book/](book/). The mina book, RFCs, and specifications. [Available here in HTML](https://o1-labs.github.io/proof-systems). -* [curves/](curves/). The elliptic curves we use (for now just the pasta curves). -* [groupmap/](groupmap/). Used to convert elliptic curve elements to field elements. -* [hasher/](hasher/). Interfaces for mina hashing. -* [kimchi/](kimchi/). Our proof system based on PLONK. -* [poly-commitment/](poly-commitment/). Polynomial commitment code. -* [poseidon/](poseidon/). Implementation of the poseidon hash function. -* [signer/](signer/). Interfaces for mina signature schemes. -* [tools/](tools/). Various tooling to help us work on kimchi. -* [turshi/](turshi/). A Cairo runner written in rust. -* [utils/](utils/). Collection of useful functions and traits. +- [book/](book/). 
The mina book, RFCs, and specifications. [Available here in HTML](https://o1-labs.github.io/proof-systems). +- [curves/](curves/). The elliptic curves we use (for now just the pasta curves). +- [groupmap/](groupmap/). Used to convert elliptic curve elements to field elements. +- [hasher/](hasher/). Interfaces for mina hashing. +- [kimchi/](kimchi/). Our proof system based on PLONK. +- [poly-commitment/](poly-commitment/). Polynomial commitment code. +- [poseidon/](poseidon/). Implementation of the poseidon hash function. +- [signer/](signer/). Interfaces for mina signature schemes. +- [tools/](tools/). Various tooling to help us work on kimchi. +- [turshi/](turshi/). A Cairo runner written in rust. +- [utils/](utils/). Collection of useful functions and traits. ## Contributing Check [CONTRIBUTING.md](CONTRIBUTING.md) if you are interested in contributing to this project. +<<<<<<< HEAD +======= + +## Generate rustdoc locally + +An effort is made to have the documentation being self-contained, referring to the mina book for more details when necessary. +You can build the rust documentation with + + + +```shell +rustup install nightly +RUSTDOCFLAGS="--enable-index-page -Zunstable-options" cargo +nightly doc --all --no-deps +``` + +You can visualize the documentation by opening the file `target/doc/index.html`. + +## CI + + + +The CI will build different targets. + +- [Deploy Specifications & Docs to GitHub Pages](.github/workflows/gh-page.yml). + When CI passes on master, the documentation built from the rust code will be + available [here](https://o1-labs.github.io/proof-systems/rustdoc) and the book + will be available [here](https://o1-labs.github.io/proof-systems). +>>>>>>> 2fd953cd04 (Add the test coverage data gathering and reports generation.) From 00cccf474755398b76f91ad08de7f9d0d768f37b Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 28 Aug 2024 18:42:49 +0300 Subject: [PATCH 097/178] Let's try with the grcov@=0.8.13 (for rustc 1.71). --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6a3694c04c..55754aeb06 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,8 @@ install-test-deps: # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions # FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72. cargo install cargo-nextest@=0.9.67 --locked - cargo install grcov + # FIXME: latest 0.8.19+ requires rustc 1.74+ + cargo install grcov@=0.8.13 --locked @echo "" @echo "Test dependencies installed." @echo "" From 39a682fc779ee4adc3b8ba709f67278e2e49504e Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 28 Aug 2024 20:20:52 +0300 Subject: [PATCH 098/178] Another grcov@=0.8.13 fix (-t CLI argument). 
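Context for the change: the pinned grcov 0.8.13 accepts only a single output type per `-t` invocation; the comma-separated form (`-t html,lcov`) is understood only by newer releases (0.8.19+, per the Makefile comment introduced in a later commit). Report generation therefore has to run once per format — a sketch under that assumption:

```shell
# Accepted by grcov 0.8.19+ only, rejected by the pinned 0.8.13:
#   grcov ./target/profraw ... -t html,lcov -o ./target/coverage

# With 0.8.13: one invocation per output type.
grcov ./target/profraw --binary-path ./target/release/deps/ -s . \
  -t html --branch --ignore-not-existing -o ./target/coverage
grcov ./target/profraw --binary-path ./target/release/deps/ -s . \
  -t lcov --branch --ignore-not-existing -o ./target/coverage/lcov.info
```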
--- .github/actions/codecov-shared/action.yml | 4 ++-- Makefile | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/actions/codecov-shared/action.yml b/.github/actions/codecov-shared/action.yml index 460cbae69a..766bd6b686 100644 --- a/.github/actions/codecov-shared/action.yml +++ b/.github/actions/codecov-shared/action.yml @@ -3,8 +3,8 @@ description: "Shared Codecov reporting steps" inputs: files: description: "Files to upload to Codecov" - required: true - default: "./target/coverage/lcov" + required: false + default: "./target/coverage/lcov.info" flags: description: "Flags to pass to Codecov" required: false diff --git a/Makefile b/Makefile index 55754aeb06..0d219323c4 100644 --- a/Makefile +++ b/Makefile @@ -92,9 +92,10 @@ generate-test-coverage-report: @echo "Generating the test coverage report." @echo "" mkdir -p ./target/coverage - grcov ./target/profraw --binary-path ./target/release/deps/ -s . -t html,lcov --branch --ignore-not-existing --ignore "**/tests/**" -o ./target/coverage + grcov ./target/profraw --binary-path ./target/release/deps/ -s . -t html --branch --ignore-not-existing --ignore "**/tests/**" -o ./target/coverage + grcov ./target/profraw --binary-path ./target/release/deps/ -s . -t lcov --branch --ignore-not-existing --ignore "**/tests/**" -o ./target/coverage/lcov.info @echo "" - @echo "Test coverage report is available at: ./target/coverage/html" + @echo "The test coverage report is available at: ./target/coverage" @echo "" .PHONY: install-test-deps all clean build release test test-with-coverage test-heavy test-heavy-with-coverage test-all test-all-with-coverage nextest nextest-with-coverage nextest-heavy nextest-heavy-with-coverage nextest-all nextest-all-with-coverage format lint generate-test-coverage-report From 23d8501fcd3af4ccbb268a5ec0a17639f3a7a738 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 28 Aug 2024 20:39:02 +0300 Subject: [PATCH 099/178] Refactoring. --- Makefile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 0d219323c4..dab63d6d8d 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,7 @@ # Variables COVERAGE_ENV = CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE=$(shell pwd)/target/profraw/cargo-test-%p-%m.profraw +# FIXME: In latest 0.8.19+ -t CLI argument can accept comma separated list of custom output types, hence, no need in double invocation +GRCOV_CALL = grcov ./target/profraw --binary-path ./target/release/deps/ -s . --branch --ignore-not-existing --ignore "**/tests/**" # Install test dependencies install-test-deps: @@ -92,8 +94,11 @@ generate-test-coverage-report: @echo "Generating the test coverage report." @echo "" mkdir -p ./target/coverage - grcov ./target/profraw --binary-path ./target/release/deps/ -s . -t html --branch --ignore-not-existing --ignore "**/tests/**" -o ./target/coverage - grcov ./target/profraw --binary-path ./target/release/deps/ -s . 
-t lcov --branch --ignore-not-existing --ignore "**/tests/**" -o ./target/coverage/lcov.info + GRCOV_OUTPUT_TYPE=html GRCOV_OUTPUT_PATH=./target/coverage + $(eval GRCOV_HTML_CMD=$(GRCOV_CALL) -t html -o ./target/coverage) + $(GRCOV_HTML_CMD) + $(eval GRCOV_LCOV_CMD=$(GRCOV_CALL) -t lcov -o ./target/coverage/lcov.info) + $(GRCOV_LCOV_CMD) @echo "" @echo "The test coverage report is available at: ./target/coverage" @echo "" From 2a068b1d96612ce41bc445bd5732c74fac373fff Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 28 Aug 2024 21:50:17 +0300 Subject: [PATCH 100/178] The workaround of the composite actions limitations (secrets). --- .github/actions/codecov-shared/action.yml | 5 ++++- .github/workflows/rust.yml | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/actions/codecov-shared/action.yml b/.github/actions/codecov-shared/action.yml index 766bd6b686..94fbc4db33 100644 --- a/.github/actions/codecov-shared/action.yml +++ b/.github/actions/codecov-shared/action.yml @@ -1,6 +1,9 @@ name: "Shared Codecov reporting steps" description: "Shared Codecov reporting steps" inputs: + token: + description: "Codecov token" + required: true files: description: "Files to upload to Codecov" required: false @@ -19,7 +22,7 @@ runs: - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4 with: - token: ${{ secrets.CODECOV_TOKEN }} + token: ${{ inputs.token }} files: ${{ inputs.files }} flags: ${{ inputs.flags }} name: ${{ inputs.name }} diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index f200d984a6..2e97400067 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -171,6 +171,8 @@ jobs: make nextest-with-coverage - name: Use shared Codecov reporting steps uses: ./.github/actions/codecov-shared + with: + token: ${{ secrets.CODECOV_TOKEN }} - name: Run heavy tests if: ${{ matrix.rust_toolchain_version == '1.74' }} @@ -180,6 +182,8 @@ jobs: - name: Use shared Codecov reporting steps if: ${{ matrix.rust_toolchain_version == '1.74' }} uses: ./.github/actions/codecov-shared + with: + token: ${{ secrets.CODECOV_TOKEN }} - name: Doc tests uses: actions-rs/cargo@v1 From 02066549f05fdb6bda1076e7402660b08ce5581f Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 11:02:10 +0300 Subject: [PATCH 101/178] Make default target to be 'release' (no clean), don't run heavy tests with coverage as part of regular checks, remove flag from coverage uploading to see if it helps with diff processing. 
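After this change the common entry points behave as follows (a sketch; the targets are the ones defined in the Makefile above):

```shell
# `make` now defaults to a release build, without a `clean` first.
make

# Regular CI path: non-heavy tests with coverage gathering.
make nextest-with-coverage

# Heavy tests run separately, without the coverage overhead.
make nextest-heavy
```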
--- .github/actions/codecov-shared/action.yml | 2 +- .github/workflows/rust.yml | 7 +------ Makefile | 6 +++--- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/.github/actions/codecov-shared/action.yml b/.github/actions/codecov-shared/action.yml index 94fbc4db33..886136488f 100644 --- a/.github/actions/codecov-shared/action.yml +++ b/.github/actions/codecov-shared/action.yml @@ -11,7 +11,7 @@ inputs: flags: description: "Flags to pass to Codecov" required: false - default: "unittests" + default: "" name: description: "The report name" required: false diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2e97400067..854688d0d3 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -178,12 +178,7 @@ jobs: if: ${{ matrix.rust_toolchain_version == '1.74' }} run: | eval $(opam env) - make nextest-heavy-with-coverage - - name: Use shared Codecov reporting steps - if: ${{ matrix.rust_toolchain_version == '1.74' }} - uses: ./.github/actions/codecov-shared - with: - token: ${{ secrets.CODECOV_TOKEN }} + make nextest-heavy - name: Doc tests uses: actions-rs/cargo@v1 diff --git a/Makefile b/Makefile index dab63d6d8d..5222d30342 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,9 @@ COVERAGE_ENV = CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFIL # FIXME: In latest 0.8.19+ -t CLI argument can accept comma separated list of custom output types, hence, no need in double invocation GRCOV_CALL = grcov ./target/profraw --binary-path ./target/release/deps/ -s . --branch --ignore-not-existing --ignore "**/tests/**" +# Default target +all: release + # Install test dependencies install-test-deps: @echo "" @@ -18,9 +21,6 @@ install-test-deps: @echo "Test dependencies installed." @echo "" -# Default target -all: clean release - # Clean the project clean: cargo clean From 6a34b34f3a37ddeb0da4cdfd16529bce5b7c2af9 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 16:45:09 +0300 Subject: [PATCH 102/178] Actions refactoring. 
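The refactoring below extracts the Rust toolchain and OCaml setup into shared composite actions and moves the full coverage run into a nightly workflow. Since `ci-nightly.yml` also declares a `workflow_dispatch` trigger, it can be started on demand — for instance (a sketch, assuming an installed and authenticated GitHub CLI):

```shell
# Kick off the nightly coverage workflow manually.
gh workflow run ci-nightly.yml

# Inspect the run that was just started.
gh run list --workflow=ci-nightly.yml --limit 1
```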
--- .github/actions/ocaml-shared/action.yml | 20 +++++ .github/actions/toolchain-shared/action.yml | 26 ++++++ .github/workflows/ci-nightly.yml | 63 +++++++++++++++ .github/workflows/{rust.yml => ci.yml} | 90 ++++++++------------- .github/workflows/coverage.yml.disabled | 44 ---------- 5 files changed, 142 insertions(+), 101 deletions(-) create mode 100644 .github/actions/ocaml-shared/action.yml create mode 100644 .github/actions/toolchain-shared/action.yml create mode 100644 .github/workflows/ci-nightly.yml rename .github/workflows/{rust.yml => ci.yml} (55%) delete mode 100644 .github/workflows/coverage.yml.disabled diff --git a/.github/actions/ocaml-shared/action.yml b/.github/actions/ocaml-shared/action.yml new file mode 100644 index 0000000000..8a04bf75c1 --- /dev/null +++ b/.github/actions/ocaml-shared/action.yml @@ -0,0 +1,20 @@ +name: "Shared OCaml setting up steps" +description: "Shared OCaml setting up steps" +inputs: + ocaml_version: + description: "OCaml version" + required: true + shell: + description: "Shell to use" + required: false + default: "bash" +runs: + using: "composite" + steps: + - name: Setup OCaml ${{ inputs.ocaml_version }} + uses: ocaml/setup-ocaml@v2 + with: + ocaml-compiler: ${{ inputs.ocaml_version }} + # https://github.com/ocaml/setup-ocaml/issues/211#issuecomment-1058882386 + # disable-cache: true + shell: ${{ inputs.shell }} diff --git a/.github/actions/toolchain-shared/action.yml b/.github/actions/toolchain-shared/action.yml new file mode 100644 index 0000000000..5757d14c88 --- /dev/null +++ b/.github/actions/toolchain-shared/action.yml @@ -0,0 +1,26 @@ +name: "Shared Rust toolchain setting up steps" +description: "Shared Rust toolchain setting up steps" +inputs: + rust_toolchain_version: + description: "Rust toolchain version" + required: true + shell: + description: "Shell to use" + required: false + default: "bash" +runs: + using: "composite" + steps: + # As action-rs does not seem to be maintained anymore, building from + # scratch the environment using rustup + - name: Setup Rust toolchain ${{ inputs.rust_toolchain_version }} + run: | + curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ + https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init + chmod +x ./rustup-init + ./rustup-init -y --default-toolchain "${{ inputs.rust_toolchain_version }}" --profile default + rm ./rustup-init + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + # overwriting default rust-toolchain + echo ${{ inputs.rust_toolchain_version }} > rust-toolchain + shell: ${{ inputs.shell }} diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml new file mode 100644 index 0000000000..9f511a2356 --- /dev/null +++ b/.github/workflows/ci-nightly.yml @@ -0,0 +1,63 @@ +# +# This workflow is triggered nightly (or on-demand) to run all the tests with the code coverage enabled and using the self-hosted GitHub runner. 
+# + +name: Nightly tests with code coverage + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: {} + +env: + # https://doc.rust-lang.org/cargo/reference/profiles.html#release + RUSTFLAGS: -Coverflow-checks=y -Cdebug-assertions=y + # https://doc.rust-lang.org/cargo/reference/profiles.html#incremental + CARGO_INCREMENTAL: 1 + # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions + CARGO_TERM_COLOR: always + RUST_MIN_STACK: 31457280 + # 30 MB of stack for Keccak tests + +jobs: + run_tests: + name: Run all tests with the code coverage + runs-on: ${{ matrix.os }} + strategy: + matrix: + # FIXME: use the latest version of cargo nextest when we get rid of 1.71 + # and 1.72 + rust_toolchain_version: ["1.74"] + # FIXME: currently not available for 5.0.0. + # It might be related to boxroot dependency, and we would need to bump + # up the ocaml-rs dependency + ocaml_version: ["4.14"] + os: ["hetzner-1"] + steps: + - name: Checkout PR + uses: actions/checkout@v4.1.1 + + - name: Use shared Rust toolchain setting up steps + uses: ./.github/actions/toolchain-shared + with: + rust_toolchain_version: ${{ matrix.rust_toolchain_version }} + + - name: Use shared OCaml setting up steps + uses: ./.github/actions/ocaml-shared + with: + ocaml_version: ${{ matrix.ocaml_version }} + + - name: Install test dependencies + run: | + make install-test-deps + + - name: Run all tests + if: ${{ matrix.rust_toolchain_version == '1.74' }} + run: | + eval $(opam env) + make nextest-all-with-coverage + + - name: Use shared Codecov reporting steps + uses: ./.github/actions/codecov-shared + with: + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/rust.yml b/.github/workflows/ci.yml similarity index 55% rename from .github/workflows/rust.yml rename to .github/workflows/ci.yml index 854688d0d3..5143f760e0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/ci.yml @@ -25,16 +25,10 @@ jobs: - name: Checkout PR uses: actions/checkout@v4.1.1 - - name: Setup Rust toolchain ${{ matrix.rust_toolchain_version }} - run: | - curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ - https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init - chmod +x ./rustup-init - ./rustup-init -y --default-toolchain "${{ matrix.rust_toolchain_version }}" --profile default - rm ./rustup-init - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - # overwriting default rust-toolchain - echo ${{ matrix.rust_toolchain_version }} > rust-toolchain + - name: Use shared Rust toolchain setting up steps + uses: ./.github/actions/toolchain-shared + with: + rust_toolchain_version: ${{ matrix.rust_toolchain_version }} - name: Build the mdbook run: | @@ -52,31 +46,20 @@ jobs: - name: Checkout PR uses: actions/checkout@v4.1.1 - - name: Setup Rust toolchain ${{ matrix.rust_toolchain_version }} - run: | - curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ - https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init - chmod +x ./rustup-init - ./rustup-init -y --default-toolchain "${{ matrix.rust_toolchain_version }}" --profile default - rm ./rustup-init - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - # overwriting default rust-toolchain - echo ${{ matrix.rust_toolchain_version }} > rust-toolchain + - name: Use shared Rust toolchain setting up steps + uses: ./.github/actions/toolchain-shared + with: + rust_toolchain_version: ${{ matrix.rust_toolchain_version }} - name: Run cargo fmt run: | make format - # We run only one of the matrix options on the toffee hetzner-1, - # and also only 
in this configuration we enable heavy tests. - run-checks-setup: - runs-on: ubuntu-latest - outputs: - runners: '{"1.71":"ubuntu-latest", "1.72": "ubuntu-latest", "1.73": "ubuntu-latest", "1.74": "hetzner-1"}' - steps: - - run: echo no-op - run_checks: + name: Run some basic checks and tests + runs-on: ${{ matrix.os }} + env: + RUST_TOOLCHAIN_COVERAGE_VERSION: "1.74" strategy: matrix: # FIXME: use the latest version of cargo nextest when we get rid of 1.71 @@ -86,32 +69,22 @@ jobs: # It might be related to boxroot dependency, and we would need to bump # up the ocaml-rs dependency ocaml_version: ["4.14"] - - runs-on: ubuntu-latest - name: Run some basic checks and tests + os: ["ubuntu-latest"] steps: - name: Checkout PR uses: actions/checkout@v4.1.1 with: submodules: true - # as action-rs does not seem to be maintained anymore, building from - # scratch the environment using rustup - - name: Setup Rust toolchain ${{ matrix.rust_toolchain_version }} - run: | - curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ - https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init - chmod +x ./rustup-init - ./rustup-init -y --default-toolchain "${{ matrix.rust_toolchain_version }}" --profile default - rm ./rustup-init - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - # overwriting default rust-toolchain - echo ${{ matrix.rust_toolchain_version }} > rust-toolchain - - - name: Setup OCaml (because of ocaml-gen) - run: | - sudo apt update - sudo apt install -y ocaml + - name: Use shared Rust toolchain setting up steps + uses: ./.github/actions/toolchain-shared + with: + rust_toolchain_version: ${{ matrix.rust_toolchain_version }} + + - name: Use shared OCaml setting up steps + uses: ./.github/actions/ocaml-shared + with: + ocaml_version: ${{ matrix.ocaml_version }} - name: Install markdownlint run: | @@ -155,7 +128,7 @@ jobs: - name: Ensure that everything builds run: | eval $(opam env) - make all + make # # Tests @@ -165,21 +138,24 @@ jobs: run: | make install-test-deps - - name: Run non-heavy tests + - name: Run non-heavy tests without the code coverage + if: ${{ matrix.rust_toolchain_version != env.RUST_TOOLCHAIN_COVERAGE_VERSION }} + run: | + eval $(opam env) + make nextest + + - name: Run non-heavy tests with the code coverage + if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) make nextest-with-coverage + - name: Use shared Codecov reporting steps + if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} uses: ./.github/actions/codecov-shared with: token: ${{ secrets.CODECOV_TOKEN }} - - name: Run heavy tests - if: ${{ matrix.rust_toolchain_version == '1.74' }} - run: | - eval $(opam env) - make nextest-heavy - - name: Doc tests uses: actions-rs/cargo@v1 with: diff --git a/.github/workflows/coverage.yml.disabled b/.github/workflows/coverage.yml.disabled deleted file mode 100644 index 837124ea24..0000000000 --- a/.github/workflows/coverage.yml.disabled +++ /dev/null @@ -1,44 +0,0 @@ -name: Coverage - -on: - workflow_dispatch: - pull_request: - branches: - - master - push: - branches: - - master - -jobs: - - coverage: - name: Coverage (+nightly) - # The large timeout is to accommodate nightly builds - timeout-minutes: 60 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - with: - persist-credentials: false - - - uses: actions-rs/toolchain@v1.0.7 - with: - toolchain: nightly - override: true - profile: minimal - components: llvm-tools-preview - - - name: Install cargo-llvm-cov cargo command - 
run: cargo install cargo-llvm-cov - - - name: Setup OCaml (because of ocaml-gen) - run: sudo apt update && sudo apt install ocaml - - - name: Generate code coverage - env: - ZEBRA_SKIP_NETWORK_TESTS: 1 - CARGO_INCREMENTAL: 0 - run: cargo llvm-cov --lcov > lcov.info - - - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v2.0.3 From 55a52ba8087d9f612ff967181ce8f76295ecdf72 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 16:49:47 +0300 Subject: [PATCH 103/178] Actions refactoring. --- .github/actions/ocaml-shared/action.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/actions/ocaml-shared/action.yml b/.github/actions/ocaml-shared/action.yml index 8a04bf75c1..37351b6a4c 100644 --- a/.github/actions/ocaml-shared/action.yml +++ b/.github/actions/ocaml-shared/action.yml @@ -4,10 +4,6 @@ inputs: ocaml_version: description: "OCaml version" required: true - shell: - description: "Shell to use" - required: false - default: "bash" runs: using: "composite" steps: @@ -17,4 +13,3 @@ runs: ocaml-compiler: ${{ inputs.ocaml_version }} # https://github.com/ocaml/setup-ocaml/issues/211#issuecomment-1058882386 # disable-cache: true - shell: ${{ inputs.shell }} From 2b56a4ae062a6ca0ba0bd1cce9a087d20784df1a Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 16:57:03 +0300 Subject: [PATCH 104/178] Comments refactoring. --- .github/workflows/ci-nightly.yml | 2 -- .github/workflows/ci.yml | 4 +--- Makefile | 6 +++--- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 9f511a2356..1cc1ef3641 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -25,8 +25,6 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - # FIXME: use the latest version of cargo nextest when we get rid of 1.71 - # and 1.72 rust_toolchain_version: ["1.74"] # FIXME: currently not available for 5.0.0. # It might be related to boxroot dependency, and we would need to bump diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5143f760e0..05a234d651 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,9 +62,7 @@ jobs: RUST_TOOLCHAIN_COVERAGE_VERSION: "1.74" strategy: matrix: - # FIXME: use the latest version of cargo nextest when we get rid of 1.71 - # and 1.72 - rust_toolchain_version: ["1.71", "1.72"] + rust_toolchain_version: ["1.71", "1.72", "1.73", "1.74"] # FIXME: currently not available for 5.0.0. # It might be related to boxroot dependency, and we would need to bump # up the ocaml-rs dependency diff --git a/Makefile b/Makefile index 5222d30342..fa79d7b25c 100644 --- a/Makefile +++ b/Makefile @@ -7,15 +7,15 @@ GRCOV_CALL = grcov ./target/profraw --binary-path ./target/release/deps/ -s . -- all: release # Install test dependencies +# https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions +# FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72. +# FIXME: latest 0.8.19+ requires rustc 1.74+ install-test-deps: @echo "" @echo "Installing the test dependencies." @echo "" rustup component add llvm-tools-preview - # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions - # FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72. cargo install cargo-nextest@=0.9.67 --locked - # FIXME: latest 0.8.19+ requires rustc 1.74+ cargo install grcov@=0.8.13 --locked @echo "" @echo "Test dependencies installed." 
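The removal in PATCH 103 above reflects a composite-action rule rather than taste; a small sketch with illustrative step names:

```yaml
runs:
  using: "composite"
  steps:
    - name: A run step must declare its shell
      run: echo "ok"
      shell: bash # required on every run step of a composite action
    - name: A uses step accepts no shell key
      uses: actions/checkout@v4 # `shell:` is invalid here, so the input fed nothing
```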
From 7384b3ce24692185618bbacf10d4acabb79db161 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 17:20:12 +0300 Subject: [PATCH 105/178] Cleaning step. --- .github/workflows/ci-nightly.yml | 1 + .github/workflows/ci.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 1cc1ef3641..5ff9c2f361 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -53,6 +53,7 @@ jobs: if: ${{ matrix.rust_toolchain_version == '1.74' }} run: | eval $(opam env) + make clean make nextest-all-with-coverage - name: Use shared Codecov reporting steps diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 05a234d651..5d705318ab 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -140,12 +140,14 @@ jobs: if: ${{ matrix.rust_toolchain_version != env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) + make clean make nextest - name: Run non-heavy tests with the code coverage if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) + make clean make nextest-with-coverage - name: Use shared Codecov reporting steps From a9b3fb59378b2d9831bed134f974d96d23f33a0e Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 17:21:24 +0300 Subject: [PATCH 106/178] Nightly condition fix. --- .github/workflows/ci-nightly.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 5ff9c2f361..7d2d6c9f46 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -49,8 +49,7 @@ jobs: run: | make install-test-deps - - name: Run all tests - if: ${{ matrix.rust_toolchain_version == '1.74' }} + - name: Run all tests with the code coverage run: | eval $(opam env) make clean From f1f3fe3e82576dfedf65934e131e913a75854b79 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 18:32:55 +0300 Subject: [PATCH 107/178] Add debug logging. 
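A note on the `eval $(opam env)` prefix in the steps touched by PATCH 105 above, with a representative sketch: every `run:` block starts a fresh shell, so opam's environment (compiler and tool paths) never persists between steps and must be re-loaded wherever it is needed.

```yaml
- name: Run tests
  run: |
    eval $(opam env)        # re-export the OCaml paths in this step's fresh shell
    make nextest-with-coverage
```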
--- .github/workflows/ci-nightly.yml | 2 +- .github/workflows/ci.yml | 19 ++++++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 7d2d6c9f46..14e565ceb8 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -32,7 +32,7 @@ jobs: ocaml_version: ["4.14"] os: ["hetzner-1"] steps: - - name: Checkout PR + - name: Checkout repository uses: actions/checkout@v4.1.1 - name: Use shared Rust toolchain setting up steps diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5d705318ab..10b69cef98 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ jobs: matrix: rust_toolchain_version: ["1.72"] steps: - - name: Checkout PR + - name: Checkout repository uses: actions/checkout@v4.1.1 - name: Use shared Rust toolchain setting up steps @@ -43,7 +43,7 @@ jobs: matrix: rust_toolchain_version: ["nightly"] steps: - - name: Checkout PR + - name: Checkout repository uses: actions/checkout@v4.1.1 - name: Use shared Rust toolchain setting up steps @@ -69,7 +69,7 @@ jobs: ocaml_version: ["4.14"] os: ["ubuntu-latest"] steps: - - name: Checkout PR + - name: Checkout repo uses: actions/checkout@v4.1.1 with: submodules: true @@ -88,6 +88,19 @@ jobs: run: | npm install -g markdownlint-cli + # TODO: Remove debug logging + - name: Echo tollchain version and environment + run: | + echo rustc --version + echo "" + echo $GITHUB_PATH + echo "" + echo $PATH + echo "" + echo $CARGO_HOME + echo "" + env + # # Doc & Spec # From 00b45225a462fff21033445f1899a7a74cbd96db Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 18:53:29 +0300 Subject: [PATCH 108/178] Additional workflows refactoring and more debug logging to track the issue. 
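The following diff applies one pattern across benches.yml and gh-page.yml; a condensed sketch, with values taken from the diff and the job layout abridged:

```yaml
env:
  RUST_TOOLCHAIN_VERSION: "1.71"
  OCAML_VERSION: "4.14.0"
jobs:
  bench:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # Versions are pinned once above and forwarded to the shared actions.
      - uses: ./.github/actions/toolchain-shared
        with:
          rust_toolchain_version: ${{ env.RUST_TOOLCHAIN_VERSION }}
      - uses: ./.github/actions/ocaml-shared
        with:
          ocaml_version: ${{ env.OCAML_VERSION }}
```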
--- .github/workflows/benches.yml | 40 ++++++++++++----------------------- .github/workflows/ci.yml | 6 +++++- .github/workflows/gh-page.yml | 29 ++++++++----------------- 3 files changed, 28 insertions(+), 47 deletions(-) diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index 7fd1fe8e2e..aff0c4941b 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -9,47 +9,35 @@ env: OCAML_VERSION: "4.14.0" RUST_TOOLCHAIN_VERSION: "1.71" - jobs: bench: runs-on: ubuntu-latest name: Run benchmarks if: github.event.label.name == 'benchmark' steps: - - name: Checkout PR + - name: Checkout repository uses: actions/checkout@v4.1.1 - # as action-rs does not seem to be maintained anymore, building from - # scratch the environment using rustup - - name: Setup Rust toolchain $RUST_TOOLCHAIN_VERSION - run: - | - curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ - https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init - chmod +x ./rustup-init - ./rustup-init -y --default-toolchain "$RUST_TOOLCHAIN_VERSION" --profile default - rm ./rustup-init - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - # overwriting default rust-toolchain - echo $RUST_TOOLCHAIN_VERSION > rust-toolchain + - name: Use shared Rust toolchain setting up steps + uses: ./.github/actions/toolchain-shared + with: + rust_toolchain_version: ${{ env.RUST_TOOLCHAIN_VERSION }} - name: Install dependencies run: | set -x - cargo install cargo-criterion # criterion + cargo install cargo-criterion - - name: Setup OCaml ${{ env.OCAML_VERSION }} - uses: ocaml/setup-ocaml@v2 + - name: Use shared OCaml setting up steps + uses: ./.github/actions/ocaml-shared with: - ocaml-compiler: ${{ env.OCAML_VERSION }} - # https://github.com/ocaml/setup-ocaml/issues/211#issuecomment-1058882386 - # disable-cache: true + ocaml_version: ${{ env.OCAML_VERSION }} -# - name: Run iai bench -# run: | -# set -x -# cargo bench -p kimchi --bench proof_iai > iai_bench -# cat iai_bench + # - name: Run iai bench + # run: | + # set -x + # cargo bench -p kimchi --bench proof_iai > iai_bench + # cat iai_bench - name: Run criterion bench run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 10b69cef98..630fbdb2df 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -91,7 +91,9 @@ jobs: # TODO: Remove debug logging - name: Echo tollchain version and environment run: | - echo rustc --version + rustc --version + echo "" + cargo --version echo "" echo $GITHUB_PATH echo "" @@ -99,6 +101,8 @@ jobs: echo "" echo $CARGO_HOME echo "" + cat rust-toolchain + echo "" env # diff --git a/.github/workflows/gh-page.yml b/.github/workflows/gh-page.yml index 7077069ded..6bf299852e 100644 --- a/.github/workflows/gh-page.yml +++ b/.github/workflows/gh-page.yml @@ -15,29 +15,18 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout Repository + - name: Checkout repository uses: actions/checkout@v4.1.1 - # as action-rs does not seem to be maintained anymore, building from - # scratch the environment using rustup - - name: Setup nightly Rust toolchain - run: - | - curl --proto '=https' --tlsv1.2 -sSf -o rustup-init \ - https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init - chmod +x ./rustup-init - ./rustup-init -y --default-toolchain "$RUST_TOOLCHAIN_VERSION" --profile default - rm ./rustup-init - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - # overwriting default rust-toolchain - echo $RUST_TOOLCHAIN_VERSION > rust-toolchain - - - name: Setup OCaml ${{ env.OCAML_VERSION }} - 
uses: ocaml/setup-ocaml@v2 + - name: Use shared Rust toolchain setting up steps + uses: ./.github/actions/toolchain-shared with: - ocaml-compiler: ${{ env.OCAML_VERSION }} - # https://github.com/ocaml/setup-ocaml/issues/211#issuecomment-1058882386 - # disable-cache: true + rust_toolchain_version: ${{ env.RUST_TOOLCHAIN_VERSION }} + + - name: Use shared OCaml setting up steps + uses: ./.github/actions/ocaml-shared + with: + ocaml_version: ${{ env.OCAML_VERSION }} - name: Build Rust Documentation run: | From abf2b15e743384698c1319dfa4a49af06db36a6b Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 29 Aug 2024 21:19:07 +0300 Subject: [PATCH 109/178] Use latest actions/checkout@v4 with submodules=recursive. --- .github/workflows/benches.yml | 4 +++- .github/workflows/ci-nightly.yml | 6 ++++-- .github/workflows/ci.yml | 33 +++++++++++--------------------- .github/workflows/gh-page.yml | 4 +++- 4 files changed, 21 insertions(+), 26 deletions(-) diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index aff0c4941b..70cd89cee9 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -16,7 +16,9 @@ jobs: if: github.event.label.name == 'benchmark' steps: - name: Checkout repository - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4 + with: + submodules: recursive - name: Use shared Rust toolchain setting up steps uses: ./.github/actions/toolchain-shared diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 14e565ceb8..7889772681 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -16,8 +16,8 @@ env: CARGO_INCREMENTAL: 1 # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions CARGO_TERM_COLOR: always - RUST_MIN_STACK: 31457280 # 30 MB of stack for Keccak tests + RUST_MIN_STACK: 31457280 jobs: run_tests: @@ -33,7 +33,9 @@ jobs: os: ["hetzner-1"] steps: - name: Checkout repository - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4 + with: + submodules: recursive - name: Use shared Rust toolchain setting up steps uses: ./.github/actions/toolchain-shared diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 630fbdb2df..4d08868351 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,6 +13,8 @@ env: CARGO_INCREMENTAL: 1 # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions CARGO_TERM_COLOR: always + # 30 MB of stack for Keccak tests + RUST_MIN_STACK: 31457280 jobs: run_mdbook: @@ -23,7 +25,9 @@ jobs: rust_toolchain_version: ["1.72"] steps: - name: Checkout repository - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4 + with: + submodules: recursive - name: Use shared Rust toolchain setting up steps uses: ./.github/actions/toolchain-shared @@ -44,7 +48,9 @@ jobs: rust_toolchain_version: ["nightly"] steps: - name: Checkout repository - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4 + with: + submodules: recursive - name: Use shared Rust toolchain setting up steps uses: ./.github/actions/toolchain-shared @@ -69,10 +75,10 @@ jobs: ocaml_version: ["4.14"] os: ["ubuntu-latest"] steps: - - name: Checkout repo - uses: actions/checkout@v4.1.1 + - name: Checkout repository + uses: actions/checkout@v4 with: - submodules: true + submodules: recursive - name: Use shared Rust toolchain setting up steps uses: ./.github/actions/toolchain-shared @@ -88,23 +94,6 @@ jobs: run: | npm install -g markdownlint-cli - # TODO: Remove debug logging - - name: Echo tollchain version and 
environment - run: | - rustc --version - echo "" - cargo --version - echo "" - echo $GITHUB_PATH - echo "" - echo $PATH - echo "" - echo $CARGO_HOME - echo "" - cat rust-toolchain - echo "" - env - # # Doc & Spec # diff --git a/.github/workflows/gh-page.yml b/.github/workflows/gh-page.yml index 6bf299852e..74655d4392 100644 --- a/.github/workflows/gh-page.yml +++ b/.github/workflows/gh-page.yml @@ -16,7 +16,9 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4 + with: + submodules: recursive - name: Use shared Rust toolchain setting up steps uses: ./.github/actions/toolchain-shared From 2f9bb8e6f6f173456f7da8182efbece1ec210d46 Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Thu, 29 Aug 2024 13:43:37 -0700 Subject: [PATCH 110/178] CI: bump up setup-ocaml version to v3 Released earlier this year in July. No big changes. Only bumping up to stay up-to-date. --- .github/actions/ocaml-shared/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/ocaml-shared/action.yml b/.github/actions/ocaml-shared/action.yml index 37351b6a4c..0ca66d56ba 100644 --- a/.github/actions/ocaml-shared/action.yml +++ b/.github/actions/ocaml-shared/action.yml @@ -8,7 +8,7 @@ runs: using: "composite" steps: - name: Setup OCaml ${{ inputs.ocaml_version }} - uses: ocaml/setup-ocaml@v2 + uses: ocaml/setup-ocaml@v3 with: ocaml-compiler: ${{ inputs.ocaml_version }} # https://github.com/ocaml/setup-ocaml/issues/211#issuecomment-1058882386 From b529f30a6f885f1d39db5c1186be2ec1cb6b3535 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Fri, 30 Aug 2024 10:13:41 +0300 Subject: [PATCH 111/178] Job rename. --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d08868351..c864100dce 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,7 +62,7 @@ jobs: make format run_checks: - name: Run some basic checks and tests + name: Run checks and tests runs-on: ${{ matrix.os }} env: RUST_TOOLCHAIN_COVERAGE_VERSION: "1.74" From f16e7e15e9a91debf03bff200fbcf40613cb1743 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Fri, 30 Aug 2024 16:12:56 +0300 Subject: [PATCH 112/178] Documentation. --- .github/workflows/benches.yml | 2 +- .github/workflows/ci.yml | 1 - CONTRIBUTING.md | 69 +++++++++++++++++++++++++++-------- Makefile | 14 ++++++- README.md | 19 ++++++---- 5 files changed, 77 insertions(+), 28 deletions(-) diff --git a/.github/workflows/benches.yml b/.github/workflows/benches.yml index 70cd89cee9..24924d043d 100644 --- a/.github/workflows/benches.yml +++ b/.github/workflows/benches.yml @@ -49,7 +49,7 @@ jobs: cat criterion_bench - name: Write result in PR - uses: actions/github-script@v5 + uses: actions/github-script@v7 with: script: | const fs = require('fs'); diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c864100dce..8e788b7784 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -146,7 +146,6 @@ jobs: if: ${{ matrix.rust_toolchain_version != env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) - make clean make nextest - name: Run non-heavy tests with the code coverage diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 93f062b120..3f47c5c106 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,14 +14,17 @@ We have a list of easy task to start contributing. 
[Start over there](https://gi ## Setting up the project -Run +Make sure you have the GNU `make` utility installed since we use it to streamline various tasks. +Windows users may need to use the `WSL` to run `make` commands. +For the complete list of `make` targets, please refer to the [Makefile](Makefile). + +After the repository being cloned, run: ```shell -git submodule init -git submodule update +make setup ``` -to get the version of Optimism the zkVM has been developed for. +this will also synchronize the Git submodules to get the version of Optimism the zkVM has been developed for. ### Mac & Linux @@ -38,29 +41,63 @@ Windows development can be done using [Windows Subsystem for Linux (WSL)](https: ## Development -To run tests: +To run all tests: + +### Setting up -```bash -cargo test --all-features --release +```shell +make install-test-deps ``` -Takes about 5-8 minutes on a MacBook Pro (2019, 8-Core Intel Core i9, 32GB RAM). Without `--release`, more than an hour. +### Cargo test runner -To scan for lints: +```shell +make test-all +``` + +### Nextest test runner -```bash -cargo clippy --all-features --tests --all-targets -- -D warnings +```shell +make nextest-all ``` -Note: cargo can automatically fix some lints. To do so, add `--fix` to the above command (as the first parameter). +We also provide the `make` targets to run tests with the code coverage reporting, for example: + +```shell +make test-all-with-coverage +``` -Finally, to check formatting: +You can also specify an extra CLI argument to `make` to pass it to the cargo or binary, for example: + +```shell +CARGO_EXTRA_ARGS="-p poly-commitment" make test-all-with-coverage +BIN_EXTRA_ARGS="-p poly-commitment" make nextest-all-with-coverage +``` -```bash -cargo fmt +Note: In example above we run tests for the `poly-commitment` package only. + +We build and run tests in `--release` mode, because otherwise tests execution can last for a long time. + +To check formatting: + +```shell +make format +``` + +To scan for lints: + +```shell +make lint +``` + +Note: cargo can automatically fix some lints. To do so, add `--fix` to the `CARGO_EXTRA_ARGS` variable and use it with the command above like this: + +```shell +CARGO_EXTRA_ARGS="--fix" make lint ``` -These are enforced by GitHub PR checks, so be sure to have any errors produced by the above tools fixed before pushing the code to your pull request branch. Refer to `.github/workflows` for all PR checks. +Formatting and lints are enforced by GitHub PR checks, so please be sure to have any errors produced by the above tools fixed before pushing the code to your pull request branch. +Please refer to [CI](.github/workflows/ci.yml) workflow to see all PR checks. ## Branching policy diff --git a/Makefile b/Makefile index fa79d7b25c..3543440992 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,16 @@ GRCOV_CALL = grcov ./target/profraw --binary-path ./target/release/deps/ -s . -- # Default target all: release +setup: + @echo "" + @echo "Syncing the Git submodules." + @echo "" + git submodule sync + git submodule update --init --recursive + @echo "" + @echo "Git submodules synced." + @echo "" + # Install test dependencies # https://nexte.st/book/pre-built-binaries.html#using-nextest-in-github-actions # FIXME: update to 0.9.68 when we get rid of 1.71 and 1.72. 
@@ -87,7 +97,7 @@ format: # Lint the code lint: - cargo clippy --all-features --all-targets --tests -- -W clippy::all -D warnings + cargo clippy --all-features --all-targets --tests $(CARGO_EXTRA_ARGS) -- -W clippy::all -D warnings generate-test-coverage-report: @echo "" @@ -103,4 +113,4 @@ generate-test-coverage-report: @echo "The test coverage report is available at: ./target/coverage" @echo "" -.PHONY: install-test-deps all clean build release test test-with-coverage test-heavy test-heavy-with-coverage test-all test-all-with-coverage nextest nextest-with-coverage nextest-heavy nextest-heavy-with-coverage nextest-all nextest-all-with-coverage format lint generate-test-coverage-report +.PHONY: all setup install-test-deps clean build release test test-with-coverage test-heavy test-heavy-with-coverage test-all test-all-with-coverage nextest nextest-with-coverage nextest-heavy nextest-heavy-with-coverage nextest-all nextest-all-with-coverage format lint generate-test-coverage-report diff --git a/README.md b/README.md index 94145083f2..0d365a4707 100644 --- a/README.md +++ b/README.md @@ -61,8 +61,6 @@ The project is organized in the following way: ## Contributing Check [CONTRIBUTING.md](CONTRIBUTING.md) if you are interested in contributing to this project. -<<<<<<< HEAD -======= ## Generate rustdoc locally @@ -82,10 +80,15 @@ You can visualize the documentation by opening the file `target/doc/index.html`. -The CI will build different targets. +- [CI](.github/workflows/ci.yml). + This workflow ensures that the entire project builds correctly, adheres to guidelines, and passes all necessary tests. +- [Nightly tests with code coverage](.github/workflows/ci-nightly.yml). + This workflow runs all the tests nightly or on demand, generates and publishes the code coverage report. +- [Benchmarks](.github/workflows/benches.yml). + This workflow runs benchmarks when a pull request is labeled with "benchmark." It sets up the Rust and OCaml environments, installs necessary tools, and executes cargo criterion benchmarks on the kimchi crate. The benchmark results are then posted as a comment on the pull request for review. +- [Deploy Specifications & Docs to GitHub Pages](.github/workflows/gh-page.yml). + When CI passes on master, the documentation built from the rust code will be available by this [link](https://o1-labs.github.io/proof-systems/rustdoc) and the book will be available by this [link](https://o1-labs.github.io/proof-systems). -- [Deploy Specifications & Docs to GitHub Pages](.github/workflows/gh-page.yml). - When CI passes on master, the documentation built from the rust code will be - available [here](https://o1-labs.github.io/proof-systems/rustdoc) and the book - will be available [here](https://o1-labs.github.io/proof-systems). ->>>>>>> 2fd953cd04 (Add the test coverage data gathering and reports generation.) +## Nix for Dependencies (WIP) + +If you have `nix` installed and in particular, `flakes` enabled, you can install the dependencies for these projects using nix. Simply `nix develop .` inside this directory to bring into scope `rustup`, `opam`, and `go` (along with a few other tools). You will have to manage the toolchains yourself using `rustup` and `opam`, in the current iteration. From 5e58573113e4f279062206008419e3ee89dc03cd Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Fri, 30 Aug 2024 17:50:25 +0300 Subject: [PATCH 113/178] Let's try to speed up tests with coverage by not cleaning up the env. 
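Should skipping `make clean` ever let stale instrumentation data skew a report, one possible middle ground (an alternative sketch, not what the patch below does) is to drop only the profile output while keeping the compiled dependencies warm:

```yaml
- name: Drop stale coverage profiles only
  run: rm -rf target/profraw # LLVM_PROFILE_FILE points into target/profraw in the Makefile
```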
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8e788b7784..91d98280c9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -152,7 +152,7 @@ jobs: if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) - make clean + # make clean make nextest-with-coverage - name: Use shared Codecov reporting steps From e78e2309e23b9c1f61352556484baf06dcf37720 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Fri, 30 Aug 2024 20:05:10 +0300 Subject: [PATCH 114/178] Refactor CI to run tests with the code coverage on self-hosted runner and make Nightly builds on demand with the coverage report attached to the job execution results rather than upload it to the Codecov to not mess the diff checks. --- .../{ci-nightly.yml => ci-on-demand.yml} | 18 +++++++++++------- .github/workflows/ci.yml | 16 ++++++++++++---- README.md | 4 ++-- 3 files changed, 25 insertions(+), 13 deletions(-) rename .github/workflows/{ci-nightly.yml => ci-on-demand.yml} (71%) diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-on-demand.yml similarity index 71% rename from .github/workflows/ci-nightly.yml rename to .github/workflows/ci-on-demand.yml index 7889772681..443363d3e7 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-on-demand.yml @@ -1,12 +1,11 @@ # -# This workflow is triggered nightly (or on-demand) to run all the tests with the code coverage enabled and using the self-hosted GitHub runner. +# This workflow is triggered on-demand to run all the tests with the code coverage enabled and using the self-hosted GitHub runner. +# Test coverage report is attached to the current job execution results in a form of Zip archive. # -name: Nightly tests with code coverage +name: On-demand tests with the code coverage on: - schedule: - - cron: "0 0 * * *" workflow_dispatch: {} env: @@ -57,7 +56,12 @@ jobs: make clean make nextest-all-with-coverage - - name: Use shared Codecov reporting steps - uses: ./.github/actions/codecov-shared + - name: Upload the HTML test coverage report + uses: actions/upload-artifact@v4 + continue-on-error: true + if: always() with: - token: ${{ secrets.CODECOV_TOKEN }} + if-no-files-found: ignore + name: test-coverage-html-report-${{ matrix.rust_toolchain_version }}-${{ matrix.os }} + path: target/coverage/ + retention-days: 30 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 91d98280c9..199d20f8a4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -61,9 +61,19 @@ jobs: run: | make format + # We run only one of the matrix options on the toffee hetzner-1 self-hosted GitHub runner. + # Only in this configuration we enable heavy tests with the code coverage data gathering. 
+ run_checks_setup: + runs-on: ubuntu-latest + outputs: + runners: '{"1.71":"ubuntu-latest", "1.72": "ubuntu-latest", "1.73": "ubuntu-latest", "1.74": "hetzner-1"}' + steps: + - run: echo no-op + run_checks: + needs: [run_checks_setup] name: Run checks and tests - runs-on: ${{ matrix.os }} + runs-on: ${{ fromJSON(needs.run_checks_setup.outputs.runners)[matrix.rust_toolchain_version] }} env: RUST_TOOLCHAIN_COVERAGE_VERSION: "1.74" strategy: @@ -143,7 +153,6 @@ jobs: make install-test-deps - name: Run non-heavy tests without the code coverage - if: ${{ matrix.rust_toolchain_version != env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) make nextest @@ -152,8 +161,7 @@ jobs: if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) - # make clean - make nextest-with-coverage + make nextest-heavy-with-coverage - name: Use shared Codecov reporting steps if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} diff --git a/README.md b/README.md index 0d365a4707..8ace4ec2dc 100644 --- a/README.md +++ b/README.md @@ -82,8 +82,8 @@ You can visualize the documentation by opening the file `target/doc/index.html`. - [CI](.github/workflows/ci.yml). This workflow ensures that the entire project builds correctly, adheres to guidelines, and passes all necessary tests. -- [Nightly tests with code coverage](.github/workflows/ci-nightly.yml). - This workflow runs all the tests nightly or on demand, generates and publishes the code coverage report. +- [On-demand tests with the code coverage](.github/workflows/ci-on-demand.yml). + This workflow runs all the tests on demand, generates and attaches the code coverage report to job execution results. - [Benchmarks](.github/workflows/benches.yml). This workflow runs benchmarks when a pull request is labeled with "benchmark." It sets up the Rust and OCaml environments, installs necessary tools, and executes cargo criterion benchmarks on the kimchi crate. The benchmark results are then posted as a comment on the pull request for review. - [Deploy Specifications & Docs to GitHub Pages](.github/workflows/gh-page.yml). From 903e5990c7b507ee3f7be2c3bb543cccfee78a6c Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Fri, 30 Aug 2024 20:11:49 +0300 Subject: [PATCH 115/178] Refactoring. --- .github/workflows/{ci-on-demand.yml => ci-nightly.yml} | 6 ++++-- .github/workflows/ci.yml | 1 + README.md | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) rename .github/workflows/{ci-on-demand.yml => ci-nightly.yml} (88%) diff --git a/.github/workflows/ci-on-demand.yml b/.github/workflows/ci-nightly.yml similarity index 88% rename from .github/workflows/ci-on-demand.yml rename to .github/workflows/ci-nightly.yml index 443363d3e7..a17e94ebf3 100644 --- a/.github/workflows/ci-on-demand.yml +++ b/.github/workflows/ci-nightly.yml @@ -1,11 +1,13 @@ # -# This workflow is triggered on-demand to run all the tests with the code coverage enabled and using the self-hosted GitHub runner. +# This workflow is triggered by scheduler or on-demand to run all the tests with the code coverage enabled and using the self-hosted GitHub runner. # Test coverage report is attached to the current job execution results in a form of Zip archive. 
# -name: On-demand tests with the code coverage +name: Nightly tests with the code coverage on: + schedule: + - cron: "0 0 * * *" # every day at midnight workflow_dispatch: {} env: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 199d20f8a4..8639307c6f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -153,6 +153,7 @@ jobs: make install-test-deps - name: Run non-heavy tests without the code coverage + if: ${{ matrix.rust_toolchain_version != env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) make nextest diff --git a/README.md b/README.md index 8ace4ec2dc..75a40fdd1c 100644 --- a/README.md +++ b/README.md @@ -82,8 +82,8 @@ You can visualize the documentation by opening the file `target/doc/index.html`. - [CI](.github/workflows/ci.yml). This workflow ensures that the entire project builds correctly, adheres to guidelines, and passes all necessary tests. -- [On-demand tests with the code coverage](.github/workflows/ci-on-demand.yml). - This workflow runs all the tests on demand, generates and attaches the code coverage report to job execution results. +- [Nightly tests with the code coverage](.github/workflows/ci-nightly.yml). + This workflow runs all the tests per scheduler or on-demand, generates and attaches the code coverage report to the job's execution results. - [Benchmarks](.github/workflows/benches.yml). This workflow runs benchmarks when a pull request is labeled with "benchmark." It sets up the Rust and OCaml environments, installs necessary tools, and executes cargo criterion benchmarks on the kimchi crate. The benchmark results are then posted as a comment on the pull request for review. - [Deploy Specifications & Docs to GitHub Pages](.github/workflows/gh-page.yml). From b646696d1ab19e9e12a8557814f8e305a7a24dd9 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Fri, 30 Aug 2024 20:59:21 +0300 Subject: [PATCH 116/178] Step name fix. --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8639307c6f..76769a5d62 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -158,7 +158,7 @@ jobs: eval $(opam env) make nextest - - name: Run non-heavy tests with the code coverage + - name: Run heavy tests with the code coverage if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) From ce3eac073971d1567e46d285d9e54ce0dbc591fb Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Fri, 30 Aug 2024 23:25:08 +0300 Subject: [PATCH 117/178] Heavy tests with coverage are too heavy (3h+), reverting back to non-heavy, everything else will be executed during the Nightly runs. 
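For reference, the heavy/non-heavy split can also be expressed at the test-runner level; a hedged sketch using nextest filtersets (the repository's actual recipes live in its Makefile, which keys off "heavy" in test names, as in `cargo test ... --skip heavy`):

```yaml
- name: Non-heavy tests
  run: cargo nextest run --all-features --release -E 'not test(heavy)'
- name: Heavy tests, reserved for nightly or self-hosted runs
  run: cargo nextest run --all-features --release -E 'test(heavy)'
```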
--- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 76769a5d62..68b42f40d4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -158,11 +158,11 @@ jobs: eval $(opam env) make nextest - - name: Run heavy tests with the code coverage + - name: Run non-heavy tests with the code coverage if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) - make nextest-heavy-with-coverage + make nextest-with-coverage - name: Use shared Codecov reporting steps if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} From 370eedcbad23c6a45e18accf11009e5d590ea161 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Sat, 31 Aug 2024 09:29:37 +0300 Subject: [PATCH 118/178] Log job summary with the test coverage on completion. --- .../coverage-summary-shared/action.yml | 32 +++++++++++++++++++ .github/workflows/ci-nightly.yml | 3 ++ .github/workflows/ci.yml | 4 +++ 3 files changed, 39 insertions(+) create mode 100644 .github/actions/coverage-summary-shared/action.yml diff --git a/.github/actions/coverage-summary-shared/action.yml b/.github/actions/coverage-summary-shared/action.yml new file mode 100644 index 0000000000..fd61cd647d --- /dev/null +++ b/.github/actions/coverage-summary-shared/action.yml @@ -0,0 +1,32 @@ +name: "Shared code coverage summary" +description: "Shared code coverage summary" +inputs: + html_file: + description: "HTML file with the coverage report" + required: false + default: "target/coverage/index.html" + shell: + description: "Shell to use" + required: false + default: "bash" +runs: + using: "composite" + steps: + - name: Add test coverage summary + run: | + echo "### Test coverage summary" >> $GITHUB_STEP_SUMMARY + # Define the HTML file + html_file="${{ inputs.html_file }}" + # Extract data for Lines, Functions, and Branches using `sed` + lines=$(sed -n '/heading">Lines/{n;n;s/.*]*>\(.*%\)<\/abbr>.*/\1/p;}' "$html_file") + functions=$(sed -n '/heading">Functions/{n;n;s/.*]*>\(.*%\)<\/abbr>.*/\1/p;}' "$html_file") + branches=$(sed -n '/heading">Branches/{n;n;s/.*]*>\(.*%\)<\/abbr>.*/\1/p;}' "$html_file") + # Compose Markdown summary table + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Metric | Coverage |" >> $GITHUB_STEP_SUMMARY + echo "|-----------|----------|" >> $GITHUB_STEP_SUMMARY + echo "| Lines | $lines |" >> $GITHUB_STEP_SUMMARY + echo "| Functions | $functions |" >> $GITHUB_STEP_SUMMARY + echo "| Branches | $branches |" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + shell: ${{ inputs.shell }} diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index a17e94ebf3..55b4106aab 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -58,6 +58,9 @@ jobs: make clean make nextest-all-with-coverage + - name: Use shared code coverage summary + uses: ./.github/actions/coverage-summary-shared + - name: Upload the HTML test coverage report uses: actions/upload-artifact@v4 continue-on-error: true diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 68b42f40d4..d960968f6e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -164,6 +164,10 @@ jobs: eval $(opam env) make nextest-with-coverage + - name: Use shared code coverage summary + if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} + uses: ./.github/actions/coverage-summary-shared + - name: Use shared Codecov 
reporting steps if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} uses: ./.github/actions/codecov-shared From 916fa8977475d33309ff5e26476e6d3585624533 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Sat, 31 Aug 2024 09:52:35 +0300 Subject: [PATCH 119/178] Refactor runners selection. --- .github/workflows/ci.yml | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d960968f6e..542d0a0377 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -61,19 +61,11 @@ jobs: run: | make format - # We run only one of the matrix options on the toffee hetzner-1 self-hosted GitHub runner. - # Only in this configuration we enable heavy tests with the code coverage data gathering. - run_checks_setup: - runs-on: ubuntu-latest - outputs: - runners: '{"1.71":"ubuntu-latest", "1.72": "ubuntu-latest", "1.73": "ubuntu-latest", "1.74": "hetzner-1"}' - steps: - - run: echo no-op - run_checks: - needs: [run_checks_setup] name: Run checks and tests - runs-on: ${{ fromJSON(needs.run_checks_setup.outputs.runners)[matrix.rust_toolchain_version] }} + # We run only one of the matrix options on the toffee `hetzner-1` self-hosted GitHub runner. + # Only in this configuration we enable heavy tests with the code coverage data gathering. + runs-on: ${{ matrix.rust_toolchain_version == '1.74' && 'hetzner-1' || 'ubuntu-latest' }} env: RUST_TOOLCHAIN_COVERAGE_VERSION: "1.74" strategy: From 36cda266c237187b90f02fba12541d71b777e7d6 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Sat, 31 Aug 2024 09:55:07 +0300 Subject: [PATCH 120/178] Comments fix. --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 542d0a0377..148522d8af 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,7 +64,7 @@ jobs: run_checks: name: Run checks and tests # We run only one of the matrix options on the toffee `hetzner-1` self-hosted GitHub runner. - # Only in this configuration we enable heavy tests with the code coverage data gathering. + # Only in this configuration we enable tests with the code coverage data gathering. 
runs-on: ${{ matrix.rust_toolchain_version == '1.74' && 'hetzner-1' || 'ubuntu-latest' }} env: RUST_TOOLCHAIN_COVERAGE_VERSION: "1.74" From 5f7f1ca3b2d1aae8fcf00daff9815aa8c30d775f Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 18 Sep 2024 17:40:34 +0300 Subject: [PATCH 121/178] CI.yml changes --- .github/workflows/ci.yml | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 148522d8af..788c258a34 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,9 +1,6 @@ -# We use --offline to be sure that we do not access the network - name: CI on: - push: pull_request: env: @@ -94,7 +91,8 @@ jobs: - name: Install markdownlint run: | - npm install -g markdownlint-cli + # FIXME: 0.39.0 makes the CI fail + npm install -g markdownlint-cli@0.38.0 # # Doc & Spec @@ -102,22 +100,28 @@ jobs: - name: Install cargo-spec for specifications run: | + eval $(opam env) cargo install --locked cargo-spec - name: Build the kimchi specification run: | - cd book/specifications - cd kimchi && make build + cd book/specifications/kimchi + make build - name: Build the polynomial commitment specification run: | - cd book/specifications - cd poly-commitment && make build + cd book/specifications/poly-commitment + make build - name: Check that up-to-date specification is checked in run: | git diff --exit-code ":(exclude)rust-toolchain" + - name: Build cargo docs + run: | + eval $(opam env) + RUSTDOCFLAGS="-D warnings" cargo doc --all-features --no-deps + # # Coding guidelines # @@ -167,7 +171,6 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: Doc tests - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features --release --doc --offline + run: | + eval $(opam env) + cargo test --all-features --release --doc From 7100d83a6a5530effd61b2e48cc0dde4fcbfb293 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 18 Sep 2024 17:45:40 +0300 Subject: [PATCH 122/178] Nix support was not added into the develop yet. --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index 75a40fdd1c..9f17533e5d 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,3 @@ You can visualize the documentation by opening the file `target/doc/index.html`. This workflow runs benchmarks when a pull request is labeled with "benchmark." It sets up the Rust and OCaml environments, installs necessary tools, and executes cargo criterion benchmarks on the kimchi crate. The benchmark results are then posted as a comment on the pull request for review. - [Deploy Specifications & Docs to GitHub Pages](.github/workflows/gh-page.yml). When CI passes on master, the documentation built from the rust code will be available by this [link](https://o1-labs.github.io/proof-systems/rustdoc) and the book will be available by this [link](https://o1-labs.github.io/proof-systems). - -## Nix for Dependencies (WIP) - -If you have `nix` installed and in particular, `flakes` enabled, you can install the dependencies for these projects using nix. Simply `nix develop .` inside this directory to bring into scope `rustup`, `opam`, and `go` (along with a few other tools). You will have to manage the toolchains yourself using `rustup` and `opam`, in the current iteration. 
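Among the PATCH 121 changes above, the documentation gate is worth a note. Condensed from the diff: `RUSTDOCFLAGS="-D warnings"` promotes rustdoc warnings (broken intra-doc links and the like) into hard errors, so documentation rot fails the check instead of landing silently.

```yaml
- name: Build cargo docs
  run: |
    eval $(opam env)
    RUSTDOCFLAGS="-D warnings" cargo doc --all-features --no-deps
```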
From 47e2cc595b387123e36bcf6f42db96583ba7592e Mon Sep 17 00:00:00 2001 From: Danny Willems Date: Sat, 31 Aug 2024 12:43:14 -0700 Subject: [PATCH 123/178] Makefile: use nightly for `format` target We do use some unstable feature, like: ``` Warning: can't set `indent_style = Block`, unstable features are only available in nightly channel. Warning: can't set `imports_granularity = Crate`, unstable features are only available in nightly channel. ``` --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3543440992..21f85ed370 100644 --- a/Makefile +++ b/Makefile @@ -93,7 +93,7 @@ nextest-all-with-coverage: # Format the code format: - cargo fmt -- --check + cargo +nightly fmt -- --check # Lint the code lint: From 12385c1cd5b5d5d1af69115717da3d10112542fe Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Sun, 1 Sep 2024 14:16:01 +0300 Subject: [PATCH 124/178] Run doc tests with coverage. --- .github/workflows/ci-nightly.yml | 1 + .github/workflows/ci.yml | 12 +++++++----- Makefile | 9 ++++++++- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 55b4106aab..3c23101d08 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -56,6 +56,7 @@ jobs: run: | eval $(opam env) make clean + make test-doc-with-coverage make nextest-all-with-coverage - name: Use shared code coverage summary diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 788c258a34..9bd64f5975 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -148,6 +148,12 @@ jobs: run: | make install-test-deps + - name: Doc tests + if: ${{ matrix.rust_toolchain_version != env.RUST_TOOLCHAIN_COVERAGE_VERSION }} + run: | + eval $(opam env) + make test-doc + - name: Run non-heavy tests without the code coverage if: ${{ matrix.rust_toolchain_version != env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | @@ -158,6 +164,7 @@ jobs: if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) + make test-doc-with-coverage make nextest-with-coverage - name: Use shared code coverage summary @@ -169,8 +176,3 @@ jobs: uses: ./.github/actions/codecov-shared with: token: ${{ secrets.CODECOV_TOKEN }} - - - name: Doc tests - run: | - eval $(opam env) - cargo test --all-features --release --doc diff --git a/Makefile b/Makefile index 21f85ed370..eaa739e030 100644 --- a/Makefile +++ b/Makefile @@ -43,6 +43,13 @@ build: release: cargo build --release --all-targets --all-features +# Test the project's docs comments +test-doc: + cargo test --all-features --release --doc + +test-doc-with-coverage: + $(COVERAGE_ENV) $(MAKE) test-doc + # Test the project with non-heavy tests and using native cargo test runner test: cargo test --all-features --release $(CARGO_EXTRA_ARGS) -- --nocapture --skip heavy $(BIN_EXTRA_ARGS) @@ -113,4 +120,4 @@ generate-test-coverage-report: @echo "The test coverage report is available at: ./target/coverage" @echo "" -.PHONY: all setup install-test-deps clean build release test test-with-coverage test-heavy test-heavy-with-coverage test-all test-all-with-coverage nextest nextest-with-coverage nextest-heavy nextest-heavy-with-coverage nextest-all nextest-all-with-coverage format lint generate-test-coverage-report +.PHONY: all setup install-test-deps clean build release test-doc test-doc-with-coverage test test-with-coverage test-heavy test-heavy-with-coverage test-all test-all-with-coverage nextest 
nextest-with-coverage nextest-heavy nextest-heavy-with-coverage nextest-all nextest-all-with-coverage format lint generate-test-coverage-report From 33d6a729dcf3b9bc7f341562c0997c335116d957 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Sun, 1 Sep 2024 15:51:26 +0300 Subject: [PATCH 125/178] Known coverage limitations documentation, RUSTDOCFLAGS, disable patch checks for Codecov. --- .github/workflows/ci-nightly.yml | 3 ++- .github/workflows/ci.yml | 3 ++- Makefile | 11 ++++------- codecov.yml | 3 +++ 4 files changed, 11 insertions(+), 9 deletions(-) create mode 100644 codecov.yml diff --git a/.github/workflows/ci-nightly.yml b/.github/workflows/ci-nightly.yml index 3c23101d08..8da92c5de5 100644 --- a/.github/workflows/ci-nightly.yml +++ b/.github/workflows/ci-nightly.yml @@ -56,8 +56,9 @@ jobs: run: | eval $(opam env) make clean - make test-doc-with-coverage make nextest-all-with-coverage + make test-doc-with-coverage + make generate-test-coverage-report - name: Use shared code coverage summary uses: ./.github/actions/coverage-summary-shared diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9bd64f5975..2325f583b0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -164,8 +164,9 @@ jobs: if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} run: | eval $(opam env) - make test-doc-with-coverage make nextest-with-coverage + make test-doc-with-coverage + make generate-test-coverage-report - name: Use shared code coverage summary if: ${{ matrix.rust_toolchain_version == env.RUST_TOOLCHAIN_COVERAGE_VERSION }} diff --git a/Makefile b/Makefile index eaa739e030..0ceaa39152 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,8 @@ # Variables -COVERAGE_ENV = CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' LLVM_PROFILE_FILE=$(shell pwd)/target/profraw/cargo-test-%p-%m.profraw +# Known coverage limitations and issues: +# - https://github.com/rust-lang/rust/issues/79417 +# - https://github.com/nextest-rs/nextest/issues/16 +COVERAGE_ENV = CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' RUSTDOCFLAGS="-Cinstrument-coverage" LLVM_PROFILE_FILE=$(shell pwd)/target/profraw/cargo-test-%p-%m.profraw # FIXME: In latest 0.8.19+ -t CLI argument can accept comma separated list of custom output types, hence, no need in double invocation GRCOV_CALL = grcov ./target/profraw --binary-path ./target/release/deps/ -s . 
--branch --ignore-not-existing --ignore "**/tests/**"
@@ -56,7 +59,6 @@ test:
 test-with-coverage:
 $(COVERAGE_ENV) CARGO_EXTRA_ARGS="$(CARGO_EXTRA_ARGS)" BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) test
- $(MAKE) generate-test-coverage-report
 # Test the project with heavy tests and using native cargo test runner
 test-heavy:
@@ -64,7+66,6 @@ test-heavy:
 test-heavy-with-coverage:
 $(COVERAGE_ENV) CARGO_EXTRA_ARGS="$(CARGO_EXTRA_ARGS)" BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) test-heavy
- $(MAKE) generate-test-coverage-report
 # Test the project with all tests and using native cargo test runner
 test-all:
@@ -72,7+73,6 @@ test-all:
 test-all-with-coverage:
 $(COVERAGE_ENV) CARGO_EXTRA_ARGS="$(CARGO_EXTRA_ARGS)" BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) test-all
- $(MAKE) generate-test-coverage-report
 # Test the project with non-heavy tests and using nextest test runner
 nextest:
@@ -80,7 +80,6 @@ nextest:
 nextest-with-coverage:
 $(COVERAGE_ENV) BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) nextest
- $(MAKE) generate-test-coverage-report
 # Test the project with heavy tests and using nextest test runner
 nextest-heavy:
@@ -88,7 +87,6 @@ nextest-heavy:
 nextest-heavy-with-coverage:
 $(COVERAGE_ENV) BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) nextest-heavy
- $(MAKE) generate-test-coverage-report
 # Test the project with all tests and using nextest test runner
 nextest-all:
@@ -96,7 +94,6 @@ nextest-all:
 nextest-all-with-coverage:
 $(COVERAGE_ENV) BIN_EXTRA_ARGS="$(BIN_EXTRA_ARGS)" $(MAKE) nextest-all
- $(MAKE) generate-test-coverage-report
 # Format the code
 format:

diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000000..fa348a8f24
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,3 @@
+coverage:
+ status:
+ patch: off

From af7c7cdb61352c779ae698437ffbd4b366f601a7 Mon Sep 17 00:00:00 2001
From: Serhii Shymkiv
Date: Sun, 1 Sep 2024 18:57:01 +0300
Subject: [PATCH 126/178] Codecov/patch doc comments.
---
 Makefile | 2 ++
 codecov.yml | 4 ++++
 2 files changed, 6 insertions(+)

diff --git a/Makefile b/Makefile
index 0ceaa39152..6063509911 100644
--- a/Makefile
+++ b/Makefile
@@ -2,6 +2,8 @@
 # Known coverage limitations and issues:
 # - https://github.com/rust-lang/rust/issues/79417
 # - https://github.com/nextest-rs/nextest/issues/16
+# FIXME: Update or remove the `codecov.yml` file to enable the `patch` coverage report and the corresponding PR check,
+# once the situation with Rust's Doctests is improved.
 COVERAGE_ENV = CARGO_INCREMENTAL=0 RUSTFLAGS='-Cinstrument-coverage' RUSTDOCFLAGS="-Cinstrument-coverage" LLVM_PROFILE_FILE=$(shell pwd)/target/profraw/cargo-test-%p-%m.profraw
 # FIXME: In 0.8.19+, the -t CLI argument can accept a comma-separated list of custom output types, hence no need for double invocation
 GRCOV_CALL = grcov ./target/profraw --binary-path ./target/release/deps/ -s . --branch --ignore-not-existing --ignore "**/tests/**"

diff --git a/codecov.yml b/codecov.yml
index fa348a8f24..d3ccbbdc0c 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -1,3 +1,7 @@
+# FIXME: Remove this file to enable the `patch` coverage report and the corresponding PR check,
+# once the situation with Rust's Doctests is improved.
+# - https://github.com/rust-lang/rust/issues/79417
+# - https://github.com/nextest-rs/nextest/issues/16
 coverage:
 status:
 patch: off

From 430182d698f8d0084e7f6810daae6b1bb6ebb156 Mon Sep 17 00:00:00 2001
From: Serhii Shymkiv
Date: Thu, 5 Sep 2024 09:50:40 +0300
Subject: [PATCH 127/178] Ignore doc comments for Codecov.
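
With RUSTDOCFLAGS="-Cinstrument-coverage" enabled above, the doc-comment lines
themselves can be flagged as uncovered in the grcov report (see
rust-lang/rust#79417), which drags down patch coverage on otherwise fully
tested changes. For illustration, these are the kind of lines the "///.*"
pattern below is meant to exclude (a hypothetical snippet, not an item from
this repository):

```
/// Doubles a limb value.
///
/// Each of these `///` lines matches the "///.*" pattern and is excluded
/// from the coverage report; the function body itself still counts.
pub fn double(x: u64) -> u64 {
    x * 2
}
```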
--- codecov.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/codecov.yml b/codecov.yml index d3ccbbdc0c..0d343c0d96 100644 --- a/codecov.yml +++ b/codecov.yml @@ -3,5 +3,8 @@ # - https://github.com/rust-lang/rust/issues/79417 # - https://github.com/nextest-rs/nextest/issues/16 coverage: + ignore: + # Ignore lines that start with Rust doc comments (///) + - "///.*" status: patch: off From ffe8911b2dfa3538a88aad034096a402bb2479b7 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Thu, 5 Sep 2024 15:00:07 +0300 Subject: [PATCH 128/178] Ignore doc comments for Codecov. --- codecov.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/codecov.yml b/codecov.yml index 0d343c0d96..e727f83e80 100644 --- a/codecov.yml +++ b/codecov.yml @@ -4,7 +4,9 @@ # - https://github.com/nextest-rs/nextest/issues/16 coverage: ignore: - # Ignore lines that start with Rust doc comments (///) - - "///.*" + - "///.*" # Ignore outer line doc comments (///) + - "/\\*\\*.*\\*/" # Ignore outer block doc comments (/** ... */) + - "/\\*!.*\\*/" # Ignore inner block doc comments (/*! ... */) + - "//!.*" # Ignore inner line doc comments (//!) status: patch: off From 22649b566fab4017a4220fabc556099be16899ed Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Wed, 18 Sep 2024 16:29:22 +0100 Subject: [PATCH 129/178] Downgrade wasm-bindgen to 0.2.87 --- Cargo.lock | 28 ++++++++++++++-------------- Cargo.toml | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 76c502a81d..ff85c650b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1110,9 +1110,9 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.67" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -2628,9 +2628,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -2638,9 +2638,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", @@ -2653,9 +2653,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2663,9 +2663,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = 
"54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", @@ -2676,15 +2676,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.67" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 5bc3d507bd..34f5c15cf3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,7 +71,7 @@ strum_macros = "0.26.1" syn = { version = "1.0.109", features = ["full"] } thiserror = "1.0.30" tinytemplate = "1.1" -wasm-bindgen = "=0.2.90" +wasm-bindgen = "=0.2.87" groupmap = { path = "./groupmap", version = "0.1.0" } internal-tracing = { path = "./internal-tracing", version = "0.1.0" } From 4b68490fefa09973d8dbd3b6af1155b97a2addf0 Mon Sep 17 00:00:00 2001 From: Mikhail Volkhov Date: Wed, 18 Sep 2024 16:30:50 +0100 Subject: [PATCH 130/178] Revendor --- proof-systems-vendors | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proof-systems-vendors b/proof-systems-vendors index 782304f933..7ea36d68a4 160000 --- a/proof-systems-vendors +++ b/proof-systems-vendors @@ -1 +1 @@ -Subproject commit 782304f9337249282065e2fc96ef1d8657e93e52 +Subproject commit 7ea36d68a4f325b1444850978a174ded8b0d4afe From 8a5b04fa40c031055e96afe615fa1bdf9af3da14 Mon Sep 17 00:00:00 2001 From: Serhii Shymkiv Date: Wed, 18 Sep 2024 20:35:58 +0300 Subject: [PATCH 131/178] MDBook fixes. --- book/Cargo.toml | 4 +- book/Makefile | 47 +++-- book/README.md | 2 + book/assets/css/mdbook-admonish.css | 92 +++++----- book/book.toml | 27 ++- book/macros.txt | 7 +- book/mdbook-admonish.css | 206 ++++++++++++---------- book/mermaid-init.js | 2 +- book/mermaid.min.js | 2 +- book/src/fundamentals/zkbook.md | 2 +- book/src/fundamentals/zkbook_2pc/fkaes.md | 2 +- book/src/fundamentals/zkbook_ips.md | 2 +- book/src/introduction.md | 2 +- book/src/kimchi/final_check.md | 2 +- book/src/kimchi/foreign_field_add.md | 4 +- book/src/kimchi/foreign_field_mul.md | 2 +- book/src/kimchi/maller_15.md | 8 +- book/src/pickles/accumulation.md | 8 +- book/src/pickles/deferred.md | 2 +- book/src/plonk/domain.md | 2 +- book/src/plonk/zkpm.md | 16 +- book/src/specs/kimchi.md | 13 +- book/src/specs/pickles.md | 6 +- book/src/specs/poseidon.md | 6 +- 24 files changed, 253 insertions(+), 213 deletions(-) diff --git a/book/Cargo.toml b/book/Cargo.toml index 2a0d222bc7..be15bffe48 100644 --- a/book/Cargo.toml +++ b/book/Cargo.toml @@ -11,5 +11,5 @@ license = "Apache-2.0" [build-dependencies] cargo-spec = { version = "0.5.0" } -time = { version = "~0.3.23" } # This crate is a known bad-actor for breaking rust version support. -plist = { version = "~1.5.0" } # This crate improperly constrains its bad-actor dependency (`time`). +time = { version = "~0.3.23" } # This crate is a known bad-actor for breaking rust version support. +plist = { version = "~1.5.0" } # This crate improperly constrains its bad-actor dependency (`time`). 
diff --git a/book/Makefile b/book/Makefile index 893ebc857d..5416d1b7a3 100644 --- a/book/Makefile +++ b/book/Makefile @@ -2,50 +2,61 @@ # list versions of mdbook and mdbook plugins # -MDBOOK_VERSION = 0.4.27 -MDBOOK_ADMONISH_VERSION = 1.8.0 +MDBOOK_VERSION = 0.4.35 MDBOOK_KATEX_VERSION = 0.3.8 -MDBOOK_LINKCHECK_VERSION = 0.7.6 +MDBOOK_ADMONISH_VERSION = 1.14.0 +MDBOOK_LINKCHECK_VERSION = 0.7.7 MDBOOK_MERMAID_VERSION = 0.12.6 -MDBOOK_TOC_VERSION = 0.11.2 +MDBOOK_TOC_VERSION = 0.14.1 + + +all: deps check build serve # -# use `make deps` to install the dependencies required to serve or build the book +# Installs the dependencies required to serve or build the book # deps: - cargo install "mdbook@$(MDBOOK_VERSION)" - cargo install "mdbook-admonish@$(MDBOOK_ADMONISH_VERSION)" - cargo install "mdbook-katex@$(MDBOOK_KATEX_VERSION)" - # cargo install "mdbook-linkcheck@$(MDBOOK_LINKCHECK_VERSION)" - cargo install "mdbook-mermaid@$(MDBOOK_MERMAID_VERSION)" - cargo install "mdbook-toc@$(MDBOOK_TOC_VERSION)" + cargo install --locked "mdbook@$(MDBOOK_VERSION)" + cargo install --locked "mdbook-admonish@$(MDBOOK_ADMONISH_VERSION)" + cargo install --locked "mdbook-katex@$(MDBOOK_KATEX_VERSION)" + cargo install --locked --git https://github.com/o1-labs/mdbook-linkcheck --rev 8cccfc8fee397092ecdf1236a42871c5c980672e mdbook-linkcheck + cargo install --locked "mdbook-mermaid@$(MDBOOK_MERMAID_VERSION)" + cargo install --locked "mdbook-toc@$(MDBOOK_TOC_VERSION)" # -# use `make check` to check if your installed dependencies match what we've listed above +# Checks if your installed dependencies match what we've listed above # define check_version - (cargo install --list | grep "$(1) v" | cut -d " " -f 2 | tr -d ':' | grep -Fx "v$(2)") || (echo "your $(1) version needs to be $(2)" && exit 1) + @(cargo install --list | grep "$(1) v" | cut -d " " -f 2 | tr -d ':' | grep -Fx "v$(2)") || (echo "your $(1) version needs to be $(2)" && exit 1) endef check: $(call check_version,mdbook,$(MDBOOK_VERSION)) $(call check_version,mdbook-admonish,$(MDBOOK_ADMONISH_VERSION)) $(call check_version,mdbook-katex,$(MDBOOK_KATEX_VERSION)) + $(call check_version,mdbook-linkcheck,$(MDBOOK_LINKCHECK_VERSION)) $(call check_version,mdbook-mermaid,$(MDBOOK_MERMAID_VERSION)) $(call check_version,mdbook-toc,$(MDBOOK_TOC_VERSION)) # -# use `make` to serve the book locally +# Builds the book +# + +build: check + mdbook build + +# +# Serves the book locally # -all: check +serve: check mdbook serve --open # -# use `make build` to build the book +# Cleans the generated artefacts # -build: check - mdbook build +clean: + mdbook clean \ No newline at end of file diff --git a/book/README.md b/book/README.md index 63568c6b6c..335426c0dc 100644 --- a/book/README.md +++ b/book/README.md @@ -9,6 +9,8 @@ $ # install dependencies $ make deps $ # serve the page locally $ make +$ # clean +$ make clean ``` The specifications in the book are dynamically generated. Refer to the [specifications/](specifications/) directory. 
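
The `check_version` macro added to book/Makefile above verifies each pinned
tool by grepping the output of `cargo install --list` for an exact
`<tool> v<version>` header line. A rough Rust equivalent of that check, purely
illustrative (the real logic is the one-line shell pipeline in the Makefile;
tool names and versions are taken from the diff above):

```
// Sketch of the version check: find a header line of the form
// "<tool> v<version>:" in `cargo install --list` and fail with the same
// message the Makefile prints on a mismatch.
use std::process::Command;

fn check_version(tool: &str, want: &str) -> Result<(), String> {
    let out = Command::new("cargo")
        .args(["install", "--list"])
        .output()
        .map_err(|e| e.to_string())?;
    let expected = format!("{tool} v{want}");
    if String::from_utf8_lossy(&out.stdout)
        .lines()
        .any(|line| line.trim_end_matches(':') == expected)
    {
        Ok(())
    } else {
        Err(format!("your {tool} version needs to be {want}"))
    }
}

fn main() {
    for (tool, want) in [("mdbook", "0.4.35"), ("mdbook-toc", "0.14.1")] {
        if let Err(msg) = check_version(tool, want) {
            eprintln!("{msg}");
            std::process::exit(1);
        }
    }
}
```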
diff --git a/book/assets/css/mdbook-admonish.css b/book/assets/css/mdbook-admonish.css index 29ed451e07..078650fd48 100644 --- a/book/assets/css/mdbook-admonish.css +++ b/book/assets/css/mdbook-admonish.css @@ -1,28 +1,16 @@ :root { - --md-admonition-icon--note: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--abstract: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--info: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--tip: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--success: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--question: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--warning: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--failure: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--danger: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--bug: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--example: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--quote: - url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--note: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--abstract: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--info: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--tip: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--success: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--question: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--warning: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--failure: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--danger: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--bug: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--example: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--quote: url("data:image/svg+xml;charset=utf-8,"); } :is(.admonition) { @@ -44,7 +32,7 @@ } } -:is(.admonition)>* { +:is(.admonition) > * { box-sizing: border-box; } @@ -53,11 +41,11 @@ margin-bottom: 1em; } -:is(.admonition)>.tabbed-set:only-child { +:is(.admonition) > .tabbed-set:only-child { margin-top: 0; } -html :is(.admonition)> :last-child { +html :is(.admonition) > :last-child { margin-bottom: 1.2rem; } @@ -110,11 +98,11 @@ html :is(.admonition-title, summary):last-child { border-color: #448aff; } -:is(.note)> :is(.admonition-title, summary) { +:is(.note) > :is(.admonition-title, summary) { background-color: rgba(68, 138, 255, 0.1); } -:is(.note)> :is(.admonition-title, summary)::before { +:is(.note) > :is(.admonition-title, summary)::before { background-color: #448aff; mask-image: var(--md-admonition-icon--note); -webkit-mask-image: var(--md-admonition-icon--note); @@ -128,11 +116,11 @@ html :is(.admonition-title, summary):last-child { border-color: #00b0ff; } -:is(.abstract, .summary, .tldr)> :is(.admonition-title, summary) { +:is(.abstract, .summary, .tldr) > :is(.admonition-title, summary) { background-color: rgba(0, 176, 255, 0.1); } -:is(.abstract, .summary, .tldr)> :is(.admonition-title, summary)::before { +:is(.abstract, .summary, .tldr) > :is(.admonition-title, summary)::before { background-color: #00b0ff; mask-image: var(--md-admonition-icon--abstract); -webkit-mask-image: var(--md-admonition-icon--abstract); @@ -146,11 +134,11 @@ html :is(.admonition-title, summary):last-child { border-color: #00b8d4; } -:is(.info, .todo)> 
:is(.admonition-title, summary) { +:is(.info, .todo) > :is(.admonition-title, summary) { background-color: rgba(0, 184, 212, 0.1); } -:is(.info, .todo)> :is(.admonition-title, summary)::before { +:is(.info, .todo) > :is(.admonition-title, summary)::before { background-color: #00b8d4; mask-image: var(--md-admonition-icon--info); -webkit-mask-image: var(--md-admonition-icon--info); @@ -164,11 +152,11 @@ html :is(.admonition-title, summary):last-child { border-color: #00bfa5; } -:is(.tip, .hint, .important)> :is(.admonition-title, summary) { +:is(.tip, .hint, .important) > :is(.admonition-title, summary) { background-color: rgba(0, 191, 165, 0.1); } -:is(.tip, .hint, .important)> :is(.admonition-title, summary)::before { +:is(.tip, .hint, .important) > :is(.admonition-title, summary)::before { background-color: #00bfa5; mask-image: var(--md-admonition-icon--tip); -webkit-mask-image: var(--md-admonition-icon--tip); @@ -182,11 +170,11 @@ html :is(.admonition-title, summary):last-child { border-color: #00c853; } -:is(.success, .check, .done)> :is(.admonition-title, summary) { +:is(.success, .check, .done) > :is(.admonition-title, summary) { background-color: rgba(0, 200, 83, 0.1); } -:is(.success, .check, .done)> :is(.admonition-title, summary)::before { +:is(.success, .check, .done) > :is(.admonition-title, summary)::before { background-color: #00c853; mask-image: var(--md-admonition-icon--success); -webkit-mask-image: var(--md-admonition-icon--success); @@ -200,11 +188,11 @@ html :is(.admonition-title, summary):last-child { border-color: #64dd17; } -:is(.question, .help, .faq)> :is(.admonition-title, summary) { +:is(.question, .help, .faq) > :is(.admonition-title, summary) { background-color: rgba(100, 221, 23, 0.1); } -:is(.question, .help, .faq)> :is(.admonition-title, summary)::before { +:is(.question, .help, .faq) > :is(.admonition-title, summary)::before { background-color: #64dd17; mask-image: var(--md-admonition-icon--question); -webkit-mask-image: var(--md-admonition-icon--question); @@ -218,11 +206,11 @@ html :is(.admonition-title, summary):last-child { border-color: #ff9100; } -:is(.warning, .caution, .attention)> :is(.admonition-title, summary) { +:is(.warning, .caution, .attention) > :is(.admonition-title, summary) { background-color: rgba(255, 145, 0, 0.1); } -:is(.warning, .caution, .attention)> :is(.admonition-title, summary)::before { +:is(.warning, .caution, .attention) > :is(.admonition-title, summary)::before { background-color: #ff9100; mask-image: var(--md-admonition-icon--warning); -webkit-mask-image: var(--md-admonition-icon--warning); @@ -236,11 +224,11 @@ html :is(.admonition-title, summary):last-child { border-color: #ff5252; } -:is(.failure, .fail, .missing)> :is(.admonition-title, summary) { +:is(.failure, .fail, .missing) > :is(.admonition-title, summary) { background-color: rgba(255, 82, 82, 0.1); } -:is(.failure, .fail, .missing)> :is(.admonition-title, summary)::before { +:is(.failure, .fail, .missing) > :is(.admonition-title, summary)::before { background-color: #ff5252; mask-image: var(--md-admonition-icon--failure); -webkit-mask-image: var(--md-admonition-icon--failure); @@ -254,11 +242,11 @@ html :is(.admonition-title, summary):last-child { border-color: #ff1744; } -:is(.danger, .error)> :is(.admonition-title, summary) { +:is(.danger, .error) > :is(.admonition-title, summary) { background-color: rgba(255, 23, 68, 0.1); } -:is(.danger, .error)> :is(.admonition-title, summary)::before { +:is(.danger, .error) > :is(.admonition-title, summary)::before { 
background-color: #ff1744; mask-image: var(--md-admonition-icon--danger); -webkit-mask-image: var(--md-admonition-icon--danger); @@ -272,11 +260,11 @@ html :is(.admonition-title, summary):last-child { border-color: #f50057; } -:is(.bug)> :is(.admonition-title, summary) { +:is(.bug) > :is(.admonition-title, summary) { background-color: rgba(245, 0, 87, 0.1); } -:is(.bug)> :is(.admonition-title, summary)::before { +:is(.bug) > :is(.admonition-title, summary)::before { background-color: #f50057; mask-image: var(--md-admonition-icon--bug); -webkit-mask-image: var(--md-admonition-icon--bug); @@ -290,11 +278,11 @@ html :is(.admonition-title, summary):last-child { border-color: #7c4dff; } -:is(.example)> :is(.admonition-title, summary) { +:is(.example) > :is(.admonition-title, summary) { background-color: rgba(124, 77, 255, 0.1); } -:is(.example)> :is(.admonition-title, summary)::before { +:is(.example) > :is(.admonition-title, summary)::before { background-color: #7c4dff; mask-image: var(--md-admonition-icon--example); -webkit-mask-image: var(--md-admonition-icon--example); @@ -308,11 +296,11 @@ html :is(.admonition-title, summary):last-child { border-color: #9e9e9e; } -:is(.quote, .cite)> :is(.admonition-title, summary) { +:is(.quote, .cite) > :is(.admonition-title, summary) { background-color: rgba(158, 158, 158, 0.1); } -:is(.quote, .cite)> :is(.admonition-title, summary)::before { +:is(.quote, .cite) > :is(.admonition-title, summary)::before { background-color: #9e9e9e; mask-image: var(--md-admonition-icon--quote); -webkit-mask-image: var(--md-admonition-icon--quote); @@ -339,4 +327,4 @@ html :is(.admonition-title, summary):last-child { .rust .admonition-anchor-link:link, .rust .admonition-anchor-link:visited { color: var(--sidebar-fg); -} \ No newline at end of file +} diff --git a/book/book.toml b/book/book.toml index 55cadcbb94..9b1f9845ed 100644 --- a/book/book.toml +++ b/book/book.toml @@ -1,5 +1,13 @@ [book] -authors = ["Izaak Meckler", "Vanishree Rao", "Mathias Hall-Andersen", "Matthew Ryan", "Joseph Spadavecchia", "David Wong", "Vitaly Zelov"] +authors = [ + "Izaak Meckler", + "Vanishree Rao", + "Mathias Hall-Andersen", + "Matthew Ryan", + "Joseph Spadavecchia", + "David Wong", + "Vitaly Zelov", +] language = "en" multilingual = false src = "src" @@ -10,18 +18,20 @@ site-url = "/proof-systems/" use-site-url-as-root = true curly-quotes = true git-repository-url = "https://www.github.com/o1-labs/proof-systems" -additional-css = ["./assets/css/mdbook-admonish.css", "././mdbook-admonish.css"] +additional-css = ["./assets/css/mdbook-admonish.css", "./mdbook-admonish.css"] additional-js = ["mermaid.min.js", "mermaid-init.js"] # for LaTeX [output.katex] [preprocessor.katex] -macros = "macros.txt" # crypto related macros (as close to cryptocode as possible) +macros = "macros.txt" # crypto related macros (as close to cryptocode as possible) +throw-on-error = true +error-color = "#cc0000" # for callouts [preprocessor.admonish] command = "mdbook-admonish" -assets_version = "2.0.0" # do not edit: managed by `mdbook-admonish install` +assets_version = "3.0.1" # do not edit: managed by `mdbook-admonish install` # for protocol diagrams [preprocessor.mermaid] @@ -31,3 +41,12 @@ command = "mdbook-mermaid" [preprocessor.toc] command = "mdbook-toc" renderer = ["html"] + + +# See docs: +# - https://github.com/o1-labs/mdbook-linkcheck +# - (original repo) https://github.com/Michael-F-Bryan/mdbook-linkcheck +[output.linkcheck] +follow-web-links = false +traverse-parent-directories = false +latex-support = 
true diff --git a/book/macros.txt b/book/macros.txt index 2048c9d02c..26382f28ac 100644 --- a/book/macros.txt +++ b/book/macros.txt @@ -1,4 +1,4 @@ -\sample:{\overset{?}{\ \gets \ }} +\sample:{\stackrel{{\tiny \$}}{\ \gets\ }} \GG:{\mathbb{G}} \FF:{\mathbb{F}} \language:{\mathcal{L}} @@ -62,4 +62,7 @@ \enc:{\small \mathsf{Enc}} \gid:{\mathsf{gid}} \counter:{\mathsf{counter}} -\prg:{\small \mathsf{PRG}} \ No newline at end of file +\prg:{\small \mathsf{PRG}} + +\plonk:\mathcal{PlonK} +\plookup:\textsf{Plookup} \ No newline at end of file diff --git a/book/mdbook-admonish.css b/book/mdbook-admonish.css index 5e360387df..73c91e601e 100644 --- a/book/mdbook-admonish.css +++ b/book/mdbook-admonish.css @@ -1,31 +1,18 @@ @charset "UTF-8"; :root { - --md-admonition-icon--note: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--abstract: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--info: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--tip: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--success: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--question: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--warning: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--failure: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--danger: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--bug: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--example: - url("data:image/svg+xml;charset=utf-8,"); - --md-admonition-icon--quote: - url("data:image/svg+xml;charset=utf-8,"); - --md-details-icon: - url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-note: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-abstract: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-info: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-tip: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-success: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-question: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-warning: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-failure: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-danger: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-bug: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-example: url("data:image/svg+xml;charset=utf-8,"); + --md-admonition-icon--admonish-quote: url("data:image/svg+xml;charset=utf-8,"); + --md-details-icon: url("data:image/svg+xml;charset=utf-8,"); } :is(.admonition) { @@ -65,39 +52,46 @@ a.admonition-anchor-link { left: -1.2rem; padding-right: 1rem; } -a.admonition-anchor-link:link, a.admonition-anchor-link:visited { +a.admonition-anchor-link:link, +a.admonition-anchor-link:visited { color: var(--fg); } -a.admonition-anchor-link:link:hover, a.admonition-anchor-link:visited:hover { +a.admonition-anchor-link:link:hover, +a.admonition-anchor-link:visited:hover { text-decoration: none; } a.admonition-anchor-link::before { content: "§"; } -:is(.admonition-title, summary) { +:is(.admonition-title, summary.admonition-title) { position: relative; + min-height: 4rem; margin-block: 0; margin-inline: -1.6rem -1.2rem; padding-block: 0.8rem; padding-inline: 4.4rem 1.2rem; font-weight: 700; background-color: rgba(68, 
138, 255, 0.1); + print-color-adjust: exact; + -webkit-print-color-adjust: exact; display: flex; } -:is(.admonition-title, summary) p { +:is(.admonition-title, summary.admonition-title) p { margin: 0; } -html :is(.admonition-title, summary):last-child { +html :is(.admonition-title, summary.admonition-title):last-child { margin-bottom: 0; } -:is(.admonition-title, summary)::before { +:is(.admonition-title, summary.admonition-title)::before { position: absolute; top: 0.625em; inset-inline-start: 1.6rem; width: 2rem; height: 2rem; background-color: #448aff; + print-color-adjust: exact; + -webkit-print-color-adjust: exact; mask-image: url('data:image/svg+xml;charset=utf-8,'); -webkit-mask-image: url('data:image/svg+xml;charset=utf-8,'); mask-repeat: no-repeat; @@ -106,7 +100,8 @@ html :is(.admonition-title, summary):last-child { -webkit-mask-size: contain; content: ""; } -:is(.admonition-title, summary):hover a.admonition-anchor-link { +:is(.admonition-title, summary.admonition-title):hover + a.admonition-anchor-link { display: initial; } @@ -131,204 +126,223 @@ details[open].admonition > summary.admonition-title::after { transform: rotate(90deg); } -:is(.admonition):is(.note) { +:is(.admonition):is(.admonish-note) { border-color: #448aff; } -:is(.note) > :is(.admonition-title, summary) { +:is(.admonish-note) > :is(.admonition-title, summary.admonition-title) { background-color: rgba(68, 138, 255, 0.1); } -:is(.note) > :is(.admonition-title, summary)::before { +:is(.admonish-note) > :is(.admonition-title, summary.admonition-title)::before { background-color: #448aff; - mask-image: var(--md-admonition-icon--note); - -webkit-mask-image: var(--md-admonition-icon--note); + mask-image: var(--md-admonition-icon--admonish-note); + -webkit-mask-image: var(--md-admonition-icon--admonish-note); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.abstract, .summary, .tldr) { +:is(.admonition):is(.admonish-abstract, .admonish-summary, .admonish-tldr) { border-color: #00b0ff; } -:is(.abstract, .summary, .tldr) > :is(.admonition-title, summary) { +:is(.admonish-abstract, .admonish-summary, .admonish-tldr) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(0, 176, 255, 0.1); } -:is(.abstract, .summary, .tldr) > :is(.admonition-title, summary)::before { +:is(.admonish-abstract, .admonish-summary, .admonish-tldr) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #00b0ff; - mask-image: var(--md-admonition-icon--abstract); - -webkit-mask-image: var(--md-admonition-icon--abstract); + mask-image: var(--md-admonition-icon--admonish-abstract); + -webkit-mask-image: var(--md-admonition-icon--admonish-abstract); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.info, .todo) { +:is(.admonition):is(.admonish-info, .admonish-todo) { border-color: #00b8d4; } -:is(.info, .todo) > :is(.admonition-title, summary) { +:is(.admonish-info, .admonish-todo) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(0, 184, 212, 0.1); } -:is(.info, .todo) > :is(.admonition-title, summary)::before { +:is(.admonish-info, .admonish-todo) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #00b8d4; - mask-image: var(--md-admonition-icon--info); - -webkit-mask-image: var(--md-admonition-icon--info); + mask-image: var(--md-admonition-icon--admonish-info); + 
-webkit-mask-image: var(--md-admonition-icon--admonish-info); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.tip, .hint, .important) { +:is(.admonition):is(.admonish-tip, .admonish-hint, .admonish-important) { border-color: #00bfa5; } -:is(.tip, .hint, .important) > :is(.admonition-title, summary) { +:is(.admonish-tip, .admonish-hint, .admonish-important) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(0, 191, 165, 0.1); } -:is(.tip, .hint, .important) > :is(.admonition-title, summary)::before { +:is(.admonish-tip, .admonish-hint, .admonish-important) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #00bfa5; - mask-image: var(--md-admonition-icon--tip); - -webkit-mask-image: var(--md-admonition-icon--tip); + mask-image: var(--md-admonition-icon--admonish-tip); + -webkit-mask-image: var(--md-admonition-icon--admonish-tip); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.success, .check, .done) { +:is(.admonition):is(.admonish-success, .admonish-check, .admonish-done) { border-color: #00c853; } -:is(.success, .check, .done) > :is(.admonition-title, summary) { +:is(.admonish-success, .admonish-check, .admonish-done) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(0, 200, 83, 0.1); } -:is(.success, .check, .done) > :is(.admonition-title, summary)::before { +:is(.admonish-success, .admonish-check, .admonish-done) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #00c853; - mask-image: var(--md-admonition-icon--success); - -webkit-mask-image: var(--md-admonition-icon--success); + mask-image: var(--md-admonition-icon--admonish-success); + -webkit-mask-image: var(--md-admonition-icon--admonish-success); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.question, .help, .faq) { +:is(.admonition):is(.admonish-question, .admonish-help, .admonish-faq) { border-color: #64dd17; } -:is(.question, .help, .faq) > :is(.admonition-title, summary) { +:is(.admonish-question, .admonish-help, .admonish-faq) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(100, 221, 23, 0.1); } -:is(.question, .help, .faq) > :is(.admonition-title, summary)::before { +:is(.admonish-question, .admonish-help, .admonish-faq) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #64dd17; - mask-image: var(--md-admonition-icon--question); - -webkit-mask-image: var(--md-admonition-icon--question); + mask-image: var(--md-admonition-icon--admonish-question); + -webkit-mask-image: var(--md-admonition-icon--admonish-question); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.warning, .caution, .attention) { +:is(.admonition):is(.admonish-warning, .admonish-caution, .admonish-attention) { border-color: #ff9100; } -:is(.warning, .caution, .attention) > :is(.admonition-title, summary) { +:is(.admonish-warning, .admonish-caution, .admonish-attention) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(255, 145, 0, 0.1); } -:is(.warning, .caution, .attention) > :is(.admonition-title, summary)::before { +:is(.admonish-warning, .admonish-caution, .admonish-attention) + > :is(.admonition-title, 
summary.admonition-title)::before { background-color: #ff9100; - mask-image: var(--md-admonition-icon--warning); - -webkit-mask-image: var(--md-admonition-icon--warning); + mask-image: var(--md-admonition-icon--admonish-warning); + -webkit-mask-image: var(--md-admonition-icon--admonish-warning); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.failure, .fail, .missing) { +:is(.admonition):is(.admonish-failure, .admonish-fail, .admonish-missing) { border-color: #ff5252; } -:is(.failure, .fail, .missing) > :is(.admonition-title, summary) { +:is(.admonish-failure, .admonish-fail, .admonish-missing) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(255, 82, 82, 0.1); } -:is(.failure, .fail, .missing) > :is(.admonition-title, summary)::before { +:is(.admonish-failure, .admonish-fail, .admonish-missing) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #ff5252; - mask-image: var(--md-admonition-icon--failure); - -webkit-mask-image: var(--md-admonition-icon--failure); + mask-image: var(--md-admonition-icon--admonish-failure); + -webkit-mask-image: var(--md-admonition-icon--admonish-failure); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.danger, .error) { +:is(.admonition):is(.admonish-danger, .admonish-error) { border-color: #ff1744; } -:is(.danger, .error) > :is(.admonition-title, summary) { +:is(.admonish-danger, .admonish-error) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(255, 23, 68, 0.1); } -:is(.danger, .error) > :is(.admonition-title, summary)::before { +:is(.admonish-danger, .admonish-error) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #ff1744; - mask-image: var(--md-admonition-icon--danger); - -webkit-mask-image: var(--md-admonition-icon--danger); + mask-image: var(--md-admonition-icon--admonish-danger); + -webkit-mask-image: var(--md-admonition-icon--admonish-danger); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.bug) { +:is(.admonition):is(.admonish-bug) { border-color: #f50057; } -:is(.bug) > :is(.admonition-title, summary) { +:is(.admonish-bug) > :is(.admonition-title, summary.admonition-title) { background-color: rgba(245, 0, 87, 0.1); } -:is(.bug) > :is(.admonition-title, summary)::before { +:is(.admonish-bug) > :is(.admonition-title, summary.admonition-title)::before { background-color: #f50057; - mask-image: var(--md-admonition-icon--bug); - -webkit-mask-image: var(--md-admonition-icon--bug); + mask-image: var(--md-admonition-icon--admonish-bug); + -webkit-mask-image: var(--md-admonition-icon--admonish-bug); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.example) { +:is(.admonition):is(.admonish-example) { border-color: #7c4dff; } -:is(.example) > :is(.admonition-title, summary) { +:is(.admonish-example) > :is(.admonition-title, summary.admonition-title) { background-color: rgba(124, 77, 255, 0.1); } -:is(.example) > :is(.admonition-title, summary)::before { +:is(.admonish-example) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #7c4dff; - mask-image: var(--md-admonition-icon--example); - -webkit-mask-image: var(--md-admonition-icon--example); + mask-image: 
var(--md-admonition-icon--admonish-example); + -webkit-mask-image: var(--md-admonition-icon--admonish-example); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; -webkit-mask-repeat: no-repeat; } -:is(.admonition):is(.quote, .cite) { +:is(.admonition):is(.admonish-quote, .admonish-cite) { border-color: #9e9e9e; } -:is(.quote, .cite) > :is(.admonition-title, summary) { +:is(.admonish-quote, .admonish-cite) + > :is(.admonition-title, summary.admonition-title) { background-color: rgba(158, 158, 158, 0.1); } -:is(.quote, .cite) > :is(.admonition-title, summary)::before { +:is(.admonish-quote, .admonish-cite) + > :is(.admonition-title, summary.admonition-title)::before { background-color: #9e9e9e; - mask-image: var(--md-admonition-icon--quote); - -webkit-mask-image: var(--md-admonition-icon--quote); + mask-image: var(--md-admonition-icon--admonish-quote); + -webkit-mask-image: var(--md-admonition-icon--admonish-quote); mask-repeat: no-repeat; -webkit-mask-repeat: no-repeat; mask-size: contain; @@ -339,7 +353,8 @@ details[open].admonition > summary.admonition-title::after { background-color: var(--sidebar-bg); } -.ayu :is(.admonition), .coal :is(.admonition) { +.ayu :is(.admonition), +.coal :is(.admonition) { background-color: var(--theme-hover); } @@ -347,6 +362,7 @@ details[open].admonition > summary.admonition-title::after { background-color: var(--sidebar-bg); color: var(--sidebar-fg); } -.rust .admonition-anchor-link:link, .rust .admonition-anchor-link:visited { +.rust .admonition-anchor-link:link, +.rust .admonition-anchor-link:visited { color: var(--sidebar-fg); } diff --git a/book/mermaid-init.js b/book/mermaid-init.js index 313a6e8bc8..6075900ed5 100644 --- a/book/mermaid-init.js +++ b/book/mermaid-init.js @@ -1 +1 @@ -mermaid.initialize({startOnLoad:true}); +mermaid.initialize({ startOnLoad: true }); diff --git a/book/mermaid.min.js b/book/mermaid.min.js index d45942f362..5cbc3f3857 100644 --- a/book/mermaid.min.js +++ b/book/mermaid.min.js @@ -1,4 +1,4 @@ /* MIT Licensed. Copyright (c) 2014 - 2021 Knut Sveidqvist */ /*! 
For license information please see https://github.com/mermaid-js/mermaid/blob/8.13.10/LICENSE */ !function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.mermaid=e():t.mermaid=e()}("undefined"!=typeof self?self:this,(function(){return(()=>{var t={1362:(t,e,n)=>{t=n.nmd(t);var r=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,6],n=[1,7],r=[1,8],i=[1,9],a=[1,12],o=[1,11],s=[1,15,24],c=[1,19],u=[1,31],l=[1,34],h=[1,32],f=[1,33],d=[1,35],p=[1,36],y=[1,37],g=[1,38],m=[1,41],v=[1,42],b=[1,43],_=[1,44],x=[15,24],w=[1,56],k=[1,57],T=[1,58],E=[1,59],C=[1,60],S=[1,61],A=[15,24,31,38,39,47,50,51,52,53,54,55,60,62],M=[15,24,29,31,38,39,43,47,50,51,52,53,54,55,60,62,77,78,79,80],N=[7,8,9,10,15,18,22,24],D=[47,77,78,79,80],O=[47,54,55,77,78,79,80],B=[47,50,51,52,53,77,78,79,80],L=[15,24,31],I=[1,93],R={trace:function(){},yy:{},symbols_:{error:2,start:3,mermaidDoc:4,direction:5,directive:6,direction_tb:7,direction_bt:8,direction_rl:9,direction_lr:10,graphConfig:11,openDirective:12,typeDirective:13,closeDirective:14,NEWLINE:15,":":16,argDirective:17,open_directive:18,type_directive:19,arg_directive:20,close_directive:21,CLASS_DIAGRAM:22,statements:23,EOF:24,statement:25,className:26,alphaNumToken:27,classLiteralName:28,GENERICTYPE:29,relationStatement:30,LABEL:31,classStatement:32,methodStatement:33,annotationStatement:34,clickStatement:35,cssClassStatement:36,CLASS:37,STYLE_SEPARATOR:38,STRUCT_START:39,members:40,STRUCT_STOP:41,ANNOTATION_START:42,ANNOTATION_END:43,MEMBER:44,SEPARATOR:45,relation:46,STR:47,relationType:48,lineType:49,AGGREGATION:50,EXTENSION:51,COMPOSITION:52,DEPENDENCY:53,LINE:54,DOTTED_LINE:55,CALLBACK:56,LINK:57,LINK_TARGET:58,CLICK:59,CALLBACK_NAME:60,CALLBACK_ARGS:61,HREF:62,CSSCLASS:63,commentToken:64,textToken:65,graphCodeTokens:66,textNoTagsToken:67,TAGSTART:68,TAGEND:69,"==":70,"--":71,PCT:72,DEFAULT:73,SPACE:74,MINUS:75,keywords:76,UNICODE_TEXT:77,NUM:78,ALPHA:79,BQUOTE_STR:80,$accept:0,$end:1},terminals_:{2:"error",7:"direction_tb",8:"direction_bt",9:"direction_rl",10:"direction_lr",15:"NEWLINE",16:":",18:"open_directive",19:"type_directive",20:"arg_directive",21:"close_directive",22:"CLASS_DIAGRAM",24:"EOF",29:"GENERICTYPE",31:"LABEL",37:"CLASS",38:"STYLE_SEPARATOR",39:"STRUCT_START",41:"STRUCT_STOP",42:"ANNOTATION_START",43:"ANNOTATION_END",44:"MEMBER",45:"SEPARATOR",47:"STR",50:"AGGREGATION",51:"EXTENSION",52:"COMPOSITION",53:"DEPENDENCY",54:"LINE",55:"DOTTED_LINE",56:"CALLBACK",57:"LINK",58:"LINK_TARGET",59:"CLICK",60:"CALLBACK_NAME",61:"CALLBACK_ARGS",62:"HREF",63:"CSSCLASS",66:"graphCodeTokens",68:"TAGSTART",69:"TAGEND",70:"==",71:"--",72:"PCT",73:"DEFAULT",74:"SPACE",75:"MINUS",76:"keywords",77:"UNICODE_TEXT",78:"NUM",79:"ALPHA",80:"BQUOTE_STR"},productions_:[0,[3,1],[3,1],[3,2],[5,1],[5,1],[5,1],[5,1],[4,1],[6,4],[6,6],[12,1],[13,1],[17,1],[14,1],[11,4],[23,1],[23,2],[23,3],[26,1],[26,1],[26,2],[26,2],[26,2],[25,1],[25,2],[25,1],[25,1],[25,1],[25,1],[25,1],[25,1],[25,1],[32,2],[32,4],[32,5],[32,7],[34,4],[40,1],[40,2],[33,1],[33,2],[33,1],[33,1],[30,3],[30,4],[30,4],[30,5],[46,3],[46,2],[46,2],[46,1],[48,1],[48,1],[48,1],[48,1],[49,1],[49,1],[35,3],[35,4],[35,3],[35,4],[35,4],[35,5],[35,3],[35,4],[35,4],[35,5],[35,3],[35,4],[35,4],[35,5],[36,3],[64,1],[64,1],[65,1],[65,1],[65,1],[65,1],[65,1],[65,1],[65,1],[67,1],[67,1],[67,1],[67,1],[27,1],[27,1],[27,1],[28,1]],performAction:function(t,e,n,r,i,a,o){var 
s=a.length-1;switch(i){case 4:r.setDirection("TB");break;case 5:r.setDirection("BT");break;case 6:r.setDirection("RL");break;case 7:r.setDirection("LR");break;case 11:r.parseDirective("%%{","open_directive");break;case 12:r.parseDirective(a[s],"type_directive");break;case 13:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 14:r.parseDirective("}%%","close_directive","class");break;case 19:case 20:this.$=a[s];break;case 21:this.$=a[s-1]+a[s];break;case 22:case 23:this.$=a[s-1]+"~"+a[s];break;case 24:r.addRelation(a[s]);break;case 25:a[s-1].title=r.cleanupLabel(a[s]),r.addRelation(a[s-1]);break;case 33:r.addClass(a[s]);break;case 34:r.addClass(a[s-2]),r.setCssClass(a[s-2],a[s]);break;case 35:r.addClass(a[s-3]),r.addMembers(a[s-3],a[s-1]);break;case 36:r.addClass(a[s-5]),r.setCssClass(a[s-5],a[s-3]),r.addMembers(a[s-5],a[s-1]);break;case 37:r.addAnnotation(a[s],a[s-2]);break;case 38:this.$=[a[s]];break;case 39:a[s].push(a[s-1]),this.$=a[s];break;case 40:case 42:case 43:break;case 41:r.addMember(a[s-1],r.cleanupLabel(a[s]));break;case 44:this.$={id1:a[s-2],id2:a[s],relation:a[s-1],relationTitle1:"none",relationTitle2:"none"};break;case 45:this.$={id1:a[s-3],id2:a[s],relation:a[s-1],relationTitle1:a[s-2],relationTitle2:"none"};break;case 46:this.$={id1:a[s-3],id2:a[s],relation:a[s-2],relationTitle1:"none",relationTitle2:a[s-1]};break;case 47:this.$={id1:a[s-4],id2:a[s],relation:a[s-2],relationTitle1:a[s-3],relationTitle2:a[s-1]};break;case 48:this.$={type1:a[s-2],type2:a[s],lineType:a[s-1]};break;case 49:this.$={type1:"none",type2:a[s],lineType:a[s-1]};break;case 50:this.$={type1:a[s-1],type2:"none",lineType:a[s]};break;case 51:this.$={type1:"none",type2:"none",lineType:a[s]};break;case 52:this.$=r.relationType.AGGREGATION;break;case 53:this.$=r.relationType.EXTENSION;break;case 54:this.$=r.relationType.COMPOSITION;break;case 55:this.$=r.relationType.DEPENDENCY;break;case 56:this.$=r.lineType.LINE;break;case 57:this.$=r.lineType.DOTTED_LINE;break;case 58:case 64:this.$=a[s-2],r.setClickEvent(a[s-1],a[s]);break;case 59:case 65:this.$=a[s-3],r.setClickEvent(a[s-2],a[s-1]),r.setTooltip(a[s-2],a[s]);break;case 60:case 68:this.$=a[s-2],r.setLink(a[s-1],a[s]);break;case 61:case 69:this.$=a[s-3],r.setLink(a[s-2],a[s-1],a[s]);break;case 62:case 70:this.$=a[s-3],r.setLink(a[s-2],a[s-1]),r.setTooltip(a[s-2],a[s]);break;case 63:case 71:this.$=a[s-4],r.setLink(a[s-3],a[s-2],a[s]),r.setTooltip(a[s-3],a[s-1]);break;case 66:this.$=a[s-3],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 67:this.$=a[s-4],r.setClickEvent(a[s-3],a[s-2],a[s-1]),r.setTooltip(a[s-3],a[s]);break;case 
72:r.setCssClass(a[s-1],a[s])}},table:[{3:1,4:2,5:3,6:4,7:e,8:n,9:r,10:i,11:5,12:10,18:a,22:o},{1:[3]},{1:[2,1]},{1:[2,2]},{3:13,4:2,5:3,6:4,7:e,8:n,9:r,10:i,11:5,12:10,18:a,22:o},{1:[2,8]},t(s,[2,4]),t(s,[2,5]),t(s,[2,6]),t(s,[2,7]),{13:14,19:[1,15]},{15:[1,16]},{19:[2,11]},{1:[2,3]},{14:17,16:[1,18],21:c},t([16,21],[2,12]),{5:29,6:28,7:e,8:n,9:r,10:i,12:10,18:a,23:20,25:21,26:30,27:39,28:40,30:22,32:23,33:24,34:25,35:26,36:27,37:u,42:l,44:h,45:f,56:d,57:p,59:y,63:g,77:m,78:v,79:b,80:_},{15:[1,45]},{17:46,20:[1,47]},{15:[2,14]},{24:[1,48]},{15:[1,49],24:[2,16]},t(x,[2,24],{31:[1,50]}),t(x,[2,26]),t(x,[2,27]),t(x,[2,28]),t(x,[2,29]),t(x,[2,30]),t(x,[2,31]),t(x,[2,32]),t(x,[2,40],{46:51,48:54,49:55,31:[1,53],47:[1,52],50:w,51:k,52:T,53:E,54:C,55:S}),{26:62,27:39,28:40,77:m,78:v,79:b,80:_},t(x,[2,42]),t(x,[2,43]),{27:63,77:m,78:v,79:b},{26:64,27:39,28:40,77:m,78:v,79:b,80:_},{26:65,27:39,28:40,77:m,78:v,79:b,80:_},{26:66,27:39,28:40,77:m,78:v,79:b,80:_},{47:[1,67]},t(A,[2,19],{27:39,28:40,26:68,29:[1,69],77:m,78:v,79:b,80:_}),t(A,[2,20],{29:[1,70]}),t(M,[2,86]),t(M,[2,87]),t(M,[2,88]),t([15,24,29,31,38,39,47,50,51,52,53,54,55,60,62],[2,89]),t(N,[2,9]),{14:71,21:c},{21:[2,13]},{1:[2,15]},{5:29,6:28,7:e,8:n,9:r,10:i,12:10,18:a,23:72,24:[2,17],25:21,26:30,27:39,28:40,30:22,32:23,33:24,34:25,35:26,36:27,37:u,42:l,44:h,45:f,56:d,57:p,59:y,63:g,77:m,78:v,79:b,80:_},t(x,[2,25]),{26:73,27:39,28:40,47:[1,74],77:m,78:v,79:b,80:_},{46:75,48:54,49:55,50:w,51:k,52:T,53:E,54:C,55:S},t(x,[2,41]),{49:76,54:C,55:S},t(D,[2,51],{48:77,50:w,51:k,52:T,53:E}),t(O,[2,52]),t(O,[2,53]),t(O,[2,54]),t(O,[2,55]),t(B,[2,56]),t(B,[2,57]),t(x,[2,33],{38:[1,78],39:[1,79]}),{43:[1,80]},{47:[1,81]},{47:[1,82]},{60:[1,83],62:[1,84]},{27:85,77:m,78:v,79:b},t(A,[2,21]),t(A,[2,22]),t(A,[2,23]),{15:[1,86]},{24:[2,18]},t(L,[2,44]),{26:87,27:39,28:40,77:m,78:v,79:b,80:_},{26:88,27:39,28:40,47:[1,89],77:m,78:v,79:b,80:_},t(D,[2,50],{48:90,50:w,51:k,52:T,53:E}),t(D,[2,49]),{27:91,77:m,78:v,79:b},{40:92,44:I},{26:94,27:39,28:40,77:m,78:v,79:b,80:_},t(x,[2,58],{47:[1,95]}),t(x,[2,60],{47:[1,97],58:[1,96]}),t(x,[2,64],{47:[1,98],61:[1,99]}),t(x,[2,68],{47:[1,101],58:[1,100]}),t(x,[2,72]),t(N,[2,10]),t(L,[2,46]),t(L,[2,45]),{26:102,27:39,28:40,77:m,78:v,79:b,80:_},t(D,[2,48]),t(x,[2,34],{39:[1,103]}),{41:[1,104]},{40:105,41:[2,38],44:I},t(x,[2,37]),t(x,[2,59]),t(x,[2,61]),t(x,[2,62],{58:[1,106]}),t(x,[2,65]),t(x,[2,66],{47:[1,107]}),t(x,[2,69]),t(x,[2,70],{58:[1,108]}),t(L,[2,47]),{40:109,44:I},t(x,[2,35]),{41:[2,39]},t(x,[2,63]),t(x,[2,67]),t(x,[2,71]),{41:[1,110]},t(x,[2,36])],defaultActions:{2:[2,1],3:[2,2],5:[2,8],12:[2,11],13:[2,3],19:[2,14],47:[2,13],48:[2,15],72:[2,18],105:[2,39]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var 
_,x,w,k,T,E,C,S,A,M={};;){if(w=n[n.length-1],this.defaultActions[w]?k=this.defaultActions[w]:(null==_&&(_=b()),k=o[w]&&o[w][_]),void 0===k||!k.length||!k[0]){var N="";for(E in A=[],o[w])this.terminals_[E]&&E>h&&A.push("'"+this.terminals_[E]+"'");N=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(c+1)+": Unexpected "+(_==f?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(N,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:m,expected:A})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+w+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),a.push(p.yylloc),n.push(k[1]),_=null,x?(_=x,x=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[k[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(T=this.performAction.apply(M,[s,u,c,y.yy,k[1],i,a].concat(d))))return T;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[k[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},F={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){return this.next()||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),18;case 1:return 7;case 2:return 8;case 3:return 9;case 4:return 10;case 5:return this.begin("type_directive"),19;case 6:return this.popState(),this.begin("arg_directive"),16;case 7:return this.popState(),this.popState(),21;case 8:return 20;case 9:case 10:case 12:case 19:break;case 11:return 15;case 13:case 14:return 22;case 15:return this.begin("struct"),39;case 16:return"EOF_IN_STRUCT";case 17:return"OPEN_IN_STRUCT";case 18:return this.popState(),41;case 20:return"MEMBER";case 21:return 37;case 22:return 63;case 23:return 56;case 24:return 57;case 25:return 59;case 26:return 42;case 27:return 43;case 28:this.begin("generic");break;case 29:case 32:case 35:case 38:case 41:case 44:this.popState();break;case 30:return"GENERICTYPE";case 31:this.begin("string");break;case 33:return"STR";case 34:this.begin("bqstring");break;case 36:return"BQUOTE_STR";case 37:this.begin("href");break;case 39:return 62;case 40:this.begin("callback_name");break;case 42:this.popState(),this.begin("callback_args");break;case 43:return 60;case 45:return 61;case 46:case 47:case 48:case 49:return 58;case 50:case 51:return 51;case 52:case 53:return 53;case 54:return 52;case 55:return 50;case 56:return 54;case 57:return 55;case 58:return 31;case 59:return 38;case 60:return 75;case 61:return"DOT";case 62:return"PLUS";case 63:return 72;case 64:case 65:return"EQUALS";case 66:return 79;case 67:return"PUNCTUATION";case 68:return 78;case 69:return 77;case 70:return 74;case 71:return 
24}},rules:[/^(?:%%\{)/,/^(?:.*direction\s+TB[^\n]*)/,/^(?:.*direction\s+BT[^\n]*)/,/^(?:.*direction\s+RL[^\n]*)/,/^(?:.*direction\s+LR[^\n]*)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)*[^\n]*(\r?\n?)+)/,/^(?:%%[^\n]*(\r?\n)*)/,/^(?:\s*(\r?\n)+)/,/^(?:\s+)/,/^(?:classDiagram-v2\b)/,/^(?:classDiagram\b)/,/^(?:[{])/,/^(?:$)/,/^(?:[{])/,/^(?:[}])/,/^(?:[\n])/,/^(?:[^{}\n]*)/,/^(?:class\b)/,/^(?:cssClass\b)/,/^(?:callback\b)/,/^(?:link\b)/,/^(?:click\b)/,/^(?:<<)/,/^(?:>>)/,/^(?:[~])/,/^(?:[~])/,/^(?:[^~]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:[`])/,/^(?:[`])/,/^(?:[^`]+)/,/^(?:href[\s]+["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:call[\s]+)/,/^(?:\([\s]*\))/,/^(?:\()/,/^(?:[^(]*)/,/^(?:\))/,/^(?:[^)]*)/,/^(?:_self\b)/,/^(?:_blank\b)/,/^(?:_parent\b)/,/^(?:_top\b)/,/^(?:\s*<\|)/,/^(?:\s*\|>)/,/^(?:\s*>)/,/^(?:\s*<)/,/^(?:\s*\*)/,/^(?:\s*o\b)/,/^(?:--)/,/^(?:\.\.)/,/^(?::{1}[^:\n;]+)/,/^(?::{3})/,/^(?:-)/,/^(?:\.)/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:\w+)/,/^(?:[!"#$%&'*+,-.`?\\/])/,/^(?:[0-9]+)/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1
BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\s)/,/^(?:$)/],conditions:{arg_directive:{rules:[7,8],inclusive:!1},type_directive:{rules:[6,7],inclusive:!1},open_directive:{rules:[5],inclusive:!1},callback_args:{rules:[44,45],inclusive:!1},callback_name:{rules:[41,42,43],inclusive:!1},href:{rules:[38,39],inclusive:!1},struct:{rules:[16,17,18,19,20],inclusive:!1},generic:{rules:[29,30],inclusive:!1},bqstring:{rules:[35,36],inclusive:!1},string:{rules:[32,33],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,9,10,11,12,13,14,15,21,22,23,24,25,26,27,28,31,34,37,40,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71],inclusive:!0}}};function P(){this.yy={}}return R.lexer=F,P.prototype=R,R.Parser=P,new P}();e.parser=r,e.Parser=r.Parser,e.parse=function(){return r.parse.apply(r,arguments)},e.main=function(t){t[1]||(console.log("Usage: "+t[0]+" FILE"),process.exit(1));var r=n(8218).readFileSync(n(6470).normalize(t[1]),"utf8");return e.parser.parse(r)},n.c[n.s]===t&&e.main(process.argv.slice(1))},5890:(t,e,n)=>{t=n.nmd(t);var r=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,5],r=[6,9,11,23,41],i=[1,17],a=[1,20],o=[1,25],s=[1,26],c=[1,27],u=[1,28],l=[1,37],h=[23,38,39],f=[4,6,9,11,23,41],d=[34,35,36,37],p=[22,29],y=[1,55],g={trace:function(){},yy:{},symbols_:{error:2,start:3,ER_DIAGRAM:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,entityName:17,relSpec:18,role:19,BLOCK_START:20,attributes:21,BLOCK_STOP:22,ALPHANUM:23,attribute:24,attributeType:25,attributeName:26,attributeKeyType:27,attributeComment:28,ATTRIBUTE_WORD:29,ATTRIBUTE_KEY:30,COMMENT:31,cardinality:32,relType:33,ZERO_OR_ONE:34,ZERO_OR_MORE:35,ONE_OR_MORE:36,ONLY_ONE:37,NON_IDENTIFYING:38,IDENTIFYING:39,WORD:40,open_directive:41,type_directive:42,arg_directive:43,close_directive:44,$accept:0,$end:1},terminals_:{2:"error",4:"ER_DIAGRAM",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",20:"BLOCK_START",22:"BLOCK_STOP",23:"ALPHANUM",29:"ATTRIBUTE_WORD",30:"ATTRIBUTE_KEY",31:"COMMENT",34:"ZERO_OR_ONE",35:"ZERO_OR_MORE",36:"ONE_OR_MORE",37:"ONLY_ONE",38:"NON_IDENTIFYING",39:"IDENTIFYING",40:"WORD",41:"open_directive",42:"type_directive",43:"arg_directive",44:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,5],[10,4],[10,3],[10,1],[17,1],[21,1],[21,2],[24,2],[24,3],[24,3],[24,4],[25,1],[26,1],[27,1],[28,1],[18,3],[32,1],[32,1],[32,1],[32,1],[33,1],[33,1],[19,1],[19,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:break;case 3:case 7:case 8:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:case 16:case 23:case 24:case 25:case 35:this.$=a[s];break;case 12:r.addEntity(a[s-4]),r.addEntity(a[s-2]),r.addRelationship(a[s-4],a[s],a[s-2],a[s-3]);break;case 13:r.addEntity(a[s-3]),r.addAttributes(a[s-3],a[s-1]);break;case 14:r.addEntity(a[s-2]);break;case 15:r.addEntity(a[s]);break;case 17:this.$=[a[s]];break;case 18:a[s].push(a[s-1]),this.$=a[s];break;case 19:this.$={attributeType:a[s-1],attributeName:a[s]};break;case 20:this.$={attributeType:a[s-2],attributeName:a[s-1],attributeKeyType:a[s]};break;case 21:this.$={attributeType:a[s-2],attributeName:a[s-1],attributeComment:a[s]};break;case 22:this.$={attributeType:a[s-3],attributeName:a[s-2],attributeKeyType:a[s-1],attributeComment:a[s]};break;case 26:case 34:this.$=a[s].replace(/"/g,"");break;case 27:this.$={cardA:a[s],relType:a[s-1],cardB:a[s-2]};break;case 28:this.$=r.Cardinality.ZERO_OR_ONE;break;case 29:this.$=r.Cardinality.ZERO_OR_MORE;break;case 30:this.$=r.Cardinality.ONE_OR_MORE;break;case 31:this.$=r.Cardinality.ONLY_ONE;break;case 32:this.$=r.Identification.NON_IDENTIFYING;break;case 33:this.$=r.Identification.IDENTIFYING;break;case 36:r.parseDirective("%%{","open_directive");break;case 37:r.parseDirective(a[s],"type_directive");break;case 38:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
39:r.parseDirective("}%%","close_directive","er")}},table:[{3:1,4:e,7:3,12:4,41:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,41:n},{13:8,42:[1,9]},{42:[2,36]},{6:[1,10],7:15,8:11,9:[1,12],10:13,11:[1,14],12:4,17:16,23:i,41:n},{1:[2,2]},{14:18,15:[1,19],44:a},t([15,44],[2,37]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:15,10:21,12:4,17:16,23:i,41:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,15],{18:22,32:24,20:[1,23],34:o,35:s,36:c,37:u}),t([6,9,11,15,20,23,34,35,36,37,41],[2,16]),{11:[1,29]},{16:30,43:[1,31]},{11:[2,39]},t(r,[2,5]),{17:32,23:i},{21:33,22:[1,34],24:35,25:36,29:l},{33:38,38:[1,39],39:[1,40]},t(h,[2,28]),t(h,[2,29]),t(h,[2,30]),t(h,[2,31]),t(f,[2,9]),{14:41,44:a},{44:[2,38]},{15:[1,42]},{22:[1,43]},t(r,[2,14]),{21:44,22:[2,17],24:35,25:36,29:l},{26:45,29:[1,46]},{29:[2,23]},{32:47,34:o,35:s,36:c,37:u},t(d,[2,32]),t(d,[2,33]),{11:[1,48]},{19:49,23:[1,51],40:[1,50]},t(r,[2,13]),{22:[2,18]},t(p,[2,19],{27:52,28:53,30:[1,54],31:y}),t([22,29,30,31],[2,24]),{23:[2,27]},t(f,[2,10]),t(r,[2,12]),t(r,[2,34]),t(r,[2,35]),t(p,[2,20],{28:56,31:y}),t(p,[2,21]),t([22,29,31],[2,25]),t(p,[2,26]),t(p,[2,22])],defaultActions:{5:[2,36],7:[2,2],20:[2,39],31:[2,38],37:[2,23],44:[2,18],47:[2,27]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,x,w,k,T,E,C,S,A,M={};;){if(w=n[n.length-1],this.defaultActions[w]?k=this.defaultActions[w]:(null==_&&(_=b()),k=o[w]&&o[w][_]),void 0===k||!k.length||!k[0]){var N="";for(E in A=[],o[w])this.terminals_[E]&&E>h&&A.push("'"+this.terminals_[E]+"'");N=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(c+1)+": Unexpected "+(_==f?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(N,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:m,expected:A})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+w+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),a.push(p.yylloc),n.push(k[1]),_=null,x?(_=x,x=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[k[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(T=this.performAction.apply(M,[s,u,c,y.yy,k[1],i,a].concat(d))))return T;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[k[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},m={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return 
this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){return this.next()||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),41;case 1:return this.begin("type_directive"),42;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),44;case 4:return 43;case 5:case 6:case 8:case 13:case 17:break;case 7:return 11;case 9:return 9;case 10:return 40;case 11:return 4;case 12:return this.begin("block"),20;case 14:return 30;case 15:return 29;case 16:return 31;case 18:return this.popState(),22;case 19:case 32:return e.yytext[0];case 20:case 24:return 34;case 21:case 25:return 35;case 22:case 26:return 36;case 23:return 37;case 27:case 29:case 30:return 38;case 28:return 39;case 31:return 23;case 33:return 6}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:[\s]+)/i,/^(?:"[^"]*")/i,/^(?:erDiagram\b)/i,/^(?:\{)/i,/^(?:\s+)/i,/^(?:(?:PK)|(?:FK))/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:"[^"]*")/i,/^(?:[\n]+)/i,/^(?:\})/i,/^(?:.)/i,/^(?:\|o\b)/i,/^(?:\}o\b)/i,/^(?:\}\|)/i,/^(?:\|\|)/i,/^(?:o\|)/i,/^(?:o\{)/i,/^(?:\|\{)/i,/^(?:\.\.)/i,/^(?:--)/i,/^(?:\.-)/i,/^(?:-\.)/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:.)/i,/^(?:$)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},block:{rules:[13,14,15,16,17,18,19],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,20,21,22,23,24,25,26,27,28,29,30,31,32,33],inclusive:!0}}};function v(){this.yy={}}return g.lexer=m,v.prototype=g,g.Parser=v,new v}();e.parser=r,e.Parser=r.Parser,e.parse=function(){return r.parse.apply(r,arguments)},e.main=function(t){t[1]||(console.log("Usage: "+t[0]+" FILE"),process.exit(1));var r=n(8009).readFileSync(n(6470).normalize(t[1]),"utf8");return e.parser.parse(r)},n.c[n.s]===t&&e.main(process.argv.slice(1))},3602:(t,e,n)=>{t=n.nmd(t);var r=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,9],n=[1,7],r=[1,6],i=[1,8],a=[1,20,21,22,23,38,47,59,60,79,80,81,82,83,84,88,98,99,102,104,105,111,112,113,114,115,116,117,118,119,120],o=[2,10],s=[1,20],c=[1,21],u=[1,22],l=[1,23],h=[1,30],f=[1,59],d=[1,45],p=[1,49],y=[1,33],g=[1,34],m=[1,35],v=[1,36],b=[1,37],_=[1,53],x=[1,60],w=[1,48],k=[1,50],T=[1,52],E=[1,56],C=[1,57],S=[1,38],A=[1,39],M=[1,40],N=[1,41],D=[1,58],O=[1,47],B=[1,51],L=[1,54],I=[1,55],R=[1,46],F=[1,63],P=[1,68],j=[1,20,21,22,23,38,42,47,59,60,79,80,81,82,83,84,88,98,99,102,104,105,111,112,113,114,115,116,117,118,119,120],Y=[1,72],z=[1,71],U=[1,73],q=[20,21,23,74,75],H=[1,94],$=[1,99],W=[1,102],V=[1,103],G=[1,96],X=[1,101],Z=[1,104],Q=[1,97],K=[1,109],J=[1,108],tt=[1,98],et=[1,100],nt=[1,105],rt=[1,106],it=[1,107],at=[1,110],ot=[20,21,22,23,74,75],st=[20,21,22,23,48,74,75],ct=[20,21,22,23,40,47,48,50,52,54,56,58,59,60,62,64,66,67,69,74,75,84,88,98,99,102,104,105,115,116,117,118,119,120],ut=[20,21,23],lt=[20,21,23,47,59,60,74,75,84,88,98,99,102,104,105,115,116,117,118,119,120],ht=[1,12,20,21,22,23,24,38,42,47,59,60,79,80,81,82,83,84,88,98,99,102,104,105,111,112,113,114,115,116,117,118,119,120],ft=[47,59,60,84,88,98,99,102,104,105,115,116,117,118,119,120],dt=[1,143],pt=[1,151],yt=[1,152],gt=[1,153],mt=[1,154],vt=[1,138],bt=[1,139],_t=[1,135],xt=[1,146],wt=[1,147],kt=[1,148],Tt=[1,149],Et=[1,150],Ct=[1,155],St=[1,156],At=[1,141],Mt=[1,144],Nt=[1,140],Dt=[1,137],Ot=[20,21,22,23,38,42,47,59,60,79,80,81,82,83,84,88,98,99,102,104,105,111,112,113,114,115,116,117,118,119,120],Bt=[1,159],Lt=[20,21,22,23,26,47,59,60,84,98,99,102,104,105,115,116,117,118,119,120],It=[20,21,22,23,24,26,38,40,41,42,47,51,53,55,57,59,60,61,63,65,66,68,70,74,75,79,80,81,82,83,84,85,88,98,99,102,104,105,106,107,115,116,117,118,119,120],Rt=[12,21,22,24],Ft=[22,99],Pt=[1,242],jt=[1,237],Yt=[1,238],zt=[1,246],Ut=[1,243],qt=[1,240],Ht=[1,239],$t=[1,241],Wt=[1,244],Vt=[1,245],Gt=[1,247],Xt=[1,265],Zt=[20,21,23,99],Qt=[20,21,22,23,59,60,79,95,98,99,102,103,104,105,106],Kt={trace:function(){},yy:{},symbols_:{error:2,start:3,mermaidDoc:4,directive:5,openDirective:6,typeDirective:7,closeDirective:8,separator:9,":":10,argDirective:11,open_directive:12,type_directive:13,arg_directive:14,close_directive:15,graphConfig:16,document:17,line:18,statement:19,SEMI:20,NEWLINE:21,SPACE:22,EOF:23,GRAPH:24,NODIR:25,DIR:26,FirstStmtSeperator:27,ending:28,endToken:29,spaceList:30,spaceListNewline:31,verticeStatement:32,styleStatement:33,linkStyleStatement:34,classDefStatement:35,classStatement:36,clickStatement:37,subgraph:38,text:39,SQS:40,SQE:41,end:42,direction:43,link:44,node:45,vertex:46,AMP:47,STYLE_SEPARATOR:48,idString:49,PS:50,PE:51,"(-":52,"-)":53,STADIUMSTART:54,STADIUMEND:55,SUBROUTINESTART:56,SUBROUTINEEND:57,VERTEX_WITH_PROPS_START:58,ALPHA:59,COLON:60,PIPE:61,CYLINDERSTART:62,CYLINDEREND:63,DIAMOND_START:64,DIAMOND_STOP:65,TAGEND:66,TRAPSTART:67,TRAPEND:68,INVTRAPSTART:69,INVTRAPEND:70,linkStatement:71,arrowText:72,TESTSTR:73,START_LINK:74,LINK:75,textToken:76,STR:77,keywords:78,STYLE:79,LINKSTYLE:80,CLASSDEF:81,CLASS:82,CLICK:83,DOWN:84,UP:85,textNoTags:86,textNoTagsToken:87,DEFAULT:88,stylesOpt:89,alphaNum:90,CALLBACKNAME:91,CALLBACKARGS:92,HREF:93,LINK_TARGET:94,HEX:95,numList:96,INTERPOLATE:97,NUM:98,COMMA:99,style:100,styleComponent:101,MINUS:102,UNIT:103,BRKT:104,DOT:105,PCT:106,TAGSTART:107,alphaNumToken:108,idStringToken:109,alphaNumStatement:110,direction_tb:111,direction_bt:112,direction_rl:113,direction_lr:114,PUNCTUATION:115,UNICODE_TEXT:116,PLUS:117,EQUALS:118,MULT:119,UNDERSCORE:120,graphCodeTok
ens:121,ARROW_CROSS:122,ARROW_POINT:123,ARROW_CIRCLE:124,ARROW_OPEN:125,QUOTE:126,$accept:0,$end:1},terminals_:{2:"error",10:":",12:"open_directive",13:"type_directive",14:"arg_directive",15:"close_directive",20:"SEMI",21:"NEWLINE",22:"SPACE",23:"EOF",24:"GRAPH",25:"NODIR",26:"DIR",38:"subgraph",40:"SQS",41:"SQE",42:"end",47:"AMP",48:"STYLE_SEPARATOR",50:"PS",51:"PE",52:"(-",53:"-)",54:"STADIUMSTART",55:"STADIUMEND",56:"SUBROUTINESTART",57:"SUBROUTINEEND",58:"VERTEX_WITH_PROPS_START",59:"ALPHA",60:"COLON",61:"PIPE",62:"CYLINDERSTART",63:"CYLINDEREND",64:"DIAMOND_START",65:"DIAMOND_STOP",66:"TAGEND",67:"TRAPSTART",68:"TRAPEND",69:"INVTRAPSTART",70:"INVTRAPEND",73:"TESTSTR",74:"START_LINK",75:"LINK",77:"STR",79:"STYLE",80:"LINKSTYLE",81:"CLASSDEF",82:"CLASS",83:"CLICK",84:"DOWN",85:"UP",88:"DEFAULT",91:"CALLBACKNAME",92:"CALLBACKARGS",93:"HREF",94:"LINK_TARGET",95:"HEX",97:"INTERPOLATE",98:"NUM",99:"COMMA",102:"MINUS",103:"UNIT",104:"BRKT",105:"DOT",106:"PCT",107:"TAGSTART",111:"direction_tb",112:"direction_bt",113:"direction_rl",114:"direction_lr",115:"PUNCTUATION",116:"UNICODE_TEXT",117:"PLUS",118:"EQUALS",119:"MULT",120:"UNDERSCORE",122:"ARROW_CROSS",123:"ARROW_POINT",124:"ARROW_CIRCLE",125:"ARROW_OPEN",126:"QUOTE"},productions_:[0,[3,1],[3,2],[5,4],[5,6],[6,1],[7,1],[11,1],[8,1],[4,2],[17,0],[17,2],[18,1],[18,1],[18,1],[18,1],[18,1],[16,2],[16,2],[16,2],[16,3],[28,2],[28,1],[29,1],[29,1],[29,1],[27,1],[27,1],[27,2],[31,2],[31,2],[31,1],[31,1],[30,2],[30,1],[19,2],[19,2],[19,2],[19,2],[19,2],[19,2],[19,9],[19,6],[19,4],[19,1],[9,1],[9,1],[9,1],[32,3],[32,4],[32,2],[32,1],[45,1],[45,5],[45,3],[46,4],[46,6],[46,4],[46,4],[46,4],[46,8],[46,4],[46,4],[46,4],[46,6],[46,4],[46,4],[46,4],[46,4],[46,4],[46,1],[44,2],[44,3],[44,3],[44,1],[44,3],[71,1],[72,3],[39,1],[39,2],[39,1],[78,1],[78,1],[78,1],[78,1],[78,1],[78,1],[78,1],[78,1],[78,1],[78,1],[78,1],[86,1],[86,2],[35,5],[35,5],[36,5],[37,2],[37,4],[37,3],[37,5],[37,2],[37,4],[37,4],[37,6],[37,2],[37,4],[37,2],[37,4],[37,4],[37,6],[33,5],[33,5],[34,5],[34,5],[34,9],[34,9],[34,7],[34,7],[96,1],[96,3],[89,1],[89,3],[100,1],[100,2],[101,1],[101,1],[101,1],[101,1],[101,1],[101,1],[101,1],[101,1],[101,1],[101,1],[101,1],[76,1],[76,1],[76,1],[76,1],[76,1],[76,1],[87,1],[87,1],[87,1],[87,1],[49,1],[49,2],[90,1],[90,2],[110,1],[110,1],[110,1],[110,1],[43,1],[43,1],[43,1],[43,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[108,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[109,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1],[121,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 5:r.parseDirective("%%{","open_directive");break;case 6:r.parseDirective(a[s],"type_directive");break;case 7:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 8:r.parseDirective("}%%","close_directive","flowchart");break;case 10:case 36:case 37:case 38:case 39:case 40:this.$=[];break;case 11:a[s]!==[]&&a[s-1].push(a[s]),this.$=a[s-1];break;case 12:case 78:case 80:case 92:case 148:case 150:case 151:case 74:case 146:this.$=a[s];break;case 19:r.setDirection("TB"),this.$="TB";break;case 20:r.setDirection(a[s-1]),this.$=a[s-1];break;case 35:this.$=a[s-1].nodes;break;case 41:this.$=r.addSubGraph(a[s-6],a[s-1],a[s-4]);break;case 
42:this.$=r.addSubGraph(a[s-3],a[s-1],a[s-3]);break;case 43:this.$=r.addSubGraph(void 0,a[s-1],void 0);break;case 48:r.addLink(a[s-2].stmt,a[s],a[s-1]),this.$={stmt:a[s],nodes:a[s].concat(a[s-2].nodes)};break;case 49:r.addLink(a[s-3].stmt,a[s-1],a[s-2]),this.$={stmt:a[s-1],nodes:a[s-1].concat(a[s-3].nodes)};break;case 50:this.$={stmt:a[s-1],nodes:a[s-1]};break;case 51:this.$={stmt:a[s],nodes:a[s]};break;case 52:case 119:case 121:this.$=[a[s]];break;case 53:this.$=a[s-4].concat(a[s]);break;case 54:this.$=[a[s-2]],r.setClass(a[s-2],a[s]);break;case 55:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"square");break;case 56:this.$=a[s-5],r.addVertex(a[s-5],a[s-2],"circle");break;case 57:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"ellipse");break;case 58:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"stadium");break;case 59:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"subroutine");break;case 60:this.$=a[s-7],r.addVertex(a[s-7],a[s-1],"rect",void 0,void 0,void 0,Object.fromEntries([[a[s-5],a[s-3]]]));break;case 61:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"cylinder");break;case 62:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"round");break;case 63:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"diamond");break;case 64:this.$=a[s-5],r.addVertex(a[s-5],a[s-2],"hexagon");break;case 65:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"odd");break;case 66:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"trapezoid");break;case 67:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"inv_trapezoid");break;case 68:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"lean_right");break;case 69:this.$=a[s-3],r.addVertex(a[s-3],a[s-1],"lean_left");break;case 70:this.$=a[s],r.addVertex(a[s]);break;case 71:a[s-1].text=a[s],this.$=a[s-1];break;case 72:case 73:a[s-2].text=a[s-1],this.$=a[s-2];break;case 75:var c=r.destructLink(a[s],a[s-2]);this.$={type:c.type,stroke:c.stroke,length:c.length,text:a[s-1]};break;case 76:c=r.destructLink(a[s]),this.$={type:c.type,stroke:c.stroke,length:c.length};break;case 77:this.$=a[s-1];break;case 79:case 93:case 149:case 147:this.$=a[s-1]+""+a[s];break;case 94:case 95:this.$=a[s-4],r.addClass(a[s-2],a[s]);break;case 96:this.$=a[s-4],r.setClass(a[s-2],a[s]);break;case 97:case 105:this.$=a[s-1],r.setClickEvent(a[s-1],a[s]);break;case 98:case 106:this.$=a[s-3],r.setClickEvent(a[s-3],a[s-2]),r.setTooltip(a[s-3],a[s]);break;case 99:this.$=a[s-2],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 100:this.$=a[s-4],r.setClickEvent(a[s-4],a[s-3],a[s-2]),r.setTooltip(a[s-4],a[s]);break;case 101:case 107:this.$=a[s-1],r.setLink(a[s-1],a[s]);break;case 102:case 108:this.$=a[s-3],r.setLink(a[s-3],a[s-2]),r.setTooltip(a[s-3],a[s]);break;case 103:case 109:this.$=a[s-3],r.setLink(a[s-3],a[s-2],a[s]);break;case 104:case 110:this.$=a[s-5],r.setLink(a[s-5],a[s-4],a[s]),r.setTooltip(a[s-5],a[s-2]);break;case 111:this.$=a[s-4],r.addVertex(a[s-2],void 0,void 0,a[s]);break;case 112:case 114:this.$=a[s-4],r.updateLink(a[s-2],a[s]);break;case 113:this.$=a[s-4],r.updateLink([a[s-2]],a[s]);break;case 115:this.$=a[s-8],r.updateLinkInterpolate([a[s-6]],a[s-2]),r.updateLink([a[s-6]],a[s]);break;case 116:this.$=a[s-8],r.updateLinkInterpolate(a[s-6],a[s-2]),r.updateLink(a[s-6],a[s]);break;case 117:this.$=a[s-6],r.updateLinkInterpolate([a[s-4]],a[s]);break;case 118:this.$=a[s-6],r.updateLinkInterpolate(a[s-4],a[s]);break;case 120:case 122:a[s-2].push(a[s]),this.$=a[s-2];break;case 124:this.$=a[s-1]+a[s];break;case 152:this.$="v";break;case 153:this.$="-";break;case 154:this.$={stmt:"dir",value:"TB"};break;case 155:this.$={stmt:"dir",value:"BT"};break;case 
156:this.$={stmt:"dir",value:"RL"};break;case 157:this.$={stmt:"dir",value:"LR"}}},table:[{3:1,4:2,5:3,6:5,12:e,16:4,21:n,22:r,24:i},{1:[3]},{1:[2,1]},{3:10,4:2,5:3,6:5,12:e,16:4,21:n,22:r,24:i},t(a,o,{17:11}),{7:12,13:[1,13]},{16:14,21:n,22:r,24:i},{16:15,21:n,22:r,24:i},{25:[1,16],26:[1,17]},{13:[2,5]},{1:[2,2]},{1:[2,9],18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,43:31,45:32,46:42,47:f,49:43,59:d,60:p,79:y,80:g,81:m,82:v,83:b,84:_,88:x,98:w,99:k,102:T,104:E,105:C,109:44,111:S,112:A,113:M,114:N,115:D,116:O,117:B,118:L,119:I,120:R},{8:61,10:[1,62],15:F},t([10,15],[2,6]),t(a,[2,17]),t(a,[2,18]),t(a,[2,19]),{20:[1,65],21:[1,66],22:P,27:64,30:67},t(j,[2,11]),t(j,[2,12]),t(j,[2,13]),t(j,[2,14]),t(j,[2,15]),t(j,[2,16]),{9:69,20:Y,21:z,23:U,44:70,71:74,74:[1,75],75:[1,76]},{9:77,20:Y,21:z,23:U},{9:78,20:Y,21:z,23:U},{9:79,20:Y,21:z,23:U},{9:80,20:Y,21:z,23:U},{9:81,20:Y,21:z,23:U},{9:83,20:Y,21:z,22:[1,82],23:U},t(j,[2,44]),t(q,[2,51],{30:84,22:P}),{22:[1,85]},{22:[1,86]},{22:[1,87]},{22:[1,88]},{26:H,47:$,59:W,60:V,77:[1,92],84:G,90:91,91:[1,89],93:[1,90],98:X,99:Z,102:Q,104:K,105:J,108:95,110:93,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(j,[2,154]),t(j,[2,155]),t(j,[2,156]),t(j,[2,157]),t(ot,[2,52],{48:[1,111]}),t(st,[2,70],{109:123,40:[1,112],47:f,50:[1,113],52:[1,114],54:[1,115],56:[1,116],58:[1,117],59:d,60:p,62:[1,118],64:[1,119],66:[1,120],67:[1,121],69:[1,122],84:_,88:x,98:w,99:k,102:T,104:E,105:C,115:D,116:O,117:B,118:L,119:I,120:R}),t(ct,[2,146]),t(ct,[2,171]),t(ct,[2,172]),t(ct,[2,173]),t(ct,[2,174]),t(ct,[2,175]),t(ct,[2,176]),t(ct,[2,177]),t(ct,[2,178]),t(ct,[2,179]),t(ct,[2,180]),t(ct,[2,181]),t(ct,[2,182]),t(ct,[2,183]),t(ct,[2,184]),t(ct,[2,185]),t(ct,[2,186]),{9:124,20:Y,21:z,23:U},{11:125,14:[1,126]},t(ut,[2,8]),t(a,[2,20]),t(a,[2,26]),t(a,[2,27]),{21:[1,127]},t(lt,[2,34],{30:128,22:P}),t(j,[2,35]),{45:129,46:42,47:f,49:43,59:d,60:p,84:_,88:x,98:w,99:k,102:T,104:E,105:C,109:44,115:D,116:O,117:B,118:L,119:I,120:R},t(ht,[2,45]),t(ht,[2,46]),t(ht,[2,47]),t(ft,[2,74],{72:130,61:[1,132],73:[1,131]}),{22:dt,24:pt,26:yt,38:gt,39:133,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t([47,59,60,61,73,84,88,98,99,102,104,105,115,116,117,118,119,120],[2,76]),t(j,[2,36]),t(j,[2,37]),t(j,[2,38]),t(j,[2,39]),t(j,[2,40]),{22:dt,24:pt,26:yt,38:gt,39:157,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(Ot,o,{17:158}),t(q,[2,50],{47:Bt}),{26:H,47:$,59:W,60:V,84:G,90:160,95:[1,161],98:X,99:Z,102:Q,104:K,105:J,108:95,110:93,115:tt,116:et,117:nt,118:rt,119:it,120:at},{88:[1,162],96:163,98:[1,164]},{26:H,47:$,59:W,60:V,84:G,88:[1,165],90:166,98:X,99:Z,102:Q,104:K,105:J,108:95,110:93,115:tt,116:et,117:nt,118:rt,119:it,120:at},{26:H,47:$,59:W,60:V,84:G,90:167,98:X,99:Z,102:Q,104:K,105:J,108:95,110:93,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(ut,[2,97],{22:[1,168],92:[1,169]}),t(ut,[2,101],{22:[1,170]}),t(ut,[2,105],{108:95,110:172,22:[1,171],26:H,47:$,59:W,60:V,84:G,98:X,99:Z,102:Q,104:K,105:J,115:tt,116:et,117:nt,118:rt,119:it,120:at}),t(ut,[2,107],{22:[1,173]}),t(Lt,[2,148]),t(Lt,[2,150]),t(Lt,[2,151]),t(Lt,[2,152]),t(Lt,[2,153]),t(It,[2,158]),t(It,[2,159]),t(It,[2,160]),t(It,[2,161]),t(It,[2,162]),t(It,[2,163]),t(It,[2,164]),t(It,[2,165]),t(It,[2,166]),t(It,[2,16
7]),t(It,[2,168]),t(It,[2,169]),t(It,[2,170]),{47:f,49:174,59:d,60:p,84:_,88:x,98:w,99:k,102:T,104:E,105:C,109:44,115:D,116:O,117:B,118:L,119:I,120:R},{22:dt,24:pt,26:yt,38:gt,39:175,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:177,42:mt,47:$,50:[1,176],59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:178,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:179,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:180,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{59:[1,181]},{22:dt,24:pt,26:yt,38:gt,39:182,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:183,42:mt,47:$,59:W,60:V,64:[1,184],66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:185,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:186,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:187,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(ct,[2,147]),t(Rt,[2,3]),{8:188,15:F},{15:[2,7]},t(a,[2,28]),t(lt,[2,33]),t(q,[2,48],{30:189,22:P}),t(ft,[2,71],{22:[1,190]}),{22:[1,191]},{22:dt,24:pt,26:yt,38:gt,39:192,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,59:W,60:V,66:vt,74:bt,75:[1,193],76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(It,[2,78]),t(It,[2,80]),t(It,[2,136]),t(It,[2,137]),t(It,[2,138]),t(It,[2,139]),t(It,[2,140]),t(It,[2,141]),t(It,[2,142]),t(It,[2,143]),t(It,[2,144]),t(It,[2,145]),t(It,[2,81]),t(It,[2,82]),t(It,[2,83]),t(It,[2,84]),t(It,[2,85]),t(It,[2,86]),t(It,[2,87]),t(It,[2,88]),t(It,[2,89]),t(It,[2,90]),t(It,[2,91]),{9:196,20:Y,21:z,22:dt,23:U,24:pt,26:yt,38:gt,40:[1,195],42:mt,47:$,59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:w
t,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,42:[1,197],43:31,45:32,46:42,47:f,49:43,59:d,60:p,79:y,80:g,81:m,82:v,83:b,84:_,88:x,98:w,99:k,102:T,104:E,105:C,109:44,111:S,112:A,113:M,114:N,115:D,116:O,117:B,118:L,119:I,120:R},{22:P,30:198},{22:[1,199],26:H,47:$,59:W,60:V,84:G,98:X,99:Z,102:Q,104:K,105:J,108:95,110:172,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:[1,200]},{22:[1,201]},{22:[1,202],99:[1,203]},t(Ft,[2,119]),{22:[1,204]},{22:[1,205],26:H,47:$,59:W,60:V,84:G,98:X,99:Z,102:Q,104:K,105:J,108:95,110:172,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:[1,206],26:H,47:$,59:W,60:V,84:G,98:X,99:Z,102:Q,104:K,105:J,108:95,110:172,115:tt,116:et,117:nt,118:rt,119:it,120:at},{77:[1,207]},t(ut,[2,99],{22:[1,208]}),{77:[1,209],94:[1,210]},{77:[1,211]},t(Lt,[2,149]),{77:[1,212],94:[1,213]},t(ot,[2,54],{109:123,47:f,59:d,60:p,84:_,88:x,98:w,99:k,102:T,104:E,105:C,115:D,116:O,117:B,118:L,119:I,120:R}),{22:dt,24:pt,26:yt,38:gt,41:[1,214],42:mt,47:$,59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:215,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,51:[1,216],59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,53:[1,217],59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,55:[1,218],59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,57:[1,219],59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{60:[1,220]},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,59:W,60:V,63:[1,221],66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,59:W,60:V,65:[1,222],66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,39:223,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,41:[1,224],42:mt,47:$,59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,59:W,60:V,66:vt,68:[1,225],70:[1,226],74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87
:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,59:W,60:V,66:vt,68:[1,228],70:[1,227],74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{9:229,20:Y,21:z,23:U},t(q,[2,49],{47:Bt}),t(ft,[2,73]),t(ft,[2,72]),{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,59:W,60:V,61:[1,230],66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(ft,[2,75]),t(It,[2,79]),{22:dt,24:pt,26:yt,38:gt,39:231,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(Ot,o,{17:232}),t(j,[2,43]),{46:233,47:f,49:43,59:d,60:p,84:_,88:x,98:w,99:k,102:T,104:E,105:C,109:44,115:D,116:O,117:B,118:L,119:I,120:R},{22:Pt,59:jt,60:Yt,79:zt,89:234,95:Ut,98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},{22:Pt,59:jt,60:Yt,79:zt,89:248,95:Ut,98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},{22:Pt,59:jt,60:Yt,79:zt,89:249,95:Ut,97:[1,250],98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},{22:Pt,59:jt,60:Yt,79:zt,89:251,95:Ut,97:[1,252],98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},{98:[1,253]},{22:Pt,59:jt,60:Yt,79:zt,89:254,95:Ut,98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},{22:Pt,59:jt,60:Yt,79:zt,89:255,95:Ut,98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},{26:H,47:$,59:W,60:V,84:G,90:256,98:X,99:Z,102:Q,104:K,105:J,108:95,110:93,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(ut,[2,98]),{77:[1,257]},t(ut,[2,102],{22:[1,258]}),t(ut,[2,103]),t(ut,[2,106]),t(ut,[2,108],{22:[1,259]}),t(ut,[2,109]),t(st,[2,55]),{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,51:[1,260],59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(st,[2,62]),t(st,[2,57]),t(st,[2,58]),t(st,[2,59]),{59:[1,261]},t(st,[2,61]),t(st,[2,63]),{22:dt,24:pt,26:yt,38:gt,42:mt,47:$,59:W,60:V,65:[1,262],66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(st,[2,65]),t(st,[2,66]),t(st,[2,68]),t(st,[2,67]),t(st,[2,69]),t(Rt,[2,4]),t([22,47,59,60,84,88,98,99,102,104,105,115,116,117,118,119,120],[2,77]),{22:dt,24:pt,26:yt,38:gt,41:[1,263],42:mt,47:$,59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,42:[1,264],43:31,45:32,46:42,47:f,49:43,59:d,60:p,79:y,80:g,81:m,82:v,83:b,84:_,88:x,98:w,99:k,102:T,104:E,105:C,109:44,111:S,112:A,113:M,114:N,115:D,116:O,117:B,118:L,119:I,120:R},t(ot,[2,53]),t(ut,[2,111],{99:Xt}),t(Zt,[2,121],{101:266,22:Pt,59:jt,60:Yt,79:zt,95:Ut,98:qt,102:Ht,103:$t,104:Wt,105:Vt,106:Gt}),t(Qt,[2,123]),t(Qt,[2,125]),t(Qt,[2,126]),t(Qt,[2,127]),t(Qt,[2,128]),t(Qt,[2,129]),t(Qt,[2,130]),t(Qt,[2,131]),t(Qt,[2,132]),t(Qt,[2,133]),t(Qt,[2,134]),t(Qt,[2,135]),t(ut,[2,112],{99:Xt}),t(ut,[2,113],{99:Xt}),{22:[1,267]},t(ut,[2,114],{99:Xt}),{22:[1,268]},t(Ft,[2,120]),t(ut,[2,94],{99:Xt}),t(ut,[2,95],{99:Xt}),t(ut,
[2,96],{108:95,110:172,26:H,47:$,59:W,60:V,84:G,98:X,99:Z,102:Q,104:K,105:J,115:tt,116:et,117:nt,118:rt,119:it,120:at}),t(ut,[2,100]),{94:[1,269]},{94:[1,270]},{51:[1,271]},{61:[1,272]},{65:[1,273]},{9:274,20:Y,21:z,23:U},t(j,[2,42]),{22:Pt,59:jt,60:Yt,79:zt,95:Ut,98:qt,100:275,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},t(Qt,[2,124]),{26:H,47:$,59:W,60:V,84:G,90:276,98:X,99:Z,102:Q,104:K,105:J,108:95,110:93,115:tt,116:et,117:nt,118:rt,119:it,120:at},{26:H,47:$,59:W,60:V,84:G,90:277,98:X,99:Z,102:Q,104:K,105:J,108:95,110:93,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(ut,[2,104]),t(ut,[2,110]),t(st,[2,56]),{22:dt,24:pt,26:yt,38:gt,39:278,42:mt,47:$,59:W,60:V,66:vt,74:bt,76:134,77:_t,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},t(st,[2,64]),t(Ot,o,{17:279}),t(Zt,[2,122],{101:266,22:Pt,59:jt,60:Yt,79:zt,95:Ut,98:qt,102:Ht,103:$t,104:Wt,105:Vt,106:Gt}),t(ut,[2,117],{108:95,110:172,22:[1,280],26:H,47:$,59:W,60:V,84:G,98:X,99:Z,102:Q,104:K,105:J,115:tt,116:et,117:nt,118:rt,119:it,120:at}),t(ut,[2,118],{108:95,110:172,22:[1,281],26:H,47:$,59:W,60:V,84:G,98:X,99:Z,102:Q,104:K,105:J,115:tt,116:et,117:nt,118:rt,119:it,120:at}),{22:dt,24:pt,26:yt,38:gt,41:[1,282],42:mt,47:$,59:W,60:V,66:vt,74:bt,76:194,78:145,79:xt,80:wt,81:kt,82:Tt,83:Et,84:Ct,85:St,87:136,88:At,98:X,99:Z,102:Mt,104:K,105:J,106:Nt,107:Dt,108:142,115:tt,116:et,117:nt,118:rt,119:it,120:at},{18:18,19:19,20:s,21:c,22:u,23:l,32:24,33:25,34:26,35:27,36:28,37:29,38:h,42:[1,283],43:31,45:32,46:42,47:f,49:43,59:d,60:p,79:y,80:g,81:m,82:v,83:b,84:_,88:x,98:w,99:k,102:T,104:E,105:C,109:44,111:S,112:A,113:M,114:N,115:D,116:O,117:B,118:L,119:I,120:R},{22:Pt,59:jt,60:Yt,79:zt,89:284,95:Ut,98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},{22:Pt,59:jt,60:Yt,79:zt,89:285,95:Ut,98:qt,100:235,101:236,102:Ht,103:$t,104:Wt,105:Vt,106:Gt},t(st,[2,60]),t(j,[2,41]),t(ut,[2,115],{99:Xt}),t(ut,[2,116],{99:Xt})],defaultActions:{2:[2,1],9:[2,5],10:[2,2],126:[2,7]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,x,w,k,T,E,C,S,A,M={};;){if(w=n[n.length-1],this.defaultActions[w]?k=this.defaultActions[w]:(null==_&&(_=b()),k=o[w]&&o[w][_]),void 0===k||!k.length||!k[0]){var N="";for(E in A=[],o[w])this.terminals_[E]&&E>h&&A.push("'"+this.terminals_[E]+"'");N=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(c+1)+": Unexpected "+(_==f?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(N,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:m,expected:A})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+w+", token: "+_);switch(k[0]){case 
1:n.push(_),i.push(p.yytext),a.push(p.yylloc),n.push(k[1]),_=null,x?(_=x,x=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[k[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(T=this.performAction.apply(M,[s,u,c,y.yy,k[1],i,a].concat(d))))return T;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[k[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},Jt={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){return this.next()||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),12;case 1:return this.begin("type_directive"),13;case 2:return this.popState(),this.begin("arg_directive"),10;case 3:return this.popState(),this.popState(),15;case 4:return 14;case 5:case 6:break;case 7:this.begin("string");break;case 8:case 17:case 20:case 23:case 26:this.popState();break;case 9:return"STR";case 10:return 79;case 11:return 88;case 12:return 80;case 13:return 97;case 14:return 81;case 15:return 82;case 16:this.begin("href");break;case 18:return 93;case 19:this.begin("callbackname");break;case 21:this.popState(),this.begin("callbackargs");break;case 22:return 91;case 24:return 92;case 25:this.begin("click");break;case 27:return 83;case 28:case 29:return t.lex.firstGraph()&&this.begin("dir"),24;case 30:return 38;case 31:return 42;case 32:case 33:case 34:case 35:return 94;case 36:return this.popState(),25;case 37:case 38:case 39:case 40:case 41:case 42:case 43:case 44:case 45:case 46:return this.popState(),26;case 47:return 111;case 48:return 112;case 49:return 113;case 50:return 114;case 51:return 98;case 52:return 104;case 53:return 48;case 54:return 60;case 55:return 47;case 56:return 20;case 57:return 99;case 58:return 119;case 59:case 60:case 61:return 75;case 62:case 63:case 64:return 74;case 65:return 52;case 66:return 53;case 67:return 54;case 68:return 55;case 69:return 56;case 70:return 57;case 71:return 58;case 72:return 62;case 73:return 63;case 74:return 102;case 75:return 105;case 76:return 120;case 77:return 117;case 78:return 106;case 79:case 80:return 118;case 81:return 107;case 82:return 66;case 83:return 85;case 84:return"SEP";case 85:return 84;case 86:return 59;case 87:return 68;case 88:return 67;case 89:return 70;case 90:return 69;case 91:return 115;case 92:return 116;case 93:return 61;case 94:return 50;case 95:return 51;case 96:return 40;case 97:return 41;case 98:return 64;case 99:return 65;case 100:return 126;case 101:return 21;case 102:return 22;case 103:return 
t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,2],n=[1,3],r=[1,5],i=[1,7],a=[2,5],o=[1,15],s=[1,17],c=[1,18],u=[1,19],l=[1,21],h=[1,22],f=[1,23],d=[1,29],p=[1,30],y=[1,31],g=[1,32],m=[1,33],v=[1,34],b=[1,37],_=[1,38],x=[1,39],w=[1,40],k=[1,41],T=[1,42],E=[1,45],C=[1,4,5,16,20,22,23,24,30,32,33,34,35,36,38,40,41,42,46,47,48,49,57,67],S=[1,58],A=[4,5,16,20,22,23,24,30,32,33,34,35,36,38,42,46,47,48,49,57,67],M=[4,5,16,20,22,23,24,30,32,33,34,35,36,38,41,42,46,47,48,49,57,67],N=[4,5,16,20,22,23,24,30,32,33,34,35,36,38,40,42,46,47,48,49,57,67],D=[55,56,57],O=[1,4,5,7,16,20,22,23,24,30,32,33,34,35,36,38,40,41,42,46,47,48,49,57,67],B={trace:function(){},yy:{},symbols_:{error:2,start:3,SPACE:4,NEWLINE:5,directive:6,SD:7,document:8,line:9,statement:10,openDirective:11,typeDirective:12,closeDirective:13,":":14,argDirective:15,participant:16,actor:17,AS:18,restOfLine:19,participant_actor:20,signal:21,autonumber:22,activate:23,deactivate:24,note_statement:25,links_statement:26,link_statement:27,properties_statement:28,details_statement:29,title:30,text2:31,loop:32,end:33,rect:34,opt:35,alt:36,else_sections:37,par:38,par_sections:39,and:40,else:41,note:42,placement:43,over:44,actor_pair:45,links:46,link:47,properties:48,details:49,spaceList:50,",":51,left_of:52,right_of:53,signaltype:54,"+":55,"-":56,ACTOR:57,SOLID_OPEN_ARROW:58,DOTTED_OPEN_ARROW:59,SOLID_ARROW:60,DOTTED_ARROW:61,SOLID_CROSS:62,DOTTED_CROSS:63,SOLID_POINT:64,DOTTED_POINT:65,TXT:66,open_directive:67,type_directive:68,arg_directive:69,close_directive:70,$accept:0,$end:1},terminals_:{2:"error",4:"SPACE",5:"NEWLINE",7:"SD",14:":",16:"participant",18:"AS",19:"restOfLine",20:"participant_actor",22:"autonumber",23:"activate",24:"deactivate",30:"title",32:"loop",33:"end",34:"rect",35:"opt",36:"alt",38:"par",40:"and",41:"else",42:"note",44:"over",46:"links",47:"link",48:"properties",49:"details",51:",",52:"left_of",53:"right_of",55:"+",56:"-",57:"ACTOR",58:"SOLID_OPEN_ARROW",59:"DOTTED_OPEN_ARROW",60:"SOLID_ARROW",61:"DOTTED_ARROW",62:"SOLID_CROSS",63:"DOTTED_CROSS",64:"SOLID_POINT",65:"DOTTED_POINT",66:"TXT",67:"open_directive",68:"type_directive",69:"arg_directive",70:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[3,2],[8,0],[8,2],[9,2],[9,1],[9,1],[6,4],[6,6],[10,5],[10,3],[10,5],[10,3],[10,2],[10,1],[10,3],[10,3],[10,2],[10,2],[10,2],[10,2],[10,2],[10,3],[10,4],[10,4],[10,4],[10,4],[10,4],[10,1],[39,1],[39,4],[37,1],[37,4],[25,4],[25,4],[26,3],[27,3],[28,3],[29,3],[50,2],[50,1],[45,3],[45,1],[43,1],[43,1],[21,5],[21,5],[21,4],[17,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[31,1],[11,1],[12,1],[15,1],[13,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 4:return r.apply(a[s]),a[s];case 5:case 9:this.$=[];break;case 6:a[s-1].push(a[s]),this.$=a[s-1];break;case 7:case 8:case 45:this.$=a[s];break;case 12:a[s-3].type="addParticipant",a[s-3].description=r.parseMessage(a[s-1]),this.$=a[s-3];break;case 13:a[s-1].type="addParticipant",this.$=a[s-1];break;case 14:a[s-3].type="addActor",a[s-3].description=r.parseMessage(a[s-1]),this.$=a[s-3];break;case 15:a[s-1].type="addActor",this.$=a[s-1];break;case 17:r.enableSequenceNumbers();break;case 18:this.$={type:"activeStart",signalType:r.LINETYPE.ACTIVE_START,actor:a[s-1]};break;case 19:this.$={type:"activeEnd",signalType:r.LINETYPE.ACTIVE_END,actor:a[s-1]};break;case 25:this.$=[{type:"setTitle",text:a[s-1]}];break;case 
26:a[s-1].unshift({type:"loopStart",loopText:r.parseMessage(a[s-2]),signalType:r.LINETYPE.LOOP_START}),a[s-1].push({type:"loopEnd",loopText:a[s-2],signalType:r.LINETYPE.LOOP_END}),this.$=a[s-1];break;case 27:a[s-1].unshift({type:"rectStart",color:r.parseMessage(a[s-2]),signalType:r.LINETYPE.RECT_START}),a[s-1].push({type:"rectEnd",color:r.parseMessage(a[s-2]),signalType:r.LINETYPE.RECT_END}),this.$=a[s-1];break;case 28:a[s-1].unshift({type:"optStart",optText:r.parseMessage(a[s-2]),signalType:r.LINETYPE.OPT_START}),a[s-1].push({type:"optEnd",optText:r.parseMessage(a[s-2]),signalType:r.LINETYPE.OPT_END}),this.$=a[s-1];break;case 29:a[s-1].unshift({type:"altStart",altText:r.parseMessage(a[s-2]),signalType:r.LINETYPE.ALT_START}),a[s-1].push({type:"altEnd",signalType:r.LINETYPE.ALT_END}),this.$=a[s-1];break;case 30:a[s-1].unshift({type:"parStart",parText:r.parseMessage(a[s-2]),signalType:r.LINETYPE.PAR_START}),a[s-1].push({type:"parEnd",signalType:r.LINETYPE.PAR_END}),this.$=a[s-1];break;case 33:this.$=a[s-3].concat([{type:"and",parText:r.parseMessage(a[s-1]),signalType:r.LINETYPE.PAR_AND},a[s]]);break;case 35:this.$=a[s-3].concat([{type:"else",altText:r.parseMessage(a[s-1]),signalType:r.LINETYPE.ALT_ELSE},a[s]]);break;case 36:this.$=[a[s-1],{type:"addNote",placement:a[s-2],actor:a[s-1].actor,text:a[s]}];break;case 37:a[s-2]=[].concat(a[s-1],a[s-1]).slice(0,2),a[s-2][0]=a[s-2][0].actor,a[s-2][1]=a[s-2][1].actor,this.$=[a[s-1],{type:"addNote",placement:r.PLACEMENT.OVER,actor:a[s-2].slice(0,2),text:a[s]}];break;case 38:this.$=[a[s-1],{type:"addLinks",actor:a[s-1].actor,text:a[s]}];break;case 39:this.$=[a[s-1],{type:"addALink",actor:a[s-1].actor,text:a[s]}];break;case 40:this.$=[a[s-1],{type:"addProperties",actor:a[s-1].actor,text:a[s]}];break;case 41:this.$=[a[s-1],{type:"addDetails",actor:a[s-1].actor,text:a[s]}];break;case 44:this.$=[a[s-2],a[s]];break;case 46:this.$=r.PLACEMENT.LEFTOF;break;case 47:this.$=r.PLACEMENT.RIGHTOF;break;case 48:this.$=[a[s-4],a[s-1],{type:"addMessage",from:a[s-4].actor,to:a[s-1].actor,signalType:a[s-3],msg:a[s]},{type:"activeStart",signalType:r.LINETYPE.ACTIVE_START,actor:a[s-1]}];break;case 49:this.$=[a[s-4],a[s-1],{type:"addMessage",from:a[s-4].actor,to:a[s-1].actor,signalType:a[s-3],msg:a[s]},{type:"activeEnd",signalType:r.LINETYPE.ACTIVE_END,actor:a[s-4]}];break;case 50:this.$=[a[s-3],a[s-1],{type:"addMessage",from:a[s-3].actor,to:a[s-1].actor,signalType:a[s-2],msg:a[s]}];break;case 51:this.$={type:"addParticipant",actor:a[s]};break;case 52:this.$=r.LINETYPE.SOLID_OPEN;break;case 53:this.$=r.LINETYPE.DOTTED_OPEN;break;case 54:this.$=r.LINETYPE.SOLID;break;case 55:this.$=r.LINETYPE.DOTTED;break;case 56:this.$=r.LINETYPE.SOLID_CROSS;break;case 57:this.$=r.LINETYPE.DOTTED_CROSS;break;case 58:this.$=r.LINETYPE.SOLID_POINT;break;case 59:this.$=r.LINETYPE.DOTTED_POINT;break;case 60:this.$=r.parseMessage(a[s].trim().substring(1));break;case 61:r.parseDirective("%%{","open_directive");break;case 62:r.parseDirective(a[s],"type_directive");break;case 63:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
64:r.parseDirective("}%%","close_directive","sequence")}},table:[{3:1,4:e,5:n,6:4,7:r,11:6,67:i},{1:[3]},{3:8,4:e,5:n,6:4,7:r,11:6,67:i},{3:9,4:e,5:n,6:4,7:r,11:6,67:i},{3:10,4:e,5:n,6:4,7:r,11:6,67:i},t([1,4,5,16,20,22,23,24,30,32,34,35,36,38,42,46,47,48,49,57,67],a,{8:11}),{12:12,68:[1,13]},{68:[2,61]},{1:[2,1]},{1:[2,2]},{1:[2,3]},{1:[2,4],4:o,5:s,6:35,9:14,10:16,11:6,16:c,17:36,20:u,21:20,22:l,23:h,24:f,25:24,26:25,27:26,28:27,29:28,30:d,32:p,34:y,35:g,36:m,38:v,42:b,46:_,47:x,48:w,49:k,57:T,67:i},{13:43,14:[1,44],70:E},t([14,70],[2,62]),t(C,[2,6]),{6:35,10:46,11:6,16:c,17:36,20:u,21:20,22:l,23:h,24:f,25:24,26:25,27:26,28:27,29:28,30:d,32:p,34:y,35:g,36:m,38:v,42:b,46:_,47:x,48:w,49:k,57:T,67:i},t(C,[2,8]),t(C,[2,9]),{17:47,57:T},{17:48,57:T},{5:[1,49]},t(C,[2,17]),{17:50,57:T},{17:51,57:T},{5:[1,52]},{5:[1,53]},{5:[1,54]},{5:[1,55]},{5:[1,56]},{31:57,66:S},{19:[1,59]},{19:[1,60]},{19:[1,61]},{19:[1,62]},{19:[1,63]},t(C,[2,31]),{54:64,58:[1,65],59:[1,66],60:[1,67],61:[1,68],62:[1,69],63:[1,70],64:[1,71],65:[1,72]},{43:73,44:[1,74],52:[1,75],53:[1,76]},{17:77,57:T},{17:78,57:T},{17:79,57:T},{17:80,57:T},t([5,18,51,58,59,60,61,62,63,64,65,66],[2,51]),{5:[1,81]},{15:82,69:[1,83]},{5:[2,64]},t(C,[2,7]),{5:[1,85],18:[1,84]},{5:[1,87],18:[1,86]},t(C,[2,16]),{5:[1,88]},{5:[1,89]},t(C,[2,20]),t(C,[2,21]),t(C,[2,22]),t(C,[2,23]),t(C,[2,24]),{5:[1,90]},{5:[2,60]},t(A,a,{8:91}),t(A,a,{8:92}),t(A,a,{8:93}),t(M,a,{37:94,8:95}),t(N,a,{39:96,8:97}),{17:100,55:[1,98],56:[1,99],57:T},t(D,[2,52]),t(D,[2,53]),t(D,[2,54]),t(D,[2,55]),t(D,[2,56]),t(D,[2,57]),t(D,[2,58]),t(D,[2,59]),{17:101,57:T},{17:103,45:102,57:T},{57:[2,46]},{57:[2,47]},{31:104,66:S},{31:105,66:S},{31:106,66:S},{31:107,66:S},t(O,[2,10]),{13:108,70:E},{70:[2,63]},{19:[1,109]},t(C,[2,13]),{19:[1,110]},t(C,[2,15]),t(C,[2,18]),t(C,[2,19]),t(C,[2,25]),{4:o,5:s,6:35,9:14,10:16,11:6,16:c,17:36,20:u,21:20,22:l,23:h,24:f,25:24,26:25,27:26,28:27,29:28,30:d,32:p,33:[1,111],34:y,35:g,36:m,38:v,42:b,46:_,47:x,48:w,49:k,57:T,67:i},{4:o,5:s,6:35,9:14,10:16,11:6,16:c,17:36,20:u,21:20,22:l,23:h,24:f,25:24,26:25,27:26,28:27,29:28,30:d,32:p,33:[1,112],34:y,35:g,36:m,38:v,42:b,46:_,47:x,48:w,49:k,57:T,67:i},{4:o,5:s,6:35,9:14,10:16,11:6,16:c,17:36,20:u,21:20,22:l,23:h,24:f,25:24,26:25,27:26,28:27,29:28,30:d,32:p,33:[1,113],34:y,35:g,36:m,38:v,42:b,46:_,47:x,48:w,49:k,57:T,67:i},{33:[1,114]},{4:o,5:s,6:35,9:14,10:16,11:6,16:c,17:36,20:u,21:20,22:l,23:h,24:f,25:24,26:25,27:26,28:27,29:28,30:d,32:p,33:[2,34],34:y,35:g,36:m,38:v,41:[1,115],42:b,46:_,47:x,48:w,49:k,57:T,67:i},{33:[1,116]},{4:o,5:s,6:35,9:14,10:16,11:6,16:c,17:36,20:u,21:20,22:l,23:h,24:f,25:24,26:25,27:26,28:27,29:28,30:d,32:p,33:[2,32],34:y,35:g,36:m,38:v,40:[1,117],42:b,46:_,47:x,48:w,49:k,57:T,67:i},{17:118,57:T},{17:119,57:T},{31:120,66:S},{31:121,66:S},{31:122,66:S},{51:[1,123],66:[2,45]},{5:[2,38]},{5:[2,39]},{5:[2,40]},{5:[2,41]},{5:[1,124]},{5:[1,125]},{5:[1,126]},t(C,[2,26]),t(C,[2,27]),t(C,[2,28]),t(C,[2,29]),{19:[1,127]},t(C,[2,30]),{19:[1,128]},{31:129,66:S},{31:130,66:S},{5:[2,50]},{5:[2,36]},{5:[2,37]},{17:131,57:T},t(O,[2,11]),t(C,[2,12]),t(C,[2,14]),t(M,a,{8:95,37:132}),t(N,a,{8:97,39:133}),{5:[2,48]},{5:[2,49]},{66:[2,44]},{33:[2,35]},{33:[2,33]}],defaultActions:{7:[2,61],8:[2,1],9:[2,2],10:[2,3],45:[2,64],58:[2,60],75:[2,46],76:[2,47],83:[2,63],104:[2,38],105:[2,39],106:[2,40],107:[2,41],120:[2,50],121:[2,36],122:[2,37],129:[2,48],130:[2,49],131:[2,44],132:[2,35],133:[2,33]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw 
n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,x,w,k,T,E,C,S,A,M={};;){if(w=n[n.length-1],this.defaultActions[w]?k=this.defaultActions[w]:(null==_&&(_=b()),k=o[w]&&o[w][_]),void 0===k||!k.length||!k[0]){var N="";for(E in A=[],o[w])this.terminals_[E]&&E>h&&A.push("'"+this.terminals_[E]+"'");N=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(c+1)+": Unexpected "+(_==f?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(N,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:m,expected:A})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+w+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),a.push(p.yylloc),n.push(k[1]),_=null,x?(_=x,x=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[k[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(T=this.performAction.apply(M,[s,u,c,y.yy,k[1],i,a].concat(d))))return T;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[k[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},L={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){return this.next()||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),67;case 1:return this.begin("type_directive"),68;case 2:return this.popState(),this.begin("arg_directive"),14;case 3:return this.popState(),this.popState(),70;case 4:return 69;case 5:case 39:case 52:return 5;case 6:case 7:case 8:case 9:case 10:break;case 11:return this.begin("ID"),16;case 12:return this.begin("ID"),20;case 13:return e.yytext=e.yytext.trim(),this.begin("ALIAS"),57;case 14:return this.popState(),this.popState(),this.begin("LINE"),18;case 15:return this.popState(),this.popState(),5;case 16:return this.begin("LINE"),32;case 17:return this.begin("LINE"),34;case 18:return this.begin("LINE"),35;case 19:return this.begin("LINE"),36;case 20:return this.begin("LINE"),41;case 21:return this.begin("LINE"),38;case 22:return this.begin("LINE"),40;case 23:return this.popState(),19;case 24:return 33;case 25:return 52;case 26:return 53;case 27:return 46;case 28:return 47;case 29:return 48;case 30:return 49;case 31:return 44;case 32:return 42;case 33:return this.begin("ID"),23;case 34:return this.begin("ID"),24;case 35:return 30;case 36:return 7;case 37:return 22;case 38:return 51;case 40:return e.yytext=e.yytext.trim(),57;case 41:return 60;case 42:return 61;case 43:return 58;case 44:return 59;case 45:return 62;case 46:return 63;case 47:return 64;case 48:return 65;case 49:return 66;case 50:return 55;case 51:return 56;case 53:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:participant\b)/i,/^(?:actor\b)/i,/^(?:[^\->:\n,;]+?(?=((?!\n)\s)+as(?!\n)\s|[#\n;]|$))/i,/^(?:as\b)/i,/^(?:(?:))/i,/^(?:loop\b)/i,/^(?:rect\b)/i,/^(?:opt\b)/i,/^(?:alt\b)/i,/^(?:else\b)/i,/^(?:par\b)/i,/^(?:and\b)/i,/^(?:(?:[:]?(?:no)?wrap)?[^#\n;]*)/i,/^(?:end\b)/i,/^(?:left of\b)/i,/^(?:right 
of\b)/i,/^(?:links\b)/i,/^(?:link\b)/i,/^(?:properties\b)/i,/^(?:details\b)/i,/^(?:over\b)/i,/^(?:note\b)/i,/^(?:activate\b)/i,/^(?:deactivate\b)/i,/^(?:title\b)/i,/^(?:sequenceDiagram\b)/i,/^(?:autonumber\b)/i,/^(?:,)/i,/^(?:;)/i,/^(?:[^\+\->:\n,;]+((?!(-x|--x|-\)|--\)))[\-]*[^\+\->:\n,;]+)*)/i,/^(?:->>)/i,/^(?:-->>)/i,/^(?:->)/i,/^(?:-->)/i,/^(?:-[x])/i,/^(?:--[x])/i,/^(?:-[\)])/i,/^(?:--[\)])/i,/^(?::(?:(?:no)?wrap)?[^#\n;]+)/i,/^(?:\+)/i,/^(?:-)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1,8],inclusive:!1},type_directive:{rules:[2,3,8],inclusive:!1},arg_directive:{rules:[3,4,8],inclusive:!1},ID:{rules:[7,8,13],inclusive:!1},ALIAS:{rules:[7,8,14,15],inclusive:!1},LINE:{rules:[7,8,23],inclusive:!1},INITIAL:{rules:[0,5,6,8,9,10,11,12,16,17,18,19,20,21,22,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53],inclusive:!0}}};function I(){this.yy={}}return B.lexer=L,I.prototype=B,B.Parser=I,new I}();e.parser=r,e.Parser=r.Parser,e.parse=function(){return r.parse.apply(r,arguments)},e.main=function(t){t[1]||(console.log("Usage: "+t[0]+" FILE"),process.exit(1));var r=n(1993).readFileSync(n(6470).normalize(t[1]),"utf8");return e.parser.parse(r)},n.c[n.s]===t&&e.main(process.argv.slice(1))},3584:(t,e,n)=>{t=n.nmd(t);var r=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,2],n=[1,3],r=[1,5],i=[1,7],a=[2,5],o=[1,15],s=[1,17],c=[1,19],u=[1,20],l=[1,21],h=[1,22],f=[1,30],d=[1,23],p=[1,24],y=[1,25],g=[1,26],m=[1,27],v=[1,32],b=[1,33],_=[1,34],x=[1,35],w=[1,31],k=[1,38],T=[1,4,5,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],E=[1,4,5,12,13,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],C=[1,4,5,7,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],S=[4,5,14,15,17,19,20,22,23,24,25,26,27,36,37,38,39,42,45],A={trace:function(){},yy:{},symbols_:{error:2,start:3,SPACE:4,NL:5,directive:6,SD:7,document:8,line:9,statement:10,idStatement:11,DESCR:12,"--\x3e":13,HIDE_EMPTY:14,scale:15,WIDTH:16,COMPOSIT_STATE:17,STRUCT_START:18,STRUCT_STOP:19,STATE_DESCR:20,AS:21,ID:22,FORK:23,JOIN:24,CHOICE:25,CONCURRENT:26,note:27,notePosition:28,NOTE_TEXT:29,direction:30,openDirective:31,typeDirective:32,closeDirective:33,":":34,argDirective:35,direction_tb:36,direction_bt:37,direction_rl:38,direction_lr:39,eol:40,";":41,EDGE_STATE:42,left_of:43,right_of:44,open_directive:45,type_directive:46,arg_directive:47,close_directive:48,$accept:0,$end:1},terminals_:{2:"error",4:"SPACE",5:"NL",7:"SD",12:"DESCR",13:"--\x3e",14:"HIDE_EMPTY",15:"scale",16:"WIDTH",17:"COMPOSIT_STATE",18:"STRUCT_START",19:"STRUCT_STOP",20:"STATE_DESCR",21:"AS",22:"ID",23:"FORK",24:"JOIN",25:"CHOICE",26:"CONCURRENT",27:"note",29:"NOTE_TEXT",34:":",36:"direction_tb",37:"direction_bt",38:"direction_rl",39:"direction_lr",41:";",42:"EDGE_STATE",43:"left_of",44:"right_of",45:"open_directive",46:"type_directive",47:"arg_directive",48:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[3,2],[8,0],[8,2],[9,2],[9,1],[9,1],[10,1],[10,2],[10,3],[10,4],[10,1],[10,2],[10,1],[10,4],[10,3],[10,6],[10,1],[10,1],[10,1],[10,1],[10,4],[10,4],[10,1],[10,1],[6,3],[6,5],[30,1],[30,1],[30,1],[30,1],[40,1],[40,1],[11,1],[11,1],[28,1],[28,1],[31,1],[32,1],[35,1],[33,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 4:return r.setRootDoc(a[s]),a[s];case 5:this.$=[];break;case 6:"nl"!=a[s]&&(a[s-1].push(a[s]),this.$=a[s-1]);break;case 7:case 8:case 36:case 37:this.$=a[s];break;case 9:this.$="nl";break;case 
10:this.$={stmt:"state",id:a[s],type:"default",description:""};break;case 11:this.$={stmt:"state",id:a[s-1],type:"default",description:r.trimColon(a[s])};break;case 12:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-2],type:"default",description:""},state2:{stmt:"state",id:a[s],type:"default",description:""}};break;case 13:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-3],type:"default",description:""},state2:{stmt:"state",id:a[s-1],type:"default",description:""},description:a[s].substr(1).trim()};break;case 17:this.$={stmt:"state",id:a[s-3],type:"default",description:"",doc:a[s-1]};break;case 18:var c=a[s],u=a[s-2].trim();if(a[s].match(":")){var l=a[s].split(":");c=l[0],u=[u,l[1]]}this.$={stmt:"state",id:c,type:"default",description:u};break;case 19:this.$={stmt:"state",id:a[s-3],type:"default",description:a[s-5],doc:a[s-1]};break;case 20:this.$={stmt:"state",id:a[s],type:"fork"};break;case 21:this.$={stmt:"state",id:a[s],type:"join"};break;case 22:this.$={stmt:"state",id:a[s],type:"choice"};break;case 23:this.$={stmt:"state",id:r.getDividerId(),type:"divider"};break;case 24:this.$={stmt:"state",id:a[s-1].trim(),note:{position:a[s-2].trim(),text:a[s].trim()}};break;case 30:r.setDirection("TB"),this.$={stmt:"dir",value:"TB"};break;case 31:r.setDirection("BT"),this.$={stmt:"dir",value:"BT"};break;case 32:r.setDirection("RL"),this.$={stmt:"dir",value:"RL"};break;case 33:r.setDirection("LR"),this.$={stmt:"dir",value:"LR"};break;case 40:r.parseDirective("%%{","open_directive");break;case 41:r.parseDirective(a[s],"type_directive");break;case 42:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 43:r.parseDirective("}%%","close_directive","state")}},table:[{3:1,4:e,5:n,6:4,7:r,31:6,45:i},{1:[3]},{3:8,4:e,5:n,6:4,7:r,31:6,45:i},{3:9,4:e,5:n,6:4,7:r,31:6,45:i},{3:10,4:e,5:n,6:4,7:r,31:6,45:i},t([1,4,5,14,15,17,20,22,23,24,25,26,27,36,37,38,39,42,45],a,{8:11}),{32:12,46:[1,13]},{46:[2,40]},{1:[2,1]},{1:[2,2]},{1:[2,3]},{1:[2,4],4:o,5:s,6:28,9:14,10:16,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:_,39:x,42:w,45:i},{33:36,34:[1,37],48:k},t([34,48],[2,41]),t(T,[2,6]),{6:28,10:39,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:_,39:x,42:w,45:i},t(T,[2,8]),t(T,[2,9]),t(T,[2,10],{12:[1,40],13:[1,41]}),t(T,[2,14]),{16:[1,42]},t(T,[2,16],{18:[1,43]}),{21:[1,44]},t(T,[2,20]),t(T,[2,21]),t(T,[2,22]),t(T,[2,23]),{28:45,29:[1,46],43:[1,47],44:[1,48]},t(T,[2,26]),t(T,[2,27]),t(E,[2,36]),t(E,[2,37]),t(T,[2,30]),t(T,[2,31]),t(T,[2,32]),t(T,[2,33]),t(C,[2,28]),{35:49,47:[1,50]},t(C,[2,43]),t(T,[2,7]),t(T,[2,11]),{11:51,22:f,42:w},t(T,[2,15]),t(S,a,{8:52}),{22:[1,53]},{22:[1,54]},{21:[1,55]},{22:[2,38]},{22:[2,39]},{33:56,48:k},{48:[2,42]},t(T,[2,12],{12:[1,57]}),{4:o,5:s,6:28,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,58],20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:_,39:x,42:w,45:i},t(T,[2,18],{18:[1,59]}),{29:[1,60]},{22:[1,61]},t(C,[2,29]),t(T,[2,13]),t(T,[2,17]),t(S,a,{8:62}),t(T,[2,24]),t(T,[2,25]),{4:o,5:s,6:28,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,63],20:h,22:f,23:d,24:p,25:y,26:g,27:m,30:29,31:6,36:v,37:b,38:_,39:x,42:w,45:i},t(T,[2,19])],defaultActions:{7:[2,40],8:[2,1],9:[2,2],10:[2,3],47:[2,38],48:[2,39],50:[2,42]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in 
this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,x,w,k,T,E,C,S,A,M={};;){if(w=n[n.length-1],this.defaultActions[w]?k=this.defaultActions[w]:(null==_&&(_=b()),k=o[w]&&o[w][_]),void 0===k||!k.length||!k[0]){var N="";for(E in A=[],o[w])this.terminals_[E]&&E>h&&A.push("'"+this.terminals_[E]+"'");N=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(c+1)+": Unexpected "+(_==f?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(N,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:m,expected:A})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+w+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),a.push(p.yylloc),n.push(k[1]),_=null,x?(_=x,x=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[k[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(T=this.performAction.apply(M,[s,u,c,y.yy,k[1],i,a].concat(d))))return T;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[k[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},M={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){return this.next()||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:case 26:return 36;case 1:case 27:return 37;case 2:case 28:return 38;case 3:case 29:return 39;case 4:return this.begin("open_directive"),45;case 5:return this.begin("type_directive"),46;case 6:return this.popState(),this.begin("arg_directive"),34;case 7:return this.popState(),this.popState(),48;case 8:return 47;case 9:case 10:case 12:case 13:case 14:case 15:case 39:case 45:break;case 11:case 59:return 5;case 16:return this.pushState("SCALE"),15;case 17:return 16;case 18:case 33:case 36:this.popState();break;case 19:this.pushState("STATE");break;case 20:case 23:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 21:case 24:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),24;case 22:case 25:return this.popState(),e.yytext=e.yytext.slice(0,-10).trim(),25;case 30:this.begin("STATE_STRING");break;case 31:return this.popState(),this.pushState("STATE_ID"),"AS";case 32:case 47:return this.popState(),"ID";case 34:return"STATE_DESCR";case 35:return 17;case 37:return this.popState(),this.pushState("struct"),18;case 38:return this.popState(),19;case 40:return this.begin("NOTE"),27;case 41:return this.popState(),this.pushState("NOTE_ID"),43;case 42:return this.popState(),this.pushState("NOTE_ID"),44;case 43:this.popState(),this.pushState("FLOATING_NOTE");break;case 44:return this.popState(),this.pushState("FLOATING_NOTE_ID"),"AS";case 46:return"NOTE_TEXT";case 48:return this.popState(),this.pushState("NOTE_TEXT"),22;case 49:return this.popState(),e.yytext=e.yytext.substr(2).trim(),29;case 50:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),29;case 51:case 52:return 7;case 53:return 14;case 54:return 42;case 55:return 22;case 56:return e.yytext=e.yytext.trim(),12;case 57:return 13;case 58:return 26;case 60:return"INVALID"}},rules:[/^(?:.*direction\s+TB[^\n]*)/i,/^(?:.*direction\s+BT[^\n]*)/i,/^(?:.*direction\s+RL[^\n]*)/i,/^(?:.*direction\s+LR[^\n]*)/i,/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:[\s]+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:scale\s+)/i,/^(?:\d+)/i,/^(?:\s+width\b)/i,/^(?:state\s+)/i,/^(?:.*<>)/i,/^(?:.*<>)/i,/^(?:.*<>)/i,/^(?:.*\[\[fork\]\])/i,/^(?:.*\[\[join\]\])/i,/^(?:.*\[\[choice\]\])/i,/^(?:.*direction\s+TB[^\n]*)/i,/^(?:.*direction\s+BT[^\n]*)/i,/^(?:.*direction\s+RL[^\n]*)/i,/^(?:.*direction\s+LR[^\n]*)/i,/^(?:["])/i,/^(?:\s*as\s+)/i,/^(?:[^\n\{]*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n\s\{]+)/i,/^(?:\n)/i,/^(?:\{)/i,/^(?:\})/i,/^(?:[\n])/i,/^(?:note\s+)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:")/i,/^(?:\s*as\s*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n]*)/i,/^(?:\s*[^:\n\s\-]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:[\s\S]*?end 
note\b)/i,/^(?:stateDiagram\s+)/i,/^(?:stateDiagram-v2\s+)/i,/^(?:hide empty description\b)/i,/^(?:\[\*\])/i,/^(?:[^:\n\s\-\{]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:-->)/i,/^(?:--)/i,/^(?:$)/i,/^(?:.)/i],conditions:{LINE:{rules:[13,14],inclusive:!1},close_directive:{rules:[13,14],inclusive:!1},arg_directive:{rules:[7,8,13,14],inclusive:!1},type_directive:{rules:[6,7,13,14],inclusive:!1},open_directive:{rules:[5,13,14],inclusive:!1},struct:{rules:[13,14,19,26,27,28,29,38,39,40,54,55,56,57,58],inclusive:!1},FLOATING_NOTE_ID:{rules:[47],inclusive:!1},FLOATING_NOTE:{rules:[44,45,46],inclusive:!1},NOTE_TEXT:{rules:[49,50],inclusive:!1},NOTE_ID:{rules:[48],inclusive:!1},NOTE:{rules:[41,42,43],inclusive:!1},SCALE:{rules:[17,18],inclusive:!1},ALIAS:{rules:[],inclusive:!1},STATE_ID:{rules:[32],inclusive:!1},STATE_STRING:{rules:[33,34],inclusive:!1},FORK_STATE:{rules:[],inclusive:!1},STATE:{rules:[13,14,20,21,22,23,24,25,30,31,35,36,37],inclusive:!1},ID:{rules:[13,14],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,9,10,11,12,14,15,16,19,37,40,51,52,53,54,55,56,57,59,60],inclusive:!0}}};function N(){this.yy={}}return A.lexer=M,N.prototype=A,A.Parser=N,new N}();e.parser=r,e.Parser=r.Parser,e.parse=function(){return r.parse.apply(r,arguments)},e.main=function(t){t[1]||(console.log("Usage: "+t[0]+" FILE"),process.exit(1));var r=n(3069).readFileSync(n(6470).normalize(t[1]),"utf8");return e.parser.parse(r)},n.c[n.s]===t&&e.main(process.argv.slice(1))},9763:(t,e,n)=>{t=n.nmd(t);var r=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,2],n=[1,5],r=[6,9,11,17,18,19,21],i=[1,15],a=[1,16],o=[1,17],s=[1,21],c=[4,6,9,11,17,18,19,21],u={trace:function(){},yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,title:17,section:18,taskName:19,taskData:20,open_directive:21,type_directive:22,arg_directive:23,close_directive:24,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",17:"title",18:"section",19:"taskName",20:"taskData",21:"open_directive",22:"type_directive",23:"arg_directive",24:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,1],[10,2],[10,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:return a[s-1];case 3:case 7:case 8:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 11:r.setTitle(a[s].substr(6)),this.$=a[s].substr(6);break;case 12:r.addSection(a[s].substr(8)),this.$=a[s].substr(8);break;case 13:r.addTask(a[s-1],a[s]),this.$="task";break;case 15:r.parseDirective("%%{","open_directive");break;case 16:r.parseDirective(a[s],"type_directive");break;case 17:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
18:r.parseDirective("}%%","close_directive","journey")}},table:[{3:1,4:e,7:3,12:4,21:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,21:n},{13:8,22:[1,9]},{22:[2,15]},{6:[1,10],7:18,8:11,9:[1,12],10:13,11:[1,14],12:4,17:i,18:a,19:o,21:n},{1:[2,2]},{14:19,15:[1,20],24:s},t([15,24],[2,16]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:18,10:22,12:4,17:i,18:a,19:o,21:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,12]),{20:[1,23]},t(r,[2,14]),{11:[1,24]},{16:25,23:[1,26]},{11:[2,18]},t(r,[2,5]),t(r,[2,13]),t(c,[2,9]),{14:27,24:s},{24:[2,17]},{11:[1,28]},t(c,[2,10])],defaultActions:{5:[2,15],7:[2,2],21:[2,18],26:[2,17]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),y={yy:{}};for(var g in this.yy)Object.prototype.hasOwnProperty.call(this.yy,g)&&(y.yy[g]=this.yy[g]);p.setInput(t,y.yy),y.yy.lexer=p,y.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var m=p.yylloc;a.push(m);var v=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof y.yy.parseError?this.parseError=y.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var _,x,w,k,T,E,C,S,A,M={};;){if(w=n[n.length-1],this.defaultActions[w]?k=this.defaultActions[w]:(null==_&&(_=b()),k=o[w]&&o[w][_]),void 0===k||!k.length||!k[0]){var N="";for(E in A=[],o[w])this.terminals_[E]&&E>h&&A.push("'"+this.terminals_[E]+"'");N=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[_]||_)+"'":"Parse error on line "+(c+1)+": Unexpected "+(_==f?"end of input":"'"+(this.terminals_[_]||_)+"'"),this.parseError(N,{text:p.match,token:this.terminals_[_]||_,line:p.yylineno,loc:m,expected:A})}if(k[0]instanceof Array&&k.length>1)throw new Error("Parse Error: multiple actions possible at state: "+w+", token: "+_);switch(k[0]){case 1:n.push(_),i.push(p.yytext),a.push(p.yylloc),n.push(k[1]),_=null,x?(_=x,x=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,m=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[k[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},v&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(T=this.performAction.apply(M,[s,u,c,y.yy,k[1],i,a].concat(d))))return T;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[k[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},l={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var 
e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){return this.next()||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),21;case 1:return this.begin("type_directive"),22;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),24;case 4:return 23;case 5:case 6:case 8:case 9:break;case 7:return 11;case 10:return 4;case 11:return 17;case 12:return 18;case 13:return 19;case 14:return 20;case 15:return 15;case 16:return 6;case 17:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:journey\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,13,14,15,16,17],inclusive:!0}}};function h(){this.yy={}}return u.lexer=l,h.prototype=u,u.Parser=h,new h}();e.parser=r,e.Parser=r.Parser,e.parse=function(){return r.parse.apply(r,arguments)},e.main=function(t){t[1]||(console.log("Usage: "+t[0]+" FILE"),process.exit(1));var r=n(9143).readFileSync(n(6470).normalize(t[1]),"utf8");return e.parser.parse(r)},n.c[n.s]===t&&e.main(process.argv.slice(1))},9609:t=>{"use strict";var e=/^(%20|\s)*(javascript|data)/im,n=/[^\x20-\x7E]/gim,r=/^([^:]+):/gm,i=[".","/"];t.exports={sanitizeUrl:function(t){if(!t)return"about:blank";var a,o,s=t.replace(n,"").trim();return function(t){return i.indexOf(t[0])>-1}(s)?s:(o=s.match(r))?(a=o[0],e.test(a)?"about:blank":s):"about:blank"}}},3841:t=>{t.exports=function(t,e){return t.intersect(e)}},7458:(t,e,n)=>{"use strict";n.d(e,{default:()=>hC});var r=n(1941),i=n.n(r),a={debug:1,info:2,warn:3,error:4,fatal:5},o={debug:function(){},info:function(){},warn:function(){},error:function(){},fatal:function(){}},s=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"fatal";isNaN(t)&&(t=t.toLowerCase(),void 0!==a[t]&&(t=a[t])),o.trace=function(){},o.debug=function(){},o.info=function(){},o.warn=function(){},o.error=function(){},o.fatal=function(){},t<=a.fatal&&(o.fatal=console.error?console.error.bind(console,c("FATAL"),"color: orange"):console.log.bind(console,"",c("FATAL"))),t<=a.error&&(o.error=console.error?console.error.bind(console,c("ERROR"),"color: orange"):console.log.bind(console,"",c("ERROR"))),t<=a.warn&&(o.warn=console.warn?console.warn.bind(console,c("WARN"),"color: orange"):console.log.bind(console,"",c("WARN"))),t<=a.info&&(o.info=console.info?console.info.bind(console,c("INFO"),"color: lightblue"):console.log.bind(console,"",c("INFO"))),t<=a.debug&&(o.debug=console.debug?console.debug.bind(console,c("DEBUG"),"color: 
lightgreen"):console.log.bind(console,"",c("DEBUG")))},c=function(t){var e=i()().format("ss.SSS");return"%c".concat(e," : ").concat(t," : ")};function u(t,e){let n;if(void 0===e)for(const e of t)null!=e&&(n=e)&&(n=e);else{let r=-1;for(let i of t)null!=(i=e(i,++r,t))&&(n=i)&&(n=i)}return n}function l(t,e){let n;if(void 0===e)for(const e of t)null!=e&&(n>e||void 0===n&&e>=e)&&(n=e);else{let r=-1;for(let i of t)null!=(i=e(i,++r,t))&&(n>i||void 0===n&&i>=i)&&(n=i)}return n}function h(t){return t}var f=1e-6;function d(t){return"translate("+t+",0)"}function p(t){return"translate(0,"+t+")"}function y(t){return e=>+t(e)}function g(t,e){return e=Math.max(0,t.bandwidth()-2*e)/2,t.round()&&(e=Math.round(e)),n=>+t(n)+e}function m(){return!this.__axis}function v(t,e){var n=[],r=null,i=null,a=6,o=6,s=3,c="undefined"!=typeof window&&window.devicePixelRatio>1?0:.5,u=1===t||4===t?-1:1,l=4===t||2===t?"x":"y",v=1===t||3===t?d:p;function b(d){var p=null==r?e.ticks?e.ticks.apply(e,n):e.domain():r,b=null==i?e.tickFormat?e.tickFormat.apply(e,n):h:i,_=Math.max(a,0)+s,x=e.range(),w=+x[0]+c,k=+x[x.length-1]+c,T=(e.bandwidth?g:y)(e.copy(),c),E=d.selection?d.selection():d,C=E.selectAll(".domain").data([null]),S=E.selectAll(".tick").data(p,e).order(),A=S.exit(),M=S.enter().append("g").attr("class","tick"),N=S.select("line"),D=S.select("text");C=C.merge(C.enter().insert("path",".tick").attr("class","domain").attr("stroke","currentColor")),S=S.merge(M),N=N.merge(M.append("line").attr("stroke","currentColor").attr(l+"2",u*a)),D=D.merge(M.append("text").attr("fill","currentColor").attr(l,u*_).attr("dy",1===t?"0em":3===t?"0.71em":"0.32em")),d!==E&&(C=C.transition(d),S=S.transition(d),N=N.transition(d),D=D.transition(d),A=A.transition(d).attr("opacity",f).attr("transform",(function(t){return isFinite(t=T(t))?v(t+c):this.getAttribute("transform")})),M.attr("opacity",f).attr("transform",(function(t){var e=this.parentNode.__axis;return v((e&&isFinite(e=e(t))?e:T(t))+c)}))),A.remove(),C.attr("d",4===t||2===t?o?"M"+u*o+","+w+"H"+c+"V"+k+"H"+u*o:"M"+c+","+w+"V"+k:o?"M"+w+","+u*o+"V"+c+"H"+k+"V"+u*o:"M"+w+","+c+"H"+k),S.attr("opacity",1).attr("transform",(function(t){return v(T(t)+c)})),N.attr(l+"2",u*a),D.attr(l,u*_).text(b),E.filter(m).attr("fill","none").attr("font-size",10).attr("font-family","sans-serif").attr("text-anchor",2===t?"start":4===t?"end":"middle"),E.each((function(){this.__axis=T}))}return b.scale=function(t){return arguments.length?(e=t,b):e},b.ticks=function(){return n=Array.from(arguments),b},b.tickArguments=function(t){return arguments.length?(n=null==t?[]:Array.from(t),b):n.slice()},b.tickValues=function(t){return arguments.length?(r=null==t?null:Array.from(t),b):r&&r.slice()},b.tickFormat=function(t){return arguments.length?(i=t,b):i},b.tickSize=function(t){return arguments.length?(a=o=+t,b):a},b.tickSizeInner=function(t){return arguments.length?(a=+t,b):a},b.tickSizeOuter=function(t){return arguments.length?(o=+t,b):o},b.tickPadding=function(t){return arguments.length?(s=+t,b):s},b.offset=function(t){return arguments.length?(c=+t,b):c},b}function b(){}function _(t){return null==t?b:function(){return this.querySelector(t)}}function x(t){return null==t?[]:Array.isArray(t)?t:Array.from(t)}function w(){return[]}function k(t){return null==t?w:function(){return this.querySelectorAll(t)}}function T(t){return function(){return this.matches(t)}}function E(t){return function(e){return e.matches(t)}}var C=Array.prototype.find;function S(){return this.firstElementChild}var A=Array.prototype.filter;function M(){return 
Array.from(this.children)}function N(t){return new Array(t.length)}function D(t,e){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=e}function O(t){return function(){return t}}function B(t,e,n,r,i,a){for(var o,s=0,c=e.length,u=a.length;se?1:t>=e?0:NaN}D.prototype={constructor:D,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,e){return this._parent.insertBefore(t,e)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var P="http://www.w3.org/1999/xhtml";const j={svg:"http://www.w3.org/2000/svg",xhtml:P,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function Y(t){var e=t+="",n=e.indexOf(":");return n>=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),j.hasOwnProperty(e)?{space:j[e],local:t}:t}function z(t){return function(){this.removeAttribute(t)}}function U(t){return function(){this.removeAttributeNS(t.space,t.local)}}function q(t,e){return function(){this.setAttribute(t,e)}}function H(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function $(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function W(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}function V(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function G(t){return function(){this.style.removeProperty(t)}}function X(t,e,n){return function(){this.style.setProperty(t,e,n)}}function Z(t,e,n){return function(){var r=e.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,n)}}function Q(t,e){return t.style.getPropertyValue(e)||V(t).getComputedStyle(t,null).getPropertyValue(e)}function K(t){return function(){delete this[t]}}function J(t,e){return function(){this[t]=e}}function tt(t,e){return function(){var n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function et(t){return t.trim().split(/^|\s+/)}function nt(t){return t.classList||new rt(t)}function rt(t){this._node=t,this._names=et(t.getAttribute("class")||"")}function it(t,e){for(var n=nt(t),r=-1,i=e.length;++r=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function Et(t){return function(){var e=this.__on;if(e){for(var n,r=0,i=-1,a=e.length;r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var Nt=[null];function Dt(t,e){this._groups=t,this._parents=e}function Ot(){return new Dt([[document.documentElement]],Nt)}Dt.prototype=Ot.prototype={constructor:Dt,select:function(t){"function"!=typeof t&&(t=_(t));for(var e=this._groups,n=e.length,r=new Array(n),i=0;i=x&&(x=_+1);!(b=g[x])&&++x=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=F);for(var n=this._groups,r=n.length,i=new Array(r),a=0;a1?this.each((null==e?G:"function"==typeof e?Z:X)(t,e,null==n?"":n)):Q(this.node(),t)},property:function(t,e){return arguments.length>1?this.each((null==e?K:"function"==typeof e?tt:J)(t,e)):this.node()[t]},classed:function(t,e){var n=et(t+"");if(arguments.length<2){for(var r=nt(this.node()),i=-1,a=n.length;++i{}};function It(){for(var 
t,e=0,n=arguments.length,r={};e=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function Pt(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new Array(n),a=0;a=0&&e._call.call(void 0,t),e=e._next;--qt}()}finally{qt=0,function(){for(var t,e,n=zt,r=1/0;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:zt=e);Ut=t,re(r)}(),Vt=0}}function ne(){var t=Xt.now(),e=t-Wt;e>1e3&&(Gt-=e,Wt=t)}function re(t){qt||(Ht&&(Ht=clearTimeout(Ht)),t-Vt>24?(t<1/0&&(Ht=setTimeout(ee,t-Xt.now()-Gt)),$t&&($t=clearInterval($t))):($t||(Wt=Xt.now(),$t=setInterval(ne,1e3)),qt=1,Zt(ee)))}function ie(t,e,n){var r=new Jt;return e=null==e?0:+e,r.restart((n=>{r.stop(),t(n+e)}),e,n),r}Jt.prototype=te.prototype={constructor:Jt,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a function");n=(null==n?Qt():+n)+(null==e?0:+e),this._next||Ut===this||(Ut?Ut._next=this:zt=this,Ut=this),this._call=t,this._time=n,re()},stop:function(){this._call&&(this._call=null,this._time=1/0,re())}};var ae=Yt("start","end","cancel","interrupt"),oe=[];function se(t,e,n,r,i,a){var o=t.__transition;if(o){if(n in o)return}else t.__transition={};!function(t,e,n){var r,i=t.__transition;function a(c){var u,l,h,f;if(1!==n.state)return s();for(u in i)if((f=i[u]).name===n.name){if(3===f.state)return ie(a);4===f.state?(f.state=6,f.timer.stop(),f.on.call("interrupt",t,t.__data__,f.index,f.group),delete i[u]):+u0)throw new Error("too late; already scheduled");return n}function ue(t,e){var n=le(t,e);if(n.state>3)throw new Error("too late; already running");return n}function le(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}function he(t,e){return t=+t,e=+e,function(n){return t*(1-n)+e*n}}var fe,de=180/Math.PI,pe={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1};function ye(t,e,n,r,i,a){var o,s,c;return(o=Math.sqrt(t*t+e*e))&&(t/=o,e/=o),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,c/=s),t*r180?e+=360:e-t>180&&(t+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:he(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(a.rotate,o.rotate,s,c),function(t,e,n,a){t!==e?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:he(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(a.skewX,o.skewX,s,c),function(t,e,n,r,a,o){if(t!==n||e!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:he(t,n)},{i:s-2,x:he(e,r)})}else 1===n&&1===r||a.push(i(a)+"scale("+n+","+r+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,c),a=o=null,function(t){for(var e,n=-1,r=c.length;++n>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?Ue(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?Ue(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=De.exec(t))?new $e(e[1],e[2],e[3],1):(e=Oe.exec(t))?new $e(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=Be.exec(t))?Ue(e[1],e[2],e[3],e[4]):(e=Le.exec(t))?Ue(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=Ie.exec(t))?Xe(e[1],e[2]/100,e[3]/100,1):(e=Re.exec(t))?Xe(e[1],e[2]/100,e[3]/100,e[4]):Fe.hasOwnProperty(t)?ze(Fe[t]):"transparent"===t?new $e(NaN,NaN,NaN,0):null}function ze(t){return new $e(t>>16&255,t>>8&255,255&t,1)}function Ue(t,e,n,r){return r<=0&&(t=e=n=NaN),new $e(t,e,n,r)}function qe(t){return t instanceof Te||(t=Ye(t)),t?new $e((t=t.rgb()).r,t.g,t.b,t.opacity):new $e}function He(t,e,n,r){return 1===arguments.length?qe(t):new $e(t,e,n,null==r?1:r)}function $e(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function 
We(){return"#"+Ge(this.r)+Ge(this.g)+Ge(this.b)}function Ve(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Ge(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Xe(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new Qe(t,e,n,r)}function Ze(t){if(t instanceof Qe)return new Qe(t.h,t.s,t.l,t.opacity);if(t instanceof Te||(t=Ye(t)),!t)return new Qe;if(t instanceof Qe)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new Qe(o,s,c,t.opacity)}function Qe(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function Ke(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function Je(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}we(Te,Ye,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:Pe,formatHex:Pe,formatHsl:function(){return Ze(this).formatHsl()},formatRgb:je,toString:je}),we($e,He,ke(Te,{brighter:function(t){return t=null==t?Ce:Math.pow(Ce,t),new $e(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?Ee:Math.pow(Ee,t),new $e(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:We,formatHex:We,formatRgb:Ve,toString:Ve})),we(Qe,(function(t,e,n,r){return 1===arguments.length?Ze(t):new Qe(t,e,n,null==r?1:r)}),ke(Te,{brighter:function(t){return t=null==t?Ce:Math.pow(Ce,t),new Qe(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?Ee:Math.pow(Ee,t),new Qe(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new $e(Ke(t>=240?t-240:t+120,i,r),Ke(t,i,r),Ke(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const tn=t=>()=>t;function en(t,e){var n=e-t;return n?function(t,e){return function(n){return t+n*e}}(t,n):tn(isNaN(t)?e:t)}const nn=function t(e){var n=function(t){return 1==(t=+t)?en:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):tn(isNaN(e)?n:e)}}(e);function r(t,e){var r=n((t=He(t)).r,(e=He(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=en(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function rn(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;n=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=ra&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:he(n,r)})),a=on.lastIndex;return a=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?ce:ue;return function(){var 
o=a(this,t),s=o.on;s!==r&&(i=(r=s).copy()).on(e,n),o.on=i}}var En=Bt.prototype.constructor;function Cn(t){return function(){this.style.removeProperty(t)}}function Sn(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function An(t,e,n){var r,i;function a(){var a=e.apply(this,arguments);return a!==i&&(r=(i=a)&&Sn(t,a,n)),r}return a._value=e,a}function Mn(t){return function(e){this.textContent=t.call(this,e)}}function Nn(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&Mn(r)),e}return r._value=t,r}var Dn=0;function On(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function Bn(){return++Dn}var Ln=Bt.prototype;On.prototype=function(t){return Bt().transition(t)}.prototype={constructor:On,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=_(t));for(var r=this._groups,i=r.length,a=new Array(i),o=0;o2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete a[i]):o=!1;o&&delete t.__transition}}(this,t)}))},Bt.prototype.transition=function(t){var e,n;t instanceof On?(e=t._id,t=t._name):(e=Bn(),(n=In).time=Qt(),t=null==t?null:t+"");for(var r=this._groups,i=r.length,a=0;a>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?sr(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?sr(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=Zn.exec(t))?new lr(e[1],e[2],e[3],1):(e=Qn.exec(t))?new lr(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=Kn.exec(t))?sr(e[1],e[2],e[3],e[4]):(e=Jn.exec(t))?sr(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=tr.exec(t))?pr(e[1],e[2]/100,e[3]/100,1):(e=er.exec(t))?pr(e[1],e[2]/100,e[3]/100,e[4]):nr.hasOwnProperty(t)?or(nr[t]):"transparent"===t?new lr(NaN,NaN,NaN,0):null}function or(t){return new lr(t>>16&255,t>>8&255,255&t,1)}function sr(t,e,n,r){return r<=0&&(t=e=n=NaN),new lr(t,e,n,r)}function cr(t){return t instanceof qn||(t=ar(t)),t?new lr((t=t.rgb()).r,t.g,t.b,t.opacity):new lr}function ur(t,e,n,r){return 1===arguments.length?cr(t):new lr(t,e,n,null==r?1:r)}function lr(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function hr(){return"#"+dr(this.r)+dr(this.g)+dr(this.b)}function fr(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function dr(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function pr(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new gr(t,e,n,r)}function yr(t){if(t instanceof gr)return new gr(t.h,t.s,t.l,t.opacity);if(t instanceof qn||(t=ar(t)),!t)return new gr;if(t instanceof gr)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new gr(o,s,c,t.opacity)}function gr(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function mr(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}zn(qn,ar,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:rr,formatHex:rr,formatHsl:function(){return yr(this).formatHsl()},formatRgb:ir,toString:ir}),zn(lr,ur,Un(qn,{brighter:function(t){return t=null==t?$n:Math.pow($n,t),new lr(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?Hn:Math.pow(Hn,t),new 
lr(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:hr,formatHex:hr,formatRgb:fr,toString:fr})),zn(gr,(function(t,e,n,r){return 1===arguments.length?yr(t):new gr(t,e,n,null==r?1:r)}),Un(qn,{brighter:function(t){return t=null==t?$n:Math.pow($n,t),new gr(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?Hn:Math.pow(Hn,t),new gr(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new lr(mr(t>=240?t-240:t+120,i,r),mr(t,i,r),mr(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const vr=Math.PI/180,br=180/Math.PI,_r=.96422,xr=.82521,wr=4/29,kr=6/29,Tr=3*kr*kr;function Er(t){if(t instanceof Cr)return new Cr(t.l,t.a,t.b,t.opacity);if(t instanceof Br)return Lr(t);t instanceof lr||(t=cr(t));var e,n,r=Nr(t.r),i=Nr(t.g),a=Nr(t.b),o=Sr((.2225045*r+.7168786*i+.0606169*a)/1);return r===i&&i===a?e=n=o:(e=Sr((.4360747*r+.3850649*i+.1430804*a)/_r),n=Sr((.0139322*r+.0971045*i+.7141733*a)/xr)),new Cr(116*o-16,500*(e-o),200*(o-n),t.opacity)}function Cr(t,e,n,r){this.l=+t,this.a=+e,this.b=+n,this.opacity=+r}function Sr(t){return t>.008856451679035631?Math.pow(t,1/3):t/Tr+wr}function Ar(t){return t>kr?t*t*t:Tr*(t-wr)}function Mr(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function Nr(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function Dr(t){if(t instanceof Br)return new Br(t.h,t.c,t.l,t.opacity);if(t instanceof Cr||(t=Er(t)),0===t.a&&0===t.b)return new Br(NaN,0()=>t;function Rr(t,e){return function(n){return t+n*e}}function Fr(t,e){var n=e-t;return n?Rr(t,n):Ir(isNaN(t)?e:t)}function Pr(t){return function(e,n){var r=t((e=Or(e)).h,(n=Or(n)).h),i=Fr(e.c,n.c),a=Fr(e.l,n.l),o=Fr(e.opacity,n.opacity);return function(t){return e.h=r(t),e.c=i(t),e.l=a(t),e.opacity=o(t),e+""}}}const jr=Pr((function(t,e){var n=e-t;return n?Rr(t,n>180||n<-180?n-360*Math.round(n/360):n):Ir(isNaN(t)?e:t)}));Pr(Fr);var Yr=Math.sqrt(50),zr=Math.sqrt(10),Ur=Math.sqrt(2);function qr(t,e,n){var r=(e-t)/Math.max(0,n),i=Math.floor(Math.log(r)/Math.LN10),a=r/Math.pow(10,i);return i>=0?(a>=Yr?10:a>=zr?5:a>=Ur?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(a>=Yr?10:a>=zr?5:a>=Ur?2:1)}function Hr(t,e,n){var r=Math.abs(e-t)/Math.max(0,n),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),a=r/i;return a>=Yr?i*=10:a>=zr?i*=5:a>=Ur&&(i*=2),ee?1:t>=e?0:NaN}function Wr(t){let e=t,n=t,r=t;function i(t,e,i=0,a=t.length){if(i>>1;r(t[n],e)<0?i=n+1:a=n}while(it(e)-n,n=$r,r=(e,n)=>$r(t(e),n)),{left:i,center:function(t,n,r=0,a=t.length){const o=i(t,n,r,a-1);return o>r&&e(t[o-1],n)>-e(t[o],n)?o-1:o},right:function(t,e,i=0,a=t.length){if(i>>1;r(t[n],e)<=0?i=n+1:a=n}while(i>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?gi(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?gi(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=ai.exec(t))?new bi(e[1],e[2],e[3],1):(e=oi.exec(t))?new 
bi(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=si.exec(t))?gi(e[1],e[2],e[3],e[4]):(e=ci.exec(t))?gi(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=ui.exec(t))?ki(e[1],e[2]/100,e[3]/100,1):(e=li.exec(t))?ki(e[1],e[2]/100,e[3]/100,e[4]):hi.hasOwnProperty(t)?yi(hi[t]):"transparent"===t?new bi(NaN,NaN,NaN,0):null}function yi(t){return new bi(t>>16&255,t>>8&255,255&t,1)}function gi(t,e,n,r){return r<=0&&(t=e=n=NaN),new bi(t,e,n,r)}function mi(t){return t instanceof Kr||(t=pi(t)),t?new bi((t=t.rgb()).r,t.g,t.b,t.opacity):new bi}function vi(t,e,n,r){return 1===arguments.length?mi(t):new bi(t,e,n,null==r?1:r)}function bi(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function _i(){return"#"+wi(this.r)+wi(this.g)+wi(this.b)}function xi(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function wi(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function ki(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new Ei(t,e,n,r)}function Ti(t){if(t instanceof Ei)return new Ei(t.h,t.s,t.l,t.opacity);if(t instanceof Kr||(t=pi(t)),!t)return new Ei;if(t instanceof Ei)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new Ei(o,s,c,t.opacity)}function Ei(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function Ci(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function Si(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}Zr(Kr,pi,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:fi,formatHex:fi,formatHsl:function(){return Ti(this).formatHsl()},formatRgb:di,toString:di}),Zr(bi,vi,Qr(Kr,{brighter:function(t){return t=null==t?ti:Math.pow(ti,t),new bi(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?Jr:Math.pow(Jr,t),new bi(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:_i,formatHex:_i,formatRgb:xi,toString:xi})),Zr(Ei,(function(t,e,n,r){return 1===arguments.length?Ti(t):new Ei(t,e,n,null==r?1:r)}),Qr(Kr,{brighter:function(t){return t=null==t?ti:Math.pow(ti,t),new Ei(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?Jr:Math.pow(Jr,t),new Ei(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new bi(Ci(t>=240?t-240:t+120,i,r),Ci(t,i,r),Ci(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const Ai=t=>()=>t;function Mi(t,e){var n=e-t;return n?function(t,e){return function(n){return t+n*e}}(t,n):Ai(isNaN(t)?e:t)}const Ni=function t(e){var n=function(t){return 1==(t=+t)?Mi:function(e,n){return n-e?function(t,e,n){return 
t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):Ai(isNaN(e)?n:e)}}(e);function r(t,e){var r=n((t=vi(t)).r,(e=vi(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=Mi(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function Di(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;n=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=ra&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:Li(n,r)})),a=Fi.lastIndex;return ae&&(n=t,t=e,e=n),u=function(n){return Math.max(t,Math.min(e,n))}),r=c>2?Vi:Wi,i=a=null,h}function h(e){return null==e||isNaN(e=+e)?n:(i||(i=r(o.map(t),s,c)))(t(u(e)))}return h.invert=function(n){return u(e((a||(a=r(s,o.map(t),Li)))(n)))},h.domain=function(t){return arguments.length?(o=Array.from(t,Ui),l()):o.slice()},h.range=function(t){return arguments.length?(s=Array.from(t),l()):s.slice()},h.rangeRound=function(t){return s=Array.from(t),c=zi,l()},h.clamp=function(t){return arguments.length?(u=!!t||Hi,l()):u!==Hi},h.interpolate=function(t){return arguments.length?(c=t,l()):c},h.unknown=function(t){return arguments.length?(n=t,h):n},function(n,r){return t=n,e=r,l()}}()(Hi,Hi)}function Zi(t,e){switch(arguments.length){case 0:break;case 1:this.range(t);break;default:this.range(e).domain(t)}return this}var Qi,Ki=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Ji(t){if(!(e=Ki.exec(t)))throw new Error("invalid format: "+t);var e;return new ta({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function ta(t){this.fill=void 0===t.fill?" 
":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function ea(t,e){if((n=(t=e?t.toExponential(e-1):t.toExponential()).indexOf("e"))<0)return null;var n,r=t.slice(0,n);return[r.length>1?r[0]+r.slice(2):r,+t.slice(n+1)]}function na(t){return(t=ea(Math.abs(t)))?t[1]:NaN}function ra(t,e){var n=ea(t,e);if(!n)return t+"";var r=n[0],i=n[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}Ji.prototype=ta.prototype,ta.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};const ia={"%":(t,e)=>(100*t).toFixed(e),b:t=>Math.round(t).toString(2),c:t=>t+"",d:function(t){return Math.abs(t=Math.round(t))>=1e21?t.toLocaleString("en").replace(/,/g,""):t.toString(10)},e:(t,e)=>t.toExponential(e),f:(t,e)=>t.toFixed(e),g:(t,e)=>t.toPrecision(e),o:t=>Math.round(t).toString(8),p:(t,e)=>ra(100*t,e),r:ra,s:function(t,e){var n=ea(t,e);if(!n)return t+"";var r=n[0],i=n[1],a=i-(Qi=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,o=r.length;return a===o?r:a>o?r+new Array(a-o+1).join("0"):a>0?r.slice(0,a)+"."+r.slice(a):"0."+new Array(1-a).join("0")+ea(t,Math.max(0,e+a-1))[0]},X:t=>Math.round(t).toString(16).toUpperCase(),x:t=>Math.round(t).toString(16)};function aa(t){return t}var oa,sa,ca,ua=Array.prototype.map,la=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function ha(t){var e=t.domain;return t.ticks=function(t){var n=e();return function(t,e,n){var r,i,a,o,s=-1;if(n=+n,(t=+t)==(e=+e)&&n>0)return[t];if((r=e0){let n=Math.round(t/o),r=Math.round(e/o);for(n*oe&&--r,a=new Array(i=r-n+1);++se&&--r,a=new Array(i=r-n+1);++s0;){if((i=qr(c,u,n))===r)return a[o]=c,a[s]=u,e(a);if(i>0)c=Math.floor(c/i)*i,u=Math.ceil(u/i)*i;else{if(!(i<0))break;c=Math.ceil(c*i)/i,u=Math.floor(u*i)/i}r=i}return t},t}function fa(){var t=Xi();return t.copy=function(){return Gi(t,fa())},Zi.apply(t,arguments),ha(t)}oa=function(t){var e,n,r=void 0===t.grouping||void 0===t.thousands?aa:(e=ua.call(t.grouping,Number),n=t.thousands+"",function(t,r){for(var i=t.length,a=[],o=0,s=e[0],c=0;i>0&&s>0&&(c+s+1>r&&(s=Math.max(1,r-c)),a.push(t.substring(i-=s,i+s)),!((c+=s+1)>r));)s=e[o=(o+1)%e.length];return a.reverse().join(n)}),i=void 0===t.currency?"":t.currency[0]+"",a=void 0===t.currency?"":t.currency[1]+"",o=void 0===t.decimal?".":t.decimal+"",s=void 0===t.numerals?aa:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(ua.call(t.numerals,String)),c=void 0===t.percent?"%":t.percent+"",u=void 0===t.minus?"−":t.minus+"",l=void 0===t.nan?"NaN":t.nan+"";function h(t){var e=(t=Ji(t)).fill,n=t.align,h=t.sign,f=t.symbol,d=t.zero,p=t.width,y=t.comma,g=t.precision,m=t.trim,v=t.type;"n"===v?(y=!0,v="g"):ia[v]||(void 0===g&&(g=12),m=!0,v="g"),(d||"0"===e&&"="===n)&&(d=!0,e="0",n="=");var b="$"===f?i:"#"===f&&/[boxX]/.test(v)?"0"+v.toLowerCase():"",_="$"===f?a:/[%p]/.test(v)?c:"",x=ia[v],w=/[defgprs%]/.test(v);function k(t){var i,a,c,f=b,k=_;if("c"===v)k=x(t)+k,t="";else{var T=(t=+t)<0||1/t<0;if(t=isNaN(t)?l:x(Math.abs(t),g),m&&(t=function(t){t:for(var 
e,n=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),T&&0==+t&&"+"!==h&&(T=!1),f=(T?"("===h?h:u:"-"===h||"("===h?"":h)+f,k=("s"===v?la[8+Qi/3]:"")+k+(T&&"("===h?")":""),w)for(i=-1,a=t.length;++i(c=t.charCodeAt(i))||c>57){k=(46===c?o+t.slice(i+1):t.slice(i))+k,t=t.slice(0,i);break}}y&&!d&&(t=r(t,1/0));var E=f.length+t.length+k.length,C=E>1)+f+t+k+C.slice(E);break;default:t=C+f+t+k}return s(t)}return g=void 0===g?6:/[gprs]/.test(v)?Math.max(1,Math.min(21,g)):Math.max(0,Math.min(20,g)),k.toString=function(){return t+""},k}return{format:h,formatPrefix:function(t,e){var n=h(((t=Ji(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(na(e)/3))),i=Math.pow(10,-r),a=la[8+r/3];return function(t){return n(i*t)+a}}}}({thousands:",",grouping:[3],currency:["$",""]}),sa=oa.format,ca=oa.formatPrefix;class da extends Map{constructor(t,e=ya){if(super(),Object.defineProperties(this,{_intern:{value:new Map},_key:{value:e}}),null!=t)for(const[e,n]of t)this.set(e,n)}get(t){return super.get(pa(this,t))}has(t){return super.has(pa(this,t))}set(t,e){return super.set(function({_intern:t,_key:e},n){const r=e(n);return t.has(r)?t.get(r):(t.set(r,n),n)}(this,t),e)}delete(t){return super.delete(function({_intern:t,_key:e},n){const r=e(n);return t.has(r)&&(n=t.get(r),t.delete(r)),n}(this,t))}}function pa({_intern:t,_key:e},n){const r=e(n);return t.has(r)?t.get(r):n}function ya(t){return null!==t&&"object"==typeof t?t.valueOf():t}Set;const ga=Symbol("implicit");function ma(){var t=new da,e=[],n=[],r=ga;function i(i){let a=t.get(i);if(void 0===a){if(r!==ga)return r;t.set(i,a=e.push(i)-1)}return n[a%n.length]}return i.domain=function(n){if(!arguments.length)return e.slice();e=[],t=new da;for(const r of n)t.has(r)||t.set(r,e.push(r)-1);return i},i.range=function(t){return arguments.length?(n=Array.from(t),i):n.slice()},i.unknown=function(t){return arguments.length?(r=t,i):r},i.copy=function(){return ma(e,n).unknown(r)},Zi.apply(i,arguments),i}const va=1e3,ba=6e4,_a=36e5,xa=864e5,wa=6048e5,ka=31536e6;var Ta=new Date,Ea=new Date;function Ca(t,e,n,r){function i(e){return t(e=0===arguments.length?new Date:new Date(+e)),e}return i.floor=function(e){return t(e=new Date(+e)),e},i.ceil=function(n){return t(n=new Date(n-1)),e(n,1),t(n),n},i.round=function(t){var e=i(t),n=i.ceil(t);return t-e0))return s;do{s.push(o=new Date(+n)),e(n,a),t(n)}while(o=e)for(;t(e),!n(e);)e.setTime(e-1)}),(function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;e(t,-1),!n(t););else for(;--r>=0;)for(;e(t,1),!n(t););}))},n&&(i.count=function(e,r){return Ta.setTime(+e),Ea.setTime(+r),t(Ta),t(Ea),Math.floor(n(Ta,Ea))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(e){return r(e)%t==0}:function(e){return i.count(0,e)%t==0}):i:null}),i}var Sa=Ca((function(){}),(function(t,e){t.setTime(+t+e)}),(function(t,e){return e-t}));Sa.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?Ca((function(e){e.setTime(Math.floor(e/t)*t)}),(function(e,n){e.setTime(+e+n*t)}),(function(e,n){return(n-e)/t})):Sa:null};const Aa=Sa;Sa.range;var Ma=Ca((function(t){t.setTime(t-t.getMilliseconds())}),(function(t,e){t.setTime(+t+e*va)}),(function(t,e){return(e-t)/va}),(function(t){return t.getUTCSeconds()}));const Na=Ma;Ma.range;var Da=Ca((function(t){t.setTime(t-t.getMilliseconds()-t.getSeconds()*va)}),(function(t,e){t.setTime(+t+e*ba)}),(function(t,e){return(e-t)/ba}),(function(t){return t.getMinutes()}));const Oa=Da;Da.range;var 
Ba=Ca((function(t){t.setTime(t-t.getMilliseconds()-t.getSeconds()*va-t.getMinutes()*ba)}),(function(t,e){t.setTime(+t+e*_a)}),(function(t,e){return(e-t)/_a}),(function(t){return t.getHours()}));const La=Ba;Ba.range;var Ia=Ca((t=>t.setHours(0,0,0,0)),((t,e)=>t.setDate(t.getDate()+e)),((t,e)=>(e-t-(e.getTimezoneOffset()-t.getTimezoneOffset())*ba)/xa),(t=>t.getDate()-1));const Ra=Ia;function Fa(t){return Ca((function(e){e.setDate(e.getDate()-(e.getDay()+7-t)%7),e.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+7*e)}),(function(t,e){return(e-t-(e.getTimezoneOffset()-t.getTimezoneOffset())*ba)/wa}))}Ia.range;var Pa=Fa(0),ja=Fa(1),Ya=Fa(2),za=Fa(3),Ua=Fa(4),qa=Fa(5),Ha=Fa(6),$a=(Pa.range,ja.range,Ya.range,za.range,Ua.range,qa.range,Ha.range,Ca((function(t){t.setDate(1),t.setHours(0,0,0,0)}),(function(t,e){t.setMonth(t.getMonth()+e)}),(function(t,e){return e.getMonth()-t.getMonth()+12*(e.getFullYear()-t.getFullYear())}),(function(t){return t.getMonth()})));const Wa=$a;$a.range;var Va=Ca((function(t){t.setMonth(0,1),t.setHours(0,0,0,0)}),(function(t,e){t.setFullYear(t.getFullYear()+e)}),(function(t,e){return e.getFullYear()-t.getFullYear()}),(function(t){return t.getFullYear()}));Va.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Ca((function(e){e.setFullYear(Math.floor(e.getFullYear()/t)*t),e.setMonth(0,1),e.setHours(0,0,0,0)}),(function(e,n){e.setFullYear(e.getFullYear()+n*t)})):null};const Ga=Va;Va.range;var Xa=Ca((function(t){t.setUTCSeconds(0,0)}),(function(t,e){t.setTime(+t+e*ba)}),(function(t,e){return(e-t)/ba}),(function(t){return t.getUTCMinutes()}));const Za=Xa;Xa.range;var Qa=Ca((function(t){t.setUTCMinutes(0,0,0)}),(function(t,e){t.setTime(+t+e*_a)}),(function(t,e){return(e-t)/_a}),(function(t){return t.getUTCHours()}));const Ka=Qa;Qa.range;var Ja=Ca((function(t){t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+e)}),(function(t,e){return(e-t)/xa}),(function(t){return t.getUTCDate()-1}));const to=Ja;function eo(t){return Ca((function(e){e.setUTCDate(e.getUTCDate()-(e.getUTCDay()+7-t)%7),e.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+7*e)}),(function(t,e){return(e-t)/wa}))}Ja.range;var no=eo(0),ro=eo(1),io=eo(2),ao=eo(3),oo=eo(4),so=eo(5),co=eo(6),uo=(no.range,ro.range,io.range,ao.range,oo.range,so.range,co.range,Ca((function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCMonth(t.getUTCMonth()+e)}),(function(t,e){return e.getUTCMonth()-t.getUTCMonth()+12*(e.getUTCFullYear()-t.getUTCFullYear())}),(function(t){return t.getUTCMonth()})));const lo=uo;uo.range;var ho=Ca((function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCFullYear(t.getUTCFullYear()+e)}),(function(t,e){return e.getUTCFullYear()-t.getUTCFullYear()}),(function(t){return t.getUTCFullYear()}));ho.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Ca((function(e){e.setUTCFullYear(Math.floor(e.getUTCFullYear()/t)*t),e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),(function(e,n){e.setUTCFullYear(e.getUTCFullYear()+n*t)})):null};const fo=ho;function po(t,e,n,r,i,a){const o=[[Na,1,va],[Na,5,5e3],[Na,15,15e3],[Na,30,3e4],[a,1,ba],[a,5,3e5],[a,15,9e5],[a,30,18e5],[i,1,_a],[i,3,108e5],[i,6,216e5],[i,12,432e5],[r,1,xa],[r,2,1728e5],[n,1,wa],[e,1,2592e6],[e,3,7776e6],[t,1,ka]];function s(e,n,r){const i=Math.abs(n-e)/r,a=Wr((([,,t])=>t)).right(o,i);if(a===o.length)return t.every(Hr(e/ka,n/ka,r));if(0===a)return Aa.every(Math.max(Hr(e,n,r),1));const[s,c]=o[i/o[a-1][2][t.toLowerCase(),e])))}function Oo(t,e,n){var 
r=Eo.exec(e.slice(n,n+1));return r?(t.w=+r[0],n+r[0].length):-1}function Bo(t,e,n){var r=Eo.exec(e.slice(n,n+1));return r?(t.u=+r[0],n+r[0].length):-1}function Lo(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.U=+r[0],n+r[0].length):-1}function Io(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.V=+r[0],n+r[0].length):-1}function Ro(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.W=+r[0],n+r[0].length):-1}function Fo(t,e,n){var r=Eo.exec(e.slice(n,n+4));return r?(t.y=+r[0],n+r[0].length):-1}function Po(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.y=+r[0]+(+r[0]>68?1900:2e3),n+r[0].length):-1}function jo(t,e,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(e.slice(n,n+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function Yo(t,e,n){var r=Eo.exec(e.slice(n,n+1));return r?(t.q=3*r[0]-3,n+r[0].length):-1}function zo(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.m=r[0]-1,n+r[0].length):-1}function Uo(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.d=+r[0],n+r[0].length):-1}function qo(t,e,n){var r=Eo.exec(e.slice(n,n+3));return r?(t.m=0,t.d=+r[0],n+r[0].length):-1}function Ho(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.H=+r[0],n+r[0].length):-1}function $o(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.M=+r[0],n+r[0].length):-1}function Wo(t,e,n){var r=Eo.exec(e.slice(n,n+2));return r?(t.S=+r[0],n+r[0].length):-1}function Vo(t,e,n){var r=Eo.exec(e.slice(n,n+3));return r?(t.L=+r[0],n+r[0].length):-1}function Go(t,e,n){var r=Eo.exec(e.slice(n,n+6));return r?(t.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function Xo(t,e,n){var r=Co.exec(e.slice(n,n+1));return r?n+r[0].length:-1}function Zo(t,e,n){var r=Eo.exec(e.slice(n));return r?(t.Q=+r[0],n+r[0].length):-1}function Qo(t,e,n){var r=Eo.exec(e.slice(n));return r?(t.s=+r[0],n+r[0].length):-1}function Ko(t,e){return Ao(t.getDate(),e,2)}function Jo(t,e){return Ao(t.getHours(),e,2)}function ts(t,e){return Ao(t.getHours()%12||12,e,2)}function es(t,e){return Ao(1+Ra.count(Ga(t),t),e,3)}function ns(t,e){return Ao(t.getMilliseconds(),e,3)}function rs(t,e){return ns(t,e)+"000"}function is(t,e){return Ao(t.getMonth()+1,e,2)}function as(t,e){return Ao(t.getMinutes(),e,2)}function os(t,e){return Ao(t.getSeconds(),e,2)}function ss(t){var e=t.getDay();return 0===e?7:e}function cs(t,e){return Ao(Pa.count(Ga(t)-1,t),e,2)}function us(t){var e=t.getDay();return e>=4||0===e?Ua(t):Ua.ceil(t)}function ls(t,e){return t=us(t),Ao(Ua.count(Ga(t),t)+(4===Ga(t).getDay()),e,2)}function hs(t){return t.getDay()}function fs(t,e){return Ao(ja.count(Ga(t)-1,t),e,2)}function ds(t,e){return Ao(t.getFullYear()%100,e,2)}function ps(t,e){return Ao((t=us(t)).getFullYear()%100,e,2)}function ys(t,e){return Ao(t.getFullYear()%1e4,e,4)}function gs(t,e){var n=t.getDay();return Ao((t=n>=4||0===n?Ua(t):Ua.ceil(t)).getFullYear()%1e4,e,4)}function ms(t){var e=t.getTimezoneOffset();return(e>0?"-":(e*=-1,"+"))+Ao(e/60|0,"0",2)+Ao(e%60,"0",2)}function vs(t,e){return Ao(t.getUTCDate(),e,2)}function bs(t,e){return Ao(t.getUTCHours(),e,2)}function _s(t,e){return Ao(t.getUTCHours()%12||12,e,2)}function xs(t,e){return Ao(1+to.count(fo(t),t),e,3)}function ws(t,e){return Ao(t.getUTCMilliseconds(),e,3)}function ks(t,e){return ws(t,e)+"000"}function Ts(t,e){return Ao(t.getUTCMonth()+1,e,2)}function Es(t,e){return Ao(t.getUTCMinutes(),e,2)}function Cs(t,e){return Ao(t.getUTCSeconds(),e,2)}function Ss(t){var e=t.getUTCDay();return 0===e?7:e}function As(t,e){return Ao(no.count(fo(t)-1,t),e,2)}function Ms(t){var e=t.getUTCDay();return e>=4||0===e?oo(t):oo.ceil(t)}function 
Ns(t,e){return t=Ms(t),Ao(oo.count(fo(t),t)+(4===fo(t).getUTCDay()),e,2)}function Ds(t){return t.getUTCDay()}function Os(t,e){return Ao(ro.count(fo(t)-1,t),e,2)}function Bs(t,e){return Ao(t.getUTCFullYear()%100,e,2)}function Ls(t,e){return Ao((t=Ms(t)).getUTCFullYear()%100,e,2)}function Is(t,e){return Ao(t.getUTCFullYear()%1e4,e,4)}function Rs(t,e){var n=t.getUTCDay();return Ao((t=n>=4||0===n?oo(t):oo.ceil(t)).getUTCFullYear()%1e4,e,4)}function Fs(){return"+0000"}function Ps(){return"%"}function js(t){return+t}function Ys(t){return Math.floor(+t/1e3)}function zs(t){return new Date(t)}function Us(t){return t instanceof Date?+t:+new Date(+t)}function qs(t,e,n,r,i,a,o,s,c,u){var l=Xi(),h=l.invert,f=l.domain,d=u(".%L"),p=u(":%S"),y=u("%I:%M"),g=u("%I %p"),m=u("%a %d"),v=u("%b %d"),b=u("%B"),_=u("%Y");function x(t){return(c(t)=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:js,s:Ys,S:os,u:ss,U:cs,V:ls,w:hs,W:fs,x:null,X:null,y:ds,Y:ys,Z:ms,"%":Ps},_={a:function(t){return o[t.getUTCDay()]},A:function(t){return a[t.getUTCDay()]},b:function(t){return c[t.getUTCMonth()]},B:function(t){return s[t.getUTCMonth()]},c:null,d:vs,e:vs,f:ks,g:Ls,G:Rs,H:bs,I:_s,j:xs,L:ws,m:Ts,M:Es,p:function(t){return i[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:js,s:Ys,S:Cs,u:Ss,U:As,V:Ns,w:Ds,W:Os,x:null,X:null,y:Bs,Y:Is,Z:Fs,"%":Ps},x={a:function(t,e,n){var r=d.exec(e.slice(n));return r?(t.w=p.get(r[0].toLowerCase()),n+r[0].length):-1},A:function(t,e,n){var r=h.exec(e.slice(n));return r?(t.w=f.get(r[0].toLowerCase()),n+r[0].length):-1},b:function(t,e,n){var r=m.exec(e.slice(n));return r?(t.m=v.get(r[0].toLowerCase()),n+r[0].length):-1},B:function(t,e,n){var r=y.exec(e.slice(n));return r?(t.m=g.get(r[0].toLowerCase()),n+r[0].length):-1},c:function(t,n,r){return T(t,e,n,r)},d:Uo,e:Uo,f:Go,g:Po,G:Fo,H:Ho,I:Ho,j:qo,L:Vo,m:zo,M:$o,p:function(t,e,n){var r=u.exec(e.slice(n));return r?(t.p=l.get(r[0].toLowerCase()),n+r[0].length):-1},q:Yo,Q:Zo,s:Qo,S:Wo,u:Bo,U:Lo,V:Io,w:Oo,W:Ro,x:function(t,e,r){return T(t,n,e,r)},X:function(t,e,n){return T(t,r,e,n)},y:Po,Y:Fo,Z:jo,"%":Xo};function w(t,e){return function(n){var r,i,a,o=[],s=-1,c=0,u=t.length;for(n instanceof Date||(n=new Date(+n));++s53)return null;"w"in a||(a.w=1),"Z"in a?(i=(r=_o(xo(a.y,0,1))).getUTCDay(),r=i>4||0===i?ro.ceil(r):ro(r),r=to.offset(r,7*(a.V-1)),a.y=r.getUTCFullYear(),a.m=r.getUTCMonth(),a.d=r.getUTCDate()+(a.w+6)%7):(i=(r=bo(xo(a.y,0,1))).getDay(),r=i>4||0===i?ja.ceil(r):ja(r),r=Ra.offset(r,7*(a.V-1)),a.y=r.getFullYear(),a.m=r.getMonth(),a.d=r.getDate()+(a.w+6)%7)}else("W"in a||"U"in a)&&("w"in a||(a.w="u"in a?a.u%7:"W"in a?1:0),i="Z"in a?_o(xo(a.y,0,1)).getUTCDay():bo(xo(a.y,0,1)).getDay(),a.m=0,a.d="W"in a?(a.w+6)%7+7*a.W-(i+5)%7:a.w+7*a.U-(i+6)%7);return"Z"in a?(a.H+=a.Z/100|0,a.M+=a.Z%100,_o(a)):bo(a)}}function T(t,e,n,r){for(var i,a,o=0,s=e.length,c=n.length;o=c)return-1;if(37===(i=e.charCodeAt(o++))){if(i=e.charAt(o++),!(a=x[i in To?e.charAt(o++):i])||(r=a(t,n,r))<0)return-1}else if(i!=n.charCodeAt(r++))return-1}return r}return b.x=w(n,b),b.X=w(r,b),b.c=w(e,b),_.x=w(n,_),_.X=w(r,_),_.c=w(e,_),{format:function(t){var e=w(t+="",b);return e.toString=function(){return t},e},parse:function(t){var e=k(t+="",!1);return e.toString=function(){return t},e},utcFormat:function(t){var e=w(t+="",_);return e.toString=function(){return t},e},utcParse:function(t){var e=k(t+="",!0);return e.toString=function(){return t},e}}}({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S 
%p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]}),ko=wo.format,wo.parse,wo.utcFormat,wo.utcParse;var Qs=Array.prototype.find;function Ks(){return this.firstElementChild}var Js=Array.prototype.filter;function tc(){return Array.from(this.children)}function ec(t){return new Array(t.length)}function nc(t,e){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=e}function rc(t){return function(){return t}}function ic(t,e,n,r,i,a){for(var o,s=0,c=e.length,u=a.length;se?1:t>=e?0:NaN}nc.prototype={constructor:nc,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,e){return this._parent.insertBefore(t,e)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var uc="http://www.w3.org/1999/xhtml";const lc={svg:"http://www.w3.org/2000/svg",xhtml:uc,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function hc(t){var e=t+="",n=e.indexOf(":");return n>=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),lc.hasOwnProperty(e)?{space:lc[e],local:t}:t}function fc(t){return function(){this.removeAttribute(t)}}function dc(t){return function(){this.removeAttributeNS(t.space,t.local)}}function pc(t,e){return function(){this.setAttribute(t,e)}}function yc(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function gc(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function mc(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}function vc(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function bc(t){return function(){this.style.removeProperty(t)}}function _c(t,e,n){return function(){this.style.setProperty(t,e,n)}}function xc(t,e,n){return function(){var r=e.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,n)}}function wc(t,e){return t.style.getPropertyValue(e)||vc(t).getComputedStyle(t,null).getPropertyValue(e)}function kc(t){return function(){delete this[t]}}function Tc(t,e){return function(){this[t]=e}}function Ec(t,e){return function(){var n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function Cc(t){return t.trim().split(/^|\s+/)}function Sc(t){return t.classList||new Ac(t)}function Ac(t){this._node=t,this._names=Cc(t.getAttribute("class")||"")}function Mc(t,e){for(var n=Sc(t),r=-1,i=e.length;++r=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function Zc(t){return function(){var e=this.__on;if(e){for(var n,r=0,i=-1,a=e.length;r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var eu=[null];function nu(t,e){this._groups=t,this._parents=e}function ru(){return new nu([[document.documentElement]],eu)}nu.prototype=ru.prototype={constructor:nu,select:function(t){"function"!=typeof t&&(t=$s(t));for(var e=this._groups,n=e.length,r=new 
Array(n),i=0;i=x&&(x=_+1);!(b=g[x])&&++x=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=cc);for(var n=this._groups,r=n.length,i=new Array(r),a=0;a1?this.each((null==e?bc:"function"==typeof e?xc:_c)(t,e,null==n?"":n)):wc(this.node(),t)},property:function(t,e){return arguments.length>1?this.each((null==e?kc:"function"==typeof e?Ec:Tc)(t,e)):this.node()[t]},classed:function(t,e){var n=Cc(t+"");if(arguments.length<2){for(var r=Sc(this.node()),i=-1,a=n.length;++iuu)if(Math.abs(l*s-c*u)>uu&&i){var f=n-a,d=r-o,p=s*s+c*c,y=f*f+d*d,g=Math.sqrt(p),m=Math.sqrt(h),v=i*Math.tan((su-Math.acos((p+h-y)/(2*g*m)))/2),b=v/m,_=v/g;Math.abs(b-1)>uu&&(this._+="L"+(t+b*u)+","+(e+b*l)),this._+="A"+i+","+i+",0,0,"+ +(l*f>u*d)+","+(this._x1=t+_*s)+","+(this._y1=e+_*c)}else this._+="L"+(this._x1=t)+","+(this._y1=e)},arc:function(t,e,n,r,i,a){t=+t,e=+e,a=!!a;var o=(n=+n)*Math.cos(r),s=n*Math.sin(r),c=t+o,u=e+s,l=1^a,h=a?r-i:i-r;if(n<0)throw new Error("negative radius: "+n);null===this._x1?this._+="M"+c+","+u:(Math.abs(this._x1-c)>uu||Math.abs(this._y1-u)>uu)&&(this._+="L"+c+","+u),n&&(h<0&&(h=h%cu+cu),h>lu?this._+="A"+n+","+n+",0,1,"+l+","+(t-o)+","+(e-s)+"A"+n+","+n+",0,1,"+l+","+(this._x1=c)+","+(this._y1=u):h>uu&&(this._+="A"+n+","+n+",0,"+ +(h>=su)+","+l+","+(this._x1=t+n*Math.cos(i))+","+(this._y1=e+n*Math.sin(i))))},rect:function(t,e,n,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+e)+"h"+ +n+"v"+ +r+"h"+-n+"Z"},toString:function(){return this._}};const du=fu;function pu(t){return function(){return t}}var yu=Math.abs,gu=Math.atan2,mu=Math.cos,vu=Math.max,bu=Math.min,_u=Math.sin,xu=Math.sqrt,wu=1e-12,ku=Math.PI,Tu=ku/2,Eu=2*ku;function Cu(t){return t>1?0:t<-1?ku:Math.acos(t)}function Su(t){return t>=1?Tu:t<=-1?-Tu:Math.asin(t)}function Au(t){return t.innerRadius}function Mu(t){return t.outerRadius}function Nu(t){return t.startAngle}function Du(t){return t.endAngle}function Ou(t){return t&&t.padAngle}function Bu(t,e,n,r,i,a,o,s){var c=n-t,u=r-e,l=o-i,h=s-a,f=h*c-l*u;if(!(f*fN*N+D*D&&(T=C,E=S),{cx:T,cy:E,x01:-l,y01:-h,x11:T*(i/x-1),y11:E*(i/x-1)}}function Iu(){var t=Au,e=Mu,n=pu(0),r=null,i=Nu,a=Du,o=Ou,s=null;function c(){var c,u,l=+t.apply(this,arguments),h=+e.apply(this,arguments),f=i.apply(this,arguments)-Tu,d=a.apply(this,arguments)-Tu,p=yu(d-f),y=d>f;if(s||(s=c=du()),hwu)if(p>Eu-wu)s.moveTo(h*mu(f),h*_u(f)),s.arc(0,0,h,f,d,!y),l>wu&&(s.moveTo(l*mu(d),l*_u(d)),s.arc(0,0,l,d,f,y));else{var g,m,v=f,b=d,_=f,x=d,w=p,k=p,T=o.apply(this,arguments)/2,E=T>wu&&(r?+r.apply(this,arguments):xu(l*l+h*h)),C=bu(yu(h-l)/2,+n.apply(this,arguments)),S=C,A=C;if(E>wu){var M=Su(E/l*_u(T)),N=Su(E/h*_u(T));(w-=2*M)>wu?(_+=M*=y?1:-1,x-=M):(w=0,_=x=(f+d)/2),(k-=2*N)>wu?(v+=N*=y?1:-1,b-=N):(k=0,v=b=(f+d)/2)}var D=h*mu(v),O=h*_u(v),B=l*mu(x),L=l*_u(x);if(C>wu){var I,R=h*mu(b),F=h*_u(b),P=l*mu(_),j=l*_u(_);if(pwu?A>wu?(g=Lu(P,j,D,O,h,A,y),m=Lu(R,F,B,L,h,A,y),s.moveTo(g.cx+g.x01,g.cy+g.y01),Awu&&w>wu?S>wu?(g=Lu(B,L,R,F,l,-S,y),m=Lu(D,O,P,j,l,-S,y),s.lineTo(g.cx+g.x01,g.cy+g.y01),St?1:e>=t?0:NaN}function qu(t){return t}function Hu(){}function $u(t,e,n){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+e)/6,(t._y0+4*t._y1+n)/6)}function Wu(t){this._context=t}function Vu(t){return new Wu(t)}function Gu(t){this._context=t}function Xu(t){this._context=t}function Zu(t){this._context=t}function Qu(t){return t<0?-1:1}function Ku(t,e,n){var 
r=t._x1-t._x0,i=e-t._x1,a=(t._y1-t._y0)/(r||i<0&&-0),o=(n-t._y1)/(i||r<0&&-0),s=(a*i+o*r)/(r+i);return(Qu(a)+Qu(o))*Math.min(Math.abs(a),Math.abs(o),.5*Math.abs(s))||0}function Ju(t,e){var n=t._x1-t._x0;return n?(3*(t._y1-t._y0)/n-e)/2:e}function tl(t,e,n){var r=t._x0,i=t._y0,a=t._x1,o=t._y1,s=(a-r)/3;t._context.bezierCurveTo(r+s,i+s*e,a-s,o-s*n,a,o)}function el(t){this._context=t}function nl(t){this._context=new rl(t)}function rl(t){this._context=t}function il(t){this._context=t}function al(t){var e,n,r=t.length-1,i=new Array(r),a=new Array(r),o=new Array(r);for(i[0]=0,a[0]=2,o[0]=t[0]+2*t[1],e=1;e=0;--e)i[e]=(o[e]-i[e+1])/a[e];for(a[r-1]=(t[r]+i[r-1])/2,e=0;e=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,e),this._context.lineTo(t,e);else{var n=this._x*(1-this._t)+t*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,e)}}this._x=t,this._y=e}};var sl=new Date,cl=new Date;function ul(t,e,n,r){function i(e){return t(e=0===arguments.length?new Date:new Date(+e)),e}return i.floor=function(e){return t(e=new Date(+e)),e},i.ceil=function(n){return t(n=new Date(n-1)),e(n,1),t(n),n},i.round=function(t){var e=i(t),n=i.ceil(t);return t-e0))return s;do{s.push(o=new Date(+n)),e(n,a),t(n)}while(o=e)for(;t(e),!n(e);)e.setTime(e-1)}),(function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;e(t,-1),!n(t););else for(;--r>=0;)for(;e(t,1),!n(t););}))},n&&(i.count=function(e,r){return sl.setTime(+e),cl.setTime(+r),t(sl),t(cl),Math.floor(n(sl,cl))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(e){return r(e)%t==0}:function(e){return i.count(0,e)%t==0}):i:null}),i}const ll=864e5,hl=6048e5;function fl(t){return ul((function(e){e.setUTCDate(e.getUTCDate()-(e.getUTCDay()+7-t)%7),e.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+7*e)}),(function(t,e){return(e-t)/hl}))}var dl=fl(0),pl=fl(1),yl=fl(2),gl=fl(3),ml=fl(4),vl=fl(5),bl=fl(6),_l=(dl.range,pl.range,yl.range,gl.range,ml.range,vl.range,bl.range,ul((function(t){t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+e)}),(function(t,e){return(e-t)/ll}),(function(t){return t.getUTCDate()-1})));const xl=_l;function wl(t){return ul((function(e){e.setDate(e.getDate()-(e.getDay()+7-t)%7),e.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+7*e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/hl}))}_l.range;var kl=wl(0),Tl=wl(1),El=wl(2),Cl=wl(3),Sl=wl(4),Al=wl(5),Ml=wl(6),Nl=(kl.range,Tl.range,El.range,Cl.range,Sl.range,Al.range,Ml.range,ul((t=>t.setHours(0,0,0,0)),((t,e)=>t.setDate(t.getDate()+e)),((t,e)=>(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/ll),(t=>t.getDate()-1)));const Dl=Nl;Nl.range;var Ol=ul((function(t){t.setMonth(0,1),t.setHours(0,0,0,0)}),(function(t,e){t.setFullYear(t.getFullYear()+e)}),(function(t,e){return e.getFullYear()-t.getFullYear()}),(function(t){return t.getFullYear()}));Ol.every=function(t){return isFinite(t=Math.floor(t))&&t>0?ul((function(e){e.setFullYear(Math.floor(e.getFullYear()/t)*t),e.setMonth(0,1),e.setHours(0,0,0,0)}),(function(e,n){e.setFullYear(e.getFullYear()+n*t)})):null};const Bl=Ol;Ol.range;var Ll=ul((function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCFullYear(t.getUTCFullYear()+e)}),(function(t,e){return e.getUTCFullYear()-t.getUTCFullYear()}),(function(t){return 
t.getUTCFullYear()}));Ll.every=function(t){return isFinite(t=Math.floor(t))&&t>0?ul((function(e){e.setUTCFullYear(Math.floor(e.getUTCFullYear()/t)*t),e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),(function(e,n){e.setUTCFullYear(e.getUTCFullYear()+n*t)})):null};const Il=Ll;function Rl(t){if(0<=t.y&&t.y<100){var e=new Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return e.setFullYear(t.y),e}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function Fl(t){if(0<=t.y&&t.y<100){var e=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return e.setUTCFullYear(t.y),e}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function Pl(t,e,n){return{y:t,m:e,d:n,H:0,M:0,S:0,L:0}}Ll.range;var jl,Yl,zl={"-":"",_:" ",0:"0"},Ul=/^\s*\d+/,ql=/^%/,Hl=/[\\^$*+?|[\]().{}]/g;function $l(t,e,n){var r=t<0?"-":"",i=(r?-t:t)+"",a=i.length;return r+(a[t.toLowerCase(),e])))}function Xl(t,e,n){var r=Ul.exec(e.slice(n,n+1));return r?(t.w=+r[0],n+r[0].length):-1}function Zl(t,e,n){var r=Ul.exec(e.slice(n,n+1));return r?(t.u=+r[0],n+r[0].length):-1}function Ql(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.U=+r[0],n+r[0].length):-1}function Kl(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.V=+r[0],n+r[0].length):-1}function Jl(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.W=+r[0],n+r[0].length):-1}function th(t,e,n){var r=Ul.exec(e.slice(n,n+4));return r?(t.y=+r[0],n+r[0].length):-1}function eh(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.y=+r[0]+(+r[0]>68?1900:2e3),n+r[0].length):-1}function nh(t,e,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(e.slice(n,n+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function rh(t,e,n){var r=Ul.exec(e.slice(n,n+1));return r?(t.q=3*r[0]-3,n+r[0].length):-1}function ih(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.m=r[0]-1,n+r[0].length):-1}function ah(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.d=+r[0],n+r[0].length):-1}function oh(t,e,n){var r=Ul.exec(e.slice(n,n+3));return r?(t.m=0,t.d=+r[0],n+r[0].length):-1}function sh(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.H=+r[0],n+r[0].length):-1}function ch(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.M=+r[0],n+r[0].length):-1}function uh(t,e,n){var r=Ul.exec(e.slice(n,n+2));return r?(t.S=+r[0],n+r[0].length):-1}function lh(t,e,n){var r=Ul.exec(e.slice(n,n+3));return r?(t.L=+r[0],n+r[0].length):-1}function hh(t,e,n){var r=Ul.exec(e.slice(n,n+6));return r?(t.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function fh(t,e,n){var r=ql.exec(e.slice(n,n+1));return r?n+r[0].length:-1}function dh(t,e,n){var r=Ul.exec(e.slice(n));return r?(t.Q=+r[0],n+r[0].length):-1}function ph(t,e,n){var r=Ul.exec(e.slice(n));return r?(t.s=+r[0],n+r[0].length):-1}function yh(t,e){return $l(t.getDate(),e,2)}function gh(t,e){return $l(t.getHours(),e,2)}function mh(t,e){return $l(t.getHours()%12||12,e,2)}function vh(t,e){return $l(1+Dl.count(Bl(t),t),e,3)}function bh(t,e){return $l(t.getMilliseconds(),e,3)}function _h(t,e){return bh(t,e)+"000"}function xh(t,e){return $l(t.getMonth()+1,e,2)}function wh(t,e){return $l(t.getMinutes(),e,2)}function kh(t,e){return $l(t.getSeconds(),e,2)}function Th(t){var e=t.getDay();return 0===e?7:e}function Eh(t,e){return $l(kl.count(Bl(t)-1,t),e,2)}function Ch(t){var e=t.getDay();return e>=4||0===e?Sl(t):Sl.ceil(t)}function Sh(t,e){return t=Ch(t),$l(Sl.count(Bl(t),t)+(4===Bl(t).getDay()),e,2)}function Ah(t){return t.getDay()}function Mh(t,e){return $l(Tl.count(Bl(t)-1,t),e,2)}function Nh(t,e){return $l(t.getFullYear()%100,e,2)}function Dh(t,e){return $l((t=Ch(t)).getFullYear()%100,e,2)}function Oh(t,e){return 
$l(t.getFullYear()%1e4,e,4)}function Bh(t,e){var n=t.getDay();return $l((t=n>=4||0===n?Sl(t):Sl.ceil(t)).getFullYear()%1e4,e,4)}function Lh(t){var e=t.getTimezoneOffset();return(e>0?"-":(e*=-1,"+"))+$l(e/60|0,"0",2)+$l(e%60,"0",2)}function Ih(t,e){return $l(t.getUTCDate(),e,2)}function Rh(t,e){return $l(t.getUTCHours(),e,2)}function Fh(t,e){return $l(t.getUTCHours()%12||12,e,2)}function Ph(t,e){return $l(1+xl.count(Il(t),t),e,3)}function jh(t,e){return $l(t.getUTCMilliseconds(),e,3)}function Yh(t,e){return jh(t,e)+"000"}function zh(t,e){return $l(t.getUTCMonth()+1,e,2)}function Uh(t,e){return $l(t.getUTCMinutes(),e,2)}function qh(t,e){return $l(t.getUTCSeconds(),e,2)}function Hh(t){var e=t.getUTCDay();return 0===e?7:e}function $h(t,e){return $l(dl.count(Il(t)-1,t),e,2)}function Wh(t){var e=t.getUTCDay();return e>=4||0===e?ml(t):ml.ceil(t)}function Vh(t,e){return t=Wh(t),$l(ml.count(Il(t),t)+(4===Il(t).getUTCDay()),e,2)}function Gh(t){return t.getUTCDay()}function Xh(t,e){return $l(pl.count(Il(t)-1,t),e,2)}function Zh(t,e){return $l(t.getUTCFullYear()%100,e,2)}function Qh(t,e){return $l((t=Wh(t)).getUTCFullYear()%100,e,2)}function Kh(t,e){return $l(t.getUTCFullYear()%1e4,e,4)}function Jh(t,e){var n=t.getUTCDay();return $l((t=n>=4||0===n?ml(t):ml.ceil(t)).getUTCFullYear()%1e4,e,4)}function tf(){return"+0000"}function ef(){return"%"}function nf(t){return+t}function rf(t){return Math.floor(+t/1e3)}jl=function(t){var e=t.dateTime,n=t.date,r=t.time,i=t.periods,a=t.days,o=t.shortDays,s=t.months,c=t.shortMonths,u=Vl(i),l=Gl(i),h=Vl(a),f=Gl(a),d=Vl(o),p=Gl(o),y=Vl(s),g=Gl(s),m=Vl(c),v=Gl(c),b={a:function(t){return o[t.getDay()]},A:function(t){return a[t.getDay()]},b:function(t){return c[t.getMonth()]},B:function(t){return s[t.getMonth()]},c:null,d:yh,e:yh,f:_h,g:Dh,G:Bh,H:gh,I:mh,j:vh,L:bh,m:xh,M:wh,p:function(t){return i[+(t.getHours()>=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:nf,s:rf,S:kh,u:Th,U:Eh,V:Sh,w:Ah,W:Mh,x:null,X:null,y:Nh,Y:Oh,Z:Lh,"%":ef},_={a:function(t){return o[t.getUTCDay()]},A:function(t){return a[t.getUTCDay()]},b:function(t){return c[t.getUTCMonth()]},B:function(t){return s[t.getUTCMonth()]},c:null,d:Ih,e:Ih,f:Yh,g:Qh,G:Jh,H:Rh,I:Fh,j:Ph,L:jh,m:zh,M:Uh,p:function(t){return i[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:nf,s:rf,S:qh,u:Hh,U:$h,V:Vh,w:Gh,W:Xh,x:null,X:null,y:Zh,Y:Kh,Z:tf,"%":ef},x={a:function(t,e,n){var r=d.exec(e.slice(n));return r?(t.w=p.get(r[0].toLowerCase()),n+r[0].length):-1},A:function(t,e,n){var r=h.exec(e.slice(n));return r?(t.w=f.get(r[0].toLowerCase()),n+r[0].length):-1},b:function(t,e,n){var r=m.exec(e.slice(n));return r?(t.m=v.get(r[0].toLowerCase()),n+r[0].length):-1},B:function(t,e,n){var r=y.exec(e.slice(n));return r?(t.m=g.get(r[0].toLowerCase()),n+r[0].length):-1},c:function(t,n,r){return T(t,e,n,r)},d:ah,e:ah,f:hh,g:eh,G:th,H:sh,I:sh,j:oh,L:lh,m:ih,M:ch,p:function(t,e,n){var r=u.exec(e.slice(n));return r?(t.p=l.get(r[0].toLowerCase()),n+r[0].length):-1},q:rh,Q:dh,s:ph,S:uh,u:Zl,U:Ql,V:Kl,w:Xl,W:Jl,x:function(t,e,r){return T(t,n,e,r)},X:function(t,e,n){return T(t,r,e,n)},y:eh,Y:th,Z:nh,"%":fh};function w(t,e){return function(n){var r,i,a,o=[],s=-1,c=0,u=t.length;for(n instanceof Date||(n=new Date(+n));++s53)return null;"w"in a||(a.w=1),"Z"in 
a?(i=(r=Fl(Pl(a.y,0,1))).getUTCDay(),r=i>4||0===i?pl.ceil(r):pl(r),r=xl.offset(r,7*(a.V-1)),a.y=r.getUTCFullYear(),a.m=r.getUTCMonth(),a.d=r.getUTCDate()+(a.w+6)%7):(i=(r=Rl(Pl(a.y,0,1))).getDay(),r=i>4||0===i?Tl.ceil(r):Tl(r),r=Dl.offset(r,7*(a.V-1)),a.y=r.getFullYear(),a.m=r.getMonth(),a.d=r.getDate()+(a.w+6)%7)}else("W"in a||"U"in a)&&("w"in a||(a.w="u"in a?a.u%7:"W"in a?1:0),i="Z"in a?Fl(Pl(a.y,0,1)).getUTCDay():Rl(Pl(a.y,0,1)).getDay(),a.m=0,a.d="W"in a?(a.w+6)%7+7*a.W-(i+5)%7:a.w+7*a.U-(i+6)%7);return"Z"in a?(a.H+=a.Z/100|0,a.M+=a.Z%100,Fl(a)):Rl(a)}}function T(t,e,n,r){for(var i,a,o=0,s=e.length,c=n.length;o=c)return-1;if(37===(i=e.charCodeAt(o++))){if(i=e.charAt(o++),!(a=x[i in zl?e.charAt(o++):i])||(r=a(t,n,r))<0)return-1}else if(i!=n.charCodeAt(r++))return-1}return r}return b.x=w(n,b),b.X=w(r,b),b.c=w(e,b),_.x=w(n,_),_.X=w(r,_),_.c=w(e,_),{format:function(t){var e=w(t+="",b);return e.toString=function(){return t},e},parse:function(t){var e=k(t+="",!1);return e.toString=function(){return t},e},utcFormat:function(t){var e=w(t+="",_);return e.toString=function(){return t},e},utcParse:function(t){var e=k(t+="",!0);return e.toString=function(){return t},e}}}({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]}),Yl=jl.format,jl.parse,jl.utcFormat,jl.utcParse;var af={value:()=>{}};function of(){for(var t,e=0,n=arguments.length,r={};e=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function uf(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new Array(n),a=0;a=0&&e._call.call(void 0,t),e=e._next;--pf}()}finally{pf=0,function(){for(var t,e,n=ff,r=1/0;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:ff=e);df=t,Af(r)}(),vf=0}}function Sf(){var t=_f.now(),e=t-mf;e>1e3&&(bf-=e,mf=t)}function Af(t){pf||(yf&&(yf=clearTimeout(yf)),t-vf>24?(t<1/0&&(yf=setTimeout(Cf,t-_f.now()-bf)),gf&&(gf=clearInterval(gf))):(gf||(mf=_f.now(),gf=setInterval(Sf,1e3)),pf=1,xf(Cf)))}function Mf(t,e,n){var r=new Tf;return e=null==e?0:+e,r.restart((n=>{r.stop(),t(n+e)}),e,n),r}Tf.prototype=Ef.prototype={constructor:Tf,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a function");n=(null==n?wf():+n)+(null==e?0:+e),this._next||df===this||(df?df._next=this:ff=this,df=this),this._call=t,this._time=n,Af()},stop:function(){this._call&&(this._call=null,this._time=1/0,Af())}};var Nf=hf("start","end","cancel","interrupt"),Df=[];function Of(t,e,n,r,i,a){var o=t.__transition;if(o){if(n in o)return}else t.__transition={};!function(t,e,n){var r,i=t.__transition;function a(c){var u,l,h,f;if(1!==n.state)return s();for(u in i)if((f=i[u]).name===n.name){if(3===f.state)return Mf(a);4===f.state?(f.state=6,f.timer.stop(),f.on.call("interrupt",t,t.__data__,f.index,f.group),delete i[u]):+u0)throw new Error("too late; already scheduled");return n}function Lf(t,e){var n=If(t,e);if(n.state>3)throw new Error("too late; already running");return n}function If(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}function Rf(t,e){return t=+t,e=+e,function(n){return t*(1-n)+e*n}}var 
Ff,Pf=180/Math.PI,jf={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1};function Yf(t,e,n,r,i,a){var o,s,c;return(o=Math.sqrt(t*t+e*e))&&(t/=o,e/=o),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,c/=s),t*r180?e+=360:e-t>180&&(t+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:Rf(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(a.rotate,o.rotate,s,c),function(t,e,n,a){t!==e?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:Rf(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(a.skewX,o.skewX,s,c),function(t,e,n,r,a,o){if(t!==n||e!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:Rf(t,n)},{i:s-2,x:Rf(e,r)})}else 1===n&&1===r||a.push(i(a)+"scale("+n+","+r+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,c),a=o=null,function(t){for(var e,n=-1,r=c.length;++n=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=ra&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:Rf(n,r)})),a=Qf.lastIndex;return a=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?Bf:Lf;return function(){var o=a(this,t),s=o.on;s!==r&&(i=(r=s).copy()).on(e,n),o.on=i}}var gd=iu.prototype.constructor;function md(t){return function(){this.style.removeProperty(t)}}function vd(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function bd(t,e,n){var r,i;function a(){var a=e.apply(this,arguments);return a!==i&&(r=(i=a)&&vd(t,a,n)),r}return a._value=e,a}function _d(t){return function(e){this.textContent=t.call(this,e)}}function xd(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&_d(r)),e}return r._value=t,r}var wd=0;function kd(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function Td(){return++wd}var Ed=iu.prototype;kd.prototype=function(t){return iu().transition(t)}.prototype={constructor:kd,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=$s(t));for(var r=this._groups,i=r.length,a=new Array(i),o=0;o2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete a[i]):o=!1;o&&delete t.__transition}}(this,t)}))},iu.prototype.transition=function(t){var e,n;t instanceof kd?(e=t._id,t=t._name):(e=Td(),(n=Cd).time=wf(),t=null==t?null:t+"");for(var r=this._groups,i=r.length,a=0;ae?1:t>=e?0:NaN}Yd.prototype={constructor:Yd,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,e){return this._parent.insertBefore(t,e)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var Vd="http://www.w3.org/1999/xhtml";const Gd={svg:"http://www.w3.org/2000/svg",xhtml:Vd,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function Xd(t){var e=t+="",n=e.indexOf(":");return n>=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),Gd.hasOwnProperty(e)?{space:Gd[e],local:t}:t}function Zd(t){return function(){this.removeAttribute(t)}}function Qd(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Kd(t,e){return function(){this.setAttribute(t,e)}}function Jd(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function tp(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function ep(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}function np(t){return 
t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function rp(t){return function(){this.style.removeProperty(t)}}function ip(t,e,n){return function(){this.style.setProperty(t,e,n)}}function ap(t,e,n){return function(){var r=e.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,n)}}function op(t,e){return t.style.getPropertyValue(e)||np(t).getComputedStyle(t,null).getPropertyValue(e)}function sp(t){return function(){delete this[t]}}function cp(t,e){return function(){this[t]=e}}function up(t,e){return function(){var n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function lp(t){return t.trim().split(/^|\s+/)}function hp(t){return t.classList||new fp(t)}function fp(t){this._node=t,this._names=lp(t.getAttribute("class")||"")}function dp(t,e){for(var n=hp(t),r=-1,i=e.length;++r=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function Lp(t){return function(){var e=this.__on;if(e){for(var n,r=0,i=-1,a=e.length;r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var jp=[null];function Yp(t,e){this._groups=t,this._parents=e}function zp(){return new Yp([[document.documentElement]],jp)}Yp.prototype=zp.prototype={constructor:Yp,select:function(t){"function"!=typeof t&&(t=Md(t));for(var e=this._groups,n=e.length,r=new Array(n),i=0;i=x&&(x=_+1);!(b=g[x])&&++x=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=Wd);for(var n=this._groups,r=n.length,i=new Array(r),a=0;a1?this.each((null==e?rp:"function"==typeof e?ap:ip)(t,e,null==n?"":n)):op(this.node(),t)},property:function(t,e){return arguments.length>1?this.each((null==e?sp:"function"==typeof e?up:cp)(t,e)):this.node()[t]},classed:function(t,e){var n=lp(t+"");if(arguments.length<2){for(var r=hp(this.node()),i=-1,a=n.length;++i{}};function Hp(){for(var t,e=0,n=arguments.length,r={};e=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function Vp(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new Array(n),a=0;a=0&&e._call.call(void 0,t),e=e._next;--Kp}()}finally{Kp=0,function(){for(var t,e,n=Zp,r=1/0;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:Zp=e);Qp=t,fy(r)}(),ny=0}}function hy(){var t=iy.now(),e=t-ey;e>1e3&&(ry-=e,ey=t)}function fy(t){Kp||(Jp&&(Jp=clearTimeout(Jp)),t-ny>24?(t<1/0&&(Jp=setTimeout(ly,t-iy.now()-ry)),ty&&(ty=clearInterval(ty))):(ty||(ey=iy.now(),ty=setInterval(hy,1e3)),Kp=1,ay(ly)))}function dy(t,e,n){var r=new cy;return e=null==e?0:+e,r.restart((n=>{r.stop(),t(n+e)}),e,n),r}cy.prototype=uy.prototype={constructor:cy,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a function");n=(null==n?oy():+n)+(null==e?0:+e),this._next||Qp===this||(Qp?Qp._next=this:Zp=this,Qp=this),this._call=t,this._time=n,fy()},stop:function(){this._call&&(this._call=null,this._time=1/0,fy())}};var py=Xp("start","end","cancel","interrupt"),yy=[];function gy(t,e,n,r,i,a){var o=t.__transition;if(o){if(n in o)return}else t.__transition={};!function(t,e,n){var r,i=t.__transition;function a(c){var u,l,h,f;if(1!==n.state)return s();for(u in i)if((f=i[u]).name===n.name){if(3===f.state)return dy(a);4===f.state?(f.state=6,f.timer.stop(),f.on.call("interrupt",t,t.__data__,f.index,f.group),delete i[u]):+u0)throw new Error("too late; 
already scheduled");return n}function vy(t,e){var n=by(t,e);if(n.state>3)throw new Error("too late; already running");return n}function by(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}function _y(t,e){return t=+t,e=+e,function(n){return t*(1-n)+e*n}}var xy,wy=180/Math.PI,ky={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1};function Ty(t,e,n,r,i,a){var o,s,c;return(o=Math.sqrt(t*t+e*e))&&(t/=o,e/=o),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,c/=s),t*r180?e+=360:e-t>180&&(t+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:_y(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(a.rotate,o.rotate,s,c),function(t,e,n,a){t!==e?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:_y(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(a.skewX,o.skewX,s,c),function(t,e,n,r,a,o){if(t!==n||e!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:_y(t,n)},{i:s-2,x:_y(e,r)})}else 1===n&&1===r||a.push(i(a)+"scale("+n+","+r+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,c),a=o=null,function(t){for(var e,n=-1,r=c.length;++n>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?Qy(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?Qy(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=Yy.exec(t))?new tg(e[1],e[2],e[3],1):(e=zy.exec(t))?new tg(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=Uy.exec(t))?Qy(e[1],e[2],e[3],e[4]):(e=qy.exec(t))?Qy(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=Hy.exec(t))?ig(e[1],e[2]/100,e[3]/100,1):(e=$y.exec(t))?ig(e[1],e[2]/100,e[3]/100,e[4]):Wy.hasOwnProperty(t)?Zy(Wy[t]):"transparent"===t?new tg(NaN,NaN,NaN,0):null}function Zy(t){return new tg(t>>16&255,t>>8&255,255&t,1)}function Qy(t,e,n,r){return r<=0&&(t=e=n=NaN),new tg(t,e,n,r)}function Ky(t){return t instanceof By||(t=Xy(t)),t?new tg((t=t.rgb()).r,t.g,t.b,t.opacity):new tg}function Jy(t,e,n,r){return 1===arguments.length?Ky(t):new tg(t,e,n,null==r?1:r)}function tg(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function eg(){return"#"+rg(this.r)+rg(this.g)+rg(this.b)}function ng(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function rg(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function ig(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new og(t,e,n,r)}function ag(t){if(t instanceof og)return new og(t.h,t.s,t.l,t.opacity);if(t instanceof By||(t=Xy(t)),!t)return new og;if(t instanceof og)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new og(o,s,c,t.opacity)}function og(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function sg(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function cg(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}Dy(By,Xy,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:Vy,formatHex:Vy,formatHsl:function(){return ag(this).formatHsl()},formatRgb:Gy,toString:Gy}),Dy(tg,Jy,Oy(By,{brighter:function(t){return t=null==t?Iy:Math.pow(Iy,t),new tg(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?Ly:Math.pow(Ly,t),new 
tg(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:eg,formatHex:eg,formatRgb:ng,toString:ng})),Dy(og,(function(t,e,n,r){return 1===arguments.length?ag(t):new og(t,e,n,null==r?1:r)}),Oy(By,{brighter:function(t){return t=null==t?Iy:Math.pow(Iy,t),new og(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?Ly:Math.pow(Ly,t),new og(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new tg(sg(t>=240?t-240:t+120,i,r),sg(t,i,r),sg(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const ug=t=>()=>t;function lg(t,e){var n=e-t;return n?function(t,e){return function(n){return t+n*e}}(t,n):ug(isNaN(t)?e:t)}const hg=function t(e){var n=function(t){return 1==(t=+t)?lg:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):ug(isNaN(e)?n:e)}}(e);function r(t,e){var r=n((t=Jy(t)).r,(e=Jy(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=lg(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function fg(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;n=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=ra&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:_y(n,r)})),a=pg.lastIndex;return a=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?my:vy;return function(){var o=a(this,t),s=o.on;s!==r&&(i=(r=s).copy()).on(e,n),o.on=i}}var Bg=Up.prototype.constructor;function Lg(t){return function(){this.style.removeProperty(t)}}function Ig(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function Rg(t,e,n){var r,i;function a(){var a=e.apply(this,arguments);return a!==i&&(r=(i=a)&&Ig(t,a,n)),r}return a._value=e,a}function Fg(t){return function(e){this.textContent=t.call(this,e)}}function Pg(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&Fg(r)),e}return r._value=t,r}var jg=0;function Yg(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function zg(){return++jg}var Ug=Up.prototype;Yg.prototype=function(t){return Up().transition(t)}.prototype={constructor:Yg,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=Md(t));for(var r=this._groups,i=r.length,a=new Array(i),o=0;o2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete a[i]):o=!1;o&&delete t.__transition}}(this,t)}))},Up.prototype.transition=function(t){var e,n;t instanceof Yg?(e=t._id,t=t._name):(e=zg(),(n=qg).time=oy(),t=null==t?null:t+"");for(var r=this._groups,i=r.length,a=0;a0?tm(fm,--lm):0,cm--,10===hm&&(cm=1,sm--),hm}function ym(){return hm=lm2||bm(hm)>3?"":" "}function wm(t,e){for(;--e&&ym()&&!(hm<48||hm>102||hm>57&&hm<65||hm>70&&hm<97););return vm(t,mm()+(e<6&&32==gm()&&32==ym()))}function km(t){for(;ym();)switch(hm){case t:return lm;case 34:case 39:34!==t&&39!==t&&km(hm);break;case 
40:41===t&&km(t);break;case 92:ym()}return lm}function Tm(t,e){for(;ym()&&t+hm!==57&&(t+hm!==84||47!==gm()););return"/*"+vm(e,lm-1)+"*"+Zg(47===t?t:ym())}function Em(t){for(;!bm(gm());)ym();return vm(t,lm)}function Cm(t){return function(t){return fm="",t}(Sm("",null,null,null,[""],t=function(t){return sm=cm=1,um=nm(fm=t),lm=0,[]}(t),0,[0],t))}function Sm(t,e,n,r,i,a,o,s,c){for(var u=0,l=0,h=o,f=0,d=0,p=0,y=1,g=1,m=1,v=0,b="",_=i,x=a,w=r,k=b;g;)switch(p=v,v=ym()){case 40:if(108!=p&&58==k.charCodeAt(h-1)){-1!=Jg(k+=Kg(_m(v),"&","&\f"),"&\f")&&(m=-1);break}case 34:case 39:case 91:k+=_m(v);break;case 9:case 10:case 13:case 32:k+=xm(p);break;case 92:k+=wm(mm()-1,7);continue;case 47:switch(gm()){case 42:case 47:im(Mm(Tm(ym(),mm()),e,n),c);break;default:k+="/"}break;case 123*y:s[u++]=nm(k)*m;case 125*y:case 59:case 0:switch(v){case 0:case 125:g=0;case 59+l:d>0&&nm(k)-h&&im(d>32?Nm(k+";",r,n,h-1):Nm(Kg(k," ","")+";",r,n,h-2),c);break;case 59:k+=";";default:if(im(w=Am(k,e,n,u,l,i,s,b,_=[],x=[],h),a),123===v)if(0===l)Sm(k,e,w,w,_,a,h,s,x);else switch(f){case 100:case 109:case 115:Sm(t,w,w,r&&im(Am(t,w,w,0,0,i,s,b,i,_=[],h),x),i,x,h,s,r?_:x);break;default:Sm(k,w,w,w,[""],x,0,s,x)}}u=l=d=0,y=m=1,b=k="",h=o;break;case 58:h=1+nm(k),d=p;default:if(y<1)if(123==v)--y;else if(125==v&&0==y++&&125==pm())continue;switch(k+=Zg(v),v*y){case 38:m=l>0?1:(k+="\f",-1);break;case 44:s[u++]=(nm(k)-1)*m,m=1;break;case 64:45===gm()&&(k+=_m(ym())),f=gm(),l=h=nm(b=k+=Em(mm())),v++;break;case 45:45===p&&2==nm(k)&&(y=0)}}return a}function Am(t,e,n,r,i,a,o,s,c,u,l){for(var h=i-1,f=0===i?a:[""],d=rm(f),p=0,y=0,g=0;p0?f[m]+" "+v:Kg(v,/&\f/g,f[m])))&&(c[g++]=b);return dm(t,e,n,0===i?Vg:s,c,u,l)}function Mm(t,e,n){return dm(t,e,n,Wg,Zg(hm),em(t,2,-2),0)}function Nm(t,e,n,r){return dm(t,e,n,Gg,em(t,0,r),em(t,r+1,-1),r)}const Dm="8.13.10";var Om=n(9609),Bm=n(7856),Lm=n.n(Bm),Im=function(t){var e=t.replace(/\\u[\dA-F]{4}/gi,(function(t){return String.fromCharCode(parseInt(t.replace(/\\u/g,""),16))}));return e=(e=(e=e.replace(/\\x([0-9a-f]{2})/gi,(function(t,e){return String.fromCharCode(parseInt(e,16))}))).replace(/\\[\d\d\d]{3}/gi,(function(t){return String.fromCharCode(parseInt(t.replace(/\\/g,""),8))}))).replace(/\\[\d\d\d]{2}/gi,(function(t){return String.fromCharCode(parseInt(t.replace(/\\/g,""),8))}))},Rm=function(t){for(var e="",n=0;n>=0;){if(!((n=t.indexOf("=0)){e+=t,n=-1;break}e+=t.substr(0,n),(n=(t=t.substr(n+1)).indexOf("<\/script>"))>=0&&(n+=9,t=t.substr(n))}var r=Im(e);return(r=(r=(r=r.replace(/script>/gi,"#")).replace(/javascript:/gi,"#")).replace(/onerror=/gi,"onerror:")).replace(/

]) {
         self.last_squeezed = vec![];
         for g in g.iter() {
             if g.infinity {
@@ -170,13 +167,13 @@ where
         self.last_squeezed = vec![];
 
         x.iter().for_each(|x| {
-            let bits = x.into_repr().to_bits_le();
+            let bits = x.into_bigint().to_bits_le();
 
             // absorb
-            if <P::ScalarField as PrimeField>::Params::MODULUS
-                < <P::BaseField as PrimeField>::Params::MODULUS.into()
+            if <P::ScalarField as PrimeField>::MODULUS
+                < <P::BaseField as PrimeField>::MODULUS.into()
             {
-                let fe = P::BaseField::from_repr(
+                let fe = P::BaseField::from_bigint(
                     <P::BaseField as PrimeField>::BigInt::from_bits_le(&bits),
                 )
                 .expect("padding code has a bug");
@@ -188,7 +185,7 @@ where
                     P::BaseField::zero()
                 };
 
-                let high_bits = P::BaseField::from_repr(
+                let high_bits = P::BaseField::from_bigint(
                     <P::BaseField as PrimeField>::BigInt::from_bits_le(&bits[1..bits.len()]),
                 )
                 .expect("padding code has a bug");
@@ -200,14 +197,14 @@ where
     }
 
     fn digest(mut self) -> P::ScalarField {
-        let x: <P::BaseField as PrimeField>::BigInt = self.squeeze_field().into_repr();
+        let x: <P::BaseField as PrimeField>::BigInt = self.squeeze_field().into_bigint();
         // Returns zero for values that are too large.
         // This means that there is a bias for the value zero (in one of the curve).
         // An attacker could try to target that seed, in order to predict the challenges u and v produced by the Fr-Sponge.
         // This would allow the attacker to mess with the result of the aggregated evaluation proof.
         // Previously the attacker's odds were 1/q, now it's (q-p)/q.
         // Since log2(q-p) ~ 86 and log2(q) ~ 254 the odds of a successful attack are negligible.
-        P::ScalarField::from_repr(x.into()).unwrap_or_else(P::ScalarField::zero)
+        P::ScalarField::from_bigint(x.into()).unwrap_or_else(P::ScalarField::zero)
     }
 
     fn digest_fq(mut self) -> P::BaseField {
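
The `digest` hunk above is the shape of the whole 0.3.0 -> 0.4.2 field migration: `into_repr`/`from_repr` become `into_bigint`/`from_bigint`. A minimal standalone sketch of the same squeeze-and-convert step on the concrete Pasta fields (the helper name is illustrative, not part of the patch):

    use ark_ff::{PrimeField, Zero};
    use mina_curves::pasta::{Fp, Fq};

    // Vesta's base field Fq is slightly larger than its scalar field Fp, so a
    // squeezed Fq element may not fit into Fp; like `digest` above, such
    // values map to zero (the (q-p)/q bias discussed in the comments).
    fn squeeze_to_scalar(x: Fq) -> Fp {
        // arkworks 0.4.2: `into_bigint`/`from_bigint` replace `into_repr`/`from_repr`
        Fp::from_bigint(x.into_bigint().into()).unwrap_or_else(Fp::zero)
    }
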
From e9924d7b2f67335d555f58af57fe3230c9b3599f Mon Sep 17 00:00:00 2001
From: Chiro Hiro
Date: Wed, 6 Dec 2023 12:04:05 +0700
Subject: [PATCH 075/178] Upgrade export test vectors of poseidon to arkworks 0.4.2

---
 Cargo.lock                                  |  4 ++--
 poseidon/export_test_vectors/Cargo.toml     |  4 ++--
 poseidon/export_test_vectors/src/vectors.rs | 11 ++++++-----
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index ef9feb5b6b..f3995ac218 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -898,8 +898,8 @@ dependencies = [
 name = "export_test_vectors"
 version = "0.1.0"
 dependencies = [
- "ark-ff 0.3.0",
- "ark-serialize 0.3.0",
+ "ark-ff 0.4.2",
+ "ark-serialize 0.4.2",
  "hex",
  "mina-curves",
  "mina-poseidon",
diff --git a/poseidon/export_test_vectors/Cargo.toml b/poseidon/export_test_vectors/Cargo.toml
index 6af585470b..80baaa21f8 100644
--- a/poseidon/export_test_vectors/Cargo.toml
+++ b/poseidon/export_test_vectors/Cargo.toml
@@ -10,11 +10,11 @@ edition = "2021"
 license = "Apache-2.0"
 
 [dependencies]
-ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] }
+ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] }
 num-bigint = { version = "0.4.0" }
 serde_json = { version = "1.0" }
 hex = { version = "0.4" }
-ark-serialize = { version = "0.3.0" }
+ark-serialize = { version = "0.4.2" }
 rand = "0.8.0"
 serde = { version = "1.0", features = ["derive"] }
 serde_with = "1.10.0"
diff --git a/poseidon/export_test_vectors/src/vectors.rs b/poseidon/export_test_vectors/src/vectors.rs
index 7fc8826cfc..b8feddeeb8 100644
--- a/poseidon/export_test_vectors/src/vectors.rs
+++ b/poseidon/export_test_vectors/src/vectors.rs
@@ -1,5 +1,5 @@
 use super::{Mode, ParamType};
-use ark_ff::{fields::PrimeField as _, UniformRand as _};
+use ark_ff::UniformRand as _;
 use ark_serialize::CanonicalSerialize as _;
 use mina_curves::pasta::Fp;
 use mina_poseidon::{
@@ -78,9 +78,10 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors {
         .into_iter()
         .map(|elem| {
             let mut input_bytes = vec![];
-            elem.into_repr()
-                .serialize(&mut input_bytes)
+            elem.0
+                .serialize_uncompressed(&mut input_bytes)
                 .expect("canonical serialiation should work");
+
             match mode {
                 Mode::Hex => hex::encode(&input_bytes),
                 Mode::B10 => BigUint::from_bytes_le(&input_bytes).to_string(),
@@ -89,8 +90,8 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors {
         .collect();
     let mut output_bytes = vec![];
     output
-        .into_repr()
-        .serialize(&mut output_bytes)
+        .0
+        .serialize_uncompressed(&mut output_bytes)
         .expect("canonical serialization should work");
 
     // add vector
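
The `serialize` -> `serialize_uncompressed` rename above comes from ark-serialize 0.4 splitting `CanonicalSerialize` into compressed and uncompressed variants. A small self-contained sketch of the 0.4.2 call (not part of the patch; for a prime-field element both variants yield the same 32 little-endian bytes):

    use ark_serialize::CanonicalSerialize;
    use mina_curves::pasta::Fp;

    // Serialize a field element's canonical representation into a byte vector.
    fn element_bytes(elem: Fp) -> Vec<u8> {
        let mut bytes = vec![];
        elem.serialize_uncompressed(&mut bytes)
            .expect("writing to a Vec should not fail");
        bytes
    }
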
From eeb57d1ccf3be48de5c82e337a1dec3f12218ce8 Mon Sep 17 00:00:00 2001
From: Chiro Hiro
Date: Thu, 7 Dec 2023 10:28:49 +0700
Subject: [PATCH 076/178] Upgrade groupmap to arkworks 0.4.2

---
 Cargo.lock                     |  4 ++--
 curves/src/pasta/fields/mod.rs | 38 ++++++++++++++++++++++++++++++++++
 groupmap/Cargo.toml            |  4 ++--
 groupmap/src/lib.rs            | 22 +++++++++-----------
 groupmap/tests/groupmap.rs     |  4 ++--
 5 files changed, 54 insertions(+), 18 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f3995ac218..62b1c0ba91 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1029,8 +1029,8 @@ checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
 name = "groupmap"
 version = "0.1.0"
 dependencies = [
- "ark-ec 0.3.0",
- "ark-ff 0.3.0",
+ "ark-ec 0.4.2",
+ "ark-ff 0.4.2",
  "mina-curves",
  "rand",
 ]
diff --git a/curves/src/pasta/fields/mod.rs b/curves/src/pasta/fields/mod.rs
index fcaff2e7a9..158eae558d 100644
--- a/curves/src/pasta/fields/mod.rs
+++ b/curves/src/pasta/fields/mod.rs
@@ -1,3 +1,4 @@
+use ark_ff::Field;
 pub mod fp;
 pub use self::fp::*;
 
@@ -6,5 +7,42 @@ pub use self::fq::*;
 
 pub mod fft;
 
+#[derive(Debug, PartialEq)]
+pub enum LegendreSymbol {
+    Zero = 0,
+    QuadraticResidue = 1,
+    QuadraticNonResidue = -1,
+}
+
+impl LegendreSymbol {
+    pub fn is_zero(&self) -> bool {
+        *self == LegendreSymbol::Zero
+    }
+
+    pub fn is_qnr(&self) -> bool {
+        *self == LegendreSymbol::QuadraticNonResidue
+    }
+
+    pub fn is_qr(&self) -> bool {
+        *self == LegendreSymbol::QuadraticResidue
+    }
+}
+
+/// The interface for a field that supports an efficient square-root operation.
+pub trait SquareRootField: Field {
+    /// Returns a `LegendreSymbol`, which indicates whether this field element is
+    /// 1 : a quadratic residue
+    /// 0 : equal to 0
+    /// -1 : a quadratic non-residue
+    fn legendre(&self) -> LegendreSymbol;
+
+    /// Returns the square root of self, if it exists.
+    #[must_use]
+    fn sqrt(&self) -> Option<Self>;
+
+    /// Sets `self` to be the square root of `self`, if it exists.
+    fn sqrt_in_place(&mut self) -> Option<&mut Self>;
+}
+
 #[cfg(test)]
 mod tests;
diff --git a/groupmap/Cargo.toml b/groupmap/Cargo.toml
index a1d30309c3..d639095573 100644
--- a/groupmap/Cargo.toml
+++ b/groupmap/Cargo.toml
@@ -13,8 +13,8 @@ license = "Apache-2.0"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] }
-ark-ec = { version = "0.3.0", features = [ "parallel" ] }
+ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] }
+ark-ec = { version = "0.4.2", features = [ "parallel" ] }
 rand = "0.8.4"
 
 [dev-dependencies]
diff --git a/groupmap/src/lib.rs b/groupmap/src/lib.rs
index cc310d9ab8..a35140f423 100644
--- a/groupmap/src/lib.rs
+++ b/groupmap/src/lib.rs
@@ -19,8 +19,8 @@
 //! WB19: Riad S. Wahby and Dan Boneh, Fast and simple constant-time hashing to the BLS12-381 elliptic curve.
 //! <https://eprint.iacr.org/2019/403.pdf>
 
-use ark_ec::models::SWModelParameters;
-use ark_ff::{Field, One, SquareRootField, Zero};
+use ark_ec::short_weierstrass::SWCurveConfig;
+use ark_ff::{Field, One, Zero};
 
 pub trait GroupMap<F> {
     fn setup() -> Self;
@@ -29,7 +29,7 @@ pub trait GroupMap<F> {
 }
 
 #[derive(Clone, Copy)]
-pub struct BWParameters<G: SWModelParameters> {
+pub struct BWParameters<G: SWCurveConfig> {
     u: G::BaseField,
     fu: G::BaseField,
     sqrt_neg_three_u_squared_minus_u_over_2: G::BaseField,
@@ -38,12 +38,13 @@ pub struct BWParameters<G: SWModelParameters> {
 }
 
 /// returns the right-hand side of the Short Weierstrass curve equation for a given x
-fn curve_eqn<G: SWModelParameters>(x: G::BaseField) -> G::BaseField {
+fn curve_eqn<G: SWCurveConfig>(x: G::BaseField) -> G::BaseField {
     let mut res = x;
     res *= &x; // x^2
     res += &G::COEFF_A; // x^2 + A
     res *= &x; // x^3 + A x
     res += &G::COEFF_B; // x^3 + A x + B
+
     res
 }
 
@@ -61,7 +62,7 @@ fn find_first<A, K: Clone, F: Fn(K) -> Option<A>>(start: K, f: F) -> A {
 }
 
 /// ?
-fn potential_xs_helper<G: SWModelParameters>(
+fn potential_xs_helper<G: SWCurveConfig>(
     params: &BWParameters<G>,
     t2: G::BaseField,
     alpha: G::BaseField,
@@ -89,10 +90,7 @@ fn potential_xs_helper<G: SWModelParameters>(
 }
 
 /// ?
-fn potential_xs<G: SWModelParameters>(
-    params: &BWParameters<G>,
-    t: G::BaseField,
-) -> [G::BaseField; 3] {
+fn potential_xs<G: SWCurveConfig>(params: &BWParameters<G>, t: G::BaseField) -> [G::BaseField; 3] {
    let t2 = t.square();
     let mut alpha_inv = t2;
     alpha_inv += &params.fu;
@@ -108,12 +106,12 @@ fn potential_xs<G: SWModelParameters>(
 
 /// returns the y-coordinate if x is a valid point on the curve, otherwise None
 /// TODO: what about sign?
-pub fn get_y<G: SWModelParameters>(x: G::BaseField) -> Option<G::BaseField> {
+pub fn get_y<G: SWCurveConfig>(x: G::BaseField) -> Option<G::BaseField> {
     let fx = curve_eqn::<G>(x);
     fx.sqrt()
 }
 
-fn get_xy<G: SWModelParameters>(
+fn get_xy<G: SWCurveConfig>(
     params: &BWParameters<G>,
     t: G::BaseField,
 ) -> (G::BaseField, G::BaseField) {
@@ -126,7 +124,7 @@ fn get_xy<G: SWModelParameters>(
     panic!("get_xy")
 }
 
-impl<G: SWModelParameters> GroupMap<G::BaseField> for BWParameters<G> {
+impl<G: SWCurveConfig> GroupMap<G::BaseField> for BWParameters<G> {
     fn setup() -> Self {
         assert!(G::COEFF_A.is_zero());
 
diff --git a/groupmap/tests/groupmap.rs b/groupmap/tests/groupmap.rs
index 0044616783..17d05dfc16 100644
--- a/groupmap/tests/groupmap.rs
+++ b/groupmap/tests/groupmap.rs
@@ -8,7 +8,7 @@ fn test_group_map_on_curve() {
     let params = BWParameters::<VestaParameters>::setup();
     let t: Fq = rand::random();
     let (x, y) = BWParameters::<VestaParameters>::to_group(&params, t);
-    let g = Vesta::new(x, y, false);
+    let g = Vesta::new(x, y);
     assert!(g.is_on_curve());
 }
 
@@ -27,7 +27,7 @@ fn test_batch_group_map_on_curve() {
     let ts: Vec<Fq> = (0..1000).map(|_| rand::random()).collect();
     for xs in BWParameters::<VestaParameters>::batch_to_group_x(&params, ts).iter() {
         let (x, y) = first_xy(xs);
-        let g = Vesta::new(x, y, false);
+        let g = Vesta::new(x, y);
         assert!(g.is_on_curve());
     }
 }
From 0971b34646dec4ca6d9e950323f981a0f85a501a Mon Sep 17 00:00:00 2001
From: Chiro Hiro
Date: Thu, 7 Dec 2023 10:36:45 +0700
Subject: [PATCH 077/178] Upgrade hasher to arkworks 0.4.2

---
 Cargo.lock            |  2 +-
 hasher/Cargo.toml     |  2 +-
 hasher/src/roinput.rs | 10 ++++++----
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 62b1c0ba91..ad4970d12b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1453,7 +1453,7 @@ dependencies = [
 name = "mina-hasher"
 version = "0.1.0"
 dependencies = [
- "ark-ff 0.3.0",
+ "ark-ff 0.4.2",
  "bitvec",
  "mina-curves",
  "mina-poseidon",
diff --git a/hasher/Cargo.toml b/hasher/Cargo.toml
index 781aafaadb..4ce6257214 100644
--- a/hasher/Cargo.toml
+++ b/hasher/Cargo.toml
@@ -17,7 +17,7 @@ mina-poseidon = { path = "../poseidon", version = "0.1.0" }
 mina-curves = { path = "../curves", version = "0.1.0" }
 o1-utils = { path = "../utils", version = "0.1.0" }
 
-ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] }
+ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] }
 bitvec = "1.0.0"
 serde = { version = "1.0", features = ["derive"] }
 
diff --git a/hasher/src/roinput.rs b/hasher/src/roinput.rs
index 1db4ee3193..e20b4bbba7 100644
--- a/hasher/src/roinput.rs
+++ b/hasher/src/roinput.rs
@@ -91,7 +91,7 @@ impl ROInput {
     pub fn append_scalar(mut self, s: Fq) -> Self {
         // mina scalars are 255 bytes
         let bytes = s.to_bytes();
-        let bits = &bytes.as_bits::<Lsb0>()[..Fq::size_in_bits()];
+        let bits = &bytes.as_bits::<Lsb0>()[..Fq::MODULUS_BIT_SIZE as usize];
         self.bits.extend(bits);
         self
     }
@@ -121,7 +121,9 @@ impl ROInput {
     /// Serialize random oracle input to bytes
     pub fn to_bytes(&self) -> Vec<u8> {
         let mut bits: BitVec<u8> = self.fields.iter().fold(BitVec::new(), |mut acc, fe| {
-            acc.extend_from_bitslice(&fe.to_bytes().as_bits::<Lsb0>()[..Fp::size_in_bits()]);
+            acc.extend_from_bitslice(
+                &fe.to_bytes().as_bits::<Lsb0>()[..Fp::MODULUS_BIT_SIZE as usize],
+            );
             acc
         });
 
@@ -137,7 +139,7 @@ impl ROInput {
 
         let bits_as_fields =
             self.bits
-                .chunks(Fp::size_in_bits() - 1)
+                .chunks(Fp::MODULUS_BIT_SIZE as usize - 1)
                 .fold(vec![], |mut acc, chunk| {
                     // Workaround: chunk.clone() does not appear to respect
                     // the chunk's boundaries when it's not byte-aligned.
@@ -157,7 +159,7 @@ impl ROInput {
                     bv.clone_from_bitslice(chunk);
 
                     // extend to the size of a field;
-                    bv.resize(Fp::size_in_bits(), false);
+                    bv.resize(Fp::MODULUS_BIT_SIZE as usize, false);
 
                     acc.push(
                         Fp::from_bytes(&bv.into_vec())
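
The hasher changes above are mechanical: ark-ff 0.3's `size_in_bits()` method became the associated constant `MODULUS_BIT_SIZE`, which is a `u32` and therefore needs the `as usize` casts. A quick standalone illustration of the packing arithmetic on a Pasta field:

    use ark_ff::PrimeField;
    use mina_curves::pasta::Fp;

    // The Pasta moduli are 255-bit primes, so bit-packing as in `roinput.rs`
    // uses chunks of MODULUS_BIT_SIZE - 1 = 254 bits, each guaranteed to fit
    // in one field element.
    fn bits_per_field_element() -> usize {
        Fp::MODULUS_BIT_SIZE as usize - 1
    }
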
From 34ed56a59b3f8f7bb2ebf2a05b81222e25f155b8 Mon Sep 17 00:00:00 2001
From: Chiro Hiro
Date: Thu, 7 Dec 2023 11:16:32 +0700
Subject: [PATCH 078/178] Upgrade signer to arkworks 0.4.2

---
 Cargo.lock            |  4 ++--
 signer/Cargo.toml     |  4 ++--
 signer/src/lib.rs     |  6 +++---
 signer/src/pubkey.rs  | 29 ++++++++++++++++++-----------
 signer/src/schnorr.rs | 25 ++++++++++++++-----------
 5 files changed, 39 insertions(+), 29 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index ad4970d12b..ba210a9d97 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1487,8 +1487,8 @@ dependencies = [
 name = "mina-signer"
 version = "0.1.0"
 dependencies = [
- "ark-ec 0.3.0",
- "ark-ff 0.3.0",
+ "ark-ec 0.4.2",
+ "ark-ff 0.4.2",
  "bitvec",
  "blake2",
  "bs58",
diff --git a/signer/Cargo.toml b/signer/Cargo.toml
index aa06d94401..059d830a28 100644
--- a/signer/Cargo.toml
+++ b/signer/Cargo.toml
@@ -17,8 +17,8 @@ mina-curves = { path = "../curves", version = "0.1.0" }
 mina-hasher = { path = "../hasher", version = "0.1.0" }
 o1-utils = { path = "../utils", version = "0.1.0" }
 
-ark-ec = { version = "0.3.0", features = [ "parallel" ] }
-ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] }
+ark-ec = { version = "0.4.2", features = [ "parallel" ] }
+ark-ff = { version = "0.4.2", features = ["parallel", "asm"] }
 rand = "0.8.0"
 blake2 = "0.10.0"
 
diff --git a/signer/src/lib.rs b/signer/src/lib.rs
index 6f64a44930..ff90997c6d 100644
--- a/signer/src/lib.rs
+++ b/signer/src/lib.rs
@@ -15,16 +15,16 @@ pub use schnorr::Schnorr;
 pub use seckey::SecKey;
 pub use signature::Signature;
 
-use ark_ec::AffineCurve;
+use ark_ec::AffineRepr;
 
 /// Affine curve point type
 pub use mina_curves::pasta::Pallas as CurvePoint;
 
 /// Base field element type
-pub type BaseField = <CurvePoint as AffineCurve>::BaseField;
+pub type BaseField = <CurvePoint as AffineRepr>::BaseField;
 
 /// Scalar field element type
-pub type ScalarField = <CurvePoint as AffineCurve>::ScalarField;
+pub type ScalarField = <CurvePoint as AffineRepr>::ScalarField;
 
 /// Mina network (or blockchain) identifier
 #[derive(Debug, Clone)]
diff --git a/signer/src/pubkey.rs b/signer/src/pubkey.rs
index 9a52f39494..e2cfe9277e 100644
--- a/signer/src/pubkey.rs
+++ b/signer/src/pubkey.rs
@@ -2,12 +2,12 @@
 //!
 //! Definition of public key structure and helpers
 
-use ark_ec::{AffineCurve, ProjectiveCurve};
+use ark_ec::{short_weierstrass::Affine, AffineRepr, CurveGroup};
 use ark_ff::{BigInteger, PrimeField, Zero};
 use bs58;
 use core::fmt;
 use sha2::{Digest, Sha256};
-use std::ops::Neg;
+use std::ops::{Mul, Neg};
 use thiserror::Error;
 
 use crate::{BaseField, CurvePoint, ScalarField, SecKey};
@@ -86,12 +86,17 @@ impl PubKey {
             .map_err(|_| PubKeyError::XCoordinateBytes)?;
         let y = BaseField::from_bytes(&bytes[BaseField::size_in_bytes()..])
             .map_err(|_| PubKeyError::YCoordinateBytes)?;
-        let pt = CurvePoint::get_point_from_x(x, y.0.is_odd()).ok_or(PubKeyError::XCoordinate)?;
+        let pt = CurvePoint::get_point_from_x_unchecked(x, y.0.is_odd())
+            .ok_or(PubKeyError::XCoordinate)?;
         if pt.y != y {
             return Err(PubKeyError::NonCurvePoint);
         }
-        let public = CurvePoint::new(x, y, pt.infinity);
+        let public = Affine {
+            x,
+            y,
+            infinity: pt.infinity,
+        };
         if !public.is_on_curve() {
             return Err(PubKeyError::NonCurvePoint);
         }
@@ -115,7 +120,7 @@ impl PubKey {
         if secret_key.clone().into_scalar() == ScalarField::zero() {
             return Err(PubKeyError::SecKey);
         }
-        let pt = CurvePoint::prime_subgroup_generator()
+        let pt = CurvePoint::generator()
             .mul(secret_key.into_scalar())
             .into_affine();
         if !pt.is_on_curve() {
@@ -158,9 +163,10 @@ impl PubKey {
         }
 
         let x = BaseField::from_bytes(x_bytes).map_err(|_| PubKeyError::XCoordinateBytes)?;
-        let mut pt = CurvePoint::get_point_from_x(x, y_parity).ok_or(PubKeyError::XCoordinate)?;
+        let mut pt =
+            CurvePoint::get_point_from_x_unchecked(x, y_parity).ok_or(PubKeyError::XCoordinate)?;
 
-        if pt.y.into_repr().is_even() == y_parity {
+        if pt.y.0.is_even() == y_parity {
             pt.y = pt.y.neg();
         }
 
@@ -187,14 +193,14 @@ impl PubKey {
         let point = self.0;
         CompressedPubKey {
             x: point.x,
-            is_odd: point.y.into_repr().is_odd(),
+            is_odd: point.y.into_bigint().is_odd(),
         }
     }
 
     /// Serialize public key into corresponding Mina address
     pub fn into_address(&self) -> String {
         let point = self.point();
-        into_address(&point.x, point.y.into_repr().is_odd())
+        into_address(&point.x, point.y.0.is_odd())
     }
 
     /// Deserialize public key into bytes
@@ -271,7 +277,8 @@ impl CompressedPubKey {
         } else {
             return Err(PubKeyError::YCoordinateParity);
         };
-        let public = CurvePoint::get_point_from_x(x, is_odd).ok_or(PubKeyError::XCoordinate)?;
+        let public =
+            CurvePoint::get_point_from_x_unchecked(x, is_odd).ok_or(PubKeyError::XCoordinate)?;
         if !public.is_on_curve() {
             return Err(PubKeyError::NonCurvePoint);
         }
@@ -294,7 +301,7 @@ impl CompressedPubKey {
     pub fn from_secret_key(sec_key: SecKey) -> Self {
         // We do not need to check point is on the curve, since it's derived directly from the generator point
         let public = PubKey::from_point_unsafe(
-            CurvePoint::prime_subgroup_generator()
+            CurvePoint::generator()
                 .mul(sec_key.into_scalar())
                 .into_affine(),
         );
diff --git a/signer/src/schnorr.rs b/signer/src/schnorr.rs
index 6fb1cff55c..1774d89918 100644
--- a/signer/src/schnorr.rs
+++ b/signer/src/schnorr.rs
@@ -5,8 +5,8 @@
 //! Details:
 
 use ark_ec::{
-    AffineCurve,     // for prime_subgroup_generator()
-    ProjectiveCurve, // for into_affine()
+    AffineRepr, // for prime_subgroup_generator()
+    CurveGroup,
 };
 use ark_ff::{
     BigInteger, // for is_even()
@@ -19,7 +19,7 @@ use blake2::{
     Blake2bVar,
 };
 use mina_hasher::{self, DomainParameter, Hasher, ROInput};
-use std::ops::Neg;
+use std::ops::{Add, Neg};
 
 use crate::{BaseField, CurvePoint, Hashable, Keypair, PubKey, ScalarField, Signature, Signer};
 
@@ -58,8 +58,10 @@ impl<H: Hashable> Hashable for Message<H> {
 impl<H: 'static + Hashable<D = NetworkId>> Signer<H> for Schnorr<H> {
     fn sign(&mut self, kp: &Keypair, input: &H) -> Signature {
         let k: ScalarField = self.derive_nonce(kp, input);
-        let r: CurvePoint = CurvePoint::prime_subgroup_generator().mul(k).into_affine();
-        let k: ScalarField = if r.y.into_repr().is_even() { k } else { -k };
+        let r: CurvePoint = CurvePoint::generator()
+            .mul_bigint(k.into_bigint())
+            .into_affine();
+        let k: ScalarField = if r.y.into_bigint().is_even() { k } else { -k };
 
         let e: ScalarField = self.message_hash(&kp.public, r.x, input);
         let s: ScalarField = k + e * kp.secret.scalar();
@@ -70,17 +72,19 @@ impl<H: 'static + Hashable<D = NetworkId>> Signer<H> for Schnorr<H> {
 
     fn verify(&mut self, sig: &Signature, public: &PubKey, input: &H) -> bool {
         let ev: ScalarField = self.message_hash(public, sig.rx, input);
 
-        let sv: CurvePoint = CurvePoint::prime_subgroup_generator()
-            .mul(sig.s)
+        let sv = CurvePoint::generator()
+            .mul_bigint(sig.s.into_bigint())
             .into_affine();
 
         // Perform addition and infinity check in projective coordinates for performance
-        let rv = public.point().mul(ev).neg().add_mixed(&sv);
+        let rv = public.point().mul_bigint(ev.into_bigint()).neg().add(sv);
+
         if rv.is_zero() {
            return false;
         }
+
         let rv = rv.into_affine();
-        rv.y.into_repr().is_even() && rv.x == sig.rx
+        rv.y.into_bigint().is_even() && rv.x == sig.rx
     }
 }
 
@@ -147,7 +151,6 @@ impl<H: 'static + Hashable<D = NetworkId>> Schnorr<H> {
         // Squeeze and convert from base field element to scalar field element
         // Since the difference in modulus between the two fields is < 2^125, w.h.p., a
         // random value from one field will fit in the other field.
-        ScalarField::from_repr(self.hasher.hash(&schnorr_input).into_repr())
-            .expect("failed to create scalar")
+        ScalarField::from(self.hasher.hash(&schnorr_input).into_bigint())
     }
 }
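
The signer migration above captures the new scalar-multiplication idiom: `prime_subgroup_generator()` is now `generator()`, and an affine point is multiplied through the projective group with `mul_bigint`, then normalized back. A minimal sketch of the `s*G` computation used by `sign` and `verify` (function name illustrative, not part of the patch):

    use ark_ec::{AffineRepr, CurveGroup};
    use ark_ff::PrimeField;
    use mina_curves::pasta::{Fq, Pallas};

    // Fixed-base multiplication s*G on Pallas (the signer's `CurvePoint`):
    // `mul_bigint` returns a projective point; `into_affine` normalizes it.
    fn scalar_times_generator(s: Fq) -> Pallas {
        Pallas::generator().mul_bigint(s.into_bigint()).into_affine()
    }
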
From d8e5cd037418c95cccc9a2a7958f770a4f10c0fc Mon Sep 17 00:00:00 2001
From: Chiro Hiro
Date: Fri, 8 Dec 2023 12:36:57 +0700
Subject: [PATCH 079/178] Upgrade turshi to arkworks 0.4.2

---
 turshi/Cargo.toml    | 4 ++--
 turshi/src/helper.rs | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/turshi/Cargo.toml b/turshi/Cargo.toml
index 66fc14fbc0..d725b6e759 100644
--- a/turshi/Cargo.toml
+++ b/turshi/Cargo.toml
@@ -13,12 +13,12 @@ license = "Apache-2.0"
 path = "src/lib.rs"
 
 [dependencies]
-ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] }
+ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] }
 hex = "0.4"
 o1-utils = { path = "../utils", version = "0.1.0" }
 
 [dev-dependencies]
-ark-ec = { version = "0.3.0", features = [ "parallel" ] }
+ark-ec = { version = "0.4.2", features = [ "parallel" ] }
 mina-curves = { path = "../curves", version = "0.1.0" }
diff --git a/turshi/src/helper.rs b/turshi/src/helper.rs
index a3d9960372..10b72bcad7 100644
--- a/turshi/src/helper.rs
+++ b/turshi/src/helper.rs
@@ -50,12 +50,12 @@ impl<F: Field> CairoFieldHelpers for F {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use ark_ec::AffineCurve;
+    use ark_ec::AffineRepr;
     use mina_curves::pasta::Pallas as CurvePoint;
     use o1_utils::FieldHelpers;
 
     /// Base field element type
-    pub type BaseField = <CurvePoint as AffineCurve>::BaseField;
+    pub type BaseField = <CurvePoint as AffineRepr>::BaseField;
 
     #[test]
     fn test_field_to_bits() {
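
The poly-commitment patch below applies the same renames at scale; its `chunk_commitment` hunk is representative: `C::Projective` becomes `C::Group`, and `add_assign_mixed` (projective += affine) becomes a plain `AddAssign`. A standalone sketch of that Horner loop over commitment chunks (helper name illustrative, not part of the patch):

    use ark_ec::{AffineRepr, CurveGroup};
    use mina_curves::pasta::{Fq, Pallas};
    use std::ops::AddAssign;

    // Evaluate chunk[0] + z^n*chunk[1] + z^{2n}*chunk[2] + ... by Horner's
    // rule, accumulating in projective coordinates and normalizing once.
    fn combine_chunks(chunks: &[Pallas], zeta_n: Fq) -> Pallas {
        let mut res = Pallas::zero().into_group();
        for chunk in chunks.iter().rev() {
            res *= zeta_n;
            res.add_assign(chunk); // was `add_assign_mixed` in arkworks 0.3
        }
        res.into_affine()
    }
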
From 8bfcba536758fe50c38f3e475e03c507ab9cabfd Mon Sep 17 00:00:00 2001
From: mrmr1993
Date: Tue, 26 Dec 2023 21:03:38 +0000
Subject: [PATCH 080/178] Convert poly-comm to arkworks 0.4.2

---
 Cargo.lock                                  |  27 ++--
 poly-commitment/Cargo.toml                  |  10 +-
 poly-commitment/src/chunked.rs              |   7 +-
 poly-commitment/src/combine.rs              | 134 ++++++++++----------
 poly-commitment/src/commitment.rs           | 120 ++++++++----------
 poly-commitment/src/evaluation_proof.rs     |  41 +++----
 poly-commitment/src/lib.rs                  |  12 +-
 poly-commitment/src/pairing_proof.rs        |  68 +++++-----
 poly-commitment/src/srs.rs                  |  21 ++-
 poly-commitment/src/tests/batch_15_wires.rs |   2 +-
 poly-commitment/src/tests/commitment.rs     |   2 +-
 proof-systems-vendors                       |   2 +-
 12 files changed, 220 insertions(+), 226 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index ba210a9d97..bd6e73cab4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -88,6 +88,17 @@ dependencies = [
  "ark-std 0.3.0",
 ]
 
+[[package]]
+name = "ark-bn254"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f"
+dependencies = [
+ "ark-ec 0.4.2",
+ "ark-ff 0.4.2",
+ "ark-std 0.4.0",
+]
+
 [[package]]
 name = "ark-ec"
 version = "0.3.0"
@@ -1249,7 +1260,7 @@ dependencies = [
 name = "kimchi"
 version = "0.1.0"
 dependencies = [
- "ark-bn254",
+ "ark-bn254 0.3.0",
  "ark-ec 0.3.0",
  "ark-ff 0.3.0",
  "ark-poly 0.3.0",
@@ -1917,11 +1928,11 @@ dependencies = [
 name = "poly-commitment"
 version = "0.1.0"
 dependencies = [
- "ark-bn254",
- "ark-ec 0.3.0",
- "ark-ff 0.3.0",
- "ark-poly 0.3.0",
- "ark-serialize 0.3.0",
+ "ark-bn254 0.4.0",
+ "ark-ec 0.4.2",
+ "ark-ff 0.4.2",
+ "ark-poly 0.4.2",
+ "ark-serialize 0.4.2",
  "blake2",
  "colored",
  "groupmap",
@@ -2719,8 +2730,8 @@ dependencies = [
 name = "turshi"
 version = "0.1.0"
 dependencies = [
- "ark-ec 0.3.0",
- "ark-ff 0.3.0",
+ "ark-ec 0.4.2",
+ "ark-ff 0.4.2",
  "hex",
  "mina-curves",
  "o1-utils",
diff --git a/poly-commitment/Cargo.toml b/poly-commitment/Cargo.toml
index 890555082e..fbd169ae2e 100644
--- a/poly-commitment/Cargo.toml
+++ b/poly-commitment/Cargo.toml
@@ -10,10 +10,10 @@ edition = "2021"
 license = "Apache-2.0"
 
 [dependencies]
-ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] }
-ark-ec = { version = "0.3.0", features = [ "parallel" ] }
-ark-poly = { version = "0.3.0", features = [ "parallel" ] }
-ark-serialize = "0.3.0"
+ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] }
+ark-ec = { version = "0.4.2", features = [ "parallel" ] }
+ark-poly = { version = "0.4.2", features = [ "parallel" ] }
+ark-serialize = "0.4.2"
 blake2 = "0.10.0"
 itertools = "0.10.3"
 
@@ -37,7 +37,7 @@ ocaml-gen = { version = "0.1.5", optional = true }
 [dev-dependencies]
 colored = "2.0.0"
 rand_chacha = { version = "0.3.0" }
-ark-bn254 = { version = "0.3.0" }
+ark-bn254 = { version = "0.4.0" }
 
 [features]
 ocaml_types = [ "ocaml", "ocaml-gen" ]
diff --git a/poly-commitment/src/chunked.rs b/poly-commitment/src/chunked.rs
index 9c3ee5c294..c3d4542199 100644
--- a/poly-commitment/src/chunked.rs
+++ b/poly-commitment/src/chunked.rs
@@ -1,5 +1,6 @@
-use ark_ec::ProjectiveCurve;
+use ark_ec::CurveGroup;
 use ark_ff::{Field, Zero};
+use std::ops::AddAssign;
 
 use crate::commitment::CommitmentCurve;
 use crate::PolyComm;
@@ -11,13 +12,13 @@ where
     /// Multiplies each commitment chunk of f with powers of zeta^n
     // TODO(mimoo): better name for this function
     pub fn chunk_commitment(&self, zeta_n: C::ScalarField) -> Self {
-        let mut res = C::Projective::zero();
+        let mut res = C::Group::zero();
         // use Horner's to compute chunk[0] + z^n chunk[1] + z^2n chunk[2] + ...
         // as ( chunk[-1] * z^n + chunk[-2] ) * z^n + chunk[-3]
         // (https://en.wikipedia.org/wiki/Horner%27s_method)
         for chunk in self.elems.iter().rev() {
            res *= zeta_n;
-            res.add_assign_mixed(chunk);
+            res.add_assign(chunk);
         }
 
         PolyComm {
diff --git a/poly-commitment/src/combine.rs b/poly-commitment/src/combine.rs
index 52f7e19f95..521e4739dc 100644
--- a/poly-commitment/src/combine.rs
+++ b/poly-commitment/src/combine.rs
@@ -16,15 +16,16 @@
 //! such a scratch array within each algorithm.
 
 use ark_ec::{
-    models::short_weierstrass_jacobian::GroupAffine as SWJAffine, AffineCurve, ProjectiveCurve,
-    SWModelParameters,
+    models::short_weierstrass::Affine as SWJAffine, short_weierstrass::SWCurveConfig, AffineRepr,
+    CurveGroup, Group,
 };
 use ark_ff::{BitIteratorBE, Field, One, PrimeField, Zero};
 use itertools::Itertools;
 use mina_poseidon::sponge::ScalarChallenge;
 use rayon::prelude::*;
+use std::ops::AddAssign;
 
-fn add_pairs_in_place<P: SWModelParameters>(pairs: &mut Vec<SWJAffine<P>>) {
+fn add_pairs_in_place<P: SWCurveConfig>(pairs: &mut Vec<SWJAffine<P>>) {
     let len = if pairs.len() % 2 == 0 {
         pairs.len()
     } else {
@@ -86,7 +87,7 @@ fn add_pairs_in_place<P: SWModelParameters>(pairs: &mut Vec<SWJAffine<P>>) {
 /// assuming that for each `i`, `v0[i].x != v1[i].x` so we can use the ordinary
 /// addition formula and don't have to handle the edge cases of doubling and
 /// hitting the point at infinity.
-fn batch_add_assign_no_branch<P: SWModelParameters>(
+fn batch_add_assign_no_branch<P: SWCurveConfig>(
     denominators: &mut [P::BaseField],
     v0: &mut [SWJAffine