diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 42d1937a59..b4581fab8f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -32,7 +32,7 @@ jobs: - name: Checkout PR uses: actions/checkout@v4.1.1 with: - submodules: true + submodules: true # as action-rs does not seem to be maintained anymore, building from # scratch the environment using rustup diff --git a/Cargo.lock b/Cargo.lock index a7575b7ad9..1a1ab03646 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,6 +28,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "1.0.2" @@ -48,21 +59,29 @@ dependencies = [ [[package]] name = "ark-algebra-test-templates" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eef0b339ebc113d9bd3fb7cd666baf2cfe4e1024e0fac23e072d46598bbd0cd" +checksum = "400bd3a79c741b1832f1416d4373ae077ef82ca14a8b4cee1248a2f11c8b9172" dependencies = [ "ark-ec", "ark-ff", "ark-serialize", "ark-std", + "hex", + "num-bigint", + "num-integer", + "num-traits", + "serde", + "serde_derive", + "serde_json", + "sha2", ] [[package]] name = "ark-bn254" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea691771ebbb28aea556c044e2e5c5227398d840cee0c34d4d20fa8eb2689e8c" +checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" dependencies = [ "ark-ec", "ark-ff", @@ -71,14 +90,17 @@ dependencies = [ [[package]] name = "ark-ec" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea978406c4b1ca13c2db2373b05cc55429c3575b8b21f1b9ee859aa5b03dd42" +checksum = 
"defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" dependencies = [ "ark-ff", + "ark-poly", "ark-serialize", "ark-std", "derivative", + "hashbrown 0.13.2", + "itertools", "num-traits", "rayon", "zeroize", @@ -86,15 +108,17 @@ dependencies = [ [[package]] name = "ark-ff" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" dependencies = [ "ark-ff-asm", "ark-ff-macros", "ark-serialize", "ark-std", "derivative", + "digest", + "itertools", "num-bigint", "num-traits", "paste", @@ -105,9 +129,9 @@ dependencies = [ [[package]] name = "ark-ff-asm" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ "quote 1.0.29", "syn 1.0.109", @@ -115,46 +139,48 @@ dependencies = [ [[package]] name = "ark-ff-macros" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] [[package]] name = "ark-poly" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0f78f47537c2f15706db7e98fe64cc1711dbf9def81218194e17239e53e5aa" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" dependencies = [ "ark-ff", "ark-serialize", "ark-std", "derivative", - "hashbrown 0.11.2", + "hashbrown 0.13.2", "rayon", ] [[package]] name = "ark-serialize" -version = "0.3.0" +version = 
"0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ "ark-serialize-derive", "ark-std", - "digest 0.9.0", + "digest", + "num-bigint", ] [[package]] name = "ark-serialize-derive" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd4e5f0bf8285d5ed538d27fab7411f3e297908fd93c62195de8bee3f199e82" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ "proc-macro2 1.0.64", "quote 1.0.29", @@ -163,15 +189,26 @@ dependencies = [ [[package]] name = "ark-std" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", "rand", "rayon", ] +[[package]] +name = "ark-test-curves" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83c22c2469f93dfcace9a98baabb7af1bc0c40de82c07cffbc0deba4acf41a90" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "askama" version = "0.11.1" @@ -322,7 +359,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.7", + "digest", ] [[package]] @@ -695,15 +732,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - [[package]] name = "digest" version = "0.10.7" @@ -903,20 +931,20 @@ checksum = 
"eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash", + "ahash 0.8.3", ] [[package]] @@ -1294,7 +1322,9 @@ dependencies = [ "ark-algebra-test-templates", "ark-ec", "ark-ff", + "ark-serialize", "ark-std", + "ark-test-curves", "rand", ] @@ -2059,9 +2089,9 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] @@ -2158,21 +2188,9 @@ dependencies = [ [[package]] name = "semver" -version = "0.11.0" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] +checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" [[package]] name = "serde" @@ -2245,7 +2263,7 @@ checksum 
= "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.7", + "digest", ] [[package]] diff --git a/book/src/specs/kimchi.md b/book/src/specs/kimchi.md index 91852cdbb0..20ea9094df 100644 --- a/book/src/specs/kimchi.md +++ b/book/src/specs/kimchi.md @@ -2029,7 +2029,7 @@ pub struct ProofEvaluations { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct LookupCommitments { +pub struct LookupCommitments { /// Commitments to the sorted lookup table polynomial (may have chunks) pub sorted: Vec>, /// Commitment to the lookup aggregation polynomial @@ -2042,7 +2042,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: [PolyComm; COLUMNS], /// The commitment to the permutation polynomial @@ -2057,7 +2057,7 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof pub commitments: ProverCommitments, @@ -2085,7 +2085,7 @@ pub struct ProverProof { #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] pub struct RecursionChallenge where - G: AffineCurve, + G: AffineRepr, { /// Vector of scalar field elements #[serde_as(as = "Vec")] diff --git a/circuit-construction/Cargo.toml b/circuit-construction/Cargo.toml new file mode 100644 index 0000000000..3e60cb706c --- /dev/null +++ b/circuit-construction/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = 
"circuit-construction" +version = "0.1.0" +description = "A simple circuit writer for kimchi" +repository = "https://github.com/o1-labs/proof-systems" +edition = "2021" +license = "Apache-2.0" +homepage = "https://o1-labs.github.io/proof-systems/" +documentation = "https://o1-labs.github.io/proof-systems/rustdoc/" +readme = "../README.md" + +[lib] +path = "src/lib.rs" +bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options) + +[dependencies] +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-poly = { version = "0.4.2", features = [ "parallel" ] } +ark-serialize = "0.4.2" +blake2 = "0.10.0" +num-derive = "0.3" +num-traits = "0.2" +itertools = "0.10.3" +rand = "0.8.0" +rand_core = "0.6.3" +rayon = "1.5.0" +rmp-serde = "1.0.0" +serde = "1.0.130" +serde_with = "1.10.0" +thiserror = "1.0.30" + +poly-commitment = { path = "../poly-commitment", version = "0.1.0" } +groupmap = { path = "../groupmap", version = "0.1.0" } +mina-curves = { path = "../curves", version = "0.1.0" } +o1-utils = { path = "../utils", version = "0.1.0" } +mina-poseidon = { path = "../poseidon", version = "0.1.0" } +kimchi = { path = "../kimchi", version = "0.1.0" } + +[dev-dependencies] +proptest = "1.0.0" +proptest-derive = "0.3.0" +colored = "2.0.0" + +# benchmarks +criterion = "0.3" +iai = "0.1" diff --git a/circuit-construction/src/constants.rs b/circuit-construction/src/constants.rs new file mode 100644 index 0000000000..803f18a9c0 --- /dev/null +++ b/circuit-construction/src/constants.rs @@ -0,0 +1,44 @@ +use ark_ec::AffineRepr; +use ark_ff::Field; +use kimchi::curve::KimchiCurve; +use mina_curves::pasta::{Fp, Fq, Pallas as PallasAffine, Vesta as VestaAffine}; +use mina_poseidon::poseidon::ArithmeticSpongeParams; +use poly_commitment::{commitment::CommitmentCurve, srs::endos}; + +/// The type of possible 
constants in the circuit +#[derive(Clone)] +pub struct Constants { + pub poseidon: &'static ArithmeticSpongeParams, + pub endo: F, + pub base: (F, F), +} + +/// Constants for the base field of Pallas +/// /// +/// # Panics +/// +/// Will panic if `PallasAffine::generator()` returns None. +pub fn fp_constants() -> Constants { + let (endo_q, _endo_r) = endos::(); + let base = PallasAffine::generator().to_coordinates().unwrap(); + Constants { + poseidon: VestaAffine::sponge_params(), + endo: endo_q, + base, + } +} + +/// Constants for the base field of Vesta +/// +/// # Panics +/// +/// Will panic if `VestaAffine::generator()` returns None. +pub fn fq_constants() -> Constants { + let (endo_q, _endo_r) = endos::(); + let base = VestaAffine::generator().to_coordinates().unwrap(); + Constants { + poseidon: PallasAffine::sponge_params(), + endo: endo_q, + base, + } +} diff --git a/circuit-construction/src/lib.rs b/circuit-construction/src/lib.rs new file mode 100644 index 0000000000..27618ed679 --- /dev/null +++ b/circuit-construction/src/lib.rs @@ -0,0 +1,33 @@ +#![doc = include_str!("../../README.md")] + +/// Definition of possible constants in circuits +pub mod constants; +/// This contains the prover functions, ranging from curves definitions to prover index and proof generation +pub mod prover; +/// This is the actual writer with all of the available functions to set up a circuit and its corresponding constraint system +pub mod writer; + +#[cfg(test)] +mod tests; + +/// This contains the Kimchi dependencies being used +pub mod prologue { + pub use super::constants::{fp_constants, fq_constants, Constants}; + pub use super::prover::{generate_prover_index, prove, CoordinateCurve}; + pub use super::writer::{Cs, Var}; + pub use ark_ec::{AffineRepr, CurveGroup}; + pub use ark_ff::{FftField, PrimeField, UniformRand}; + pub use ark_poly::{EvaluationDomain, Radix2EvaluationDomain}; + pub use groupmap::GroupMap; + pub use kimchi::verifier::verify; + pub use 
mina_curves::pasta::{ + Fp, Pallas as PallasAffine, Vesta as VestaAffine, VestaParameters, + }; + pub use mina_poseidon::{ + constants::*, + poseidon::{ArithmeticSponge, Sponge}, + sponge::{DefaultFqSponge, DefaultFrSponge}, + }; + pub use poly_commitment::{commitment::CommitmentCurve, srs::SRS}; + pub use std::sync::Arc; +} diff --git a/circuit-construction/src/prover.rs b/circuit-construction/src/prover.rs new file mode 100644 index 0000000000..2841c8dfbd --- /dev/null +++ b/circuit-construction/src/prover.rs @@ -0,0 +1,136 @@ +use crate::writer::{Cs, GateSpec, System, Var, WitnessGenerator}; +use ark_ec::AffineRepr; +use ark_ff::{One, PrimeField, Zero}; +use kimchi::{ + circuits::{constraints::ConstraintSystem, gate::GateType, wires::COLUMNS}, + curve::KimchiCurve, + plonk_sponge::FrSponge, + proof::ProverProof, + prover_index::ProverIndex, +}; +use mina_poseidon::FqSponge; +use poly_commitment::{ + commitment::{CommitmentCurve, PolyComm}, + srs::{endos, SRS}, +}; +use std::array; + +/// Given an index, a group map, custom blinders for the witness, a public input vector, and a circuit `main`, it creates a proof. +/// +/// # Panics +/// +/// Will panic if recursive proof creation returns `ProverError`. 
+pub fn prove( + index: &ProverIndex, + group_map: &G::Map, + blinders: Option<[Option; COLUMNS]>, + public_input: &[G::ScalarField], + mut main: H, +) -> ProverProof +where + H: FnMut(&mut WitnessGenerator, Vec>), + G::BaseField: PrimeField, + G: KimchiCurve, + EFqSponge: Clone + FqSponge, + EFrSponge: FrSponge, +{ + // create the witness generator + let mut gen: WitnessGenerator = WitnessGenerator::new(public_input); + + // run the witness generation + let public_vars = public_input + .iter() + .map(|x| Var { + index: 0, + value: Some(*x), + }) + .collect(); + main(&mut gen, public_vars); + + // get the witness columns + gen.curr_gate_count(); + let columns = gen.columns(); + + // custom blinders for the witness commitment + let blinders: [Option>; COLUMNS] = match blinders { + None => array::from_fn(|_| None), + Some(bs) => array::from_fn(|i| { + bs[i].map(|b| PolyComm { + unshifted: vec![b], + shifted: None, + }) + }), + }; + + // create the proof + ProverProof::create_recursive::( + group_map, + columns, + &[], + index, + vec![], + Some(blinders), + ) + .unwrap() +} + +/// Creates the prover index on input an `srs`, used `constants`, parameters for Poseidon, number of public inputs, and a specific circuit +/// +/// # Panics +/// +/// Will panic if `constraint_system` is not built with `public` input. 
+pub fn generate_prover_index( + srs: std::sync::Arc>, + public: usize, + main: Circuit, +) -> ProverIndex +where + Circuit: FnOnce(&mut System, Vec>), + Curve: KimchiCurve, +{ + let mut system: System = System::default(); + let z = Curve::ScalarField::zero(); + + // create public input variables + let public_input_row = vec![Curve::ScalarField::one(), z, z, z, z, z, z, z, z, z]; + let public_input: Vec<_> = (0..public) + .map(|_| { + let v = system.var(|| panic!("fail")); + + system.gate(GateSpec { + typ: GateType::Generic, + row: vec![Some(v)], + coeffs: public_input_row.clone(), + }); + v + }) + .collect(); + + main(&mut system, public_input); + + let gates = system.gates(); + + // Other base field = self scalar field + let (endo_q, _endo_r) = endos::(); + //let (endo_q, _endo_r) = Curve::endos(); + + let constraint_system = ConstraintSystem::::create(gates) + .public(public) + .build() + // TODO: return a Result instead of panicking + .expect("couldn't construct constraint system"); + + ProverIndex::::create(constraint_system, endo_q, srs) +} + +/// Handling coordinates in an affine curve +pub trait CoordinateCurve: AffineRepr { + /// Returns the coordinates in the curve as two points of the base field + fn to_coords(&self) -> Option<(Self::BaseField, Self::BaseField)>; +} + +impl CoordinateCurve for G { + fn to_coords(&self) -> Option<(Self::BaseField, Self::BaseField)> { + CommitmentCurve::to_coordinates(self) + } +} diff --git a/circuit-construction/src/tests/example_proof.rs b/circuit-construction/src/tests/example_proof.rs new file mode 100644 index 0000000000..54fa787bb2 --- /dev/null +++ b/circuit-construction/src/tests/example_proof.rs @@ -0,0 +1,103 @@ +use crate::prologue::*; +use kimchi::curve::KimchiCurve; +use std::ops::Mul; + +type SpongeQ = DefaultFqSponge; +type SpongeR = DefaultFrSponge; + +pub struct Witness { + pub s: G::ScalarField, + pub preimage: G::BaseField, +} + +// Prove knowledge of discrete log and poseidon preimage of a hash +pub fn 
circuit< + F: PrimeField + FftField, + G: AffineRepr + CoordinateCurve, + Sys: Cs, +>( + constants: &Constants, + // The witness + witness: Option<&Witness>, + sys: &mut Sys, + public_input: Vec>, +) { + let zero = sys.constant(F::zero()); + + let constant_curve_pt = |sys: &mut Sys, (x, y)| { + let x = sys.constant(x); + let y = sys.constant(y); + (x, y) + }; + + let base = constant_curve_pt(sys, G::generator().to_coords().unwrap()); + let scalar = sys.scalar(G::ScalarField::MODULUS_BIT_SIZE as usize, || { + witness.as_ref().unwrap().s + }); + let actual = sys.scalar_mul(zero, base, scalar); + + let preimage = sys.var(|| witness.as_ref().unwrap().preimage); + let actual_hash = sys.poseidon(constants, vec![preimage, zero, zero])[0]; + + sys.assert_eq(actual.0, public_input[0]); + sys.assert_eq(actual.1, public_input[1]); + sys.assert_eq(actual_hash, public_input[2]); +} + +const PUBLIC_INPUT_LENGTH: usize = 3; + +#[test] +fn test_example_circuit() { + use mina_curves::pasta::Pallas; + use mina_curves::pasta::Vesta; + // create SRS + let srs = { + let mut srs = SRS::::create(1 << 7); // 2^7 = 128 + srs.add_lagrange_basis(Radix2EvaluationDomain::new(srs.g.len()).unwrap()); + Arc::new(srs) + }; + + let proof_system_constants = fp_constants(); + + // generate circuit and index + let prover_index = generate_prover_index::<_, _>(srs, PUBLIC_INPUT_LENGTH, |sys, p| { + circuit::<_, Pallas, _>(&proof_system_constants, None, sys, p) + }); + + let group_map = ::Map::setup(); + + let mut rng = rand::thread_rng(); + + // create witness + let private_key = ::ScalarField::rand(&mut rng); + let preimage = ::BaseField::rand(&mut rng); + + let witness = Witness { + s: private_key, + preimage, + }; + + // create public input + let public_key = Pallas::generator().mul(private_key).into_affine(); + let hash = { + let mut s: ArithmeticSponge<_, PlonkSpongeConstantsKimchi> = + ArithmeticSponge::new(Vesta::sponge_params()); + s.absorb(&[preimage]); + s.squeeze() + }; + + // generate proof 
+ let public_input = vec![public_key.x, public_key.y, hash]; + let proof = prove::( + &prover_index, + &group_map, + None, + &public_input, + |sys, p| circuit::(&proof_system_constants, Some(&witness), sys, p), + ); + + // verify proof + let verifier_index = prover_index.verifier_index(); + + verify::<_, SpongeQ, SpongeR>(&group_map, &verifier_index, &proof, &public_input).unwrap(); +} diff --git a/circuit-construction/src/writer.rs b/circuit-construction/src/writer.rs new file mode 100644 index 0000000000..0caec23c1c --- /dev/null +++ b/circuit-construction/src/writer.rs @@ -0,0 +1,1007 @@ +use ark_ff::{BigInteger, FftField, PrimeField}; +use kimchi::circuits::{ + gate::{CircuitGate, GateType}, + polynomials::generic::{ + DOUBLE_GENERIC_COEFFS, DOUBLE_GENERIC_REGISTERS, GENERIC_COEFFS, GENERIC_REGISTERS, + }, + wires::{Wire, COLUMNS}, +}; +use mina_poseidon::{ + constants::{PlonkSpongeConstantsKimchi, SpongeConstants}, + permutation::full_round, +}; +use std::array; +use std::collections::HashMap; + +use crate::constants::Constants; + +/// A variable in our circuit. +/// Variables are assigned with an index to differentiate from each other. +/// Optionally, they can eventually take as value a field element. +#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)] +pub struct Var { + pub index: usize, + pub value: Option, +} + +impl Var { + /// Returns the value inside a variable [Var]. + /// + /// # Panics + /// + /// Will panic if it is `None`. + pub fn val(&self) -> F { + self.value.unwrap() + } +} + +/// A variable that corresponds to scalar that is shifted by a certain amount. +pub struct ShiftedScalar(Var); + +/// Specifies a gate within a circuit. +/// A gate will have a type, +/// will refer to a row of variables, +/// and will have associated vector of coefficients. 
+pub struct GateSpec { + pub typ: GateType, + pub row: Vec>>, + pub coeffs: Vec, +} + +impl GateSpec { + pub fn get_var_val_or(&self, col: usize, default: F) -> F { + match self.row.get(col) { + Some(Some(var)) => var.val(), + _ => default, + } + } + + pub fn get_var_idx(&self, col: usize) -> Option { + match self.row.get(col) { + Some(Some(var)) => Some(var.index), + _ => None, + } + } +} + +/// A set of gates within the circuit. +/// It carries the index for the next available variable, +/// and the vector of [`GateSpec`] created so far. +/// It also keeps track of the queue of generic gates and cached constants. +#[derive(Default)] +pub struct System { + pub next_variable: usize, + pub generic_gate_queue: Vec>, + // pub equivalence_classes: HashMap>, + pub gates: Vec>, + pub cached_constants: HashMap>, +} + +/// Carries a vector of rows corresponding to the witness, a queue of generic gates, and stores the cached constants +#[derive(Default)] +pub struct WitnessGenerator +where + F: PrimeField, +{ + pub generic_gate_queue: Vec>, + pub rows: Vec>, + pub cached_constants: HashMap>, +} + +impl WitnessGenerator +where + F: PrimeField, +{ + /// Given a list of public inputs, creates the witness generator. + pub fn new(public_inputs: &[F]) -> Self { + let mut gen = Self::default(); + + for input in public_inputs { + let row = array::from_fn(|i| if i == 0 { *input } else { F::zero() }); + gen.rows.push(row); + } + + gen + } +} + +/// A row is an array of [COLUMNS] elements +type Row = [V; COLUMNS]; + +/// This trait includes all the operations that can be executed +/// by the elements in the circuits. +/// It allows for different behaviours depending on the struct for +/// which it is implemented for. +/// In particular, the circuit mode and the witness generation mode. +pub trait Cs { + /// In cases where you want to create a free variable in the circuit, + /// as in the variable is not constrained _yet_ + /// and can be anything that the prover wants. 
+ /// For example, division can be implemented as: + /// + /// ```ignore + /// let a = sys.constant(5u32.into()); + /// let b = sys.constant(10u32.into()); + /// let c = sys.var(|| { + /// b.value * a.value.inverse().unwrap() + /// }); + /// sys.assert_eq(a * c, b); + /// ``` + /// + fn var(&mut self, g: G) -> Var + where + G: FnOnce() -> F; + + /// Returns the number of gates that the current [Self] contains. + fn curr_gate_count(&self) -> usize; + + /// Returns a variable containing a field element as value that is + /// computed as the equivalent `BigInteger` number returned by + /// function `g`, only if the length is a multiple of 4. + fn endo_scalar(&mut self, length: usize, g: G) -> Var + where + G: FnOnce() -> N, + { + assert_eq!(length % 4, 0); + + self.var(|| { + let y = g(); + let bits = y.to_bits_le(); + F::from_bigint(F::BigInt::from_bits_le(&bits)).unwrap() + }) + } + + /// This function creates a [`ShiftedScalar`] variable from a field element that is + /// returned by function `g()`, and a length that should be a multiple of 5. + fn scalar(&mut self, length: usize, g: G) -> ShiftedScalar + where + G: FnOnce() -> Fr, + { + assert_eq!(length % 5, 0); + + let v = self.var(|| { + // TODO: No need to recompute this each time. + let two = Fr::from(2u64); + let shift = Fr::one() + two.pow([length as u64]); + + let x = g(); + // x = 2 y + shift + // y = (x - shift) / 2 + // TODO: Could cache value of 1/2 to avoid division + let y = (x - shift) / two; + let bits = y.into_bigint().to_bits_le(); + F::from_bigint(F::BigInt::from_bits_le(&bits)).unwrap() + }); + ShiftedScalar(v) + } + + /// In circuit mode, adds a gate to the circuit. + /// In witness generation mode, adds the corresponding row to the witness. + fn gate(&mut self, g: GateSpec); + + /// Creates a `Generic` gate that constrains if two variables are equal. 
+ /// This is done by setting `x1` in the left wire and `x2` in the right wire + /// with left coefficient `1` and right coefficient `-1`, so that `x1 - x2 = 0`. + // TODO: Optimize to use permutation argument. + fn assert_eq(&mut self, x1: Var, x2: Var) { + // | 0 | 1 | 2 | ... + // | x1 | x2 | 0 | ... + let vars = [Some(x1), Some(x2), None]; + + // constrain `x1 - x2 = 0` + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = F::one(); + coeffs[1] = -F::one(); + + self.generic(coeffs, vars); + } + + /// Checks if a constant `x` is already in the cached constants of `self` and returns it. + /// Otherwise, it creates a variable for it and caches it. + fn cached_constants(&mut self, x: F) -> Var; + + /// Creates a `Generic` gate to include a constant in the circuit, and returns the variable containing it. + /// It sets the left wire to be the variable containing the constant `x` and the rest to zero. + /// Then the left coefficient is set to one and the coefficient for constants is set to `-x`. + /// This way, the constraint `1 * x - x = 0` holds. + fn constant(&mut self, x: F) -> Var { + let v = self.cached_constants(x); + + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = F::one(); + coeffs[GENERIC_REGISTERS + 1] = -x; + + let vars = [Some(v), None, None]; + + self.generic(coeffs, vars); + + v + } + + /// Stores a generic gate until it can combine two of them + /// into a double generic gate. + fn generic_queue(&mut self, gate: GateSpec) -> Option>; + + /// Adds a generic gate. + /// + /// Warning: this assumes that some finalization occurs to flush + /// any queued generic gate. 
+ fn generic(&mut self, coeffs: [F; GENERIC_COEFFS], vars: [Option>; GENERIC_REGISTERS]) { + let gate = GateSpec { + typ: GateType::Generic, + row: vars.to_vec(), + coeffs: coeffs.to_vec(), + }; + // we queue the single generic gate until we have two of them + if let Some(double_generic_gate) = self.generic_queue(gate) { + self.gate(double_generic_gate); + } + } + + /// Creates a `Generic` gate to constrain that a variable `v` is scaled by an `x` amount and returns it. + /// First, it creates a new variable with a scaled value (meaning, the value in `v` times `x`). + /// Then, it creates a row that sets the left wire to be `v` and the right wire to be the scaled variable. + /// Finally, it sets the left coefficient to `x` and the right coefficient to `-1`. + /// That way, the constraint `x * v - 1 * xv = 0` is created. + fn scale(&mut self, x: F, v: Var) -> Var { + let xv = self.var(|| v.val() * x); + + let vars = [Some(v), Some(xv), None]; + + let mut coeffs = [F::zero(); GENERIC_COEFFS]; + coeffs[0] = x; + coeffs[1] = -F::one(); + + self.generic(coeffs, vars); + + xv + } + + /// Performs curve point addition. + /// It creates the corresponding `CompleteAdd` gate for the points `(x1, y1)` and `(x2,y2)` + /// and returns the third point resulting from the addition as a tuple of variables. 
+ fn add_group( + &mut self, + zero: Var, + (x1, y1): (Var, Var), + (x2, y2): (Var, Var), + ) -> (Var, Var) { + let mut same_x_bool = false; + let same_x = self.var(|| { + let same_x = x1.val() == x2.val(); + same_x_bool = same_x; + F::from(u64::from(same_x)) + }); + + let inf = zero; + let x21_inv = self.var(|| { + if x1.val() == x2.val() { + F::zero() + } else { + (x2.val() - x1.val()).inverse().unwrap() + } + }); + + let s = self.var(|| { + if same_x_bool { + let x1_squared = x1.val().square(); + (x1_squared.double() + x1_squared).div(y1.val().double()) + } else { + (y2.val() - y1.val()) * x21_inv.val() + } + }); + + let inf_z = self.var(|| { + if y1.val() == y2.val() { + F::zero() + } else if same_x_bool { + (y2.val() - y1.val()).inverse().unwrap() + } else { + F::zero() + } + }); + + let x3 = self.var(|| s.val().square() - (x1.val() + x2.val())); + + let y3 = self.var(|| s.val() * (x1.val() - x3.val()) - y1.val()); + + self.gate(GateSpec { + typ: GateType::CompleteAdd, + row: vec![ + Some(x1), + Some(y1), + Some(x2), + Some(y2), + Some(x3), + Some(y3), + Some(inf), + Some(same_x), + Some(s), + Some(inf_z), + Some(x21_inv), + ], + coeffs: vec![], + }); + (x3, y3) + } + + /// Doubles one curve point `(x1, y1)`, using internally the `add_group()` function. + /// It creates a `CompleteAdd` gate for this point addition (with itself). + /// Returns a tuple of variables corresponding to the doubled point. + fn double(&mut self, zero: Var, (x1, y1): (Var, Var)) -> (Var, Var) { + self.add_group(zero, (x1, y1), (x1, y1)) + } + + /// Creates a `CompleteAdd` gate that checks whether a third point `(x3, y3)` is the addition + /// of the two first points `(x1, y1)` and `(x2, y2)`. + /// The difference between this function and `add_group()` is that in `assert_add_group` the + /// third point is given, whereas in the other one it is computed with the formula. 
+ fn assert_add_group( + &mut self, + zero: Var, + (x1, y1): (Var, Var), + (x2, y2): (Var, Var), + (x3, y3): (Var, Var), + ) { + let mut same_x_bool = false; + let same_x = self.var(|| { + let same_x = x1.val() == x2.val(); + same_x_bool = same_x; + F::from(u64::from(same_x)) + }); + + let inf = zero; + let x21_inv = self.var(|| { + if x1.val() == x2.val() { + F::zero() + } else { + (x2.val() - x1.val()).inverse().unwrap() + } + }); + + let s = self.var(|| { + if same_x_bool { + let x1_squared = x1.val().square(); + (x1_squared.double() + x1_squared).div(y1.val().double()) + } else { + (y2.val() - y1.val()) * x21_inv.val() + } + }); + + let inf_z = self.var(|| { + if y1.val() == y2.val() { + F::zero() + } else if same_x_bool { + (y2.val() - y1.val()).inverse().unwrap() + } else { + F::zero() + } + }); + + self.gate(GateSpec { + typ: GateType::CompleteAdd, + row: vec![ + Some(x1), + Some(y1), + Some(x2), + Some(y2), + Some(x3), + Some(y3), + Some(inf), + Some(same_x), + Some(s), + Some(inf_z), + Some(x21_inv), + ], + coeffs: vec![], + }); + } + + /// This function is used to include conditionals in circuits. + /// It creates three `Generic` gates to simulate the logics of the conditional. + /// It receives as input: + /// - `b`: the branch + /// - `t`: the true + /// - `f`: the false + /// And simulates the following equation: `res = b * ( t - f ) + f` + /// ( when the condition is false, `res = 1` ) + /// ( when the condition is true, `res = b` ) + /// This is constrained using three `Generic` gates + /// 1. Constrain `delta = t - f` + /// 2. Constrain `res1 = b * delta` + /// 3. Constrain `res = res1 + f` + /// For (1): + /// - Creates a row with left wire `t`, right wire `f`, and output wire `delta` + /// - Assigns `1` to the left coefficient, `-1` to the right coefficient, and `-1` to the output coefficient. 
+    /// - That way, it creates a first gate constraining: `1 * t - 1 * f - delta = 0`
+    /// For (2):
+    /// - Creates a row with left wire `b`, right wire `delta`, and output wire `res1`.
+    /// - Assigns `-1` to the output coefficient, and `1` to the multiplication coefficient.
+    /// - That way, it creates a second gate constraining: `-1 * res1 + 1 * b * delta = 0`
+    /// For (3):
+    /// - Creates a row with left wire `res1`, right wire `f`, and output wire `res`.
+    /// - Assigns `1` to the left coefficient, `1` to the right coefficient, and `-1` to the output coefficient.
+    /// - That way, it creates a third gate constraining: `1 * res1 + 1 * f - 1 * res = 0`
+    fn cond_select(&mut self, b: Var, t: Var, f: Var) -> Var {
+        // Could be more efficient. Currently uses three constraints :(
+        // delta = t - f
+        // res1 = b * delta
+        // res = res1 + f
+
+        let delta = self.var(|| t.val() - f.val());
+        let res1 = self.var(|| b.val() * delta.val());
+        let res = self.var(|| f.val() + res1.val());
+
+        let row1 = [Some(t), Some(f), Some(delta)];
+        let mut c1 = [F::zero(); GENERIC_COEFFS];
+        c1[0] = F::one();
+        c1[1] = -F::one();
+        c1[2] = -F::one();
+
+        self.generic(c1, row1);
+
+        let row2 = [Some(b), Some(delta), Some(res1)];
+
+        let mut c2 = [F::zero(); GENERIC_COEFFS];
+        c2[0] = F::zero();
+        c2[1] = F::zero();
+        c2[2] = -F::one();
+        c2[3] = F::one();
+
+        self.generic(c2, row2);
+
+        let row3 = [Some(res1), Some(f), Some(res)];
+        let mut c3 = [F::zero(); GENERIC_COEFFS];
+        c3[0] = F::one();
+        c3[1] = F::one();
+        c3[2] = -F::one();
+
+        self.generic(c3, row3);
+
+        res
+    }
+
+    /// Performs a scalar multiplication between a [`ShiftedScalar`] and a point `(xt, yt)`.
+    /// This function creates 51 pairs of rows.
+ fn scalar_mul( + &mut self, + zero: Var, + (xt, yt): (Var, Var), + scalar: ShiftedScalar, + ) -> (Var, Var) { + let num_bits = 255; + let num_row_pairs = num_bits / 5; + let mut witness: [Vec; COLUMNS] = array::from_fn(|_| vec![]); + + let acc0 = self.add_group(zero, (xt, yt), (xt, yt)); + + let _ = self.var(|| { + witness = array::from_fn(|_| vec![F::zero(); 2 * num_row_pairs]); + // Creates a vector of bits from the value inside the scalar, with the most significant bit upfront + let bits_msb: Vec = scalar + .0 + .val() + .into_bigint() + .to_bits_le() + .iter() + .take(num_bits) + .copied() + .rev() + .collect(); + // Creates a witness for the VarBaseMul gate. + kimchi::circuits::polynomials::varbasemul::witness( + &mut witness, + 0, + (xt.val(), yt.val()), + &bits_msb, + (acc0.0.val(), acc0.1.val()), + ); + F::zero() + }); + + // For each of the pairs, it generates a VarBaseMul and a Zero gate. + let mut res = None; + for i in 0..num_row_pairs { + let mut row1: [_; COLUMNS] = array::from_fn(|j| self.var(|| witness[j][2 * i])); + let row2: [_; COLUMNS] = array::from_fn(|j| self.var(|| witness[j][2 * i + 1])); + + row1[0] = xt; + row1[1] = yt; + if i == 0 { + row1[2] = acc0.0; + row1[3] = acc0.1; + row1[4] = zero; + } + if i == num_row_pairs - 1 { + row1[5] = scalar.0; + res = Some((row2[0], row2[1])); + } + + self.gate(GateSpec { + row: row1.into_iter().map(Some).collect(), + typ: GateType::VarBaseMul, + coeffs: vec![], + }); + + self.gate(GateSpec { + row: row2.into_iter().map(Some).collect(), + typ: GateType::Zero, + coeffs: vec![], + }); + } + + res.unwrap() + } + + /// Creates an endoscalar multiplication gadget with `length_in_bits/4 + 1` gates. + /// For each row, it adds one `EndoMul` gate. The gadget is finalized with a `Zero` gate. + /// + /// | row | `GateType` | + /// | --- | ---------- | + /// | i | `EndoMul` | + /// | i+1 | `EndoMul` | + /// | ... | ... 
| + /// | r | `EndoMul` | + /// | r+1 | `Zero` | + /// + fn endo( + &mut self, + zero: Var, + constants: &Constants, + (xt, yt): (Var, Var), + scalar: Var, + length_in_bits: usize, + ) -> (Var, Var) { + let bits_per_row = 4; + let rows = length_in_bits / 4; + assert_eq!(0, length_in_bits % 4); + + let mut bits_ = vec![]; + let bits: Vec<_> = (0..length_in_bits) + .map(|i| { + self.var(|| { + if bits_.is_empty() { + bits_ = scalar + .val() + .into_bigint() + .to_bits_le() + .iter() + .take(length_in_bits) + .copied() + .rev() + .collect(); + } + F::from(u64::from(bits_[i])) + }) + }) + .collect(); + + let one = F::one(); + + let endo = constants.endo; + let mut acc = { + let phip = (self.scale(endo, xt), yt); + let phip_p = self.add_group(zero, phip, (xt, yt)); + self.double(zero, phip_p) + }; + + let mut n_acc = zero; + + // TODO: Could be more efficient + for i in 0..rows { + let b1 = bits[i * bits_per_row]; + let b2 = bits[i * bits_per_row + 1]; + let b3 = bits[i * bits_per_row + 2]; + let b4 = bits[i * bits_per_row + 3]; + + let (xp, yp) = acc; + + let xq1 = self.var(|| (one + (endo - one) * b1.val()) * xt.val()); + let yq1 = self.var(|| (b2.val().double() - one) * yt.val()); + + let s1 = self.var(|| (yq1.val() - yp.val()) / (xq1.val() - xp.val())); + let s1_squared = self.var(|| s1.val().square()); + // (2*xp – s1^2 + xq) * ((xp – xr) * s1 + yr + yp) = (xp – xr) * 2*yp + // => 2 yp / (2*xp – s1^2 + xq) = s1 + (yr + yp) / (xp – xr) + // => 2 yp / (2*xp – s1^2 + xq) - s1 = (yr + yp) / (xp – xr) + // + // s2 := 2 yp / (2*xp – s1^2 + xq) - s1 + // + // (yr + yp)^2 = (xp – xr)^2 * (s1^2 – xq1 + xr) + // => (s1^2 – xq1 + xr) = (yr + yp)^2 / (xp – xr)^2 + // + // => xr = s2^2 - s1^2 + xq + // => yr = s2 * (xp - xr) - yp + let s2 = self.var(|| { + yp.val().double() / (xp.val().double() + xq1.val() - s1_squared.val()) - s1.val() + }); + + // (xr, yr) + let xr = self.var(|| xq1.val() + s2.val().square() - s1_squared.val()); + let yr = self.var(|| (xp.val() - xr.val()) * 
s2.val() - yp.val()); + + let xq2 = self.var(|| (one + (endo - one) * b3.val()) * xt.val()); + let yq2 = self.var(|| (b4.val().double() - one) * yt.val()); + let s3 = self.var(|| (yq2.val() - yr.val()) / (xq2.val() - xr.val())); + let s3_squared = self.var(|| s3.val().square()); + let s4 = self.var(|| { + yr.val().double() / (xr.val().double() + xq2.val() - s3_squared.val()) - s3.val() + }); + + let xs = self.var(|| xq2.val() + s4.val().square() - s3_squared.val()); + let ys = self.var(|| (xr.val() - xs.val()) * s4.val() - yr.val()); + + self.gate(GateSpec { + typ: GateType::EndoMul, + row: vec![ + Some(xt), + Some(yt), + None, + None, + Some(xp), + Some(yp), + Some(n_acc), + Some(xr), + Some(yr), + Some(s1), + Some(s3), + Some(b1), + Some(b2), + Some(b3), + Some(b4), + ], + coeffs: vec![], + }); + + acc = (xs, ys); + + n_acc = self.var(|| { + let mut n_acc = n_acc.val(); + n_acc.double_in_place(); + n_acc += b1.val(); + n_acc.double_in_place(); + n_acc += b2.val(); + n_acc.double_in_place(); + n_acc += b3.val(); + n_acc.double_in_place(); + n_acc += b4.val(); + n_acc + }); + } + + // TODO: use a generic gate with zero coeffs + self.gate(GateSpec { + typ: GateType::Zero, + row: vec![ + None, + None, + None, + None, + Some(acc.0), + Some(acc.1), + Some(scalar), + None, + None, + None, + None, + None, + None, + None, + None, + ], + coeffs: vec![], + }); + acc + } + + /// Checks that a string of bits (with LSB first) correspond to the value inside variable `x`. + /// It splits the bitstring across rows, where each row takes care of 8 crumbs of 2 bits each. 
+ /// + fn assert_pack(&mut self, zero: Var, x: Var, bits_lsb: &[Var]) { + let crumbs_per_row = 8; + let bits_per_row = 2 * crumbs_per_row; + assert_eq!(bits_lsb.len() % bits_per_row, 0); + let num_rows = bits_lsb.len() / bits_per_row; + + // Reverse string of bits to have MSB first in the vector + let bits_msb: Vec<_> = bits_lsb.iter().rev().collect(); + + let mut a = self.var(|| F::from(2u64)); + let mut b = self.var(|| F::from(2u64)); + let mut n = zero; + + let one = F::one(); + let neg_one = -one; + + // For each of the chunks, get the corresponding bits + for (i, row_bits) in bits_msb[..].chunks(bits_per_row).enumerate() { + let mut row: [Var; COLUMNS] = array::from_fn(|_| self.var(|| F::zero())); + row[0] = n; + row[2] = a; + row[3] = b; + + // For this row, get crumbs of 2 bits each + for (j, crumb_bits) in row_bits.chunks(2).enumerate() { + // Remember the MSB of each crumb is in the 0 index + let b0 = crumb_bits[1]; // less valued + let b1 = crumb_bits[0]; // more valued + + // Value of the 2-bit crumb in MSB + let crumb = self.var(|| b0.val() + b1.val().double()); + // Stores the 8 of them in positions [6..13] of the row + row[6 + j] = crumb; + + a = self.var(|| { + let x = a.val().double(); + if b1.val().is_zero() { + x + } else { + x + if b0.val().is_one() { one } else { neg_one } + } + }); + + b = self.var(|| { + let x = b.val().double(); + if b1.val().is_zero() { + x + if b0.val().is_one() { one } else { neg_one } + } else { + x + } + }); + + // Accumulated chunk value + n = self.var(|| n.val().double().double() + crumb.val()); + } + + // In final row, this is the input value, otherwise the accumulated value + row[1] = if i == num_rows - 1 { x } else { n }; + row[4] = a; + row[5] = b; + + row[14] = self.var(|| F::zero()); + } + } + + /// Creates a Poseidon gadget for given constants and a given input. + /// It generates a number of `Poseidon` gates followed by a final `Zero` gate. 
+ fn poseidon(&mut self, constants: &Constants, input: Vec>) -> Vec> { + use kimchi::circuits::polynomials::poseidon::{POS_ROWS_PER_HASH, ROUNDS_PER_ROW}; + + let params = constants.poseidon; + let rc = ¶ms.round_constants; + let width = PlonkSpongeConstantsKimchi::SPONGE_WIDTH; + + let mut states = vec![input]; + + for row in 0..POS_ROWS_PER_HASH { + let offset = row * ROUNDS_PER_ROW; + + for i in 0..ROUNDS_PER_ROW { + let mut s: Option> = None; + states.push( + (0..3) + .map(|col| { + self.var(|| { + match &s { + Some(s) => s[col], + None => { + // Do one full round on the previous value + let mut acc = states[states.len() - 1] + .iter() + .map(|x| x.val()) + .collect(); + full_round::( + params, + &mut acc, + offset + i, + ); + let res = acc[col]; + s = Some(acc); + res + } + } + }) + }) + .collect(), + ); + } + + self.gate(GateSpec { + typ: kimchi::circuits::gate::GateType::Poseidon, + coeffs: (0..COLUMNS) + .map(|i| rc[offset + (i / width)][i % width]) + .collect(), + row: vec![ + Some(states[offset][0]), + Some(states[offset][1]), + Some(states[offset][2]), + Some(states[offset + 4][0]), + Some(states[offset + 4][1]), + Some(states[offset + 4][2]), + Some(states[offset + 1][0]), + Some(states[offset + 1][1]), + Some(states[offset + 1][2]), + Some(states[offset + 2][0]), + Some(states[offset + 2][1]), + Some(states[offset + 2][2]), + Some(states[offset + 3][0]), + Some(states[offset + 3][1]), + Some(states[offset + 3][2]), + ], + }); + } + + let final_state = &states[states.len() - 1]; + let final_row = vec![ + Some(final_state[0]), + Some(final_state[1]), + Some(final_state[2]), + ]; + self.gate(GateSpec { + typ: kimchi::circuits::gate::GateType::Zero, + coeffs: vec![], + row: final_row, + }); + + states.pop().unwrap() + } +} + +impl Cs for WitnessGenerator { + /// Creates a variable with value given by a function `g` with index `0` + fn var(&mut self, g: G) -> Var + where + G: FnOnce() -> F, + { + Var { + index: 0, + value: Some(g()), + } + } + + /// Returns 
the number of rows. + fn curr_gate_count(&self) -> usize { + self.rows.len() + } + + /// Pushes a new row corresponding to the values in the row of gate `g`. + fn gate(&mut self, g: GateSpec) { + assert!(g.row.len() <= COLUMNS); + + let row: [F; COLUMNS] = array::from_fn(|col| g.get_var_val_or(col, F::zero())); + self.rows.push(row); + } + + fn generic_queue(&mut self, gate: GateSpec) -> Option> { + if let Some(mut other) = self.generic_gate_queue.pop() { + other.row.extend(&gate.row); + assert_eq!(other.row.len(), DOUBLE_GENERIC_REGISTERS); + Some(other) + } else { + self.generic_gate_queue.push(gate); + None + } + } + + fn cached_constants(&mut self, x: F) -> Var { + match self.cached_constants.get(&x) { + Some(var) => *var, + None => { + let var = self.var(|| x); + self.cached_constants.insert(x, var); + var + } + } + } +} + +impl WitnessGenerator { + /// Returns the columns of the witness. + pub fn columns(&mut self) -> [Vec; COLUMNS] { + // flush any queued generic gate + if let Some(gate) = self.generic_gate_queue.pop() { + self.gate(gate); + } + + // transpose + array::from_fn(|col| self.rows.iter().map(|row| row[col]).collect()) + } +} + +impl Cs for System { + fn var(&mut self, _: V) -> Var { + let v = self.next_variable; + self.next_variable += 1; + Var { + index: v, + value: None, + } + } + + /// Outputs the number of gates in the circuit + fn curr_gate_count(&self) -> usize { + self.gates.len() + } + + fn gate(&mut self, g: GateSpec) { + self.gates.push(g); + } + + fn generic_queue(&mut self, gate: GateSpec) -> Option> { + if let Some(mut other) = self.generic_gate_queue.pop() { + other.row.extend(&gate.row); + assert_eq!(other.row.len(), DOUBLE_GENERIC_REGISTERS); + other.coeffs.extend(&gate.coeffs); + assert_eq!(other.coeffs.len(), DOUBLE_GENERIC_COEFFS); + Some(other) + } else { + self.generic_gate_queue.push(gate); + None + } + } + + fn cached_constants(&mut self, x: F) -> Var { + match self.cached_constants.get(&x) { + Some(var) => *var, + None => 
{ + let var = self.var(|| x); + self.cached_constants.insert(x, var); + var + } + } + } +} + +impl System { + /// Compiles our intermediate representation into a circuit. + /// + /// # Panics + /// + /// Will not panic ever since it is permutation inside gates + pub fn gates(&mut self) -> Vec> { + let mut first_cell: HashMap = HashMap::new(); + let mut most_recent_cell: HashMap = HashMap::new(); + let mut gates = vec![]; + + // flush any queued generic gate + if let Some(gate) = self.generic_gate_queue.pop() { + self.gate(gate); + } + + // convert GateSpec into CircuitGate + for (row, gate) in self.gates.iter().enumerate() { + // while tracking the wiring + let wires = array::from_fn(|col| { + let curr = Wire { row, col }; + + if let Some(index) = gate.get_var_idx(col) { + // wire this cell to the previous one + match most_recent_cell.insert(index, curr) { + Some(w) => w, + // unless it is the first cell, + // in which case we just save it for the very end + // (to complete the cycle) + None => { + first_cell.insert(index, curr); + curr + } + } + } else { + // if no var to be found, it's a cell wired to itself + curr + } + }); + + let g = CircuitGate::new(gate.typ, wires, gate.coeffs.clone()); + gates.push(g); + } + + // finish the permutation cycle + for (var, first) in &first_cell { + let last = *most_recent_cell.get(var).unwrap(); + gates[first.row].wires[first.col] = last; + } + + gates + } +} diff --git a/curves/Cargo.toml b/curves/Cargo.toml index 4bbf45f798..1015d8e47d 100644 --- a/curves/Cargo.toml +++ b/curves/Cargo.toml @@ -10,10 +10,12 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ec = { version = "0.3.0", features = ["parallel"] } -ark-ff = { version = "0.3.0", features = ["parallel", "asm"] } +ark-ec = { version = "0.4.2", features = ["parallel"] } +ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } [dev-dependencies] -rand = { version = "0.8.0", default-features = false } -ark-algebra-test-templates = "0.3.0" -ark-std = 
"0.3.0" +rand = { version = "0.8.5", default-features = false } +ark-test-curves = "0.4.2" +ark-algebra-test-templates = "0.4.2" +ark-serialize="0.4.2" +ark-std = "0.4.0" diff --git a/curves/src/pasta/curves/pallas.rs b/curves/src/pasta/curves/pallas.rs index 790251b55d..39813e13bb 100644 --- a/curves/src/pasta/curves/pallas.rs +++ b/curves/src/pasta/curves/pallas.rs @@ -1,70 +1,75 @@ use crate::pasta::*; use ark_ec::{ - models::short_weierstrass_jacobian::{GroupAffine, GroupProjective}, - ModelParameters, SWModelParameters, + models::short_weierstrass::{Affine, Projective, SWCurveConfig}, + CurveConfig, }; -use ark_ff::{field_new, Zero}; +use ark_ff::{MontFp, Zero}; + +/// G_GENERATOR_X = +/// 1 +pub const G_GENERATOR_X: Fp = MontFp!("1"); + +/// G1_GENERATOR_Y = +/// 12418654782883325593414442427049395787963493412651469444558597405572177144507 +pub const G_GENERATOR_Y: Fp = + MontFp!("12418654782883325593414442427049395787963493412651469444558597405572177144507"); #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct PallasParameters; -impl ModelParameters for PallasParameters { +impl CurveConfig for PallasParameters { type BaseField = Fp; - type ScalarField = Fq; -} -pub type Pallas = GroupAffine; -pub type ProjectivePallas = GroupProjective; - -impl SWModelParameters for PallasParameters { - /// COEFF_A = 0 - const COEFF_A: Fp = field_new!(Fp, "0"); - - /// COEFF_B = 5 - const COEFF_B: Fp = field_new!(Fp, "5"); + type ScalarField = Fq; /// COFACTOR = 1 const COFACTOR: &'static [u64] = &[0x1]; /// COFACTOR_INV = 1 - const COFACTOR_INV: Fq = field_new!(Fq, "1"); + const COFACTOR_INV: Fq = MontFp!("1"); +} + +pub type Pallas = Affine; + +pub type ProjectivePallas = Projective; + +impl SWCurveConfig for PallasParameters { + const COEFF_A: Self::BaseField = MontFp!("0"); - /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - (G_GENERATOR_X, G_GENERATOR_Y); + const COEFF_B: 
Self::BaseField = MontFp!("5"); + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); +} + +impl PallasParameters { #[inline(always)] - fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { - Self::BaseField::zero() + pub fn mul_by_a( + _: &::BaseField, + ) -> ::BaseField { + ::BaseField::zero() } } -/// G_GENERATOR_X = -/// 1 -pub const G_GENERATOR_X: Fp = field_new!(Fp, "1"); - -/// G1_GENERATOR_Y = -/// 12418654782883325593414442427049395787963493412651469444558597405572177144507 -pub const G_GENERATOR_Y: Fp = field_new!( - Fp, - "12418654782883325593414442427049395787963493412651469444558597405572177144507" -); - /// legacy curve, a copy of the normal curve to support legacy sponge params #[derive(Copy, Clone, Default, PartialEq, Eq)] pub struct LegacyPallasParameters; -impl ModelParameters for LegacyPallasParameters { - type BaseField = ::BaseField; - type ScalarField = ::ScalarField; +impl CurveConfig for LegacyPallasParameters { + type BaseField = ::BaseField; + + type ScalarField = ::ScalarField; + + const COFACTOR: &'static [u64] = ::COFACTOR; + + const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; } -impl SWModelParameters for LegacyPallasParameters { - const COEFF_A: Self::BaseField = ::COEFF_A; - const COEFF_B: Self::BaseField = ::COEFF_B; - const COFACTOR: &'static [u64] = ::COFACTOR; - const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - ::AFFINE_GENERATOR_COEFFS; + +impl SWCurveConfig for LegacyPallasParameters { + const COEFF_A: Self::BaseField = ::COEFF_A; + + const COEFF_B: Self::BaseField = ::COEFF_B; + + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); } -pub type LegacyPallas = GroupAffine; +pub type LegacyPallas = Affine; diff --git a/curves/src/pasta/curves/tests.rs b/curves/src/pasta/curves/tests.rs index 3e22f00ced..9f9d3cc002 100644 --- a/curves/src/pasta/curves/tests.rs +++ 
b/curves/src/pasta/curves/tests.rs @@ -1,28 +1,6 @@ -use ark_algebra_test_templates::{curves::*, groups::*}; -use ark_ec::AffineCurve; -use ark_std::test_rng; -use rand::Rng; +use crate::pasta::ProjectivePallas; +use crate::pasta::ProjectiveVesta; +use ark_algebra_test_templates::*; -use super::pallas; - -#[test] -fn test_pallas_projective_curve() { - curve_tests::(); - - sw_tests::(); -} - -#[test] -fn test_pallas_projective_group() { - let mut rng = test_rng(); - let a: pallas::ProjectivePallas = rng.gen(); - let b: pallas::ProjectivePallas = rng.gen(); - group_test(a, b); -} - -#[test] -fn test_pallas_generator() { - let generator = pallas::Pallas::prime_subgroup_generator(); - assert!(generator.is_on_curve()); - assert!(generator.is_in_correct_subgroup_assuming_on_curve()); -} +test_group!(g1; ProjectivePallas; sw); +test_group!(g2; ProjectiveVesta; sw); diff --git a/curves/src/pasta/curves/vesta.rs b/curves/src/pasta/curves/vesta.rs index 2a8b5002e5..7a587e9f1d 100644 --- a/curves/src/pasta/curves/vesta.rs +++ b/curves/src/pasta/curves/vesta.rs @@ -1,70 +1,71 @@ use crate::pasta::*; use ark_ec::{ - models::short_weierstrass_jacobian::{GroupAffine, GroupProjective}, - ModelParameters, SWModelParameters, + models::short_weierstrass::{Affine, Projective, SWCurveConfig}, + CurveConfig, }; -use ark_ff::{field_new, Zero}; +use ark_ff::{MontFp, Zero}; + +/// G_GENERATOR_X = +/// 1 +pub const G_GENERATOR_X: Fq = MontFp!("1"); + +/// G1_GENERATOR_Y = +/// 11426906929455361843568202299992114520848200991084027513389447476559454104162 +pub const G_GENERATOR_Y: Fq = + MontFp!("11426906929455361843568202299992114520848200991084027513389447476559454104162"); #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct VestaParameters; -impl ModelParameters for VestaParameters { +impl CurveConfig for VestaParameters { type BaseField = Fq; type ScalarField = Fp; + + /// COFACTOR = 1 + const COFACTOR: &'static [u64] = &[0x1]; + + /// COFACTOR_INV = 1 + const COFACTOR_INV: Fp = 
MontFp!("1"); } -pub type Vesta = GroupAffine; -pub type ProjectiveVesta = GroupProjective; +pub type Vesta = Affine; +pub type ProjectiveVesta = Projective; -impl SWModelParameters for VestaParameters { +impl SWCurveConfig for VestaParameters { /// COEFF_A = 0 - const COEFF_A: Fq = field_new!(Fq, "0"); + const COEFF_A: Fq = MontFp!("0"); /// COEFF_B = 5 - const COEFF_B: Fq = field_new!(Fq, "5"); - - /// COFACTOR = 1 - const COFACTOR: &'static [u64] = &[0x1]; - - /// COFACTOR_INV = 1 - const COFACTOR_INV: Fp = field_new!(Fp, "1"); + const COEFF_B: Fq = MontFp!("5"); /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - (G_GENERATOR_X, G_GENERATOR_Y); + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); +} +impl VestaParameters { #[inline(always)] - fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { - Self::BaseField::zero() + pub fn mul_by_a( + _: &::BaseField, + ) -> ::BaseField { + ::BaseField::zero() } } -/// G_GENERATOR_X = -/// 1 -pub const G_GENERATOR_X: Fq = field_new!(Fq, "1"); - -/// G1_GENERATOR_Y = -/// 11426906929455361843568202299992114520848200991084027513389447476559454104162 -pub const G_GENERATOR_Y: Fq = field_new!( - Fq, - "11426906929455361843568202299992114520848200991084027513389447476559454104162" -); - /// legacy curve, a copy of the normal curve to support legacy sponge params #[derive(Copy, Clone, Default, PartialEq, Eq)] pub struct LegacyVestaParameters; -impl ModelParameters for LegacyVestaParameters { - type BaseField = ::BaseField; - type ScalarField = ::ScalarField; +impl CurveConfig for LegacyVestaParameters { + type BaseField = ::BaseField; + type ScalarField = ::ScalarField; + const COFACTOR: &'static [u64] = ::COFACTOR; + const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; } -impl SWModelParameters for LegacyVestaParameters { - const COEFF_A: Self::BaseField = ::COEFF_A; - const COEFF_B: Self::BaseField = ::COEFF_B; - 
const COFACTOR: &'static [u64] = ::COFACTOR; - const COFACTOR_INV: Self::ScalarField = ::COFACTOR_INV; - const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = - ::AFFINE_GENERATOR_COEFFS; + +impl SWCurveConfig for LegacyVestaParameters { + const COEFF_A: Self::BaseField = ::COEFF_A; + const COEFF_B: Self::BaseField = ::COEFF_B; + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); } -pub type LegacyVesta = GroupAffine; +pub type LegacyVesta = Affine; diff --git a/curves/src/pasta/fields/fft.rs b/curves/src/pasta/fields/fft.rs new file mode 100644 index 0000000000..023615fb0a --- /dev/null +++ b/curves/src/pasta/fields/fft.rs @@ -0,0 +1,69 @@ +use ark_ff::biginteger::BigInteger; + +/// A trait that defines parameters for a field that can be used for FFTs. +pub trait FftParameters: 'static + Send + Sync + Sized { + type BigInt: BigInteger; + + /// Let `N` be the size of the multiplicative group defined by the field. + /// Then `TWO_ADICITY` is the two-adicity of `N`, i.e. the integer `s` + /// such that `N = 2^s * t` for some odd integer `t`. + const TWO_ADICITY: u32; + + /// 2^s root of unity computed by GENERATOR^t + const TWO_ADIC_ROOT_OF_UNITY: Self::BigInt; + + /// An integer `b` such that there exists a multiplicative subgroup + /// of size `b^k` for some integer `k`. + const SMALL_SUBGROUP_BASE: Option = None; + + /// The integer `k` such that there exists a multiplicative subgroup + /// of size `Self::SMALL_SUBGROUP_BASE^k`. + const SMALL_SUBGROUP_BASE_ADICITY: Option = None; + + /// GENERATOR^((MODULUS-1) / (2^s * + /// SMALL_SUBGROUP_BASE^SMALL_SUBGROUP_BASE_ADICITY)) Used for mixed-radix FFT. + const LARGE_SUBGROUP_ROOT_OF_UNITY: Option = None; +} + +/// A trait that defines parameters for a prime field. +pub trait FpParameters: FftParameters { + /// The modulus of the field. + const MODULUS: Self::BigInt; + + /// The number of bits needed to represent the `Self::MODULUS`. 
+ const MODULUS_BITS: u32; + + /// The number of bits that must be shaved from the beginning of + /// the representation when randomly sampling. + const REPR_SHAVE_BITS: u32; + + /// Let `M` be the power of 2^64 nearest to `Self::MODULUS_BITS`. Then + /// `R = M % Self::MODULUS`. + const R: Self::BigInt; + + /// R2 = R^2 % Self::MODULUS + const R2: Self::BigInt; + + /// INV = -MODULUS^{-1} mod 2^64 + const INV: u64; + + /// A multiplicative generator of the field. + /// `Self::GENERATOR` is an element having multiplicative order + /// `Self::MODULUS - 1`. + const GENERATOR: Self::BigInt; + + /// The number of bits that can be reliably stored. + /// (Should equal `SELF::MODULUS_BITS - 1`) + const CAPACITY: u32; + + /// t for 2^s * t = MODULUS - 1, and t coprime to 2. + const T: Self::BigInt; + + /// (t - 1) / 2 + const T_MINUS_ONE_DIV_TWO: Self::BigInt; + + /// (Self::MODULUS - 1) / 2 + const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt; +} + +pub trait Fp256Parameters {} diff --git a/curves/src/pasta/fields/fp.rs b/curves/src/pasta/fields/fp.rs index 8560087ade..9b4e120da9 100644 --- a/curves/src/pasta/fields/fp.rs +++ b/curves/src/pasta/fields/fp.rs @@ -1,6 +1,12 @@ -use ark_ff::{biginteger::BigInteger256 as BigInteger, FftParameters, Fp256, Fp256Parameters}; +use super::fft::{FftParameters, Fp256Parameters}; +use ark_ff::fields::{MontBackend, MontConfig}; +use ark_ff::{biginteger::BigInteger256 as BigInteger, Fp256}; -pub type Fp = Fp256; +#[derive(MontConfig)] +#[modulus = "28948022309329048855892746252171976963363056481941560715954676764349967630337"] +#[generator = "5"] +pub struct FqConfig; +pub type Fp = Fp256>; pub struct FpParameters; @@ -12,35 +18,35 @@ impl FftParameters for FpParameters { const TWO_ADICITY: u32 = 32; #[rustfmt::skip] - const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger::new([ 0xa28db849bad6dbf0, 0x9083cd03d3b539df, 0xfba6b9ca9dc8448e, 0x3ec928747b89c6da ]); } -impl 
ark_ff::FpParameters for FpParameters { +impl super::fft::FpParameters for FpParameters { // 28948022309329048855892746252171976963363056481941560715954676764349967630337 - const MODULUS: BigInteger = BigInteger([ + const MODULUS: BigInteger = BigInteger::new([ 0x992d30ed00000001, 0x224698fc094cf91b, 0x0, 0x4000000000000000, ]); - const R: BigInteger = BigInteger([ + const R: BigInteger = BigInteger::new([ 0x34786d38fffffffd, 0x992c350be41914ad, 0xffffffffffffffff, 0x3fffffffffffffff, ]); - const R2: BigInteger = BigInteger([ + const R2: BigInteger = BigInteger::new([ 0x8c78ecb30000000f, 0xd7d30dbd8b0de0e7, 0x7797a99bc3c95d18, 0x96d41af7b9cb714, ]); - const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger::new([ 0xcc96987680000000, 0x11234c7e04a67c8d, 0x0, @@ -48,13 +54,13 @@ impl ark_ff::FpParameters for FpParameters { ]); // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T - const T: BigInteger = BigInteger([0x94cf91b992d30ed, 0x224698fc, 0x0, 0x40000000]); + const T: BigInteger = BigInteger::new([0x94cf91b992d30ed, 0x224698fc, 0x0, 0x40000000]); const T_MINUS_ONE_DIV_TWO: BigInteger = - BigInteger([0x4a67c8dcc969876, 0x11234c7e, 0x0, 0x20000000]); + BigInteger::new([0x4a67c8dcc969876, 0x11234c7e, 0x0, 0x20000000]); // GENERATOR = 5 - const GENERATOR: BigInteger = BigInteger([ + const GENERATOR: BigInteger = BigInteger::new([ 0xa1a55e68ffffffed, 0x74c2a54b4f4982f3, 0xfffffffffffffffd, diff --git a/curves/src/pasta/fields/fq.rs b/curves/src/pasta/fields/fq.rs index 59a0ced05b..b623705750 100644 --- a/curves/src/pasta/fields/fq.rs +++ b/curves/src/pasta/fields/fq.rs @@ -1,46 +1,53 @@ -use ark_ff::{ - biginteger::BigInteger256 as BigInteger, FftParameters, Fp256, Fp256Parameters, FpParameters, -}; +use super::fft::{FftParameters, Fp256Parameters, FpParameters}; +use ark_ff::{biginteger::BigInteger256 as BigInteger, Fp256}; pub struct FqParameters; -pub type Fq = Fp256; +use 
ark_ff::fields::{MontBackend, MontConfig}; + +#[derive(MontConfig)] +#[modulus = "28948022309329048855892746252171976963363056481941647379679742748393362948097"] +#[generator = "5"] +pub struct FrConfig; +pub type Fq = Fp256>; impl Fp256Parameters for FqParameters {} + impl FftParameters for FqParameters { type BigInt = BigInteger; const TWO_ADICITY: u32 = 32; #[rustfmt::skip] - const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger::new([ 0x218077428c9942de, 0xcc49578921b60494, 0xac2e5d27b2efbee2, 0xb79fa897f2db056 ]); } + impl FpParameters for FqParameters { // 28948022309329048855892746252171976963363056481941647379679742748393362948097 - const MODULUS: BigInteger = BigInteger([ + const MODULUS: BigInteger = BigInteger::new([ 0x8c46eb2100000001, 0x224698fc0994a8dd, 0x0, 0x4000000000000000, ]); - const R: BigInteger = BigInteger([ + const R: BigInteger = BigInteger::new([ 0x5b2b3e9cfffffffd, 0x992c350be3420567, 0xffffffffffffffff, 0x3fffffffffffffff, ]); - const R2: BigInteger = BigInteger([ + const R2: BigInteger = BigInteger::new([ 0xfc9678ff0000000f, 0x67bb433d891a16e3, 0x7fae231004ccf590, 0x96d41af7ccfdaa9, ]); - const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger::new([ 0xc623759080000000, 0x11234c7e04ca546e, 0x0, @@ -49,13 +56,13 @@ impl FpParameters for FqParameters { // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T - const T: BigInteger = BigInteger([0x994a8dd8c46eb21, 0x224698fc, 0x0, 0x40000000]); + const T: BigInteger = BigInteger::new([0x994a8dd8c46eb21, 0x224698fc, 0x0, 0x40000000]); const T_MINUS_ONE_DIV_TWO: BigInteger = - BigInteger([0x4ca546ec6237590, 0x11234c7e, 0x0, 0x20000000]); + BigInteger::new([0x4ca546ec6237590, 0x11234c7e, 0x0, 0x20000000]); // GENERATOR = 5 - const GENERATOR: BigInteger = BigInteger([ + const GENERATOR: BigInteger = BigInteger::new([ 0x96bc8c8cffffffed, 0x74c2a54b49f7778e, 
0xfffffffffffffffd, diff --git a/curves/src/pasta/fields/mod.rs b/curves/src/pasta/fields/mod.rs index 5c5f93a2ea..158eae558d 100644 --- a/curves/src/pasta/fields/mod.rs +++ b/curves/src/pasta/fields/mod.rs @@ -1,8 +1,48 @@ +use ark_ff::Field; pub mod fp; pub use self::fp::*; pub mod fq; pub use self::fq::*; +pub mod fft; + +#[derive(Debug, PartialEq)] +pub enum LegendreSymbol { + Zero = 0, + QuadraticResidue = 1, + QuadraticNonResidue = -1, +} + +impl LegendreSymbol { + pub fn is_zero(&self) -> bool { + *self == LegendreSymbol::Zero + } + + pub fn is_qnr(&self) -> bool { + *self == LegendreSymbol::QuadraticNonResidue + } + + pub fn is_qr(&self) -> bool { + *self == LegendreSymbol::QuadraticResidue + } +} + +/// The interface for a field that supports an efficient square-root operation. +pub trait SquareRootField: Field { + /// Returns a `LegendreSymbol`, which indicates whether this field element is + /// 1 : a quadratic residue + /// 0 : equal to 0 + /// -1 : a quadratic non-residue + fn legendre(&self) -> LegendreSymbol; + + /// Returns the square root of self, if it exists. + #[must_use] + fn sqrt(&self) -> Option; + + /// Sets `self` to be the square root of `self`, if it exists. 
+ fn sqrt_in_place(&mut self) -> Option<&mut Self>; +} + #[cfg(test)] mod tests; diff --git a/curves/src/pasta/fields/tests.rs b/curves/src/pasta/fields/tests.rs index 38d1c93982..0489cfc4cf 100644 --- a/curves/src/pasta/fields/tests.rs +++ b/curves/src/pasta/fields/tests.rs @@ -1,24 +1,5 @@ -use crate::pasta::*; -use ark_algebra_test_templates::fields::{field_test, primefield_test, sqrt_field_test}; -use ark_std::test_rng; -use rand::Rng; +use crate::pasta::fields::{Fp as Fr, Fq}; +use ark_algebra_test_templates::*; -#[test] -fn test_fp() { - let mut rng = test_rng(); - let a: Fp = rng.gen(); - let b: Fp = rng.gen(); - field_test(a, b); - sqrt_field_test(a); - primefield_test::(); -} - -#[test] -fn test_fq() { - let mut rng = test_rng(); - let a: Fq = rng.gen(); - let b: Fq = rng.gen(); - field_test(a, b); - sqrt_field_test(a); - primefield_test::(); -} +test_field!(fq; Fq; mont_prime_field); +test_field!(fr; Fr; mont_prime_field); diff --git a/groupmap/Cargo.toml b/groupmap/Cargo.toml index a1d30309c3..d639095573 100644 --- a/groupmap/Cargo.toml +++ b/groupmap/Cargo.toml @@ -13,8 +13,8 @@ license = "Apache-2.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.3.0", features = [ "parallel" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } rand = "0.8.4" [dev-dependencies] diff --git a/groupmap/src/lib.rs b/groupmap/src/lib.rs index cc310d9ab8..a35140f423 100644 --- a/groupmap/src/lib.rs +++ b/groupmap/src/lib.rs @@ -19,8 +19,8 @@ //! WB19: Riad S. Wahby and Dan Boneh, Fast and simple constant-time hashing to the BLS12-381 elliptic curve. //! 
-use ark_ec::models::SWModelParameters; -use ark_ff::{Field, One, SquareRootField, Zero}; +use ark_ec::short_weierstrass::SWCurveConfig; +use ark_ff::{Field, One, Zero}; pub trait GroupMap { fn setup() -> Self; @@ -29,7 +29,7 @@ pub trait GroupMap { } #[derive(Clone, Copy)] -pub struct BWParameters { +pub struct BWParameters { u: G::BaseField, fu: G::BaseField, sqrt_neg_three_u_squared_minus_u_over_2: G::BaseField, @@ -38,12 +38,13 @@ pub struct BWParameters { } /// returns the right-hand side of the Short Weierstrass curve equation for a given x -fn curve_eqn(x: G::BaseField) -> G::BaseField { +fn curve_eqn(x: G::BaseField) -> G::BaseField { let mut res = x; res *= &x; // x^2 res += &G::COEFF_A; // x^2 + A res *= &x; // x^3 + A x res += &G::COEFF_B; // x^3 + A x + B + res } @@ -61,7 +62,7 @@ fn find_first Option>(start: K, f: F) -> A { } /// ? -fn potential_xs_helper( +fn potential_xs_helper( params: &BWParameters, t2: G::BaseField, alpha: G::BaseField, @@ -89,10 +90,7 @@ fn potential_xs_helper( } /// ? -fn potential_xs( - params: &BWParameters, - t: G::BaseField, -) -> [G::BaseField; 3] { +fn potential_xs(params: &BWParameters, t: G::BaseField) -> [G::BaseField; 3] { let t2 = t.square(); let mut alpha_inv = t2; alpha_inv += ¶ms.fu; @@ -108,12 +106,12 @@ fn potential_xs( /// returns the y-coordinate if x is a valid point on the curve, otherwise None /// TODO: what about sign? 
-pub fn get_y(x: G::BaseField) -> Option { +pub fn get_y(x: G::BaseField) -> Option { let fx = curve_eqn::(x); fx.sqrt() } -fn get_xy( +fn get_xy( params: &BWParameters, t: G::BaseField, ) -> (G::BaseField, G::BaseField) { @@ -126,7 +124,7 @@ fn get_xy( panic!("get_xy") } -impl GroupMap for BWParameters { +impl GroupMap for BWParameters { fn setup() -> Self { assert!(G::COEFF_A.is_zero()); diff --git a/groupmap/tests/groupmap.rs b/groupmap/tests/groupmap.rs index 0044616783..17d05dfc16 100644 --- a/groupmap/tests/groupmap.rs +++ b/groupmap/tests/groupmap.rs @@ -8,7 +8,7 @@ fn test_group_map_on_curve() { let params = BWParameters::::setup(); let t: Fq = rand::random(); let (x, y) = BWParameters::::to_group(¶ms, t); - let g = Vesta::new(x, y, false); + let g = Vesta::new(x, y); assert!(g.is_on_curve()); } @@ -27,7 +27,7 @@ fn test_batch_group_map_on_curve() { let ts: Vec = (0..1000).map(|_| rand::random()).collect(); for xs in BWParameters::::batch_to_group_x(¶ms, ts).iter() { let (x, y) = first_xy(xs); - let g = Vesta::new(x, y, false); + let g = Vesta::new(x, y); assert!(g.is_on_curve()); } } diff --git a/hasher/Cargo.toml b/hasher/Cargo.toml index 781aafaadb..4ce6257214 100644 --- a/hasher/Cargo.toml +++ b/hasher/Cargo.toml @@ -17,7 +17,7 @@ mina-poseidon = { path = "../poseidon", version = "0.1.0" } mina-curves = { path = "../curves", version = "0.1.0" } o1-utils = { path = "../utils", version = "0.1.0" } -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } bitvec = "1.0.0" serde = { version = "1.0", features = ["derive"] } diff --git a/hasher/src/roinput.rs b/hasher/src/roinput.rs index 1db4ee3193..e20b4bbba7 100644 --- a/hasher/src/roinput.rs +++ b/hasher/src/roinput.rs @@ -91,7 +91,7 @@ impl ROInput { pub fn append_scalar(mut self, s: Fq) -> Self { // mina scalars are 255 bytes let bytes = s.to_bytes(); - let bits = &bytes.as_bits::()[..Fq::size_in_bits()]; + let bits = 
&bytes.as_bits::()[..Fq::MODULUS_BIT_SIZE as usize]; self.bits.extend(bits); self } @@ -121,7 +121,9 @@ impl ROInput { /// Serialize random oracle input to bytes pub fn to_bytes(&self) -> Vec { let mut bits: BitVec = self.fields.iter().fold(BitVec::new(), |mut acc, fe| { - acc.extend_from_bitslice(&fe.to_bytes().as_bits::()[..Fp::size_in_bits()]); + acc.extend_from_bitslice( + &fe.to_bytes().as_bits::()[..Fp::MODULUS_BIT_SIZE as usize], + ); acc }); @@ -137,7 +139,7 @@ impl ROInput { let bits_as_fields = self.bits - .chunks(Fp::size_in_bits() - 1) + .chunks(Fp::MODULUS_BIT_SIZE as usize - 1) .fold(vec![], |mut acc, chunk| { // Workaround: chunk.clone() does not appear to respect // the chunk's boundaries when it's not byte-aligned. @@ -157,7 +159,7 @@ impl ROInput { bv.clone_from_bitslice(chunk); // extend to the size of a field; - bv.resize(Fp::size_in_bits(), false); + bv.resize(Fp::MODULUS_BIT_SIZE as usize, false); acc.push( Fp::from_bytes(&bv.into_vec()) diff --git a/kimchi/Cargo.toml b/kimchi/Cargo.toml index 67fc1e3c00..fec0d61716 100644 --- a/kimchi/Cargo.toml +++ b/kimchi/Cargo.toml @@ -14,11 +14,11 @@ path = "src/lib.rs" bench = false # needed for criterion (https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options) [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.3.0", features = [ "parallel" ] } -ark-poly = { version = "0.3.0", features = [ "parallel" ] } -ark-serialize = "0.3.0" -ark-bn254 = { version = "0.3.0", optional = true } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-poly = { version = "0.4.2", features = [ "parallel" ] } +ark-serialize = "0.4.2" +ark-bn254 = { version = "0.4.0", optional = true } blake2 = "0.10.0" num-bigint = { version = "0.4.3", features = ["rand", "serde"]} num-derive = "0.3" diff --git 
a/kimchi/src/circuits/constraints.rs b/kimchi/src/circuits/constraints.rs index 4f957384a4..454ce1cab0 100644 --- a/kimchi/src/circuits/constraints.rs +++ b/kimchi/src/circuits/constraints.rs @@ -18,7 +18,7 @@ use crate::{ error::{DomainCreationError, SetupError}, prover_index::ProverIndex, }; -use ark_ff::{PrimeField, SquareRootField, Zero}; +use ark_ff::{PrimeField, Zero}; use ark_poly::{ univariate::DensePolynomial as DP, EvaluationDomain, Evaluations as E, Radix2EvaluationDomain as D, @@ -303,11 +303,8 @@ impl ConstraintSystem { } } -impl< - F: PrimeField + SquareRootField, - G: KimchiCurve, - OpeningProof: OpenProof, - > ProverIndex +impl, OpeningProof: OpenProof> + ProverIndex { /// This function verifies the consistency of the wire /// assignments (witness) against the constraints @@ -361,7 +358,7 @@ impl< } } -impl ConstraintSystem { +impl ConstraintSystem { /// evaluate witness polynomials over domains pub fn evaluate(&self, w: &[DP; COLUMNS], z: &DP) -> WitnessOverDomains { // compute shifted witness polynomials @@ -689,7 +686,7 @@ impl FeatureFlags { } } -impl Builder { +impl Builder { /// Set up the number of public inputs. /// If not invoked, it equals `0` by default. 
pub fn public(mut self, public: usize) -> Self { @@ -942,7 +939,7 @@ pub mod tests { use super::*; use mina_curves::pasta::Fp; - impl ConstraintSystem { + impl ConstraintSystem { pub fn for_testing(gates: Vec>) -> Self { let public = 0; // not sure if theres a smarter way instead of the double unwrap, but should be fine in the test diff --git a/kimchi/src/circuits/domain_constant_evaluation.rs b/kimchi/src/circuits/domain_constant_evaluation.rs index 6659f42d31..8a835c372c 100644 --- a/kimchi/src/circuits/domain_constant_evaluation.rs +++ b/kimchi/src/circuits/domain_constant_evaluation.rs @@ -2,8 +2,8 @@ use crate::circuits::domains::EvaluationDomains; use ark_ff::FftField; +use ark_poly::DenseUVPolynomial; use ark_poly::EvaluationDomain; -use ark_poly::UVPolynomial; use ark_poly::{univariate::DensePolynomial as DP, Evaluations as E, Radix2EvaluationDomain as D}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; diff --git a/kimchi/src/circuits/expr.rs b/kimchi/src/circuits/expr.rs index f331d96b1b..14d3b33b37 100644 --- a/kimchi/src/circuits/expr.rs +++ b/kimchi/src/circuits/expr.rs @@ -2428,7 +2428,10 @@ where JointCombiner => "joint_combiner".to_string(), EndoCoefficient => "endo_coefficient".to_string(), Mds { row, col } => format!("mds({row}, {col})"), - Literal(x) => format!("field(\"0x{}\")", x.into_repr()), + Literal(x) => format!( + "field(\"{:#066X}\")", + Into::::into(x.into_bigint()) + ), Pow(x, n) => match x.as_ref() { Alpha => format!("alpha_pow({n})"), x => format!("pow({}, {n})", x.ocaml()), @@ -2448,7 +2451,7 @@ where JointCombiner => "joint\\_combiner".to_string(), EndoCoefficient => "endo\\_coefficient".to_string(), Mds { row, col } => format!("mds({row}, {col})"), - Literal(x) => format!("\\mathbb{{F}}({})", x.into_repr().into()), + Literal(x) => format!("\\mathbb{{F}}({})", x.into_bigint().into()), Pow(x, n) => match x.as_ref() { Alpha => format!("\\alpha^{{{n}}}"), x => format!("{}^{n}", x.ocaml()), diff --git 
a/kimchi/src/circuits/gate.rs b/kimchi/src/circuits/gate.rs index 61be85900d..a03481b04c 100644 --- a/kimchi/src/circuits/gate.rs +++ b/kimchi/src/circuits/gate.rs @@ -13,13 +13,11 @@ use crate::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use ark_ff::{bytes::ToBytes, PrimeField, SquareRootField}; -use num_traits::cast::ToPrimitive; +use ark_ff::PrimeField; use o1_utils::hasher::CryptoDigest; use poly_commitment::OpenProof; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use std::io::{Result as IoResult, Write}; use thiserror::Error; use super::{ @@ -165,24 +163,7 @@ where } } -impl ToBytes for CircuitGate { - #[inline] - fn write(&self, mut w: W) -> IoResult<()> { - let typ: u8 = ToPrimitive::to_u8(&self.typ).unwrap(); - typ.write(&mut w)?; - for i in 0..COLUMNS { - self.wires[i].write(&mut w)?; - } - - (self.coeffs.len() as u8).write(&mut w)?; - for x in &self.coeffs { - x.write(&mut w)?; - } - Ok(()) - } -} - -impl CircuitGate { +impl CircuitGate { /// this function creates "empty" circuit gate pub fn zero(wires: GateWires) -> Self { CircuitGate::new(GateType::Zero, wires, vec![]) diff --git a/kimchi/src/circuits/lookup/index.rs b/kimchi/src/circuits/lookup/index.rs index e4e70a3a7b..1cb8d26cde 100644 --- a/kimchi/src/circuits/lookup/index.rs +++ b/kimchi/src/circuits/lookup/index.rs @@ -8,7 +8,7 @@ use crate::circuits::{ tables::LookupTable, }, }; -use ark_ff::{FftField, PrimeField, SquareRootField}; +use ark_ff::{FftField, PrimeField}; use ark_poly::{ univariate::DensePolynomial as DP, EvaluationDomain, Evaluations as E, Radix2EvaluationDomain as D, @@ -194,7 +194,7 @@ pub struct LookupConstraintSystem { pub configuration: LookupConfiguration, } -impl LookupConstraintSystem { +impl LookupConstraintSystem { /// Create the `LookupConstraintSystem`. 
/// /// # Errors diff --git a/kimchi/src/circuits/polynomials/and.rs b/kimchi/src/circuits/polynomials/and.rs index e49da51a3d..8debb0992d 100644 --- a/kimchi/src/circuits/polynomials/and.rs +++ b/kimchi/src/circuits/polynomials/and.rs @@ -15,7 +15,7 @@ use crate::circuits::{ polynomial::COLUMNS, wires::Wire, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers, Two}; @@ -58,7 +58,7 @@ use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers, Tw //~ * the `xor` in `a x b = xor` is connected to the `xor` in `2 \cdot and = sum - xor` //~ * the `sum` in `a + b = sum` is connected to the `sum` in `2 \cdot and = sum - xor` -impl CircuitGate { +impl CircuitGate { /// Extends an AND gadget for `bytes` length. /// The full operation being performed is the following: /// `a AND b = 1/2 * (a + b - (a XOR b))` diff --git a/kimchi/src/circuits/polynomials/endomul_scalar.rs b/kimchi/src/circuits/polynomials/endomul_scalar.rs index 701ce892bf..0b52ec3efe 100644 --- a/kimchi/src/circuits/polynomials/endomul_scalar.rs +++ b/kimchi/src/circuits/polynomials/endomul_scalar.rs @@ -228,7 +228,7 @@ pub fn gen_witness( let bits_per_row = 2 * crumbs_per_row; assert_eq!(num_bits % bits_per_row, 0); - let bits_lsb: Vec<_> = BitIteratorLE::new(scalar.into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(scalar.into_bigint()) .take(num_bits) .collect(); let bits_msb: Vec<_> = bits_lsb.iter().rev().collect(); @@ -339,7 +339,7 @@ mod tests { let f1 = c_func; let f2 = |x: F| -> F { - let bits_le = x.into_repr().to_bits_le(); + let bits_le = x.into_bigint().to_bits_le(); let b0 = bits_le[0]; let b1 = bits_le[1]; @@ -371,7 +371,7 @@ mod tests { let f1 = d_func; let f2 = |x: F| -> F { - let bits_le = x.into_repr().to_bits_le(); + let bits_le = x.into_bigint().to_bits_le(); let b0 = bits_le[0]; let b1 = bits_le[1]; diff --git 
a/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs b/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs index 30fc6926ff..96af81e165 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_add/gadget.rs @@ -1,6 +1,6 @@ //! This module obtains the gates of a foreign field addition circuit. -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::foreign_field::BigUintForeignFieldHelpers; @@ -11,7 +11,7 @@ use crate::circuits::{ use super::witness::FFOps; -impl CircuitGate { +impl CircuitGate { /// Create foreign field addition gate chain without range checks (needs to wire the range check for result bound manually) /// - Inputs /// - starting row diff --git a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs index 104ea2db51..41afcc39e0 100644 --- a/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs +++ b/kimchi/src/circuits/polynomials/foreign_field_mul/gadget.rs @@ -1,6 +1,6 @@ //! This module obtains the gates of a foreign field addition circuit. 
-use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::foreign_field::{BigUintForeignFieldHelpers, ForeignFieldHelpers}; @@ -24,7 +24,7 @@ use super::circuitgates::ForeignFieldMul; /// Number of gates in this gadget pub const GATE_COUNT: usize = 1; -impl CircuitGate { +impl CircuitGate { /// Create foreign field multiplication gate /// Inputs the starting row /// Outputs tuple (next_row, circuit_gates) where diff --git a/kimchi/src/circuits/polynomials/permutation.rs b/kimchi/src/circuits/polynomials/permutation.rs index f94133c08e..b80ebfa6b5 100644 --- a/kimchi/src/circuits/polynomials/permutation.rs +++ b/kimchi/src/circuits/polynomials/permutation.rs @@ -49,12 +49,12 @@ use crate::{ proof::{PointEvaluations, ProofEvaluations}, prover_index::ProverIndex, }; -use ark_ff::{FftField, PrimeField, SquareRootField, Zero}; +use ark_ff::{FftField, PrimeField, Zero}; use ark_poly::{ univariate::{DenseOrSparsePolynomial, DensePolynomial}, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, }; -use ark_poly::{Polynomial, UVPolynomial}; +use ark_poly::{DenseUVPolynomial, Polynomial}; use blake2::{Blake2b512, Digest}; use o1_utils::{ExtendedDensePolynomial, ExtendedEvaluations}; use poly_commitment::OpenProof; @@ -135,7 +135,7 @@ pub struct Shifts { impl Shifts where - F: FftField + SquareRootField, + F: FftField, { /// Generates the shifts for a given domain pub fn new(domain: &D) -> Self { diff --git a/kimchi/src/circuits/polynomials/poseidon.rs b/kimchi/src/circuits/polynomials/poseidon.rs index c587c506e8..587346d953 100644 --- a/kimchi/src/circuits/polynomials/poseidon.rs +++ b/kimchi/src/circuits/polynomials/poseidon.rs @@ -35,7 +35,7 @@ use crate::{ }, curve::KimchiCurve, }; -use ark_ff::{Field, PrimeField, SquareRootField}; +use ark_ff::{Field, PrimeField}; use mina_poseidon::{ constants::{PlonkSpongeConstantsKimchi, SpongeConstants}, poseidon::{sbox, ArithmeticSponge, ArithmeticSpongeParams, Sponge}, @@ 
-77,7 +77,7 @@ pub const fn round_to_cols(i: usize) -> Range { start..(start + SPONGE_WIDTH) } -impl CircuitGate { +impl CircuitGate { pub fn create_poseidon( wires: GateWires, // Coefficients are passed in in the logical order diff --git a/kimchi/src/circuits/polynomials/range_check/gadget.rs b/kimchi/src/circuits/polynomials/range_check/gadget.rs index f8d3d6e696..5815488441 100644 --- a/kimchi/src/circuits/polynomials/range_check/gadget.rs +++ b/kimchi/src/circuits/polynomials/range_check/gadget.rs @@ -1,6 +1,6 @@ //! Range check gate -use ark_ff::{FftField, PrimeField, SquareRootField}; +use ark_ff::{FftField, PrimeField}; use crate::{ alphas::Alphas, @@ -20,7 +20,7 @@ use super::circuitgates::{RangeCheck0, RangeCheck1}; pub const GATE_COUNT: usize = 2; -impl CircuitGate { +impl CircuitGate { /// Create range check gate for constraining three 88-bit values. /// Inputs the starting row /// Outputs tuple (`next_row`, `circuit_gates`) where diff --git a/kimchi/src/circuits/polynomials/rot.rs b/kimchi/src/circuits/polynomials/rot.rs index 80c4022d41..4ed7089997 100644 --- a/kimchi/src/circuits/polynomials/rot.rs +++ b/kimchi/src/circuits/polynomials/rot.rs @@ -19,7 +19,7 @@ use crate::{ }, variable_map, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use std::{array, marker::PhantomData}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -28,7 +28,7 @@ pub enum RotMode { Right, } -impl CircuitGate { +impl CircuitGate { /// Creates a Rot64 gadget to rotate a word /// It will need: /// - 1 Generic gate to constrain to zero the top 2 limbs of the shifted witness of the rotation diff --git a/kimchi/src/circuits/polynomials/turshi.rs b/kimchi/src/circuits/polynomials/turshi.rs index da51465f92..5dacc6eb52 100644 --- a/kimchi/src/circuits/polynomials/turshi.rs +++ b/kimchi/src/circuits/polynomials/turshi.rs @@ -90,7 +90,7 @@ use crate::{ curve::KimchiCurve, proof::ProofEvaluations, }; -use ark_ff::{FftField, Field, PrimeField, SquareRootField}; +use 
ark_ff::{FftField, Field, PrimeField}; use rand::{prelude::StdRng, SeedableRng}; use std::array; use std::marker::PhantomData; @@ -104,7 +104,7 @@ pub const CIRCUIT_GATE_COUNT: usize = 4; // GATE-RELATED -impl CircuitGate { +impl CircuitGate { /// This function creates a `CairoClaim` gate pub fn create_cairo_claim(wires: GateWires) -> Self { CircuitGate::new(GateType::CairoClaim, wires, vec![]) diff --git a/kimchi/src/circuits/polynomials/xor.rs b/kimchi/src/circuits/polynomials/xor.rs index ea5fbc2cbd..564b7d25c8 100644 --- a/kimchi/src/circuits/polynomials/xor.rs +++ b/kimchi/src/circuits/polynomials/xor.rs @@ -16,14 +16,14 @@ use crate::{ }, variable_map, }; -use ark_ff::{PrimeField, SquareRootField}; +use ark_ff::PrimeField; use num_bigint::BigUint; use o1_utils::{BigUintFieldHelpers, BigUintHelpers, BitwiseOps, FieldHelpers}; use std::{array, marker::PhantomData}; use super::generic::GenericGateSpec; -impl CircuitGate { +impl CircuitGate { /// Extends a XOR gadget for `bits` length to a circuit /// Includes: /// - num_xors Xor16 gates diff --git a/kimchi/src/circuits/wires.rs b/kimchi/src/circuits/wires.rs index 1ab40d4b83..31c8761911 100644 --- a/kimchi/src/circuits/wires.rs +++ b/kimchi/src/circuits/wires.rs @@ -1,9 +1,7 @@ //! This module implements Plonk circuit gate wires primitive. -use ark_ff::bytes::{FromBytes, ToBytes}; use serde::{Deserialize, Serialize}; use std::array; -use std::io::{Read, Result as IoResult, Write}; /// Number of registers pub const COLUMNS: usize = 15; @@ -65,24 +63,6 @@ impl Wirable for GateWires { } } -impl ToBytes for Wire { - #[inline] - fn write(&self, mut w: W) -> IoResult<()> { - (self.row as u32).write(&mut w)?; - (self.col as u32).write(&mut w)?; - Ok(()) - } -} - -impl FromBytes for Wire { - #[inline] - fn read(mut r: R) -> IoResult { - let row = u32::read(&mut r)? as usize; - let col = u32::read(&mut r)? 
as usize; - Ok(Wire { row, col }) - } -} - #[cfg(feature = "ocaml_types")] pub mod caml { use super::*; diff --git a/kimchi/src/circuits/witness/mod.rs b/kimchi/src/circuits/witness/mod.rs index a85a932db9..75271215f4 100644 --- a/kimchi/src/circuits/witness/mod.rs +++ b/kimchi/src/circuits/witness/mod.rs @@ -67,10 +67,10 @@ mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use mina_curves::pasta::Pallas; - type PallasField = ::BaseField; + type PallasField = ::BaseField; #[test] fn zero_layout() { diff --git a/kimchi/src/curve.rs b/kimchi/src/curve.rs index 57790b10f7..db2aa340a2 100644 --- a/kimchi/src/curve.rs +++ b/kimchi/src/curve.rs @@ -1,7 +1,7 @@ //! This module contains a useful trait for recursion: [KimchiCurve], //! which defines how a pair of curves interact. -use ark_ec::{short_weierstrass_jacobian::GroupAffine, AffineCurve, ModelParameters}; +use ark_ec::{short_weierstrass::Affine, AffineRepr, CurveConfig}; use mina_curves::pasta::curves::{ pallas::{LegacyPallasParameters, PallasParameters}, vesta::{LegacyVestaParameters, VestaParameters}, @@ -37,28 +37,28 @@ pub trait KimchiCurve: CommitmentCurve + EndoCurve { } fn vesta_endos() -> &'static ( - ::BaseField, - ::ScalarField, + ::BaseField, + ::ScalarField, ) { static VESTA_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); &VESTA_ENDOS } fn pallas_endos() -> &'static ( - ::BaseField, - ::ScalarField, + ::BaseField, + ::ScalarField, ) { static PALLAS_ENDOS: Lazy<( - ::BaseField, - ::ScalarField, - )> = Lazy::new(endos::>); + ::BaseField, + ::ScalarField, + )> = Lazy::new(endos::>); &PALLAS_ENDOS } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "vesta"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -78,13 +78,13 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> 
(Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "pallas"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -104,7 +104,7 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } @@ -114,7 +114,7 @@ impl KimchiCurve for GroupAffine { // Legacy curves // -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "legacy_vesta"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -134,13 +134,13 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } } -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "legacy_pallas"; fn sponge_params() -> &'static ArithmeticSpongeParams { @@ -160,7 +160,7 @@ impl KimchiCurve for GroupAffine { } fn other_curve_prime_subgroup_generator() -> (Self::ScalarField, Self::ScalarField) { - GroupAffine::::prime_subgroup_generator() + Affine::::generator() .to_coordinates() .unwrap() } @@ -170,7 +170,7 @@ impl KimchiCurve for GroupAffine { use mina_poseidon::dummy_values::kimchi_dummy; #[cfg(feature = "bn254")] -impl KimchiCurve for GroupAffine { +impl KimchiCurve for Affine { const NAME: &'static str = "bn254"; fn sponge_params() -> &'static ArithmeticSpongeParams { diff --git a/kimchi/src/linearization.rs b/kimchi/src/linearization.rs index 566ef58216..6ca517240b 100644 --- a/kimchi/src/linearization.rs +++ b/kimchi/src/linearization.rs @@ -28,14 +28,14 @@ use crate::circuits::{ gate::GateType, wires::COLUMNS, }; -use ark_ff::{FftField, 
PrimeField, SquareRootField, Zero}; +use ark_ff::{FftField, PrimeField, Zero}; /// Get the expresion of constraints. /// /// # Panics /// /// Will panic if `generic_gate` is not associate with `alpha^0`. -pub fn constraints_expr( +pub fn constraints_expr( feature_flags: Option<&FeatureFlags>, generic: bool, ) -> (Expr>, Alphas) { @@ -234,7 +234,7 @@ pub fn constraints_expr( /// Adds the polynomials that are evaluated as part of the proof /// for the linearization to work. -pub fn linearization_columns( +pub fn linearization_columns( feature_flags: Option<&FeatureFlags>, ) -> std::collections::HashSet { let mut h = std::collections::HashSet::new(); @@ -336,7 +336,7 @@ pub fn linearization_columns( /// # Panics /// /// Will panic if the `linearization` process fails. -pub fn expr_linearization( +pub fn expr_linearization( feature_flags: Option<&FeatureFlags>, generic: bool, ) -> (Linearization>>, Alphas) { diff --git a/kimchi/src/proof.rs b/kimchi/src/proof.rs index 5829178786..75468abe8c 100644 --- a/kimchi/src/proof.rs +++ b/kimchi/src/proof.rs @@ -6,7 +6,7 @@ use crate::circuits::{ lookup::lookups::LookupPattern, wires::{COLUMNS, PERMUTS}, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{FftField, One, Zero}; use ark_poly::univariate::DensePolynomial; use o1_utils::ExtendedDensePolynomial; @@ -108,7 +108,7 @@ pub struct ProofEvaluations { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct LookupCommitments { +pub struct LookupCommitments { /// Commitments to the sorted lookup table polynomial (may have chunks) pub sorted: Vec>, /// Commitment to the lookup aggregation polynomial @@ -121,7 +121,7 @@ pub struct LookupCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverCommitments { +pub struct 
ProverCommitments { /// The commitments to the witness (execution trace) pub w_comm: [PolyComm; COLUMNS], /// The commitment to the permutation polynomial @@ -136,7 +136,7 @@ pub struct ProverCommitments { #[serde_as] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct ProverProof { +pub struct ProverProof { /// All the polynomial commitments required in the proof pub commitments: ProverCommitments, @@ -164,7 +164,7 @@ pub struct ProverProof { #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] pub struct RecursionChallenge where - G: AffineCurve, + G: AffineRepr, { /// Vector of scalar field elements #[serde_as(as = "Vec")] @@ -345,7 +345,7 @@ impl ProofEvaluations { } } -impl RecursionChallenge { +impl RecursionChallenge { pub fn new(chals: Vec, comm: PolyComm) -> RecursionChallenge { RecursionChallenge { chals, comm } } @@ -505,7 +505,7 @@ pub mod caml { impl From> for CamlRecursionChallenge where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -519,7 +519,7 @@ pub mod caml { impl From> for RecursionChallenge where - G: AffineCurve + From, + G: AffineRepr + From, G::ScalarField: From, { fn from(caml_ch: CamlRecursionChallenge) -> RecursionChallenge { diff --git a/kimchi/src/prover.rs b/kimchi/src/prover.rs index 55a99561c3..dca688dd3d 100644 --- a/kimchi/src/prover.rs +++ b/kimchi/src/prover.rs @@ -35,8 +35,8 @@ use crate::{ }; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Evaluations, Polynomial, - Radix2EvaluationDomain as D, UVPolynomial, + univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, + Radix2EvaluationDomain as D, }; use itertools::Itertools; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; @@ -1500,7 +1500,7 @@ internal_tracing::decl_traces!(internal_traces; 
pub mod caml { use super::*; use crate::proof::caml::{CamlProofEvaluations, CamlRecursionChallenge}; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use poly_commitment::{ commitment::caml::{CamlOpeningProof, CamlPolyComm}, evaluation_proof::OpeningProof, @@ -1588,7 +1588,7 @@ pub mod caml { impl From> for CamlLookupCommitments where - G: AffineCurve, + G: AffineRepr, CamlPolyComm: From>, { fn from( @@ -1608,7 +1608,7 @@ pub mod caml { impl From> for LookupCommitments where - G: AffineCurve, + G: AffineRepr, PolyComm: From>, { fn from( @@ -1632,7 +1632,7 @@ pub mod caml { impl From> for CamlProverCommitments where - G: AffineCurve, + G: AffineRepr, CamlPolyComm: From>, { fn from(prover_comm: ProverCommitments) -> Self { @@ -1665,7 +1665,7 @@ pub mod caml { impl From> for ProverCommitments where - G: AffineCurve, + G: AffineRepr, PolyComm: From>, { fn from(caml_prover_comm: CamlProverCommitments) -> ProverCommitments { @@ -1718,7 +1718,7 @@ pub mod caml { impl From<(ProverProof>, Vec)> for CamlProofWithPublic where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -1742,7 +1742,7 @@ pub mod caml { for (ProverProof>, Vec) where CamlF: Clone, - G: AffineCurve + From, + G: AffineRepr + From, G::ScalarField: From, { fn from( diff --git a/kimchi/src/prover_index.rs b/kimchi/src/prover_index.rs index 523d583e18..ab7dc81b65 100644 --- a/kimchi/src/prover_index.rs +++ b/kimchi/src/prover_index.rs @@ -140,7 +140,7 @@ pub mod testing { }, precomputed_srs, }; - use ark_ff::{PrimeField, SquareRootField}; + use ark_ff::PrimeField; use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as D}; use poly_commitment::{evaluation_proof::OpeningProof, srs::SRS, OpenProof}; @@ -161,7 +161,7 @@ pub mod testing { ) -> ProverIndex where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { // not sure if theres a smarter way instead of the double unwrap, but should be fine in the test let cs = 
ConstraintSystem::::create(gates) @@ -198,7 +198,7 @@ pub mod testing { ) -> ProverIndex> where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { new_index_for_test_with_lookups_and_custom_srs( gates, @@ -230,7 +230,7 @@ pub mod testing { ) -> ProverIndex> where G::BaseField: PrimeField, - G::ScalarField: PrimeField + SquareRootField, + G::ScalarField: PrimeField, { new_index_for_test_with_lookups::(gates, public, 0, vec![], None, false, None) } diff --git a/kimchi/src/tests/and.rs b/kimchi/src/tests/and.rs index e344af6da4..1b76f43c2c 100644 --- a/kimchi/src/tests/and.rs +++ b/kimchi/src/tests/and.rs @@ -10,7 +10,7 @@ use crate::{ plonk_sponge::FrSponge, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{One, PrimeField, Zero}; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; use mina_poseidon::{ @@ -24,8 +24,8 @@ use rand::{rngs::StdRng, SeedableRng}; use super::framework::TestFramework; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/ec.rs b/kimchi/src/tests/ec.rs index 9deec7a32a..d669c6486f 100644 --- a/kimchi/src/tests/ec.rs +++ b/kimchi/src/tests/ec.rs @@ -2,8 +2,8 @@ use crate::circuits::{ gate::{CircuitGate, GateType}, wires::*, }; -use ark_ec::{AffineCurve, ProjectiveCurve}; -use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; +use ark_ec::{AffineRepr, CurveGroup}; +use ark_ff::{Field, One, UniformRand, Zero}; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, @@ -11,6 +11,7 @@ use mina_poseidon::{ }; use rand::{rngs::StdRng, SeedableRng}; use std::array; +use std::ops::Mul; use super::framework::TestFramework; @@ -39,36 
+40,34 @@ fn ec_test() { let rng = &mut StdRng::from_seed([0; 32]); - let ps = { - let p = Other::prime_subgroup_generator() - .into_projective() - .mul(::ScalarField::rand(rng).into_repr()) - .into_affine(); + let ps: Vec = { + let p = Other::generator() + .into_group() + .mul(::ScalarField::rand(rng)); let mut res = vec![]; let mut acc = p; for _ in 0..num_additions { res.push(acc); - acc = acc + p; + acc += p; } - res + ::Group::normalize_batch(&res) }; - let qs = { - let q = Other::prime_subgroup_generator() - .into_projective() - .mul(::ScalarField::rand(rng).into_repr()) - .into_affine(); + let qs: Vec = { + let q = Other::generator() + .into_group() + .mul(::ScalarField::rand(rng)); let mut res = vec![]; let mut acc = q; for _ in 0..num_additions { res.push(acc); - acc = acc + q; + acc += q; } - res + ::Group::normalize_batch(&res) }; for &p in ps.iter().take(num_doubles) { - let p2 = p + p; + let p2: Other = (p + p).into(); let (x1, y1) = (p.x, p.y); let x1_squared = x1.square(); // 2 * s * y1 = 3 * x1^2 @@ -96,11 +95,12 @@ fn ec_test() { let p = ps[i]; let q = qs[i]; - let pq = p + q; + let pq: Other = (p + q).into(); let (x1, y1) = (p.x, p.y); let (x2, y2) = (q.x, q.y); // (x2 - x1) * s = y2 - y1 let s = (y2 - y1) / (x2 - x1); + witness[0].push(x1); witness[1].push(y1); witness[2].push(x2); @@ -122,11 +122,12 @@ fn ec_test() { for &p in ps.iter().take(num_infs) { let q = -p; - let p2 = p + p; + let p2: Other = (p + p).into(); let (x1, y1) = (p.x, p.y); let x1_squared = x1.square(); // 2 * s * y1 = -3 * x1^2 let s = (x1_squared.double() + x1_squared) / y1.double(); + witness[0].push(p.x); witness[1].push(p.y); witness[2].push(q.x); diff --git a/kimchi/src/tests/endomul.rs b/kimchi/src/tests/endomul.rs index 5a4fa08246..e34ef14452 100644 --- a/kimchi/src/tests/endomul.rs +++ b/kimchi/src/tests/endomul.rs @@ -4,7 +4,7 @@ use crate::circuits::{ wires::*, }; use crate::tests::framework::TestFramework; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use 
ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, BitIteratorLE, Field, One, PrimeField, UniformRand, Zero}; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; use mina_poseidon::{ @@ -14,6 +14,7 @@ use mina_poseidon::{ use poly_commitment::srs::endos; use rand::{rngs::StdRng, SeedableRng}; use std::array; +use std::ops::Mul; type SpongeParams = PlonkSpongeConstantsKimchi; type BaseSponge = DefaultFqSponge; @@ -56,22 +57,22 @@ fn endomul_test() { // let start = Instant::now(); for i in 0..num_scalars { - let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_bigint()) .take(num_bits) .collect(); - let x = ::ScalarField::from_repr( + let x = ::ScalarField::from_bigint( ::BigInt::from_bits_le(&bits_lsb[..]), ) .unwrap(); let x_scalar = ScalarChallenge(x).to_field(&endo_r); - let base = Other::prime_subgroup_generator(); - // let g = Other::prime_subgroup_generator().into_projective(); + let base = Other::generator(); + // let g = Other::generator().into_group(); let acc0 = { - let t = Other::new(endo_q * base.x, base.y, false); + let t = Other::new_unchecked(endo_q * base.x, base.y); let p = t + base; - let acc = p + p; + let acc: Other = (p + p).into(); (acc.x, acc.y) }; @@ -87,27 +88,24 @@ fn endomul_test() { ); let expected = { - let t = Other::prime_subgroup_generator(); - let mut acc = Other::new(acc0.0, acc0.1, false); + let t = Other::generator(); + let mut acc = Other::new_unchecked(acc0.0, acc0.1).into_group(); for i in (0..(num_bits / 2)).rev() { let b2i = F::from(bits_lsb[2 * i] as u64); let b2i1 = F::from(bits_lsb[2 * i + 1] as u64); let xq = (F::one() + ((endo_q - F::one()) * b2i1)) * t.x; let yq = (b2i.double() - F::one()) * t.y; - acc = acc + (acc + Other::new(xq, yq, false)); + acc = acc + (acc + Other::new_unchecked(xq, yq)); } - acc + acc.into_affine() }; assert_eq!( expected, - Other::prime_subgroup_generator() - .into_projective() - 
.mul(x_scalar.into_repr()) - .into_affine() + Other::generator().into_group().mul(x_scalar).into_affine() ); assert_eq!((expected.x, expected.y), res.acc); - assert_eq!(x.into_repr(), res.n.into_repr()); + assert_eq!(x.into_bigint(), res.n.into_bigint()); } TestFramework::::default() diff --git a/kimchi/src/tests/endomul_scalar.rs b/kimchi/src/tests/endomul_scalar.rs index f39c0bf236..886a7daefa 100644 --- a/kimchi/src/tests/endomul_scalar.rs +++ b/kimchi/src/tests/endomul_scalar.rs @@ -52,10 +52,10 @@ fn endomul_scalar_test() { //let start = Instant::now(); for _ in 0..num_scalars { let x = { - let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_repr()) + let bits_lsb: Vec<_> = BitIteratorLE::new(F::rand(rng).into_bigint()) .take(num_bits) .collect(); - F::from_repr(::BigInt::from_bits_le(&bits_lsb[..])).unwrap() + F::from_bigint(::BigInt::from_bits_le(&bits_lsb[..])).unwrap() }; assert_eq!( diff --git a/kimchi/src/tests/foreign_field_add.rs b/kimchi/src/tests/foreign_field_add.rs index 760c7fa2d5..8aea3daf13 100644 --- a/kimchi/src/tests/foreign_field_add.rs +++ b/kimchi/src/tests/foreign_field_add.rs @@ -13,8 +13,8 @@ use crate::circuits::{ }; use crate::curve::KimchiCurve; use crate::prover_index::ProverIndex; -use ark_ec::AffineCurve; -use ark_ff::{One, PrimeField, SquareRootField, Zero}; +use ark_ec::AffineRepr; +use ark_ff::{One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; use mina_poseidon::{ @@ -34,8 +34,8 @@ use poly_commitment::{ use rand::{rngs::StdRng, Rng, SeedableRng}; use std::array; use std::sync::Arc; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; @@ -150,7 +150,7 @@ static NULL_CARRY_BOTH: &[u8] = &[ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0xD2, ]; -impl CircuitGate { +impl CircuitGate { /// Check if a given circuit 
gate is a given foreign field operation pub fn check_ffadd_sign(&self, sign: FFOps) -> Result<(), String> { if self.typ != GateType::ForeignFieldAdd { @@ -179,7 +179,7 @@ impl CircuitGate { // Outputs tuple (next_row, circuit_gates) where // next_row - next row after this gate // circuit_gates - vector of circuit gates comprising this gate -fn short_circuit( +fn short_circuit( opcodes: &[FFOps], foreign_field_modulus: &BigUint, ) -> (usize, Vec>) { @@ -212,7 +212,7 @@ fn short_circuit( // Outputs tuple (next_row, circuit_gates) where // next_row - next row after this gate // circuit_gates - vector of circuit gates comprising this gate -fn full_circuit( +fn full_circuit( opcodes: &[FFOps], foreign_field_modulus: &BigUint, ) -> (usize, Vec>) { diff --git a/kimchi/src/tests/foreign_field_mul.rs b/kimchi/src/tests/foreign_field_mul.rs index 95272e594c..951fbf4953 100644 --- a/kimchi/src/tests/foreign_field_mul.rs +++ b/kimchi/src/tests/foreign_field_mul.rs @@ -9,7 +9,7 @@ use crate::{ plonk_sponge::FrSponge, tests::framework::TestFramework, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, PrimeField, Zero}; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; use num_bigint::BigUint; @@ -27,8 +27,8 @@ use mina_poseidon::{ use num_bigint::RandBigInt; use rand::{rngs::StdRng, SeedableRng}; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; diff --git a/kimchi/src/tests/generic.rs b/kimchi/src/tests/generic.rs index f9efc83341..2ff3b72da5 100644 --- a/kimchi/src/tests/generic.rs +++ b/kimchi/src/tests/generic.rs @@ -92,7 +92,7 @@ fn test_generic_gate_pub_empty() { fn test_generic_gate_pairing() { type Fp = ark_bn254::Fr; type SpongeParams = PlonkSpongeConstantsKimchi; - type BaseSponge = DefaultFqSponge; + type BaseSponge = DefaultFqSponge; type 
ScalarSponge = DefaultFrSponge; use ark_ff::UniformRand; @@ -110,7 +110,7 @@ fn test_generic_gate_pairing() { // create and verify proof based on the witness >, + poly_commitment::pairing_proof::PairingProof>, > as Default>::default() .gates(gates) .witness(witness) diff --git a/kimchi/src/tests/keccak.rs b/kimchi/src/tests/keccak.rs index b39ecc4f84..a29e13c40b 100644 --- a/kimchi/src/tests/keccak.rs +++ b/kimchi/src/tests/keccak.rs @@ -6,12 +6,12 @@ use crate::circuits::{ polynomials::keccak::{self, ROT_TAB}, wires::Wire, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use mina_curves::pasta::{Fp, Pallas, Vesta}; use rand::Rng; //use super::framework::TestFramework; -type PallasField = ::BaseField; +type PallasField = ::BaseField; fn create_test_constraint_system() -> ConstraintSystem { let (mut next_row, mut gates) = { CircuitGate::::create_keccak(0) }; diff --git a/kimchi/src/tests/not.rs b/kimchi/src/tests/not.rs index 42cd0705f7..794a41bb22 100644 --- a/kimchi/src/tests/not.rs +++ b/kimchi/src/tests/not.rs @@ -14,7 +14,7 @@ use crate::{ }; use super::framework::TestFramework; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; use mina_poseidon::{ @@ -26,8 +26,8 @@ use o1_utils::{BigUintHelpers, BitwiseOps, FieldHelpers, RandomField}; use poly_commitment::evaluation_proof::OpeningProof; use rand::{rngs::StdRng, SeedableRng}; -type PallasField = ::BaseField; -type VestaField = ::BaseField; +type PallasField = ::BaseField; +type VestaField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/range_check.rs b/kimchi/src/tests/range_check.rs index 8e46962add..6a93128883 100644 --- a/kimchi/src/tests/range_check.rs +++ b/kimchi/src/tests/range_check.rs @@ -13,7 +13,7 @@ use crate::{ 
prover_index::testing::new_index_for_test_with_lookups, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; @@ -47,7 +47,7 @@ use super::framework::TestFramework; type BaseSponge = DefaultFqSponge; type ScalarSponge = DefaultFrSponge; -type PallasField = ::BaseField; +type PallasField = ::BaseField; const RNG_SEED: [u8; 32] = [ 22, 4, 34, 75, 29, 255, 0, 126, 237, 19, 86, 160, 1, 90, 131, 221, 186, 168, 40, 59, 0, 4, 9, diff --git a/kimchi/src/tests/recursion.rs b/kimchi/src/tests/recursion.rs index 719318eb96..8a812c330f 100644 --- a/kimchi/src/tests/recursion.rs +++ b/kimchi/src/tests/recursion.rs @@ -4,7 +4,7 @@ use crate::circuits::wires::COLUMNS; use crate::proof::RecursionChallenge; use ark_ff::{UniformRand, Zero}; use ark_poly::univariate::DensePolynomial; -use ark_poly::UVPolynomial; +use ark_poly::DenseUVPolynomial; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; use mina_poseidon::{ constants::PlonkSpongeConstantsKimchi, diff --git a/kimchi/src/tests/rot.rs b/kimchi/src/tests/rot.rs index f9a1308b86..fc5be93697 100644 --- a/kimchi/src/tests/rot.rs +++ b/kimchi/src/tests/rot.rs @@ -16,7 +16,7 @@ use crate::{ plonk_sponge::FrSponge, prover_index::ProverIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Fq, Pallas, PallasParameters, Vesta, VestaParameters}; @@ -32,7 +32,7 @@ use poly_commitment::{ }; use rand::{rngs::StdRng, Rng, SeedableRng}; -type PallasField = ::BaseField; +type PallasField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/tests/serde.rs b/kimchi/src/tests/serde.rs index 43883066d9..7b3eeefb37 100644 --- a/kimchi/src/tests/serde.rs +++ b/kimchi/src/tests/serde.rs @@ -9,7 +9,7 @@ use 
crate::{ verifier::verify, verifier_index::VerifierIndex, }; -use ark_ec::short_weierstrass_jacobian::GroupAffine; +use ark_ec::short_weierstrass::Affine; use ark_ff::Zero; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; @@ -73,11 +73,11 @@ mod tests { .unwrap(); // deserialize the verifier index - let mut verifier_index_deserialize: VerifierIndex, _> = + let mut verifier_index_deserialize: VerifierIndex, _> = serde_json::from_str(&verifier_index_serialize).unwrap(); // add srs with lagrange bases - let mut srs = SRS::>::create(verifier_index.max_poly_size); + let mut srs = SRS::>::create(verifier_index.max_poly_size); srs.add_lagrange_basis(verifier_index.domain); verifier_index_deserialize.powers_of_alpha = index.powers_of_alpha; verifier_index_deserialize.linearization = index.linearization; diff --git a/kimchi/src/tests/varbasemul.rs b/kimchi/src/tests/varbasemul.rs index 91eed12b78..79fa241acc 100644 --- a/kimchi/src/tests/varbasemul.rs +++ b/kimchi/src/tests/varbasemul.rs @@ -4,7 +4,7 @@ use crate::circuits::{ wires::*, }; use crate::tests::framework::TestFramework; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, BitIteratorLE, Field, One, PrimeField, UniformRand, Zero}; use colored::Colorize; use mina_curves::pasta::{Fp as F, Pallas as Other, Vesta, VestaParameters}; @@ -14,6 +14,7 @@ use mina_poseidon::{ }; use rand::{rngs::StdRng, SeedableRng}; use std::array; +use std::ops::Mul; use std::time::Instant; type SpongeParams = PlonkSpongeConstantsKimchi; @@ -22,7 +23,7 @@ type ScalarSponge = DefaultFrSponge; #[test] fn varbase_mul_test() { - let num_bits = F::size_in_bits(); + let num_bits = F::MODULUS_BIT_SIZE as usize; let chunks = num_bits / 5; let num_scalars = 10; @@ -54,14 +55,14 @@ fn varbase_mul_test() { let start = Instant::now(); for i in 0..num_scalars { let x = F::rand(rng); - let bits_lsb: Vec<_> = BitIteratorLE::new(x.into_repr()).take(num_bits).collect(); 
- let x_ = ::ScalarField::from_repr( + let bits_lsb: Vec<_> = BitIteratorLE::new(x.into_bigint()).take(num_bits).collect(); + let x_ = ::ScalarField::from_bigint( ::BigInt::from_bits_le(&bits_lsb[..]), ) .unwrap(); - let base = Other::prime_subgroup_generator(); - let g = Other::prime_subgroup_generator().into_projective(); + let base = Other::generator(); + let g = Other::generator().into_group(); let acc = (g + g).into_affine(); let acc = (acc.x, acc.y); @@ -75,12 +76,12 @@ fn varbase_mul_test() { acc, ); - let shift = ::ScalarField::from(2).pow([(bits_msb.len()) as u64]); + let shift = ::ScalarField::from(2).pow([(bits_msb.len()) as u64]); let expected = g - .mul((::ScalarField::one() + shift + x_.double()).into_repr()) + .mul(&(::ScalarField::one() + shift + x_.double())) .into_affine(); - assert_eq!(x_.into_repr(), res.n.into_repr()); + assert_eq!(x_.into_bigint(), res.n.into_bigint()); assert_eq!((expected.x, expected.y), res.acc); } println!( diff --git a/kimchi/src/tests/xor.rs b/kimchi/src/tests/xor.rs index 7ab28b4008..e6b271dede 100644 --- a/kimchi/src/tests/xor.rs +++ b/kimchi/src/tests/xor.rs @@ -11,7 +11,7 @@ use crate::{ curve::KimchiCurve, prover_index::ProverIndex, }; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use ark_poly::EvaluationDomain; use mina_curves::pasta::{Fp, Pallas, Vesta, VestaParameters}; @@ -29,7 +29,7 @@ use rand::{rngs::StdRng, SeedableRng}; use super::framework::TestFramework; -type PallasField = ::BaseField; +type PallasField = ::BaseField; type SpongeParams = PlonkSpongeConstantsKimchi; type VestaBaseSponge = DefaultFqSponge; type VestaScalarSponge = DefaultFrSponge; diff --git a/kimchi/src/verifier.rs b/kimchi/src/verifier.rs index 62d9e5d43f..30d2c77f80 100644 --- a/kimchi/src/verifier.rs +++ b/kimchi/src/verifier.rs @@ -18,7 +18,7 @@ use crate::{ proof::{PointEvaluations, ProofEvaluations, ProverProof, RecursionChallenge}, verifier_index::VerifierIndex, }; -use 
ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::{Field, One, PrimeField, Zero}; use ark_poly::{univariate::DensePolynomial, EvaluationDomain, Polynomial}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; @@ -747,7 +747,7 @@ where fn to_batch<'a, G, EFqSponge, EFrSponge, OpeningProof: OpenProof>( verifier_index: &VerifierIndex, proof: &'a ProverProof, - public_input: &'a [::ScalarField], + public_input: &'a [::ScalarField], ) -> Result> where G: KimchiCurve, diff --git a/poly-commitment/Cargo.toml b/poly-commitment/Cargo.toml index 890555082e..fbd169ae2e 100644 --- a/poly-commitment/Cargo.toml +++ b/poly-commitment/Cargo.toml @@ -10,10 +10,10 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.3.0", features = [ "parallel" ] } -ark-poly = { version = "0.3.0", features = [ "parallel" ] } -ark-serialize = "0.3.0" +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-poly = { version = "0.4.2", features = [ "parallel" ] } +ark-serialize = "0.4.2" blake2 = "0.10.0" itertools = "0.10.3" @@ -37,7 +37,7 @@ ocaml-gen = { version = "0.1.5", optional = true } [dev-dependencies] colored = "2.0.0" rand_chacha = { version = "0.3.0" } -ark-bn254 = { version = "0.3.0" } +ark-bn254 = { version = "0.4.0" } [features] ocaml_types = [ "ocaml", "ocaml-gen" ] diff --git a/poly-commitment/src/chunked.rs b/poly-commitment/src/chunked.rs index 9c3ee5c294..c3d4542199 100644 --- a/poly-commitment/src/chunked.rs +++ b/poly-commitment/src/chunked.rs @@ -1,5 +1,6 @@ -use ark_ec::ProjectiveCurve; +use ark_ec::CurveGroup; use ark_ff::{Field, Zero}; +use std::ops::AddAssign; use crate::commitment::CommitmentCurve; use crate::PolyComm; @@ -11,13 +12,13 @@ where /// Multiplies each commitment chunk of f with powers of zeta^n // TODO(mimoo): better name for this function pub fn chunk_commitment(&self, zeta_n: 
C::ScalarField) -> Self { - let mut res = C::Projective::zero(); + let mut res = C::Group::zero(); // use Horner's to compute chunk[0] + z^n chunk[1] + z^2n chunk[2] + ... // as ( chunk[-1] * z^n + chunk[-2] ) * z^n + chunk[-3] // (https://en.wikipedia.org/wiki/Horner%27s_method) for chunk in self.elems.iter().rev() { res *= zeta_n; - res.add_assign_mixed(chunk); + res.add_assign(chunk); } PolyComm { diff --git a/poly-commitment/src/combine.rs b/poly-commitment/src/combine.rs index 52f7e19f95..521e4739dc 100644 --- a/poly-commitment/src/combine.rs +++ b/poly-commitment/src/combine.rs @@ -16,15 +16,16 @@ //! such a scratch array within each algorithm. use ark_ec::{ - models::short_weierstrass_jacobian::GroupAffine as SWJAffine, AffineCurve, ProjectiveCurve, - SWModelParameters, + models::short_weierstrass::Affine as SWJAffine, short_weierstrass::SWCurveConfig, AffineRepr, + CurveGroup, Group, }; use ark_ff::{BitIteratorBE, Field, One, PrimeField, Zero}; use itertools::Itertools; use mina_poseidon::sponge::ScalarChallenge; use rayon::prelude::*; +use std::ops::AddAssign; -fn add_pairs_in_place(pairs: &mut Vec>) { +fn add_pairs_in_place(pairs: &mut Vec>) { let len = if pairs.len() % 2 == 0 { pairs.len() } else { @@ -86,7 +87,7 @@ fn add_pairs_in_place(pairs: &mut Vec>) { /// assuming that for each `i`, `v0[i].x != v1[i].x` so we can use the ordinary /// addition formula and don't have to handle the edge cases of doubling and /// hitting the point at infinity. -fn batch_add_assign_no_branch( +fn batch_add_assign_no_branch( denominators: &mut [P::BaseField], v0: &mut [SWJAffine

], v1: &[SWJAffine

], @@ -117,7 +118,7 @@ fn batch_add_assign_no_branch( } /// Given arrays of curve points `v0` and `v1` do `v0[i] += v1[i]` for each i. -pub fn batch_add_assign( +pub fn batch_add_assign( denominators: &mut [P::BaseField], v0: &mut [SWJAffine

], v1: &[SWJAffine

], @@ -168,7 +169,7 @@ pub fn batch_add_assign( }); } -fn affine_window_combine_base( +fn affine_window_combine_base( g1: &[SWJAffine

], g2: &[SWJAffine

], x1: P::ScalarField, @@ -190,8 +191,8 @@ fn affine_window_combine_base( }; assert!(g1g2.len() == g1.len()); - let windows1 = BitIteratorBE::new(x1.into_repr()).tuples(); - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows1 = BitIteratorBE::new(x1.into_bigint()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); let mut points = vec![SWJAffine::

::zero(); g1.len()]; @@ -275,11 +276,11 @@ fn affine_window_combine_base( points } -fn batch_endo_in_place(endo_coeff: P::BaseField, ps: &mut [SWJAffine

]) { +fn batch_endo_in_place(endo_coeff: P::BaseField, ps: &mut [SWJAffine

]) { ps.par_iter_mut().for_each(|p| p.x *= endo_coeff); } -fn batch_negate_in_place(ps: &mut [SWJAffine

]) { +fn batch_negate_in_place(ps: &mut [SWJAffine

]) { ps.par_iter_mut().for_each(|p| { p.y = -p.y; }); @@ -287,7 +288,7 @@ fn batch_negate_in_place(ps: &mut [SWJAffine

]) { /// Uses a batch version of Algorithm 1 of https://eprint.iacr.org/2019/1021.pdf (on page 19) to /// compute `g1 + g2.scale(chal.to_field(endo_coeff))` -fn affine_window_combine_one_endo_base( +fn affine_window_combine_one_endo_base( endo_coeff: P::BaseField, g1: &[SWJAffine

], g2: &[SWJAffine

], @@ -304,7 +305,7 @@ fn affine_window_combine_one_endo_base( (limbs_lsb[limb as usize] >> j) & 1 } - let rep = chal.0.into_repr(); + let rep = chal.0.into_bigint(); let r = rep.as_ref(); let mut denominators = vec![P::BaseField::zero(); g1.len()]; @@ -340,7 +341,7 @@ fn affine_window_combine_one_endo_base( } /// Double an array of curve points in-place. -fn batch_double_in_place( +fn batch_double_in_place( denominators: &mut Vec, points: &mut [SWJAffine

], ) { @@ -366,12 +367,12 @@ fn batch_double_in_place( }); } -fn affine_window_combine_one_base( +fn affine_window_combine_one_base( g1: &[SWJAffine

], g2: &[SWJAffine

], x2: P::ScalarField, ) -> Vec> { - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); let mut points = vec![SWJAffine::

::zero(); g1.len()]; @@ -412,7 +413,7 @@ fn affine_window_combine_one_base( points } -pub fn affine_window_combine( +pub fn affine_window_combine( g1: &[SWJAffine

], g2: &[SWJAffine

], x1: P::ScalarField, @@ -431,7 +432,7 @@ pub fn affine_window_combine( /// `g1[i] + g2[i].scale(chal.to_field(endo_coeff))` /// /// Internally, it uses the curve endomorphism to speed up this operation. -pub fn affine_window_combine_one_endo( +pub fn affine_window_combine_one_endo( endo_coeff: P::BaseField, g1: &[SWJAffine

], g2: &[SWJAffine

], @@ -445,7 +446,7 @@ pub fn affine_window_combine_one_endo( .collect(); v.concat() } -pub fn affine_window_combine_one( +pub fn affine_window_combine_one( g1: &[SWJAffine

], g2: &[SWJAffine

], x2: P::ScalarField, @@ -459,24 +460,23 @@ pub fn affine_window_combine_one( v.concat() } -pub fn window_combine( +pub fn window_combine( g_lo: &[G], g_hi: &[G], x_lo: G::ScalarField, x_hi: G::ScalarField, ) -> Vec { - let mut g_proj: Vec = { + let mut g_proj: Vec = { let pairs: Vec<_> = g_lo.iter().zip(g_hi).collect(); pairs .into_par_iter() .map(|(lo, hi)| window_shamir::(x_lo, *lo, x_hi, *hi)) .collect() }; - G::Projective::batch_normalization(g_proj.as_mut_slice()); - g_proj.par_iter().map(|g| g.into_affine()).collect() + G::Group::normalize_batch(g_proj.as_mut_slice()) } -pub fn affine_shamir_window_table( +pub fn affine_shamir_window_table( denominators: &mut [P::BaseField], g1: &[SWJAffine

], g2: &[SWJAffine

], @@ -555,7 +555,7 @@ pub fn affine_shamir_window_table( res } -pub fn affine_shamir_window_table_one( +pub fn affine_shamir_window_table_one( denominators: &mut [P::BaseField], g1: &[SWJAffine

], ) -> [Vec>; 3] { @@ -585,118 +585,113 @@ pub fn affine_shamir_window_table_one( res } -fn window_shamir( - x1: G::ScalarField, - g1: G, - x2: G::ScalarField, - g2: G, -) -> G::Projective { +fn window_shamir(x1: G::ScalarField, g1: G, x2: G::ScalarField, g2: G) -> G::Group { let [_g00_00, g01_00, g10_00, g11_00, g00_01, g01_01, g10_01, g11_01, g00_10, g01_10, g10_10, g11_10, g00_11, g01_11, g10_11, g11_11] = shamir_window_table(g1, g2); - let windows1 = BitIteratorBE::new(x1.into_repr()).tuples(); - let windows2 = BitIteratorBE::new(x2.into_repr()).tuples(); + let windows1 = BitIteratorBE::new(x1.into_bigint()).tuples(); + let windows2 = BitIteratorBE::new(x2.into_bigint()).tuples(); - let mut res = G::Projective::zero(); + let mut res = G::Group::zero(); for ((hi_1, lo_1), (hi_2, lo_2)) in windows1.zip(windows2) { res.double_in_place(); res.double_in_place(); match ((hi_1, lo_1), (hi_2, lo_2)) { ((false, false), (false, false)) => (), - ((false, true), (false, false)) => res.add_assign_mixed(&g01_00), - ((true, false), (false, false)) => res.add_assign_mixed(&g10_00), - ((true, true), (false, false)) => res.add_assign_mixed(&g11_00), - - ((false, false), (false, true)) => res.add_assign_mixed(&g00_01), - ((false, true), (false, true)) => res.add_assign_mixed(&g01_01), - ((true, false), (false, true)) => res.add_assign_mixed(&g10_01), - ((true, true), (false, true)) => res.add_assign_mixed(&g11_01), - - ((false, false), (true, false)) => res.add_assign_mixed(&g00_10), - ((false, true), (true, false)) => res.add_assign_mixed(&g01_10), - ((true, false), (true, false)) => res.add_assign_mixed(&g10_10), - ((true, true), (true, false)) => res.add_assign_mixed(&g11_10), - - ((false, false), (true, true)) => res.add_assign_mixed(&g00_11), - ((false, true), (true, true)) => res.add_assign_mixed(&g01_11), - ((true, false), (true, true)) => res.add_assign_mixed(&g10_11), - ((true, true), (true, true)) => res.add_assign_mixed(&g11_11), + ((false, true), (false, false)) => 
res.add_assign(&g01_00), + ((true, false), (false, false)) => res.add_assign(&g10_00), + ((true, true), (false, false)) => res.add_assign(&g11_00), + + ((false, false), (false, true)) => res.add_assign(&g00_01), + ((false, true), (false, true)) => res.add_assign(&g01_01), + ((true, false), (false, true)) => res.add_assign(&g10_01), + ((true, true), (false, true)) => res.add_assign(&g11_01), + + ((false, false), (true, false)) => res.add_assign(&g00_10), + ((false, true), (true, false)) => res.add_assign(&g01_10), + ((true, false), (true, false)) => res.add_assign(&g10_10), + ((true, true), (true, false)) => res.add_assign(&g11_10), + + ((false, false), (true, true)) => res.add_assign(&g00_11), + ((false, true), (true, true)) => res.add_assign(&g01_11), + ((true, false), (true, true)) => res.add_assign(&g10_11), + ((true, true), (true, true)) => res.add_assign(&g11_11), } } res } -pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { - let g00_00 = G::prime_subgroup_generator().into_projective(); - let g01_00 = g1.into_projective(); +pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { + let g00_00 = G::generator().into_group(); + let g01_00 = g1.into_group(); let g10_00 = { let mut g = g01_00; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_00 = { let mut g = g10_00; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; - let g00_01 = g2.into_projective(); + let g00_01 = g2.into_group(); let g01_01 = { let mut g = g00_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_01 = { let mut g = g01_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_01 = { let mut g = g10_01; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g00_10 = { let mut g = g00_01; - g.add_assign_mixed(&g2); + g.add_assign(&g2); g }; let g01_10 = { let mut g = g00_10; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_10 = { let mut g = g01_10; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_10 = { let mut g = g10_10; - 
g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g00_11 = { let mut g = g00_10; - g.add_assign_mixed(&g2); + g.add_assign(&g2); g }; let g01_11 = { let mut g = g00_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g10_11 = { let mut g = g01_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; let g11_11 = { let mut g = g10_11; - g.add_assign_mixed(&g1); + g.add_assign(&g1); g }; @@ -704,8 +699,7 @@ pub fn shamir_window_table(g1: G, g2: G) -> [G; 16] { g00_00, g01_00, g10_00, g11_00, g00_01, g01_01, g10_01, g11_01, g00_10, g01_10, g10_10, g11_10, g00_11, g01_11, g10_11, g11_11, ]; - G::Projective::batch_normalization(v.as_mut_slice()); - let v: Vec<_> = v.iter().map(|x| x.into_affine()).collect(); + let v: Vec<_> = G::Group::normalize_batch(v.as_mut_slice()); [ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7], v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15], diff --git a/poly-commitment/src/commitment.rs b/poly-commitment/src/commitment.rs index bb2469b49f..cc2b3b07ab 100644 --- a/poly-commitment/src/commitment.rs +++ b/poly-commitment/src/commitment.rs @@ -10,17 +10,15 @@ use crate::srs::endos; use crate::SRS as SRSTrait; use crate::{error::CommitmentError, srs::SRS}; use ark_ec::{ - models::short_weierstrass_jacobian::GroupAffine as SWJAffine, msm::VariableBaseMSM, - AffineCurve, ProjectiveCurve, SWModelParameters, -}; -use ark_ff::{ - BigInteger, Field, FpParameters, One, PrimeField, SquareRootField, UniformRand, Zero, + models::short_weierstrass::Affine as SWJAffine, short_weierstrass::SWCurveConfig, AffineRepr, + CurveGroup, VariableBaseMSM, }; +use ark_ff::{BigInteger, Field, One, PrimeField, UniformRand, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, }; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use core::ops::{Add, Sub}; +use core::ops::{Add, AddAssign, Sub}; use groupmap::{BWParameters, GroupMap}; use mina_poseidon::{sponge::ScalarChallenge, 
FqSponge}; use o1_utils::math; @@ -130,16 +128,16 @@ impl PolyComm { /// ``` /// /// in the other case. -pub fn shift_scalar(x: G::ScalarField) -> G::ScalarField +pub fn shift_scalar(x: G::ScalarField) -> G::ScalarField where G::BaseField: PrimeField, { - let n1 = ::Params::MODULUS; + let n1 = ::MODULUS; let n2 = ::BigInt::from_bits_le( - &::Params::MODULUS.to_bits_le()[..], + &::MODULUS.to_bits_le()[..], ); let two: G::ScalarField = (2u64).into(); - let two_pow = two.pow([::Params::MODULUS_BITS as u64]); + let two_pow = two.pow([::MODULUS_BIT_SIZE as u64]); if n1 < n2 { (x - (two_pow + G::ScalarField::one())) / two } else { @@ -147,7 +145,7 @@ where } } -impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { +impl<'a, 'b, C: AffineRepr> Add<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn add(self, other: &'a PolyComm) -> PolyComm { @@ -156,7 +154,7 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.elems[i] + other.elems[i] + (self.elems[i] + other.elems[i]).into_affine() } else if i < n1 { self.elems[i] } else { @@ -168,7 +166,7 @@ impl<'a, 'b, C: AffineCurve> Add<&'a PolyComm> for &'b PolyComm { } } -impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { +impl<'a, 'b, C: AffineRepr + Sub> Sub<&'a PolyComm> for &'b PolyComm { type Output = PolyComm; fn sub(self, other: &'a PolyComm) -> PolyComm { @@ -177,7 +175,7 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { let n2 = other.elems.len(); for i in 0..std::cmp::max(n1, n2) { let pt = if i < n1 && i < n2 { - self.elems[i] + (-other.elems[i]) + (self.elems[i] - other.elems[i]).into_affine() } else if i < n1 { self.elems[i] } else { @@ -189,7 +187,7 @@ impl<'a, 'b, C: AffineCurve> Sub<&'a PolyComm> for &'b PolyComm { } } -impl PolyComm { +impl PolyComm { pub fn scale(&self, c: C::ScalarField) -> PolyComm { PolyComm { elems: 
self.elems.iter().map(|g| g.mul(c).into_affine()).collect(), @@ -209,7 +207,7 @@ impl PolyComm { return Self::new(vec![C::zero()]); } - let all_scalars: Vec<_> = elm.iter().map(|s| s.into_repr()).collect(); + let all_scalars: Vec<_> = elm.iter().map(|s| s.into_bigint()).collect(); let elems_size = Iterator::max(com.iter().map(|c| c.elems.len())).unwrap(); let mut elems = Vec::with_capacity(elems_size); @@ -222,10 +220,9 @@ impl PolyComm { .filter_map(|(com, scalar)| com.elems.get(chunk).map(|c| (c, scalar))) .unzip(); - let chunk_msm = VariableBaseMSM::multi_scalar_mul::(&points, &scalars); + let chunk_msm = C::Group::msm_bigint(&points, &scalars); elems.push(chunk_msm.into_affine()); } - Self::new(elems) } } @@ -279,41 +276,31 @@ pub fn pows(d: usize, x: F) -> Vec { res } -pub fn squeeze_prechallenge>( +pub fn squeeze_prechallenge>( sponge: &mut EFqSponge, ) -> ScalarChallenge { ScalarChallenge(sponge.challenge()) } -pub fn squeeze_challenge< - Fq: Field, - G, - Fr: PrimeField + SquareRootField, - EFqSponge: FqSponge, ->( +pub fn squeeze_challenge>( endo_r: &Fr, sponge: &mut EFqSponge, ) -> Fr { squeeze_prechallenge(sponge).to_field(endo_r) } -pub fn absorb_commitment< - Fq: Field, - G: Clone, - Fr: PrimeField + SquareRootField, - EFqSponge: FqSponge, ->( +pub fn absorb_commitment>( sponge: &mut EFqSponge, commitment: &PolyComm, ) { sponge.absorb_g(&commitment.elems); } -/// A useful trait extending AffineCurve for commitments. -/// Unfortunately, we can't specify that `AffineCurve`, +/// A useful trait extending AffineRepr for commitments. +/// Unfortunately, we can't specify that `AffineRepr`, /// so usage of this traits must manually bind `G::BaseField: PrimeField`. 
-pub trait CommitmentCurve: AffineCurve { - type Params: SWModelParameters; +pub trait CommitmentCurve: AffineRepr + Sub { + type Params: SWCurveConfig; type Map: GroupMap; fn to_coordinates(&self) -> Option<(Self::BaseField, Self::BaseField)>; @@ -350,7 +337,7 @@ pub trait EndoCurve: CommitmentCurve { } } -impl CommitmentCurve for SWJAffine

{ +impl CommitmentCurve for SWJAffine

{ type Params = P; type Map = BWParameters

; @@ -363,14 +350,11 @@ impl CommitmentCurve for SWJAffine

{ } fn of_coordinates(x: P::BaseField, y: P::BaseField) -> SWJAffine

{ - SWJAffine::

::new(x, y, false) + SWJAffine::

::new_unchecked(x, y) } } -impl EndoCurve for SWJAffine

-where - P::BaseField: PrimeField, -{ +impl EndoCurve for SWJAffine

{ fn combine_one(g1: &[Self], g2: &[Self], x2: Self::ScalarField) -> Vec { crate::combine::affine_window_combine_one(g1, g2, x2) } @@ -395,7 +379,7 @@ where } } -pub fn to_group(m: &G::Map, t: ::BaseField) -> G { +pub fn to_group(m: &G::Map, t: ::BaseField) -> G { let (x, y) = m.to_group(t); G::of_coordinates(x, y) } @@ -433,7 +417,7 @@ pub fn combined_inner_product( /// Contains the evaluation of a polynomial commitment at a set of points. pub struct Evaluation where - G: AffineCurve, + G: AffineRepr, { /// The commitment of the polynomial being evaluated pub commitment: PolyComm, @@ -446,7 +430,7 @@ where // TODO: I think we should really change this name to something more correct pub struct BatchEvaluationProof<'a, G, EFqSponge, OpeningProof> where - G: AffineCurve, + G: AffineRepr, EFqSponge: FqSponge, { pub sponge: EFqSponge, @@ -560,7 +544,7 @@ impl SRSTrait for SRS { .ok_or_else(|| CommitmentError::BlindersDontMatch(blinders.len(), com.len()))? .map(|(g, b)| { let mut g_masked = self.h.mul(b); - g_masked.add_assign_mixed(&g); + g_masked.add_assign(&g); g_masked.into_affine() }); Ok(BlindedCommitment { @@ -581,7 +565,7 @@ impl SRSTrait for SRS { ) -> PolyComm { let is_zero = plnm.is_zero(); - let coeffs: Vec<_> = plnm.iter().map(|c| c.into_repr()).collect(); + let coeffs: Vec<_> = plnm.iter().map(|c| c.into_bigint()).collect(); // chunk while commiting let mut elems = vec![]; @@ -589,7 +573,7 @@ impl SRSTrait for SRS { elems.push(G::zero()); } else { coeffs.chunks(self.g.len()).for_each(|coeffs_chunk| { - let chunk = VariableBaseMSM::multi_scalar_mul(&self.g, coeffs_chunk); + let chunk = G::Group::msm_bigint(&self.g, coeffs_chunk); elems.push(chunk.into_affine()); }); } @@ -807,8 +791,8 @@ impl SRS { } // verify the equation - let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); - VariableBaseMSM::multi_scalar_mul(&points, &scalars) == G::Projective::zero() + let scalars: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect(); + 
G::Group::msm_bigint(&points, &scalars) == G::Group::zero() } } @@ -829,7 +813,7 @@ mod tests { use super::*; use crate::srs::SRS; - use ark_poly::{Polynomial, Radix2EvaluationDomain, UVPolynomial}; + use ark_poly::{DenseUVPolynomial, Polynomial, Radix2EvaluationDomain}; use mina_curves::pasta::{Fp, Vesta as VestaG}; use mina_poseidon::constants::PlonkSpongeConstantsKimchi as SC; use mina_poseidon::sponge::DefaultFqSponge; @@ -1050,12 +1034,12 @@ pub mod caml { impl From> for CamlPolyComm where - G: AffineCurve, + G: AffineRepr, CamlG: From, { fn from(polycomm: PolyComm) -> Self { Self { - unshifted: polycomm.elems.into_iter().map(Into::into).collect(), + unshifted: polycomm.elems.into_iter().map(CamlG::from).collect(), shifted: None, } } @@ -1063,12 +1047,12 @@ pub mod caml { impl<'a, G, CamlG> From<&'a PolyComm> for CamlPolyComm where - G: AffineCurve, + G: AffineRepr, CamlG: From + From<&'a G>, { fn from(polycomm: &'a PolyComm) -> Self { Self { - unshifted: polycomm.elems.iter().map(Into::into).collect(), + unshifted: polycomm.elems.iter().map(Into::::into).collect(), shifted: None, } } @@ -1076,7 +1060,7 @@ pub mod caml { impl From> for PolyComm where - G: AffineCurve + From, + G: AffineRepr + From, { fn from(camlpolycomm: CamlPolyComm) -> PolyComm { assert!( @@ -1084,14 +1068,18 @@ pub mod caml { "mina#14628: Shifted commitments are deprecated and must not be used" ); PolyComm { - elems: camlpolycomm.unshifted.into_iter().map(Into::into).collect(), + elems: camlpolycomm + .unshifted + .into_iter() + .map(Into::::into) + .collect(), } } } impl<'a, G, CamlG> From<&'a CamlPolyComm> for PolyComm where - G: AffineCurve + From<&'a CamlG> + From, + G: AffineRepr + From<&'a CamlG> + From, { fn from(camlpolycomm: &'a CamlPolyComm) -> PolyComm { assert!( @@ -1119,7 +1107,7 @@ pub mod caml { impl From> for CamlOpeningProof where - G: AffineCurve, + G: AffineRepr, CamlG: From, CamlF: From, { @@ -1128,19 +1116,19 @@ pub mod caml { lr: opening_proof .lr .into_iter() - 
.map(|(g1, g2)| (g1.into(), g2.into())) + .map(|(g1, g2)| (CamlG::from(g1), CamlG::from(g2))) .collect(), - delta: opening_proof.delta.into(), + delta: CamlG::from(opening_proof.delta), z1: opening_proof.z1.into(), z2: opening_proof.z2.into(), - sg: opening_proof.sg.into(), + sg: CamlG::from(opening_proof.sg), } } } impl From> for OpeningProof where - G: AffineCurve, + G: AffineRepr, CamlG: Into, CamlF: Into, { diff --git a/poly-commitment/src/evaluation_proof.rs b/poly-commitment/src/evaluation_proof.rs index 6b2e9dcfc3..4c1b80d7e0 100644 --- a/poly-commitment/src/evaluation_proof.rs +++ b/poly-commitment/src/evaluation_proof.rs @@ -1,8 +1,8 @@ use crate::{commitment::*, srs::endos}; use crate::{srs::SRS, PolynomialsToCombine, SRS as _}; -use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; use ark_ff::{FftField, Field, One, PrimeField, UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use ark_poly::{EvaluationDomain, Evaluations}; use mina_poseidon::{sponge::ScalarChallenge, FqSponge}; use o1_utils::{math, ExtendedDensePolynomial}; @@ -224,22 +224,22 @@ impl SRS { let rand_l = ::rand(rng); let rand_r = ::rand(rng); - let l = VariableBaseMSM::multi_scalar_mul( + let l = G::Group::msm_bigint( &[&g[0..n], &[self.h, u]].concat(), &[&a[n..], &[rand_l, inner_prod(a_hi, b_lo)]] .concat() .iter() - .map(|x| x.into_repr()) + .map(|x| x.into_bigint()) .collect::>(), ) .into_affine(); - let r = VariableBaseMSM::multi_scalar_mul( + let r = G::Group::msm_bigint( &[&g[n..], &[self.h, u]].concat(), &[&a[0..n], &[rand_r, inner_prod(a_lo, b_hi)]] .concat() .iter() - .map(|x| x.into_repr()) + .map(|x| x.into_bigint()) .collect::>(), ) .into_affine(); @@ -298,9 +298,8 @@ impl SRS { let d = ::rand(rng); let r_delta = ::rand(rng); - let delta = ((g0.into_projective() + (u.mul(b0))).into_affine().mul(d) - + 
self.h.mul(r_delta)) - .into_affine(); + let delta = ((g0.into_group() + (u.mul(b0))).into_affine().mul(d) + self.h.mul(r_delta)) + .into_affine(); sponge.absorb_g(&[delta]); let c = ScalarChallenge(sponge.challenge()).to_field(&endo_r); @@ -363,7 +362,7 @@ impl SRS { #[serde_as] #[derive(Clone, Debug, Serialize, Deserialize, Default)] #[serde(bound = "G: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize")] -pub struct OpeningProof { +pub struct OpeningProof { /// vector of rounds of L & R commitments #[serde_as(as = "Vec<(o1_utils::serialization::SerdeAs, o1_utils::serialization::SerdeAs)>")] pub lr: Vec<(G, G)>, @@ -377,26 +376,24 @@ pub struct OpeningProof { pub sg: G, } -impl< - BaseField: PrimeField, - G: AffineCurve + CommitmentCurve + EndoCurve, - > crate::OpenProof for OpeningProof +impl + CommitmentCurve + EndoCurve> + crate::OpenProof for OpeningProof { type SRS = SRS; - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, plnms: PolynomialsToCombine, - elm: &[::ScalarField], // vector of evaluation points - polyscale: ::ScalarField, // scaling factor for polynoms - evalscale: ::ScalarField, // scaling factor for evaluation point powers - sponge: EFqSponge, // sponge + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers + sponge: EFqSponge, // sponge rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { srs.open(group_map, plnms, elm, polyscale, evalscale, sponge, rng) @@ -409,7 +406,7 @@ impl< rng: &mut RNG, ) -> bool where - EFqSponge: FqSponge, + EFqSponge: FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { srs.verify(group_map, batch, rng) @@ -421,7 +418,7 @@ pub struct Challenges { pub chal_inv: Vec, } -impl OpeningProof { +impl 
OpeningProof { pub fn prechallenges>( &self, sponge: &mut EFqSponge, diff --git a/poly-commitment/src/lib.rs b/poly-commitment/src/lib.rs index fb7f7491ca..9fa1873ec4 100644 --- a/poly-commitment/src/lib.rs +++ b/poly-commitment/src/lib.rs @@ -14,7 +14,7 @@ pub use commitment::PolyComm; use crate::commitment::{BatchEvaluationProof, BlindedCommitment, CommitmentCurve}; use crate::error::CommitmentError; use crate::evaluation_proof::DensePolynomialOrEvaluations; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; use ark_ff::UniformRand; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Evaluations, Radix2EvaluationDomain as D, @@ -93,19 +93,19 @@ pub trait OpenProof: Sized { type SRS: SRS; #[allow(clippy::too_many_arguments)] - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, group_map: &::Map, plnms: PolynomialsToCombine, // vector of polynomial with optional degree bound and commitment randomness - elm: &[::ScalarField], // vector of evaluation points - polyscale: ::ScalarField, // scaling factor for polynoms - evalscale: ::ScalarField, // scaling factor for evaluation point powers + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + evalscale: ::ScalarField, // scaling factor for evaluation point powers sponge: EFqSponge, // sponge rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng; fn verify( diff --git a/poly-commitment/src/pairing_proof.rs b/poly-commitment/src/pairing_proof.rs index 1a581e538b..55599369a0 100644 --- a/poly-commitment/src/pairing_proof.rs +++ b/poly-commitment/src/pairing_proof.rs @@ -2,11 +2,11 @@ use crate::commitment::*; use crate::evaluation_proof::combine_polys; use crate::srs::SRS; use crate::{CommitmentError, PolynomialsToCombine, SRS as SRSTrait}; -use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; +use 
ark_ec::{pairing::Pairing, AffineRepr, VariableBaseMSM}; use ark_ff::{PrimeField, Zero}; use ark_poly::{ univariate::{DenseOrSparsePolynomial, DensePolynomial}, - EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, UVPolynomial, + DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial, Radix2EvaluationDomain as D, }; use mina_poseidon::FqSponge; use rand_core::{CryptoRng, RngCore}; @@ -18,23 +18,23 @@ use serde_with::serde_as; #[serde( bound = "Pair::G1Affine: ark_serialize::CanonicalDeserialize + ark_serialize::CanonicalSerialize" )] -pub struct PairingProof { +pub struct PairingProof { #[serde_as(as = "o1_utils::serialization::SerdeAs")] pub quotient: Pair::G1Affine, #[serde_as(as = "o1_utils::serialization::SerdeAs")] - pub blinding: ::ScalarField, + pub blinding: ::ScalarField, } -impl Default for PairingProof { +impl Default for PairingProof { fn default() -> Self { Self { - quotient: Pair::G1Affine::prime_subgroup_generator(), - blinding: ::ScalarField::zero(), + quotient: Pair::G1Affine::generator(), + blinding: ::ScalarField::zero(), } } } -impl Clone for PairingProof { +impl Clone for PairingProof { fn clone(&self) -> Self { Self { quotient: self.quotient, @@ -44,12 +44,12 @@ impl Clone for PairingProof { } #[derive(Debug, Serialize, Deserialize)] -pub struct PairingSRS { +pub struct PairingSRS { pub full_srs: SRS, pub verifier_srs: SRS, } -impl Default for PairingSRS { +impl Default for PairingSRS { fn default() -> Self { Self { full_srs: SRS::default(), @@ -58,7 +58,7 @@ impl Default for PairingSRS { } } -impl Clone for PairingSRS { +impl Clone for PairingSRS { fn clone(&self) -> Self { Self { full_srs: self.full_srs.clone(), @@ -71,7 +71,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > PairingSRS { pub fn create(x: F, n: usize) -> Self { @@ -86,24 +86,24 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > 
crate::OpenProof for PairingProof { type SRS = PairingSRS; - fn open::ScalarField>>( + fn open::ScalarField>>( srs: &Self::SRS, _group_map: &::Map, plnms: PolynomialsToCombine, - elm: &[::ScalarField], // vector of evaluation points - polyscale: ::ScalarField, // scaling factor for polynoms - _evalscale: ::ScalarField, // scaling factor for evaluation point powers - _sponge: EFqSponge, // sponge + elm: &[::ScalarField], // vector of evaluation points + polyscale: ::ScalarField, // scaling factor for polynoms + _evalscale: ::ScalarField, // scaling factor for evaluation point powers + _sponge: EFqSponge, // sponge _rng: &mut RNG, ) -> Self where EFqSponge: - Clone + FqSponge<::BaseField, G, ::ScalarField>, + Clone + FqSponge<::BaseField, G, ::ScalarField>, RNG: RngCore + CryptoRng, { PairingProof::create(srs, plnms, elm, polyscale).unwrap() @@ -141,7 +141,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > SRSTrait for PairingSRS { fn max_poly_size(&self) -> usize { @@ -250,7 +250,7 @@ impl< F: PrimeField, G: CommitmentCurve, G2: CommitmentCurve, - Pair: PairingEngine, + Pair: Pairing, > PairingProof { pub fn create>( @@ -283,6 +283,7 @@ impl< blinding: blinding_factor, }) } + pub fn verify( &self, srs: &PairingSRS, // SRS @@ -290,7 +291,7 @@ impl< polyscale: G::ScalarField, // scaling factor for polynoms elm: &[G::ScalarField], // vector of evaluation points ) -> bool { - let poly_commitment = { + let poly_commitment: G::Group = { let mut scalars: Vec = Vec::new(); let mut points = Vec::new(); combine_commitments( @@ -300,9 +301,9 @@ impl< polyscale, F::one(), /* TODO: This is inefficient */ ); - let scalars: Vec<_> = scalars.iter().map(|x| x.into_repr()).collect(); + let scalars: Vec<_> = scalars.iter().map(|x| x.into_bigint()).collect(); - VariableBaseMSM::multi_scalar_mul(&points, &scalars) + G::Group::msm_bigint(&points, &scalars) }; let evals = combine_evaluations(evaluations, polyscale); let 
blinding_commitment = srs.full_srs.h.mul(self.blinding); @@ -314,13 +315,12 @@ impl< .full_srs .commit_non_hiding(&eval_polynomial(elm, &evals), 1) .elems[0] - .into_projective(); - let numerator_commitment = { poly_commitment - eval_commitment - blinding_commitment }; + .into_group(); + let numerator_commitment_proj: ::Group = + { poly_commitment - eval_commitment - blinding_commitment }; + let numerator_commitment_affine: Pair::G1Affine = From::from(numerator_commitment_proj); - let numerator = Pair::pairing( - numerator_commitment, - Pair::G2Affine::prime_subgroup_generator(), - ); + let numerator = Pair::pairing(numerator_commitment_affine, Pair::G2Affine::generator()); let scaled_quotient = Pair::pairing(self.quotient, divisor_commitment); numerator == scaled_quotient } @@ -334,12 +334,12 @@ mod tests { use crate::srs::SRS; use crate::SRS as _; use ark_bn254::Fr as ScalarField; - use ark_bn254::{G1Affine as G1, G2Affine as G2, Parameters}; + use ark_bn254::{Config, G1Affine as G1, G2Affine as G2}; use ark_ec::bn::Bn; use ark_ff::UniformRand; use ark_poly::{ - univariate::DensePolynomial, EvaluationDomain, Polynomial, Radix2EvaluationDomain as D, - UVPolynomial, + univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Polynomial, + Radix2EvaluationDomain as D, }; use rand::{rngs::StdRng, SeedableRng}; @@ -405,7 +405,7 @@ mod tests { let polyscale = ScalarField::rand(rng); - let pairing_proof = PairingProof::>::create( + let pairing_proof = PairingProof::>::create( &srs, polynomials_and_blinders.as_slice(), &evaluation_points, diff --git a/poly-commitment/src/srs.rs b/poly-commitment/src/srs.rs index 355c420b66..c80b9429c7 100644 --- a/poly-commitment/src/srs.rs +++ b/poly-commitment/src/srs.rs @@ -2,7 +2,7 @@ use crate::commitment::CommitmentCurve; use crate::PolyComm; -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; use ark_poly::{EvaluationDomain, 
Radix2EvaluationDomain as D}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -48,10 +48,10 @@ where let endo_q: G::BaseField = mina_poseidon::sponge::endo_coefficient(); let endo_r = { let potential_endo_r: G::ScalarField = mina_poseidon::sponge::endo_coefficient(); - let t = G::prime_subgroup_generator(); + let t = G::generator(); let (x, y) = t.to_coordinates().unwrap(); let phi_t = G::of_coordinates(x * endo_q, y); - if t.mul(potential_endo_r) == phi_t.into_projective() { + if t.mul(potential_endo_r) == phi_t.into_group() { potential_endo_r } else { potential_endo_r * potential_endo_r @@ -81,11 +81,12 @@ where let n = <::BasePrimeField as PrimeField>::BigInt::from_bits_be(&bits); - let t = <::BasePrimeField as PrimeField>::from_repr(n) + let t = <::BasePrimeField as PrimeField>::from_bigint(n) .expect("packing code has a bug"); base_fields.push(t) } let t = G::BaseField::from_base_prime_field_elems(&base_fields).unwrap(); + let (x, y) = map.to_group(t); G::of_coordinates(x, y) } @@ -184,24 +185,22 @@ impl SRS { // For each chunk for i in 0..num_elems { // Initialize the vector with zero curve points - let mut lg: Vec<::Projective> = - vec![::Projective::zero(); n]; + let mut lg: Vec<::Group> = vec![::Group::zero(); n]; // Overwrite the terms corresponding to that chunk with the SRS curve points let start_offset = i * srs_size; let num_terms = min((i + 1) * srs_size, n) - start_offset; for j in 0..num_terms { - lg[start_offset + j] = self.g[j].into_projective() + lg[start_offset + j] = self.g[j].into_group() } // Apply the IFFT domain.ifft_in_place(&mut lg); - ::Projective::batch_normalization(lg.as_mut_slice()); // Append the 'partial Langrange polynomials' to the vector of elems chunks - elems.push(lg) + elems.push(::Group::normalize_batch(lg.as_mut_slice())); } let chunked_commitments: Vec<_> = (0..n) .map(|i| PolyComm { - elems: elems.iter().map(|v| v[i].into_affine()).collect(), + elems: elems.iter().map(|v| v[i]).collect(), }) .collect(); 
self.lagrange_bases.insert(n, chunked_commitments); @@ -214,7 +213,7 @@ impl SRS { let mut x_pow = G::ScalarField::one(); let g: Vec<_> = (0..depth) .map(|_| { - let res = G::prime_subgroup_generator().mul(x_pow); + let res = G::generator().mul(x_pow); x_pow *= x; res.into_affine() }) diff --git a/poly-commitment/src/tests/batch_15_wires.rs b/poly-commitment/src/tests/batch_15_wires.rs index 545a788fd8..488074e2f3 100644 --- a/poly-commitment/src/tests/batch_15_wires.rs +++ b/poly-commitment/src/tests/batch_15_wires.rs @@ -8,7 +8,7 @@ use crate::{ SRS as _, }; use ark_ff::{UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Radix2EvaluationDomain}; use colored::Colorize; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; diff --git a/poly-commitment/src/tests/commitment.rs b/poly-commitment/src/tests/commitment.rs index 38d57994ec..3c96fb8c54 100644 --- a/poly-commitment/src/tests/commitment.rs +++ b/poly-commitment/src/tests/commitment.rs @@ -8,7 +8,7 @@ use crate::{ SRS as _, }; use ark_ff::{UniformRand, Zero}; -use ark_poly::{univariate::DensePolynomial, Radix2EvaluationDomain, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Radix2EvaluationDomain}; use colored::Colorize; use groupmap::GroupMap; use mina_curves::pasta::{Fp, Vesta, VestaParameters}; diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index 7a122051e4..9256552934 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -13,9 +13,10 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } -ark-ec = { version = "0.3.0", features = [ "parallel" ] } -ark-poly = { version = "0.3.0", features = [ "parallel" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-poly = { version 
= "0.4.2", features = [ "parallel" ] } +ark-serialize = { version = "0.4.2", features = ["derive"]} rand = "0.8.0" rayon = "1" serde = { version = "1.0", features = ["derive"] } @@ -32,7 +33,7 @@ ocaml-gen = { version = "0.1.5", optional = true } [dev-dependencies] serde_json = "1.0" hex = "0.4" -ark-serialize = "0.3.0" +ark-serialize = "0.4.2" [features] default = [] diff --git a/poseidon/export_test_vectors/Cargo.toml b/poseidon/export_test_vectors/Cargo.toml index 6af585470b..80baaa21f8 100644 --- a/poseidon/export_test_vectors/Cargo.toml +++ b/poseidon/export_test_vectors/Cargo.toml @@ -10,11 +10,11 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } num-bigint = { version = "0.4.0" } serde_json = { version = "1.0" } hex = { version = "0.4" } -ark-serialize = { version = "0.3.0" } +ark-serialize = { version = "0.4.2" } rand = "0.8.0" serde = { version = "1.0", features = ["derive"] } serde_with = "1.10.0" diff --git a/poseidon/export_test_vectors/src/vectors.rs b/poseidon/export_test_vectors/src/vectors.rs index 7fc8826cfc..b8feddeeb8 100644 --- a/poseidon/export_test_vectors/src/vectors.rs +++ b/poseidon/export_test_vectors/src/vectors.rs @@ -1,5 +1,5 @@ use super::{Mode, ParamType}; -use ark_ff::{fields::PrimeField as _, UniformRand as _}; +use ark_ff::UniformRand as _; use ark_serialize::CanonicalSerialize as _; use mina_curves::pasta::Fp; use mina_poseidon::{ @@ -78,9 +78,10 @@ pub fn generate(mode: Mode, param_type: ParamType) -> TestVectors { .into_iter() .map(|elem| { let mut input_bytes = vec![]; - elem.into_repr() - .serialize(&mut input_bytes) + elem.0 + .serialize_uncompressed(&mut input_bytes) .expect("canonical serialiation should work"); + match mode { Mode::Hex => hex::encode(&input_bytes), Mode::B10 => BigUint::from_bytes_le(&input_bytes).to_string(), @@ -89,8 +90,8 @@ pub fn generate(mode: Mode, param_type: 
ParamType) -> TestVectors { .collect(); let mut output_bytes = vec![]; output - .into_repr() - .serialize(&mut output_bytes) + .0 + .serialize_uncompressed(&mut output_bytes) .expect("canonical serialization should work"); // add vector diff --git a/poseidon/src/poseidon.rs b/poseidon/src/poseidon.rs index ff06022910..6a2d5c51b5 100644 --- a/poseidon/src/poseidon.rs +++ b/poseidon/src/poseidon.rs @@ -3,6 +3,7 @@ use crate::constants::SpongeConstants; use crate::permutation::{full_round, poseidon_block_cipher}; use ark_ff::Field; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -34,7 +35,7 @@ pub enum SpongeState { #[serde_as] #[derive(Clone, Serialize, Deserialize, Default, Debug)] -pub struct ArithmeticSpongeParams { +pub struct ArithmeticSpongeParams { #[serde_as(as = "Vec>")] pub round_constants: Vec>, #[serde_as(as = "Vec>")] diff --git a/poseidon/src/sponge.rs b/poseidon/src/sponge.rs index ff7f00a412..5fd68a2ce0 100644 --- a/poseidon/src/sponge.rs +++ b/poseidon/src/sponge.rs @@ -1,7 +1,7 @@ use crate::constants::SpongeConstants; use crate::poseidon::{ArithmeticSponge, ArithmeticSpongeParams, Sponge}; -use ark_ec::{short_weierstrass_jacobian::GroupAffine, SWModelParameters}; -use ark_ff::{BigInteger, Field, FpParameters, One, PrimeField, Zero}; +use ark_ec::models::short_weierstrass::{Affine, SWCurveConfig}; +use ark_ff::{BigInteger, Field, One, PrimeField, Zero}; pub use crate::FqSponge; @@ -17,9 +17,7 @@ pub struct ScalarChallenge(pub F); pub fn endo_coefficient() -> F { let p_minus_1_over_3 = (F::zero() - F::one()) / F::from(3u64); - let t = F::multiplicative_generator(); - - t.pow(p_minus_1_over_3.into_repr().as_ref()) + F::GENERATOR.pow(p_minus_1_over_3.into_bigint().as_ref()) } fn get_bit(limbs_lsb: &[u64], i: u64) -> u64 { @@ -30,7 +28,7 @@ fn get_bit(limbs_lsb: &[u64], i: u64) -> u64 { impl ScalarChallenge { pub fn to_field_with_length(&self, length_in_bits: usize, 
endo_coeff: &F) -> F { - let rep = self.0.into_repr(); + let rep = self.0.into_bigint(); let r = rep.as_ref(); let mut a: F = 2_u64.into(); @@ -63,7 +61,7 @@ impl ScalarChallenge { } #[derive(Clone)] -pub struct DefaultFqSponge { +pub struct DefaultFqSponge { pub sponge: ArithmeticSponge, pub last_squeezed: Vec, } @@ -74,10 +72,10 @@ pub struct DefaultFrSponge { } fn pack(limbs_lsb: &[u64]) -> B { - let mut res: B = 0.into(); + let mut res: B = 0u64.into(); for &x in limbs_lsb.iter().rev() { res.muln(64); - res.add_nocarry(&x.into()); + res.add_with_carry(&x.into()); } res } @@ -88,10 +86,9 @@ impl DefaultFrSponge { let last_squeezed = self.last_squeezed.clone(); let (limbs, remaining) = last_squeezed.split_at(num_limbs); self.last_squeezed = remaining.to_vec(); - Fr::from_repr(pack::(limbs)) - .expect("internal representation was not a valid field element") + Fr::from(pack::(limbs)) } else { - let x = self.sponge.squeeze().into_repr(); + let x = self.sponge.squeeze().into_bigint(); self.last_squeezed .extend(&x.as_ref()[0..HIGH_ENTROPY_LIMBS]); self.squeeze(num_limbs) @@ -99,7 +96,7 @@ impl DefaultFrSponge { } } -impl DefaultFqSponge +impl DefaultFqSponge where P::BaseField: PrimeField, ::BigInt: Into<::BigInt>, @@ -111,7 +108,7 @@ where self.last_squeezed = remaining.to_vec(); limbs.to_vec() } else { - let x = self.sponge.squeeze().into_repr(); + let x = self.sponge.squeeze().into_bigint(); self.last_squeezed .extend(&x.as_ref()[0..HIGH_ENTROPY_LIMBS]); self.squeeze_limbs(num_limbs) @@ -124,13 +121,13 @@ where } pub fn squeeze(&mut self, num_limbs: usize) -> P::ScalarField { - P::ScalarField::from_repr(pack(&self.squeeze_limbs(num_limbs))) + P::ScalarField::from_bigint(pack(&self.squeeze_limbs(num_limbs))) .expect("internal representation was not a valid field element") } } -impl - FqSponge, P::ScalarField> for DefaultFqSponge +impl FqSponge, P::ScalarField> + for DefaultFqSponge where P::BaseField: PrimeField, ::BigInt: Into<::BigInt>, @@ -143,7 +140,7 @@ where 
} } - fn absorb_g(&mut self, g: &[GroupAffine

]) { + fn absorb_g(&mut self, g: &[Affine

]) { self.last_squeezed = vec![]; for g in g.iter() { if g.infinity { @@ -170,13 +167,13 @@ where self.last_squeezed = vec![]; x.iter().for_each(|x| { - let bits = x.into_repr().to_bits_le(); + let bits = x.into_bigint().to_bits_le(); // absorb - if ::Params::MODULUS - < ::Params::MODULUS.into() + if ::MODULUS + < ::MODULUS.into() { - let fe = P::BaseField::from_repr( + let fe = P::BaseField::from_bigint( ::BigInt::from_bits_le(&bits), ) .expect("padding code has a bug"); @@ -188,7 +185,7 @@ where P::BaseField::zero() }; - let high_bits = P::BaseField::from_repr( + let high_bits = P::BaseField::from_bigint( ::BigInt::from_bits_le(&bits[1..bits.len()]), ) .expect("padding code has a bug"); @@ -200,14 +197,14 @@ where } fn digest(mut self) -> P::ScalarField { - let x: ::BigInt = self.squeeze_field().into_repr(); + let x: ::BigInt = self.squeeze_field().into_bigint(); // Returns zero for values that are too large. // This means that there is a bias for the value zero (in one of the curve). // An attacker could try to target that seed, in order to predict the challenges u and v produced by the Fr-Sponge. // This would allow the attacker to mess with the result of the aggregated evaluation proof. // Previously the attacker's odds were 1/q, now it's (q-p)/q. // Since log2(q-p) ~ 86 and log2(q) ~ 254 the odds of a successful attack are negligible. 
- P::ScalarField::from_repr(x.into()).unwrap_or_else(P::ScalarField::zero) + P::ScalarField::from_bigint(x.into()).unwrap_or_else(P::ScalarField::zero) } fn digest_fq(mut self) -> P::BaseField { diff --git a/proof-systems-vendors b/proof-systems-vendors index 2db2631fbb..782304f933 160000 --- a/proof-systems-vendors +++ b/proof-systems-vendors @@ -1 +1 @@ -Subproject commit 2db2631fbb47fb24ce111ffa4cc4240c438ca993 +Subproject commit 782304f9337249282065e2fc96ef1d8657e93e52 diff --git a/signer/Cargo.toml b/signer/Cargo.toml index aa06d94401..059d830a28 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -17,8 +17,8 @@ mina-curves = { path = "../curves", version = "0.1.0" } mina-hasher = { path = "../hasher", version = "0.1.0" } o1-utils = { path = "../utils", version = "0.1.0" } -ark-ec = { version = "0.3.0", features = [ "parallel" ] } -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-ff = { version = "0.4.2", features = ["parallel", "asm"] } rand = "0.8.0" blake2 = "0.10.0" diff --git a/signer/src/lib.rs b/signer/src/lib.rs index 6f64a44930..ff90997c6d 100644 --- a/signer/src/lib.rs +++ b/signer/src/lib.rs @@ -15,16 +15,16 @@ pub use schnorr::Schnorr; pub use seckey::SecKey; pub use signature::Signature; -use ark_ec::AffineCurve; +use ark_ec::AffineRepr; /// Affine curve point type pub use mina_curves::pasta::Pallas as CurvePoint; /// Base field element type -pub type BaseField = ::BaseField; +pub type BaseField = ::BaseField; /// Scalar field element type -pub type ScalarField = ::ScalarField; +pub type ScalarField = ::ScalarField; /// Mina network (or blockchain) identifier #[derive(Debug, Clone)] diff --git a/signer/src/pubkey.rs b/signer/src/pubkey.rs index 9a52f39494..d02f67b09f 100644 --- a/signer/src/pubkey.rs +++ b/signer/src/pubkey.rs @@ -2,12 +2,12 @@ //! //! 
Definition of public key structure and helpers -use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ec::{short_weierstrass::Affine, AffineRepr, CurveGroup}; use ark_ff::{BigInteger, PrimeField, Zero}; use bs58; use core::fmt; use sha2::{Digest, Sha256}; -use std::ops::Neg; +use std::ops::{Mul, Neg}; use thiserror::Error; use crate::{BaseField, CurvePoint, ScalarField, SecKey}; @@ -86,12 +86,17 @@ impl PubKey { .map_err(|_| PubKeyError::XCoordinateBytes)?; let y = BaseField::from_bytes(&bytes[BaseField::size_in_bytes()..]) .map_err(|_| PubKeyError::YCoordinateBytes)?; - let pt = CurvePoint::get_point_from_x(x, y.0.is_odd()).ok_or(PubKeyError::XCoordinate)?; + let pt = CurvePoint::get_point_from_x_unchecked(x, y.0.is_odd()) + .ok_or(PubKeyError::XCoordinate)?; if pt.y != y { return Err(PubKeyError::NonCurvePoint); } - let public = CurvePoint::new(x, y, pt.infinity); + let public = Affine { + x, + y, + infinity: pt.infinity, + }; if !public.is_on_curve() { return Err(PubKeyError::NonCurvePoint); } @@ -115,7 +120,7 @@ impl PubKey { if secret_key.clone().into_scalar() == ScalarField::zero() { return Err(PubKeyError::SecKey); } - let pt = CurvePoint::prime_subgroup_generator() + let pt = CurvePoint::generator() .mul(secret_key.into_scalar()) .into_affine(); if !pt.is_on_curve() { @@ -158,9 +163,10 @@ impl PubKey { } let x = BaseField::from_bytes(x_bytes).map_err(|_| PubKeyError::XCoordinateBytes)?; - let mut pt = CurvePoint::get_point_from_x(x, y_parity).ok_or(PubKeyError::XCoordinate)?; + let mut pt = + CurvePoint::get_point_from_x_unchecked(x, y_parity).ok_or(PubKeyError::XCoordinate)?; - if pt.y.into_repr().is_even() == y_parity { + if pt.y.into_bigint().is_even() == y_parity { pt.y = pt.y.neg(); } @@ -187,14 +193,14 @@ impl PubKey { let point = self.0; CompressedPubKey { x: point.x, - is_odd: point.y.into_repr().is_odd(), + is_odd: point.y.into_bigint().is_odd(), } } /// Serialize public key into corresponding Mina address pub fn into_address(&self) -> String { let 
point = self.point(); - into_address(&point.x, point.y.into_repr().is_odd()) + into_address(&point.x, point.y.into_bigint().is_odd()) } /// Deserialize public key into bytes @@ -271,7 +277,8 @@ impl CompressedPubKey { } else { return Err(PubKeyError::YCoordinateParity); }; - let public = CurvePoint::get_point_from_x(x, is_odd).ok_or(PubKeyError::XCoordinate)?; + let public = + CurvePoint::get_point_from_x_unchecked(x, is_odd).ok_or(PubKeyError::XCoordinate)?; if !public.is_on_curve() { return Err(PubKeyError::NonCurvePoint); } @@ -294,7 +301,7 @@ impl CompressedPubKey { pub fn from_secret_key(sec_key: SecKey) -> Self { // We do not need to check point is on the curve, since it's derived directly from the generator point let public = PubKey::from_point_unsafe( - CurvePoint::prime_subgroup_generator() + CurvePoint::generator() .mul(sec_key.into_scalar()) .into_affine(), ); diff --git a/signer/src/schnorr.rs b/signer/src/schnorr.rs index 6fb1cff55c..ed780121ac 100644 --- a/signer/src/schnorr.rs +++ b/signer/src/schnorr.rs @@ -5,8 +5,8 @@ //! 
Details: use ark_ec::{ - AffineCurve, // for prime_subgroup_generator() - ProjectiveCurve, // for into_affine() + AffineRepr, // for generator() + CurveGroup, }; use ark_ff::{ BigInteger, // for is_even() @@ -19,7 +19,7 @@ use blake2::{ Blake2bVar, }; use mina_hasher::{self, DomainParameter, Hasher, ROInput}; -use std::ops::Neg; +use std::ops::{Add, Neg}; use crate::{BaseField, CurvePoint, Hashable, Keypair, PubKey, ScalarField, Signature, Signer}; @@ -58,8 +58,10 @@ impl Hashable for Message { impl Signer for Schnorr { fn sign(&mut self, kp: &Keypair, input: &H) -> Signature { let k: ScalarField = self.derive_nonce(kp, input); - let r: CurvePoint = CurvePoint::prime_subgroup_generator().mul(k).into_affine(); - let k: ScalarField = if r.y.into_repr().is_even() { k } else { -k }; + let r: CurvePoint = CurvePoint::generator() + .mul_bigint(k.into_bigint()) + .into_affine(); + let k: ScalarField = if r.y.into_bigint().is_even() { k } else { -k }; let e: ScalarField = self.message_hash(&kp.public, r.x, input); let s: ScalarField = k + e * kp.secret.scalar(); @@ -70,17 +72,19 @@ impl Signer for Schnorr { fn verify(&mut self, sig: &Signature, public: &PubKey, input: &H) -> bool { let ev: ScalarField = self.message_hash(public, sig.rx, input); - let sv: CurvePoint = CurvePoint::prime_subgroup_generator() - .mul(sig.s) + let sv = CurvePoint::generator() + .mul_bigint(sig.s.into_bigint()) .into_affine(); // Perform addition and infinity check in projective coordinates for performance - let rv = public.point().mul(ev).neg().add_mixed(&sv); + let rv = public.point().mul_bigint(ev.into_bigint()).neg().add(sv); + if rv.is_zero() { return false; } + let rv = rv.into_affine(); - rv.y.into_repr().is_even() && rv.x == sig.rx + rv.y.into_bigint().is_even() && rv.x == sig.rx } } @@ -147,7 +151,6 @@ impl Schnorr { // Squeeze and convert from base field element to scalar field element // Since the difference in modulus between the two fields is < 2^125, w.h.p., a // random value from 
one field will fit in the other field. - ScalarField::from_repr(self.hasher.hash(&schnorr_input).into_repr()) - .expect("failed to create scalar") + ScalarField::from(self.hasher.hash(&schnorr_input).into_bigint()) } } diff --git a/tools/kimchi-visu/Cargo.toml b/tools/kimchi-visu/Cargo.toml index bc4b3ac227..89c5ec76ed 100644 --- a/tools/kimchi-visu/Cargo.toml +++ b/tools/kimchi-visu/Cargo.toml @@ -13,8 +13,8 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ec = "0.3.0" -ark-ff = "0.3.0" +ark-ec = "0.4.2" +ark-ff = "0.4.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" serde_with = "1.10.0" diff --git a/turshi/Cargo.toml b/turshi/Cargo.toml index 66fc14fbc0..d725b6e759 100644 --- a/turshi/Cargo.toml +++ b/turshi/Cargo.toml @@ -13,12 +13,12 @@ license = "Apache-2.0" path = "src/lib.rs" [dependencies] -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } hex = "0.4" o1-utils = { path = "../utils", version = "0.1.0" } [dev-dependencies] -ark-ec = { version = "0.3.0", features = [ "parallel" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } mina-curves = { path = "../curves", version = "0.1.0" } diff --git a/turshi/src/helper.rs b/turshi/src/helper.rs index a3d9960372..10b72bcad7 100644 --- a/turshi/src/helper.rs +++ b/turshi/src/helper.rs @@ -50,12 +50,12 @@ impl CairoFieldHelpers for F { #[cfg(test)] mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use mina_curves::pasta::Pallas as CurvePoint; use o1_utils::FieldHelpers; /// Base field element type - pub type BaseField = ::BaseField; + pub type BaseField = ::BaseField; #[test] fn test_field_to_bits() { diff --git a/utils/Cargo.toml b/utils/Cargo.toml index 9c06c56768..b5af2aff8d 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -10,10 +10,10 @@ edition = "2021" license = "Apache-2.0" [dependencies] -ark-ec = { version = "0.3.0", features = [ 
"parallel" ] } -ark-ff = { version = "0.3.0", features = [ "parallel", "asm" ] } -ark-poly = { version = "0.3.0", features = [ "parallel" ] } -ark-serialize = "0.3.0" +ark-ec = { version = "0.4.2", features = [ "parallel" ] } +ark-ff = { version = "0.4.2", features = [ "parallel", "asm" ] } +ark-poly = { version = "0.4.2", features = [ "parallel" ] } +ark-serialize = "0.4.2" bcs = "0.1.3" rayon = "1.3.0" serde = "1.0.130" @@ -27,9 +27,10 @@ sha2 = "0.10.2" thiserror = "1.0.30" rand = "0.8.0" rand_core = "0.6.3" +mina-curves = { path = "../curves", version = "0.1.0" } [dev-dependencies] -ark-ec = { version = "0.3.0", features = [ "parallel" ] } +ark-ec = { version = "0.4.2", features = [ "parallel" ] } mina-curves = { path = "../curves", version = "0.1.0" } num-bigint = { version = "0.4.3", features = ["rand"] } secp256k1 = "0.24.2" diff --git a/utils/src/chunked_polynomial.rs b/utils/src/chunked_polynomial.rs index 6f79de09ea..45433df93b 100644 --- a/utils/src/chunked_polynomial.rs +++ b/utils/src/chunked_polynomial.rs @@ -55,7 +55,7 @@ mod tests { use super::*; use ark_ff::One; - use ark_poly::{univariate::DensePolynomial, UVPolynomial}; + use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use mina_curves::pasta::Fp; #[test] diff --git a/utils/src/dense_polynomial.rs b/utils/src/dense_polynomial.rs index 895e227fe0..2c7859d5d4 100644 --- a/utils/src/dense_polynomial.rs +++ b/utils/src/dense_polynomial.rs @@ -1,7 +1,7 @@ //! This adds a few utility functions for the [DensePolynomial] arkworks type. 
use ark_ff::Field; -use ark_poly::{univariate::DensePolynomial, Polynomial, UVPolynomial}; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use rayon::prelude::*; use crate::chunked_polynomial::ChunkedPolynomial; @@ -32,7 +32,7 @@ impl ExtendedDensePolynomial for DensePolynomial { result .coeffs .par_iter_mut() - .for_each(|coeff| *coeff *= &elm); + .for_each(|coeff: &mut F| *coeff *= &elm); result } @@ -76,7 +76,7 @@ impl ExtendedDensePolynomial for DensePolynomial { mod tests { use super::*; use ark_ff::One; - use ark_poly::{univariate::DensePolynomial, UVPolynomial}; + use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; use mina_curves::pasta::Fp; #[test] diff --git a/utils/src/field_helpers.rs b/utils/src/field_helpers.rs index ed6a91b8c7..484f2e2340 100644 --- a/utils/src/field_helpers.rs +++ b/utils/src/field_helpers.rs @@ -1,6 +1,6 @@ //! Useful helper methods to extend [ark_ff::Field]. -use ark_ff::{BigInteger, Field, FpParameters, PrimeField}; +use ark_ff::{BigInteger, Field, PrimeField}; use num_bigint::{BigUint, RandBigInt}; use rand::rngs::StdRng; use std::ops::Neg; @@ -114,7 +114,7 @@ pub trait FieldHelpers { where F: PrimeField, { - F::size_in_bits() / 8 + (F::size_in_bits() % 8 != 0) as usize + (F::MODULUS_BIT_SIZE / 8) as usize + (F::MODULUS_BIT_SIZE % 8 != 0) as usize } /// Get the modulus as `BigUint` @@ -122,18 +122,19 @@ pub trait FieldHelpers { where F: PrimeField, { - BigUint::from_bytes_le(&F::Params::MODULUS.to_bytes_le()) + BigUint::from_bytes_le(&F::MODULUS.to_bytes_le()) } } impl FieldHelpers for F { fn from_bytes(bytes: &[u8]) -> Result { - F::deserialize(&mut &*bytes).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &*bytes).map_err(|_| FieldHelpersError::DeserializeBytes) } fn from_hex(hex: &str) -> Result { let bytes: Vec = hex::decode(hex).map_err(|_| FieldHelpersError::DecodeHex)?; - F::deserialize(&mut &bytes[..]).map_err(|_| 
FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &bytes[..]) + .map_err(|_| FieldHelpersError::DeserializeBytes) } fn from_bits(bits: &[bool]) -> Result { @@ -145,12 +146,13 @@ impl FieldHelpers for F { bytes }); - F::deserialize(&mut &bytes[..]).map_err(|_| FieldHelpersError::DeserializeBytes) + F::deserialize_uncompressed(&mut &bytes[..]) + .map_err(|_| FieldHelpersError::DeserializeBytes) } fn to_bytes(&self) -> Vec { let mut bytes: Vec = vec![]; - self.serialize(&mut bytes) + self.serialize_uncompressed(&mut bytes) .expect("Failed to serialize field"); bytes @@ -201,12 +203,12 @@ pub fn i32_to_field + Neg>(i: i32) -> F { mod tests { use super::*; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::One; use mina_curves::pasta::Pallas as CurvePoint; /// Base field element type - pub type BaseField = ::BaseField; + pub type BaseField = ::BaseField; #[test] fn field_hex() { @@ -298,7 +300,10 @@ mod tests { .is_ok()); assert_eq!( - BaseField::from_bits(&vec![true; BaseField::size_in_bits()]), + BaseField::from_bits(&vec![ + true; + ::MODULUS_BIT_SIZE as usize + ]), Err(FieldHelpersError::DeserializeBytes) ); @@ -328,7 +333,7 @@ mod tests { let field_zero = BaseField::from(0u32); assert_eq!( - BigUint::from_bytes_be(&field_zero.into_repr().to_bytes_be()), + BigUint::from_bytes_be(&field_zero.0.to_bytes_be()), BigUint::from_bytes_be(&be_zero_32bytes) ); diff --git a/utils/src/foreign_field.rs b/utils/src/foreign_field.rs index f11d191038..14a7c66eb2 100644 --- a/utils/src/foreign_field.rs +++ b/utils/src/foreign_field.rs @@ -390,14 +390,14 @@ fn biguint_to_limbs(x: &BigUint, limb_bits: usize) -> Vec { mod tests { use super::*; use crate::field_helpers::FieldHelpers; - use ark_ec::AffineCurve; + use ark_ec::AffineRepr; use ark_ff::One; use mina_curves::pasta::Pallas as CurvePoint; use num_bigint::RandBigInt; use rand::{rngs::StdRng, SeedableRng}; /// Base field element type - pub type BaseField = ::BaseField; + pub type BaseField = 
::BaseField; const RNG_SEED: [u8; 32] = [ 12, 31, 143, 75, 29, 255, 206, 26, 67, 193, 86, 160, 1, 90, 131, 221, 86, 168, 4, 95, 50, diff --git a/utils/src/serialization.rs b/utils/src/serialization.rs index 72178ca575..82ca15c8da 100644 --- a/utils/src/serialization.rs +++ b/utils/src/serialization.rs @@ -23,7 +23,7 @@ pub mod ser { S: serde::Serializer, { let mut bytes = vec![]; - val.serialize(&mut bytes) + val.serialize_compressed(&mut bytes) .map_err(serde::ser::Error::custom)?; Bytes::serialize_as(&bytes, serializer) @@ -37,7 +37,7 @@ pub mod ser { D: serde::Deserializer<'de>, { let bytes: Vec = Bytes::deserialize_as(deserializer)?; - T::deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_compressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } @@ -60,7 +60,7 @@ where S: serde::Serializer, { let mut bytes = vec![]; - val.serialize(&mut bytes) + val.serialize_compressed(&mut bytes) .map_err(serde::ser::Error::custom)?; if serializer.is_human_readable() { @@ -84,16 +84,17 @@ where } else { Bytes::deserialize_as(deserializer)? }; - T::deserialize(&mut &bytes[..]).map_err(serde::de::Error::custom) + T::deserialize_compressed(&mut &bytes[..]).map_err(serde::de::Error::custom) } } #[cfg(test)] mod tests { - use ark_ec::AffineCurve; + use ark_ec::short_weierstrass::SWCurveConfig; use ark_serialize::Write; use mina_curves::pasta::{Pallas, Vesta}; + use mina_curves::pasta::{PallasParameters, VestaParameters}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use std::io::BufReader; @@ -110,8 +111,8 @@ mod tests { } let data_expected = TestStruct { - pallas: Pallas::prime_subgroup_generator(), - vesta: Vesta::prime_subgroup_generator(), + pallas: PallasParameters::GENERATOR, + vesta: VestaParameters::GENERATOR, }; // reference serialized value