diff --git a/README.md b/README.md
index 6e6032d1c..38f67acc2 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,8 @@ Informally, the library provides the ability to create transactions that run arb
 
 This repository contains several Rust crates that implement the different building blocks of ZEXE. The high-level structure of the repository is as follows.
 
-* [`algebra`](algebra): Rust crate that provides finite fields and elliptic curves
+* [`algebra-core`](algebra-core): Rust crate that provides generic arithmetic for finite fields and elliptic curves
+* [`algebra`](algebra): Rust crate that provides concrete instantiations of some finite fields and elliptic curves
 * [`crypto-primitives`](crypto-primitives): Rust crate that implements some useful cryptographic primitives (and constraints for them)
 * [`dpc`](dpc): Rust crate that implements DPC schemes (the main cryptographic primitive in this repository)
 * [`ff-fft`](ff-fft): Rust crate that provides efficient finite field polynomial arithmetic based on finite field FFTs
@@ -64,9 +65,9 @@ cargo build --release
 This library comes with unit tests for each of the provided crates. Run the tests with:
 ```bash
 cargo test
-``` 
+```
 
-Lastly, this library comes with benchmarks for the following crates:
+This library comes with benchmarks for the following crates:
 
 - [`algebra`](algebra)
 - [`dpc`](dpc)
@@ -76,6 +77,18 @@ These benchmarks require the nightly Rust toolchain; to install this, run `rustu
 cargo +nightly bench
 ```
 
+Compiling with the `adcxq`, `adoxq` and `mulxq` instructions can lead to a 30-70% speedup. These instructions are available on most `x86_64` platforms (Broadwell onwards for Intel and Ryzen onwards for AMD). To enable them, run the following command, replacing `test/build/bench` with the desired Cargo subcommand:
+```bash
+RUSTFLAGS="-C target-feature=+bmi2,+adx" cargo +nightly test/build/bench --features asm
+```
+Tip: when optimising for performance, additionally passing `--emit=asm` in `RUSTFLAGS` may help, but your mileage may vary.
+
+To benchmark `algebra-benches` with greater accuracy, especially for functions whose execution times are on the order of nanoseconds, use the `n_fold` feature to run the selected functions 1000 times per iteration. To enable multiple features at once, enclose them in double quotes:
+```bash
+cargo +nightly bench --features "n_fold bls12_381"
+```
+
+
 ## License
 
 ZEXE is licensed under either of the following licenses, at your discretion.
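The options above compose. As a usage sketch (assuming an `x86_64` CPU with BMI2/ADX support, and using the feature names introduced in `algebra-benches/Cargo.toml` below), the assembly backend, `n_fold`, and a single curve can be benchmarked in one invocation:

```bash
# Sketch: combine the `asm` and `n_fold` features with one curve feature
# (here `bls12_377`); swap in any other curve feature listed in the Cargo.toml.
RUSTFLAGS="-C target-feature=+bmi2,+adx" cargo +nightly bench --features "asm n_fold bls12_377"
```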
diff --git a/algebra-benches/Cargo.toml b/algebra-benches/Cargo.toml index b958e8eca..3e45dde90 100644 --- a/algebra-benches/Cargo.toml +++ b/algebra-benches/Cargo.toml @@ -19,11 +19,27 @@ include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] license = "MIT/Apache-2.0" edition = "2018" publish = false +build = "build.rs" ################################# Dependencies ################################ [dev-dependencies] -algebra = { path = "../algebra", features = [ "full" ] } +algebra = { path = "../algebra" } blake2 = "0.8.1" rand = "0.7" rand_xorshift = { version = "0.2" } +paste = "0.1" + +[features] +asm = [ "algebra/asm"] +n_fold = [] +mnt4_298 = [ "algebra/mnt4_298"] +mnt6_298 = [ "algebra/mnt6_298"] +mnt4_753 = [ "algebra/mnt4_753"] +mnt6_753 = [ "algebra/mnt6_753"] +bls12_381 = [ "algebra/bls12_381"] +bls12_377 = [ "algebra/bls12_377"] +sw6 = [ "algebra/sw6" ] + +[build-dependencies] +rustc_version = "0.1.*" diff --git a/algebra-benches/benches/bls12_377/ec.rs b/algebra-benches/benches/bls12_377/ec.rs deleted file mode 100644 index bf332e32a..000000000 --- a/algebra-benches/benches/bls12_377/ec.rs +++ /dev/null @@ -1,183 +0,0 @@ -mod g1 { - use algebra::{ - bls12_377::{Fr, G1Affine, G1Projective as G1}, - ProjectiveCurve, UniformRand, - }; - use core::ops::AddAssign; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - - #[bench] - fn bench_g1_rand(b: &mut ::test::Bencher) { - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - b.iter(|| G1::rand(&mut rng)); - } - - #[bench] - fn bench_g1_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, Fr)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp *= v[count].1; - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_add_assign_mixed(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1Affine)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng).into())) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign_mixed(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); - } -} - -mod g2 { - use algebra::{ - bls12_377::{Fr, G2Affine, G2Projective as G2}, - ProjectiveCurve, UniformRand, - }; - use core::ops::AddAssign; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - - #[bench] - fn bench_g2_rand(b: &mut ::test::Bencher) { - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - b.iter(|| G2::rand(&mut rng)); - } - - #[bench] - fn 
bench_g2_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, Fr)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp *= v[count].1; - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_add_assign_mixed(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2Affine)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng).into())) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign_mixed(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); - } -} diff --git a/algebra-benches/benches/bls12_377/fq.rs b/algebra-benches/benches/bls12_377/fq.rs deleted file mode 100644 index 86a430e65..000000000 --- a/algebra-benches/benches/bls12_377/fq.rs +++ /dev/null @@ -1,290 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{ - biginteger::BigInteger384 as FqRepr, bls12_377::fq::Fq, BigInteger, Field, PrimeField, - SquareRootField, -}; - -#[bench] -fn bench_fq_repr_add_nocarry(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES) - .map(|_| { - let mut tmp1 = FqRepr::rand(&mut rng); - let mut tmp2 = FqRepr::rand(&mut rng); - // Shave a few bits off to avoid overflow. - for _ in 0..3 { - tmp1.div2(); - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_nocarry(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_sub_noborrow(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES) - .map(|_| { - let tmp1 = FqRepr::rand(&mut rng); - let mut tmp2 = tmp1; - // Ensure tmp2 is smaller than tmp1. 
- for _ in 0..10 { - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_noborrow(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_num_bits(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].num_bits(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_mul2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.mul2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_div2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.div2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].inverse() - }); -} - -#[bench] -fn 
bench_fq_negate(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp = -tmp; - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - let mut tmp = Fq::rand(&mut rng); - tmp.square_in_place(); - tmp - }) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].sqrt() - }); -} - -#[bench] -fn bench_fq_into_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].into_repr() - }); -} - -#[bench] -fn bench_fq_from_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| Fq::rand(&mut rng).into_repr()) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - Fq::from_repr(v[count]) - }); -} diff --git a/algebra-benches/benches/bls12_377/fq12.rs b/algebra-benches/benches/bls12_377/fq12.rs deleted file mode 100644 index 3ee1f0c06..000000000 --- a/algebra-benches/benches/bls12_377/fq12.rs +++ /dev/null @@ -1,113 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{bls12_377::Fq12, Field}; - -#[bench] -fn bench_fq12_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq12, Fq12)> = (0..SAMPLES) - .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq12, Fq12)> = (0..SAMPLES) - .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq12, Fq12)> = (0..SAMPLES) - .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: 
Vec = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].inverse(); - count = (count + 1) % SAMPLES; - tmp - }); -} diff --git a/algebra-benches/benches/bls12_377/fq2.rs b/algebra-benches/benches/bls12_377/fq2.rs deleted file mode 100644 index ac38bde8a..000000000 --- a/algebra-benches/benches/bls12_377/fq2.rs +++ /dev/null @@ -1,129 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{bls12_377::fq2::Fq2, Field, SquareRootField}; - -#[bench] -fn bench_fq2_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq2, Fq2)> = (0..SAMPLES) - .map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq2, Fq2)> = (0..SAMPLES) - .map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq2, Fq2)> = (0..SAMPLES) - .map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].inverse(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].sqrt(); - count = (count 
+ 1) % SAMPLES; - tmp - }); -} diff --git a/algebra-benches/benches/bls12_377/fr.rs b/algebra-benches/benches/bls12_377/fr.rs deleted file mode 100644 index 786605d65..000000000 --- a/algebra-benches/benches/bls12_377/fr.rs +++ /dev/null @@ -1,290 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{ - biginteger::BigInteger256 as FrRepr, bls12_377::fr::Fr, BigInteger, Field, PrimeField, - SquareRootField, -}; - -#[bench] -fn bench_fr_repr_add_nocarry(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES) - .map(|_| { - let mut tmp1 = FrRepr::rand(&mut rng); - let mut tmp2 = FrRepr::rand(&mut rng); - // Shave a few bits off to avoid overflow. - for _ in 0..3 { - tmp1.div2(); - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_nocarry(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_sub_noborrow(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES) - .map(|_| { - let tmp1 = FrRepr::rand(&mut rng); - let mut tmp2 = tmp1; - // Ensure tmp2 is smaller than tmp1. - for _ in 0..10 { - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_noborrow(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_num_bits(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].num_bits(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_mul2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.mul2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_div2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.div2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_mul_assign(b: &mut 
::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].inverse() - }); -} - -#[bench] -fn bench_fr_negate(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp = -tmp; - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - let mut tmp = Fr::rand(&mut rng); - tmp.square_in_place(); - tmp - }) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].sqrt() - }); -} - -#[bench] -fn bench_fr_into_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].into_repr() - }); -} - -#[bench] -fn bench_fr_from_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| Fr::rand(&mut rng).into_repr()) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - Fr::from_repr(v[count]) - }); -} diff --git a/algebra-benches/benches/bls12_377/mod.rs b/algebra-benches/benches/bls12_377/mod.rs deleted file mode 100644 index ea18d0268..000000000 --- a/algebra-benches/benches/bls12_377/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod ec; -mod fq; -mod fq12; -mod fq2; -mod fr; -mod pairing; diff --git a/algebra-benches/benches/bls12_377/pairing.rs b/algebra-benches/benches/bls12_377/pairing.rs deleted file mode 100644 index 8ca4cbd32..000000000 --- a/algebra-benches/benches/bls12_377/pairing.rs +++ /dev/null @@ -1,76 +0,0 @@ -mod pairing { - use algebra::UniformRand; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - - use algebra::{ - bls12::{G1Prepared, G2Prepared}, - bls12_377::{ - Bls12_377, Fq12, G1Affine, G1Projective as G1, G2Affine, 
G2Projective as G2, Parameters, - }, - PairingEngine, - }; - - #[bench] - fn bench_pairing_miller_loop(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1Prepared, G2Prepared)> = (0..SAMPLES) - .map(|_| { - ( - G1Affine::from(G1::rand(&mut rng)).into(), - G2Affine::from(G2::rand(&mut rng)).into(), - ) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = Bls12_377::miller_loop(&[(v[count].0.clone(), v[count].1.clone())]); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_pairing_final_exponentiation(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - let p = G1Affine::from(G1::rand(&mut rng)).into(); - let q = G2Affine::from(G2::rand(&mut rng)).into(); - Bls12_377::miller_loop(&[(p, q)]) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = Bls12_377::final_exponentiation(&v[count]); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_pairing_full(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G2)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = Bls12_377::pairing(v[count].0, v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } -} diff --git a/algebra-benches/benches/bls12_381/ec.rs b/algebra-benches/benches/bls12_381/ec.rs deleted file mode 100644 index 3ec0098e8..000000000 --- a/algebra-benches/benches/bls12_381/ec.rs +++ /dev/null @@ -1,187 +0,0 @@ -mod g1 { - use algebra::UniformRand; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - use std::ops::AddAssign; - - use algebra::{ - bls12_381::{Fr, G1Affine, G1Projective as G1}, - ProjectiveCurve, - }; - - #[bench] - fn bench_g1_rand(b: &mut ::test::Bencher) { - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - b.iter(|| G1::rand(&mut rng)); - } - - #[bench] - fn bench_g1_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, Fr)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp *= v[count].1; - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_add_assign_mixed(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1Affine)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng).into())) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign_mixed(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1)> = (0..SAMPLES) - .map(|_| 
(G1::rand(&mut rng), G1::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); - } -} - -mod g2 { - use algebra::UniformRand; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - use std::ops::AddAssign; - - use algebra::{ - bls12_381::{Fr, G2Affine, G2Projective as G2}, - ProjectiveCurve, - }; - - #[bench] - fn bench_g2_rand(b: &mut ::test::Bencher) { - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - b.iter(|| G2::rand(&mut rng)); - } - - #[bench] - fn bench_g2_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, Fr)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp *= v[count].1; - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_add_assign_mixed(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2Affine)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng).into())) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign_mixed(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); - } -} diff --git a/algebra-benches/benches/bls12_381/fq.rs b/algebra-benches/benches/bls12_381/fq.rs deleted file mode 100644 index 16987d8a1..000000000 --- a/algebra-benches/benches/bls12_381/fq.rs +++ /dev/null @@ -1,290 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{ - biginteger::BigInteger384 as FqRepr, bls12_381::fq::Fq, BigInteger, Field, PrimeField, - SquareRootField, -}; - -#[bench] -fn bench_fq_repr_add_nocarry(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES) - .map(|_| { - let mut tmp1 = FqRepr::rand(&mut rng); - let mut tmp2 = FqRepr::rand(&mut rng); - // Shave a few bits off to avoid overflow. 
- for _ in 0..3 { - tmp1.div2(); - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_nocarry(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_sub_noborrow(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES) - .map(|_| { - let tmp1 = FqRepr::rand(&mut rng); - let mut tmp2 = tmp1; - // Ensure tmp2 is smaller than tmp1. - for _ in 0..10 { - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_noborrow(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_num_bits(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].num_bits(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_mul2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.mul2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_div2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.div2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - 
let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].inverse() - }); -} - -#[bench] -fn bench_fq_negate(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp = -tmp; - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - let mut tmp = Fq::rand(&mut rng); - tmp.square_in_place(); - tmp - }) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].sqrt() - }); -} - -#[bench] -fn bench_fq_into_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].into_repr() - }); -} - -#[bench] -fn bench_fq_from_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| Fq::rand(&mut rng).into_repr()) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - Fq::from_repr(v[count]) - }); -} diff --git a/algebra-benches/benches/bls12_381/fq12.rs b/algebra-benches/benches/bls12_381/fq12.rs deleted file mode 100644 index c92acf8b3..000000000 --- a/algebra-benches/benches/bls12_381/fq12.rs +++ /dev/null @@ -1,112 +0,0 @@ -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{bls12_381::Fq12, Field, UniformRand}; - -#[bench] -fn bench_fq12_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq12, Fq12)> = (0..SAMPLES) - .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq12, Fq12)> = (0..SAMPLES) - .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq12, Fq12)> = (0..SAMPLES) - .map(|_| (Fq12::rand(&mut rng), Fq12::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} 
- -#[bench] -fn bench_fq12_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq12_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq12::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].inverse(); - count = (count + 1) % SAMPLES; - tmp - }); -} diff --git a/algebra-benches/benches/bls12_381/fq2.rs b/algebra-benches/benches/bls12_381/fq2.rs deleted file mode 100644 index 883a41714..000000000 --- a/algebra-benches/benches/bls12_381/fq2.rs +++ /dev/null @@ -1,129 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{bls12_381::fq2::Fq2, Field, SquareRootField}; - -#[bench] -fn bench_fq2_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq2, Fq2)> = (0..SAMPLES) - .map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq2, Fq2)> = (0..SAMPLES) - .map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq2, Fq2)> = (0..SAMPLES) - .map(|_| (Fq2::rand(&mut rng), Fq2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = 
XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].inverse(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq2_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq2::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].sqrt(); - count = (count + 1) % SAMPLES; - tmp - }); -} diff --git a/algebra-benches/benches/bls12_381/fr.rs b/algebra-benches/benches/bls12_381/fr.rs deleted file mode 100644 index a5cc3f4f9..000000000 --- a/algebra-benches/benches/bls12_381/fr.rs +++ /dev/null @@ -1,290 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -use algebra::{ - biginteger::BigInteger256 as FrRepr, bls12_381::fr::Fr, BigInteger, Field, PrimeField, - SquareRootField, -}; - -#[bench] -fn bench_fr_repr_add_nocarry(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES) - .map(|_| { - let mut tmp1 = FrRepr::rand(&mut rng); - let mut tmp2 = FrRepr::rand(&mut rng); - // Shave a few bits off to avoid overflow. - for _ in 0..3 { - tmp1.div2(); - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_nocarry(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_sub_noborrow(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES) - .map(|_| { - let tmp1 = FrRepr::rand(&mut rng); - let mut tmp2 = tmp1; - // Ensure tmp2 is smaller than tmp1. 
- for _ in 0..10 { - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_noborrow(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_num_bits(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].num_bits(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_mul2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.mul2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_div2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.div2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].inverse() - }); -} - -#[bench] -fn 
bench_fr_negate(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp = -tmp; - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - let mut tmp = Fr::rand(&mut rng); - tmp.square_in_place(); - tmp - }) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].sqrt() - }); -} - -#[bench] -fn bench_fr_into_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].into_repr() - }); -} - -#[bench] -fn bench_fr_from_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| Fr::rand(&mut rng).into_repr()) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - Fr::from_repr(v[count]) - }); -} diff --git a/algebra-benches/benches/bls12_381/mod.rs b/algebra-benches/benches/bls12_381/mod.rs deleted file mode 100644 index ea18d0268..000000000 --- a/algebra-benches/benches/bls12_381/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod ec; -mod fq; -mod fq12; -mod fq2; -mod fr; -mod pairing; diff --git a/algebra-benches/benches/bls12_381/pairing.rs b/algebra-benches/benches/bls12_381/pairing.rs deleted file mode 100644 index 78aa7db27..000000000 --- a/algebra-benches/benches/bls12_381/pairing.rs +++ /dev/null @@ -1,78 +0,0 @@ -mod pairing { - use algebra::UniformRand; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - - use algebra::{ - bls12::{G1Prepared, G2Prepared}, - bls12_381::{ - Bls12_381, Fq12, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, Parameters, - }, - PairingEngine, - }; - - #[bench] - fn bench_pairing_miller_loop(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1Prepared, G2Prepared)> = (0..SAMPLES) - .map(|_| { - ( - G1Affine::from(G1::rand(&mut rng)).into(), - G2Affine::from(G2::rand(&mut rng)).into(), - ) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = Bls12_381::miller_loop(&[(v[count].0.clone(), v[count].1.clone())]); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_pairing_final_exponentiation(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - ( - G1Affine::from(G1::rand(&mut rng)).into(), - G2Affine::from(G2::rand(&mut rng)).into(), - ) - }) - .map(|(p, q)| Bls12_381::miller_loop(&[(p, q)])) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = Bls12_381::final_exponentiation(&v[count]); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_pairing_full(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G2)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = 
Bls12_381::pairing(v[count].0, v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } -} diff --git a/algebra-benches/benches/curve_and_field_benches.rs b/algebra-benches/benches/curve_and_field_benches.rs deleted file mode 100644 index 044e3cec6..000000000 --- a/algebra-benches/benches/curve_and_field_benches.rs +++ /dev/null @@ -1,7 +0,0 @@ -#![feature(test)] - -extern crate test; - -mod bls12_377; -mod bls12_381; -mod sw6; diff --git a/algebra-benches/benches/sw6/ec.rs b/algebra-benches/benches/sw6/ec.rs deleted file mode 100644 index e9f747920..000000000 --- a/algebra-benches/benches/sw6/ec.rs +++ /dev/null @@ -1,183 +0,0 @@ -mod g1 { - use algebra::{ - sw6::{Fr, G1Affine, G1Projective as G1}, - ProjectiveCurve, UniformRand, - }; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - use std::ops::AddAssign; - - #[bench] - fn bench_g1_rand(b: &mut ::test::Bencher) { - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - b.iter(|| G1::rand(&mut rng)); - } - - #[bench] - fn bench_g1_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, Fr)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp *= v[count].1; - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_add_assign_mixed(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1Affine)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng).into())) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign_mixed(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g1_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G1)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); - } -} - -mod g2 { - use algebra::{ - sw6::{Fr, G2Affine, G2Projective as G2}, - ProjectiveCurve, UniformRand, - }; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - use std::ops::AddAssign; - - #[bench] - fn bench_g2_rand(b: &mut ::test::Bencher) { - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - b.iter(|| G2::rand(&mut rng)); - } - - #[bench] - fn bench_g2_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, Fr)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp *= v[count].1; - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: 
Vec<(G2, G2)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_add_assign_mixed(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2Affine)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng).into())) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign_mixed(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_g2_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G2, G2)> = (0..SAMPLES) - .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); - } -} diff --git a/algebra-benches/benches/sw6/fq.rs b/algebra-benches/benches/sw6/fq.rs deleted file mode 100644 index 093d62c72..000000000 --- a/algebra-benches/benches/sw6/fq.rs +++ /dev/null @@ -1,290 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; - -use algebra::{ - biginteger::BigInteger832 as FqRepr, sw6::fq::Fq, BigInteger, Field, PrimeField, - SquareRootField, -}; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -#[bench] -fn bench_fq_repr_add_nocarry(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES) - .map(|_| { - let mut tmp1 = FqRepr::rand(&mut rng); - let mut tmp2 = FqRepr::rand(&mut rng); - // Shave a few bits off to avoid overflow. - for _ in 0..3 { - tmp1.div2(); - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_nocarry(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_sub_noborrow(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FqRepr, FqRepr)> = (0..SAMPLES) - .map(|_| { - let tmp1 = FqRepr::rand(&mut rng); - let mut tmp2 = tmp1; - // Ensure tmp2 is smaller than tmp1. 
- for _ in 0..10 { - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_noborrow(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_num_bits(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].num_bits(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_mul2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.mul2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_repr_div2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FqRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.div2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq, Fq)> = (0..SAMPLES) - .map(|_| (Fq::rand(&mut rng), Fq::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].inverse() - }); -} - -#[bench] -fn 
bench_fq_negate(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp = -tmp; - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - let mut tmp = Fq::rand(&mut rng); - tmp.square_in_place(); - tmp - }) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].sqrt() - }); -} - -#[bench] -fn bench_fq_into_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].into_repr() - }); -} - -#[bench] -fn bench_fq_from_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| Fq::rand(&mut rng).into_repr()) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - Fq::from_repr(v[count]) - }); -} diff --git a/algebra-benches/benches/sw6/fq3.rs b/algebra-benches/benches/sw6/fq3.rs deleted file mode 100644 index 624a3f485..000000000 --- a/algebra-benches/benches/sw6/fq3.rs +++ /dev/null @@ -1,129 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; - -use algebra::{sw6::fq3::Fq3, Field, SquareRootField}; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -#[bench] -fn bench_fq3_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq3, Fq3)> = (0..SAMPLES) - .map(|_| (Fq3::rand(&mut rng), Fq3::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq3_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq3, Fq3)> = (0..SAMPLES) - .map(|_| (Fq3::rand(&mut rng), Fq3::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq3_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq3, Fq3)> = (0..SAMPLES) - .map(|_| (Fq3::rand(&mut rng), Fq3::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq3_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq3::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq3_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = 
(0..SAMPLES).map(|_| Fq3::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq3_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq3::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].inverse(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq3_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq3::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].sqrt(); - count = (count + 1) % SAMPLES; - tmp - }); -} diff --git a/algebra-benches/benches/sw6/fq6.rs b/algebra-benches/benches/sw6/fq6.rs deleted file mode 100644 index 46376069a..000000000 --- a/algebra-benches/benches/sw6/fq6.rs +++ /dev/null @@ -1,113 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; - -use algebra::{sw6::Fq6, Field}; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -#[bench] -fn bench_fq6_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq6, Fq6)> = (0..SAMPLES) - .map(|_| (Fq6::rand(&mut rng), Fq6::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq6_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq6, Fq6)> = (0..SAMPLES) - .map(|_| (Fq6::rand(&mut rng), Fq6::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq6_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fq6, Fq6)> = (0..SAMPLES) - .map(|_| (Fq6::rand(&mut rng), Fq6::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq6_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq6::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq6_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq6::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fq6_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fq6::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].inverse(); - count = (count + 1) % SAMPLES; - tmp - }); -} diff --git 
a/algebra-benches/benches/sw6/fr.rs b/algebra-benches/benches/sw6/fr.rs deleted file mode 100644 index caaff5d81..000000000 --- a/algebra-benches/benches/sw6/fr.rs +++ /dev/null @@ -1,291 +0,0 @@ -use algebra::UniformRand; -use rand::SeedableRng; -use rand_xorshift::XorShiftRng; - -use algebra::{ - biginteger::{BigInteger, BigInteger384 as FrRepr}, - sw6::Fr, - Field, PrimeField, SquareRootField, -}; -use std::ops::{AddAssign, MulAssign, SubAssign}; - -#[bench] -fn bench_fr_repr_add_nocarry(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES) - .map(|_| { - let mut tmp1 = FrRepr::rand(&mut rng); - let mut tmp2 = FrRepr::rand(&mut rng); - // Shave a few bits off to avoid overflow. - for _ in 0..3 { - tmp1.div2(); - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_nocarry(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_sub_noborrow(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(FrRepr, FrRepr)> = (0..SAMPLES) - .map(|_| { - let tmp1 = FrRepr::rand(&mut rng); - let mut tmp2 = tmp1; - // Ensure tmp2 is smaller than tmp1. - for _ in 0..10 { - tmp2.div2(); - } - (tmp1, tmp2) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_noborrow(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_num_bits(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let tmp = v[count].num_bits(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_mul2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.mul2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_repr_div2(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| FrRepr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.div2(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_add_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.add_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_sub_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.sub_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_mul_assign(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = 
XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(Fr, Fr)> = (0..SAMPLES) - .map(|_| (Fr::rand(&mut rng), Fr::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count].0; - tmp.mul_assign(&v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_double(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.double_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_square(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp.square_in_place(); - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_inverse(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].inverse() - }); -} - -#[bench] -fn bench_fr_negate(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - let mut tmp = v[count]; - tmp = -tmp; - count = (count + 1) % SAMPLES; - tmp - }); -} - -#[bench] -fn bench_fr_sqrt(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - let mut tmp = Fr::rand(&mut rng); - tmp.square_in_place(); - tmp - }) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].sqrt() - }); -} - -#[bench] -fn bench_fr_into_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES).map(|_| Fr::rand(&mut rng)).collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - v[count].into_repr() - }); -} - -#[bench] -fn bench_fr_from_repr(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| Fr::rand(&mut rng).into_repr()) - .collect(); - - let mut count = 0; - b.iter(|| { - count = (count + 1) % SAMPLES; - Fr::from_repr(v[count]) - }); -} diff --git a/algebra-benches/benches/sw6/mod.rs b/algebra-benches/benches/sw6/mod.rs deleted file mode 100644 index 66eed9a18..000000000 --- a/algebra-benches/benches/sw6/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -mod ec; -mod fq; -mod fq3; -mod fq6; -mod fr; -mod pairing; diff --git a/algebra-benches/benches/sw6/pairing.rs b/algebra-benches/benches/sw6/pairing.rs deleted file mode 100644 index 76bedb218..000000000 --- a/algebra-benches/benches/sw6/pairing.rs +++ /dev/null @@ -1,73 +0,0 @@ -mod pairing { - use algebra::{ - sw6::{Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, SW6}, - PairingEngine, UniformRand, - }; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - - #[bench] - fn bench_pairing_miller_loop(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = 
XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1Affine, G2Affine)> = (0..SAMPLES) - .map(|_| { - ( - G1Affine::from(G1::rand(&mut rng)).into(), - G2Affine::from(G2::rand(&mut rng)).into(), - ) - }) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = SW6::miller_loop(&[(v[count].0.clone(), v[count].1.clone())]); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_pairing_final_exponentiation(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec = (0..SAMPLES) - .map(|_| { - ( - G1Affine::from(G1::rand(&mut rng)).into(), - G2Affine::from(G2::rand(&mut rng)).into(), - ) - }) - .map(|(p, q)| SW6::miller_loop(&[(p, q)])) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = SW6::final_exponentiation(&v[count]); - count = (count + 1) % SAMPLES; - tmp - }); - } - - #[bench] - fn bench_pairing_full(b: &mut ::test::Bencher) { - const SAMPLES: usize = 1000; - - let mut rng = XorShiftRng::seed_from_u64(1231275789u64); - - let v: Vec<(G1, G2)> = (0..SAMPLES) - .map(|_| (G1::rand(&mut rng), G2::rand(&mut rng))) - .collect(); - - let mut count = 0; - b.iter(|| { - let tmp = SW6::pairing(v[count].0, v[count].1); - count = (count + 1) % SAMPLES; - tmp - }); - } -} diff --git a/algebra-benches/build.rs b/algebra-benches/build.rs new file mode 100644 index 000000000..2a906f2b2 --- /dev/null +++ b/algebra-benches/build.rs @@ -0,0 +1,9 @@ +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if version_meta().channel == Channel::Nightly { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/algebra-benches/src/curves/bls12_377.rs b/algebra-benches/src/curves/bls12_377.rs new file mode 100644 index 000000000..25887556e --- /dev/null +++ b/algebra-benches/src/curves/bls12_377.rs @@ -0,0 +1,20 @@ +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use std::ops::{AddAssign, MulAssign, SubAssign}; + +use algebra::{ + biginteger::{BigInteger256 as FrRepr, BigInteger384 as FqRepr}, + bls12::{G1Prepared, G2Prepared}, + bls12_377::{ + fq::Fq, fq2::Fq2, fr::Fr, Bls12_377, Fq12, G1Affine, G1Projective as G1, G2Affine, + G2Projective as G2, Parameters, + }, + BigInteger, Field, PairingEngine, PrimeField, ProjectiveCurve, SquareRootField, UniformRand, +}; + +ec_bench!(); +f_bench!(1, Fq2, Fq2, fq2); +f_bench!(2, Fq12, Fq12, fq12); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +pairing_bench!(Bls12_377, Fq12, prepared_v); diff --git a/algebra-benches/src/curves/bls12_381.rs b/algebra-benches/src/curves/bls12_381.rs new file mode 100644 index 000000000..eb44ff0b8 --- /dev/null +++ b/algebra-benches/src/curves/bls12_381.rs @@ -0,0 +1,20 @@ +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use std::ops::{AddAssign, MulAssign, SubAssign}; + +use algebra::{ + biginteger::{BigInteger256 as FrRepr, BigInteger384 as FqRepr}, + bls12::{G1Prepared, G2Prepared}, + bls12_381::{ + fq::Fq, fq2::Fq2, fr::Fr, Bls12_381, Fq12, G1Affine, G1Projective as G1, G2Affine, + G2Projective as G2, Parameters, + }, + BigInteger, Field, PairingEngine, PrimeField, ProjectiveCurve, SquareRootField, UniformRand, +}; + +ec_bench!(); +f_bench!(1, Fq2, Fq2, fq2); +f_bench!(2, Fq12, Fq12, fq12); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +pairing_bench!(Bls12_381, Fq12, prepared_v); diff --git a/algebra-benches/src/curves/mnt4_298.rs b/algebra-benches/src/curves/mnt4_298.rs new file mode 100644 index 
000000000..4cf2b826d --- /dev/null +++ b/algebra-benches/src/curves/mnt4_298.rs @@ -0,0 +1,19 @@ +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use std::ops::{AddAssign, MulAssign, SubAssign}; + +use algebra::{ + biginteger::BigInteger320 as FqRepr, + mnt4::{G1Prepared, G2Prepared}, + mnt4_298::{ + fq::Fq, fq2::Fq2, fr::Fr, Fq4, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + Parameters, MNT4_298, + }, + BigInteger, Field, PairingEngine, PrimeField, ProjectiveCurve, SquareRootField, UniformRand, +}; + +ec_bench!(); +f_bench!(1, Fq2, Fq2, fq2); +f_bench!(2, Fq4, Fq4, fq4); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +pairing_bench!(MNT4_298, Fq4, prepared_v); diff --git a/algebra-benches/src/curves/mnt4_753.rs b/algebra-benches/src/curves/mnt4_753.rs new file mode 100644 index 000000000..7d1378308 --- /dev/null +++ b/algebra-benches/src/curves/mnt4_753.rs @@ -0,0 +1,19 @@ +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use std::ops::{AddAssign, MulAssign, SubAssign}; + +use algebra::{ + biginteger::BigInteger768 as FqRepr, + mnt4::{G1Prepared, G2Prepared}, + mnt4_753::{ + fq::Fq, fq2::Fq2, fr::Fr, Fq4, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + Parameters, MNT4_753, + }, + BigInteger, Field, PairingEngine, PrimeField, ProjectiveCurve, SquareRootField, UniformRand, +}; + +ec_bench!(); +f_bench!(1, Fq2, Fq2, fq2); +f_bench!(2, Fq4, Fq4, fq4); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +pairing_bench!(MNT4_753, Fq4, prepared_v); diff --git a/algebra-benches/src/curves/mnt6_298.rs b/algebra-benches/src/curves/mnt6_298.rs new file mode 100644 index 000000000..b30e65fdb --- /dev/null +++ b/algebra-benches/src/curves/mnt6_298.rs @@ -0,0 +1,19 @@ +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use std::ops::{AddAssign, MulAssign, SubAssign}; + +use algebra::{ + biginteger::BigInteger320 as FqRepr, + mnt6::{G1Prepared, G2Prepared}, + mnt6_298::{ + fq::Fq, fq3::Fq3, fr::Fr, Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + Parameters, MNT6_298, + }, + BigInteger, Field, PairingEngine, PrimeField, ProjectiveCurve, SquareRootField, UniformRand, +}; + +ec_bench!(); +f_bench!(1, Fq3, Fq3, fq3); +f_bench!(2, Fq6, Fq6, fq6); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +pairing_bench!(MNT6_298, Fq6, prepared_v); diff --git a/algebra-benches/src/curves/mnt6_753.rs b/algebra-benches/src/curves/mnt6_753.rs new file mode 100644 index 000000000..197c8a861 --- /dev/null +++ b/algebra-benches/src/curves/mnt6_753.rs @@ -0,0 +1,19 @@ +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use std::ops::{AddAssign, MulAssign, SubAssign}; + +use algebra::{ + biginteger::BigInteger768 as FqRepr, + mnt6::{G1Prepared, G2Prepared}, + mnt6_753::{ + fq::Fq, fq3::Fq3, fr::Fr, Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + Parameters, MNT6_753, + }, + BigInteger, Field, PairingEngine, PrimeField, ProjectiveCurve, SquareRootField, UniformRand, +}; + +ec_bench!(); +f_bench!(1, Fq3, Fq3, fq3); +f_bench!(2, Fq6, Fq6, fq6); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +pairing_bench!(MNT6_753, Fq6, prepared_v); diff --git a/algebra-benches/src/curves/mod.rs b/algebra-benches/src/curves/mod.rs new file mode 100644 index 000000000..2156b14ce --- /dev/null +++ b/algebra-benches/src/curves/mod.rs @@ -0,0 +1,14 @@ +#[cfg(feature = "bls12_377")] +mod bls12_377; +#[cfg(feature = "bls12_381")] +mod bls12_381; +#[cfg(feature = "mnt4_298")] +mod mnt4_298; +#[cfg(feature = "mnt4_753")] +mod mnt4_753; +#[cfg(feature = "mnt6_298")] +mod mnt6_298; +#[cfg(feature 
= "mnt6_753")] +mod mnt6_753; +#[cfg(feature = "sw6")] +mod sw6; diff --git a/algebra-benches/src/curves/sw6.rs b/algebra-benches/src/curves/sw6.rs new file mode 100644 index 000000000..06cc5a9d5 --- /dev/null +++ b/algebra-benches/src/curves/sw6.rs @@ -0,0 +1,19 @@ +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use std::ops::{AddAssign, MulAssign, SubAssign}; + +use algebra::{ + biginteger::{BigInteger384 as FrRepr, BigInteger832 as FqRepr}, + sw6::{ + fq::Fq, fq3::Fq3, fr::Fr, Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + SW6, + }, + BigInteger, Field, PairingEngine, PrimeField, ProjectiveCurve, SquareRootField, UniformRand, +}; + +ec_bench!(); +f_bench!(1, Fq3, Fq3, fq3); +f_bench!(2, Fq6, Fq6, fq6); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +pairing_bench!(SW6, Fq6, affine_v); diff --git a/algebra-benches/src/lib.rs b/algebra-benches/src/lib.rs index 8b1378917..722a82390 100644 --- a/algebra-benches/src/lib.rs +++ b/algebra-benches/src/lib.rs @@ -1 +1,12 @@ +#![cfg_attr(nightly, feature(test))] +#![allow(unused_macros)] +#[cfg(nightly)] +extern crate test; + +#[cfg(all(nightly, test))] +#[macro_use] +pub mod macros; + +#[cfg(all(nightly, test))] +mod curves; diff --git a/algebra-benches/src/macros/ec.rs b/algebra-benches/src/macros/ec.rs new file mode 100644 index 000000000..d0c041ccf --- /dev/null +++ b/algebra-benches/src/macros/ec.rs @@ -0,0 +1,167 @@ +macro_rules! ec_bench { + () => { + #[bench] + fn bench_g1_rand(b: &mut ::test::Bencher) { + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + b.iter(|| G1::rand(&mut rng)); + } + + #[bench] + fn bench_g1_mul_assign(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G1, Fr)> = (0..SAMPLES) + .map(|_| (G1::rand(&mut rng), Fr::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + tmp *= v[count].1; + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_g1_add_assign(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G1, G1)> = (0..SAMPLES) + .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_g1_add_assign_mixed(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G1, G1Affine)> = (0..SAMPLES) + .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng).into())) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_assign_mixed, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_g1_double(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G1, G1)> = (0..SAMPLES) + .map(|_| (G1::rand(&mut rng), G1::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, double_in_place); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_g2_rand(b: &mut ::test::Bencher) { + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + b.iter(|| G2::rand(&mut rng)); + } + + #[bench] + fn bench_g2_mul_assign(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + 
let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G2, Fr)> = (0..SAMPLES) + .map(|_| (G2::rand(&mut rng), Fr::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + tmp *= v[count].1; + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_g2_add_assign(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G2, G2)> = (0..SAMPLES) + .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + tmp.add_assign(&v[count].1); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_g2_add_assign_mixed(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G2, G2Affine)> = (0..SAMPLES) + .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng).into())) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + tmp.add_assign_mixed(&v[count].1); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_g2_double(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G2, G2)> = (0..SAMPLES) + .map(|_| (G2::rand(&mut rng), G2::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + tmp.double_in_place(); + count = (count + 1) % SAMPLES; + tmp + }); + } + }; +} diff --git a/algebra-benches/src/macros/field.rs b/algebra-benches/src/macros/field.rs new file mode 100644 index 000000000..5e786e2b5 --- /dev/null +++ b/algebra-benches/src/macros/field.rs @@ -0,0 +1,317 @@ +macro_rules! f_bench { + // Use this for base fields + ($f:ident, $f_type:ty, $f_repr:ident, $f_repr_type:ty, $field_ident:ident) => { + field_common!($f, $f_type, $field_ident); + sqrt!($f, $f_type, $field_ident); + field_base!($f, $f_type, $f_repr, $f_repr_type, $field_ident); + }; + // use this for intermediate fields + (1, $f:ident, $f_type:ty, $field_ident:ident) => { + field_common!($f, $f_type, $field_ident); + sqrt!($f, $f_type, $field_ident); + }; + // Use this for the full extension field Fqk + (2, $f:ident, $f_type:ty, $field_ident:ident) => { + field_common!($f, $f_type, $field_ident); + }; +} + +macro_rules! field_common { + ($f:ident, $f_type:ty, $field_ident:ident) => { + paste::item! 
{ + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| ($f::rand(&mut rng), $f::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| ($f::rand(&mut rng), $f::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, sub_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| ($f::rand(&mut rng), $f::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, mul_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, double_in_place); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, square_in_place); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let tmp = v[count].inverse(); + count = (count + 1) % SAMPLES; + tmp + }); + } + } + }; +} + +macro_rules! sqrt { + ($f:ident, $f_type:ty, $field_ident:ident) => { + paste::item! { + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_type> = (0..SAMPLES) + .map(|_| { + let mut tmp = $f::rand(&mut rng); + tmp.square_in_place(); + tmp + }) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + v[count].sqrt() + }); + } + } + }; +} + +macro_rules! field_base { + ($f:ident, $f_type:ty, $f_repr:ident, $f_repr_type:ty, $field_ident:ident) => { + paste::item! { + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| { + let mut tmp1 = $f_repr::rand(&mut rng); + let mut tmp2 = $f_repr::rand(&mut rng); + // Shave a few bits off to avoid overflow. 
+ for _ in 0..3 { + tmp1.div2(); + tmp2.div2(); + } + (tmp1, tmp2) + }) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_nocarry, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| { + let tmp1 = $f_repr::rand(&mut rng); + let mut tmp2 = tmp1; + // Ensure tmp2 is smaller than tmp1. + for _ in 0..10 { + tmp2.div2(); + } + (tmp1, tmp2) + }) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, sub_noborrow, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_repr_type> = (0..SAMPLES).map(|_| $f_repr::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let tmp = v[count].num_bits(); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_repr_type> = (0..SAMPLES).map(|_| $f_repr::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, mul2); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_repr_type> = (0..SAMPLES).map(|_| $f_repr::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, div2); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + tmp = -tmp; + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + v[count].into_repr() + }); + } + + #[bench] + fn [](b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$f_repr_type> = (0..SAMPLES) + .map(|_| $f::rand(&mut rng).into_repr()) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + $f::from_repr(v[count]) + }); + } + } + }; +} diff --git a/algebra-benches/src/macros/mod.rs b/algebra-benches/src/macros/mod.rs new file mode 100644 index 000000000..5c936a240 --- /dev/null +++ b/algebra-benches/src/macros/mod.rs @@ -0,0 +1,11 @@ +#[macro_use] +mod ec; + +#[macro_use] +mod field; + +#[macro_use] +mod pairing; + +#[macro_use] +mod utils; diff --git a/algebra-benches/src/macros/pairing.rs b/algebra-benches/src/macros/pairing.rs new file mode 100644 index 000000000..117391a5f --- /dev/null +++ b/algebra-benches/src/macros/pairing.rs @@ -0,0 +1,61 @@ +macro_rules! 
pairing_bench { + ($curve:ident, $pairing_field:ident, $pairing_type:ident) => { + #[bench] + fn bench_pairing_miller_loop(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + $pairing_type!(v, rng); + + let mut count = 0; + b.iter(|| { + let tmp = $curve::miller_loop(&[(v[count].0.clone(), v[count].1.clone())]); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_pairing_final_exponentiation(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<$pairing_field> = (0..SAMPLES) + .map(|_| { + ( + G1Affine::from(G1::rand(&mut rng)).into(), + G2Affine::from(G2::rand(&mut rng)).into(), + ) + }) + .map(|(p, q)| $curve::miller_loop(&[(p, q)])) + .collect(); + + let mut count = 0; + b.iter(|| { + let tmp = $curve::final_exponentiation(&v[count]); + count = (count + 1) % SAMPLES; + tmp + }); + } + + #[bench] + fn bench_pairing_full(b: &mut ::test::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = XorShiftRng::seed_from_u64(1231275789u64); + + let v: Vec<(G1, G2)> = (0..SAMPLES) + .map(|_| (G1::rand(&mut rng), G2::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let tmp = $curve::pairing(v[count].0, v[count].1); + count = (count + 1) % SAMPLES; + tmp + }); + } + }; +} diff --git a/algebra-benches/src/macros/utils.rs b/algebra-benches/src/macros/utils.rs new file mode 100644 index 000000000..8d9881a47 --- /dev/null +++ b/algebra-benches/src/macros/utils.rs @@ -0,0 +1,49 @@ +macro_rules! n_fold { + ($tmp:ident, $v:ident, $func:ident, $count:ident) => { + const ITERS: usize = 1000; + + #[cfg(not(feature = "n_fold"))] + $tmp.$func(&$v[$count].1); + #[cfg(feature = "n_fold")] + for _ in 0..ITERS { + $tmp.$func(&$v[$count].1); + } + }; + + ($tmp:ident, $func:ident) => { + const ITERS: usize = 1000; + + #[cfg(not(feature = "n_fold"))] + $tmp.$func(); + #[cfg(feature = "n_fold")] + for _ in 0..ITERS { + $tmp.$func(); + } + }; +} + +macro_rules! prepared_v { + ($v:ident, $rng:ident) => { + let $v: Vec<(G1Prepared, G2Prepared)> = (0..SAMPLES) + .map(|_| { + ( + G1Affine::from(G1::rand(&mut $rng)).into(), + G2Affine::from(G2::rand(&mut $rng)).into(), + ) + }) + .collect(); + }; +} + +macro_rules! 
affine_v { + ($v:ident, $rng:ident) => { + let $v: Vec<(G1Affine, G2Affine)> = (0..SAMPLES) + .map(|_| { + ( + G1Affine::from(G1::rand(&mut $rng)).into(), + G2Affine::from(G2::rand(&mut $rng)).into(), + ) + }) + .collect(); + }; +} diff --git a/algebra-core/Cargo.toml b/algebra-core/Cargo.toml index 335144dff..551b6eca8 100644 --- a/algebra-core/Cargo.toml +++ b/algebra-core/Cargo.toml @@ -18,6 +18,7 @@ categories = ["cryptography"] include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] license = "MIT/Apache-2.0" edition = "2018" +build = "build.rs" ################################# Dependencies ################################ @@ -29,6 +30,10 @@ rand = { version = "0.7", default-features = false } rayon = { version = "1", optional = true } unroll = "0.1.4" +[build-dependencies] +field-assembly = { path = "./field-assembly" } +rustc_version = "0.1.*" + [dev-dependencies] rand_xorshift = "0.2" @@ -37,3 +42,4 @@ default = [ "std" ] std = [] parallel = [ "std", "rayon" ] derive = [ "algebra-core-derive" ] +asm = [] diff --git a/algebra-core/build.rs b/algebra-core/build.rs new file mode 100644 index 000000000..fe488528a --- /dev/null +++ b/algebra-core/build.rs @@ -0,0 +1,28 @@ +use std::env; +use std::fs; +use std::path::Path; + +extern crate rustc_version; +use rustc_version::{version_meta, Channel}; + +use field_assembly::generate_macro_string; + +const NUM_LIMBS: usize = 8; + +fn main() { + let out_dir = env::var_os("OUT_DIR").unwrap(); + let dest_path = Path::new(&out_dir).join("field_assembly.rs"); + let is_nightly = version_meta().channel == Channel::Nightly; + + if cfg!(feature = "llvm_asm") && is_nightly { + fs::write(&dest_path, generate_macro_string(NUM_LIMBS)).unwrap(); + } else { + fs::write(&dest_path, "").unwrap(); + } + + println!("cargo:rerun-if-changed=build.rs"); + + if is_nightly { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/algebra-core/field-assembly/Cargo.toml b/algebra-core/field-assembly/Cargo.toml new file mode 100644 index 000000000..2d5c0efd2 --- /dev/null +++ b/algebra-core/field-assembly/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "field-assembly" +version = "0.1.0" +authors = ["jon-chuang <9093549+jon-chuang@users.noreply.github.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +mince = { path = "../mince" } diff --git a/algebra-core/field-assembly/src/context.rs b/algebra-core/field-assembly/src/context.rs new file mode 100644 index 000000000..01d276d22 --- /dev/null +++ b/algebra-core/field-assembly/src/context.rs @@ -0,0 +1,154 @@ +use std::collections::HashMap; + +pub const REG_CLOBBER: [&'static str; 8] = ["r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]; + +#[derive(Clone)] +pub struct Context { + ctx_string: String, + declarations: HashMap, + declaration_vec: Vec, + clobbers: Vec, +} + +#[derive(Clone)] +struct Declare { + ty: String, + var: String, + pos: usize, + token: String, +} + +impl Context { + pub fn new() -> Self { + Context { + ctx_string: String::new(), + declarations: HashMap::new(), + declaration_vec: Vec::new(), + clobbers: Vec::new(), + } + } + + fn append(&mut self, other: &str) { + self.ctx_string += other; + } + + pub fn get_string(&mut self) -> String { + self.ctx_string.clone() + } + + pub fn reset(&mut self) { + self.declarations.clear(); + self.declaration_vec.clear(); + self.clobbers.clear(); + } + + pub fn get(self, id: &str) -> String { + self.declarations + .get(&id.to_string()) + .unwrap() + 
.token + .clone() + } + + pub fn try_get(self, id: &str, fallback_id: &str) -> String { + match self.declarations.get(&id.to_string()) { + Some(dec) => dec.token.clone(), + None => self + .declarations + .get(&fallback_id.to_string()) + .unwrap() + .token + .clone(), + } + } + + pub fn add_declaration(&mut self, id: &str, ty: &str, var: &str) { + self.declarations.insert( + id.to_string(), + Declare { + ty: ty.to_string(), + var: var.to_string(), + pos: self.declarations.len(), + token: format!("${}", self.declarations.len()), + }, + ); + self.declaration_vec.push(Declare { + ty: ty.to_string(), + var: var.to_string(), + pos: self.declaration_vec.len(), + token: format!("${}", self.declaration_vec.len()), + }); + } + + pub fn add_limb(&mut self, limb: usize) { + self.append(&format!( + " + {} => {{", + limb + )) + } + + pub fn add_buffer(&mut self, extra_reg: usize) { + self.append(&format!( + " + let mut spill_buffer = MaybeUninit::<[u64; {}]>::uninit();", + extra_reg + )); + } + + pub fn add_llvm_asm(&mut self, ctx_string: String) { + self.append(&format!( + " + unsafe {{ + llvm_asm!({} + : + :", + ctx_string + )); + } + + pub fn add_clobber_from_vec(&mut self, clobbers: Vec<&str>) { + for clobber in clobbers { + self.clobbers.push(format!(" \"{}\"", clobber)); + } + } + + pub fn add_clobber(&mut self, clobber: &str) { + self.clobbers.push(format!(" \"{}\"", clobber)); + } + + pub fn build(&mut self) { + for i in 0..self.declarations.len() { + let dec = &self.declaration_vec[i]; + let last = i == self.declarations.len() - 1; + let dec = &format!( + " + \"{}\"({}){} // {}", + dec.ty, + dec.var, + if last { "" } else { "," }, + dec.pos + ); + self.append(dec); + } + let clobbers = self.clobbers.join(","); + self.append(&format!( + " + : {} + ); + }} + }}", + clobbers + )); + } + + pub fn end(&mut self, num_limbs: usize) { + self.append(&format!(" + x => panic!(\"llvm_asm_mul (no-carry): number of limbs supported is 2 up to {}. You had {{}}.\", x) + }}; + }} +}} +", + num_limbs)); + } +} diff --git a/algebra-core/field-assembly/src/lib.rs b/algebra-core/field-assembly/src/lib.rs new file mode 100644 index 000000000..02746d966 --- /dev/null +++ b/algebra-core/field-assembly/src/lib.rs @@ -0,0 +1,104 @@ +extern crate std; + +#[macro_use] +pub mod utils; +use utils::*; + +pub mod context; +use context::*; + +use mince::assemble; + +use std::cell::RefCell; + +const MAX_REGS: usize = 6; + +pub fn generate_macro_string(num_limbs: usize) -> std::string::String { + if num_limbs > 3 * MAX_REGS { + panic!( + "Number of limbs must be <= {} and MAX_REGS >= 6", + 3 * MAX_REGS + ); + } + let mut macro_string = String::from( + " + macro_rules! llvm_asm_mul { + ($limbs:expr, $a:expr, $b:expr, $modulus:expr, $mod_prime:expr) => { + match $limbs {", + ); + macro_string += &generate_matches(num_limbs, true); + + macro_string += &" + macro_rules! 
llvm_asm_square { + ($limbs:expr, $a:expr, $modulus:expr, $mod_prime:expr) => { + match $limbs {"; + macro_string += &generate_matches(num_limbs, false); + macro_string +} + +#[assemble] +fn generate_llvm_asm_mul_string( + a: &str, + b: &str, + modulus: &str, + zero: &str, + mod_prime: &str, + limbs: usize, +) -> String { + reg!(a0, a1, a, limbs); + reg!(b0, b1, b, limbs); + reg!(m, m1, modulus, limbs); + + xorq(RCX, RCX); + for i in 0..limbs { + if i == 0 { + mul_1!(a1[0], b1, zero, limbs); + } else { + mul_add_1!(a1, b1, zero, i, limbs); + } + mul_add_shift_1!(m1, mod_prime, zero, i, limbs); + } + for i in 0..limbs { + movq(R[i], a1[i]); + } +} + +fn generate_matches(num_limbs: usize, is_mul: bool) -> String { + let mut ctx = Context::new(); + for limbs in 2..(num_limbs + 1) { + ctx.reset(); + + ctx.add_declaration("a", "r", "&mut $a"); + if is_mul { + ctx.add_declaration("b", "r", "&$b"); + } + ctx.add_declaration("modulus", "r", "&$modulus"); + ctx.add_declaration("0", "i", "0u64"); + ctx.add_declaration("mod_prime", "i", "$mod_prime"); + + ctx.add_limb(limbs); + if limbs > MAX_REGS { + ctx.add_buffer(2 * limbs); + ctx.add_declaration("buf", "r", "&mut spill_buffer"); + } + + let llvm_asm_string = generate_llvm_asm_mul_string( + &ctx.clone().get("a"), + &ctx.clone().try_get("b", "a"), + &ctx.clone().get("modulus"), + &ctx.clone().get("0"), + &ctx.clone().get("mod_prime"), + limbs, + ); + + ctx.add_llvm_asm(llvm_asm_string); + ctx.add_clobber_from_vec(vec!["rcx", "rbx", "rdx", "rax"]); + for j in 0..std::cmp::min(limbs, 8) { + ctx.add_clobber(REG_CLOBBER[j]); + } + ctx.add_clobber_from_vec(vec!["cc", "memory"]); + ctx.build(); + } + ctx.end(num_limbs); + ctx.get_string() +} diff --git a/algebra-core/field-assembly/src/utils.rs b/algebra-core/field-assembly/src/utils.rs new file mode 100644 index 000000000..7d9ebba01 --- /dev/null +++ b/algebra-core/field-assembly/src/utils.rs @@ -0,0 +1,20 @@ +pub const RAX: &'static str = "%rax"; +pub const RBX: &'static str = "%rbx"; +pub const RCX: &'static str = "%rcx"; +pub const RDX: &'static str = "%rdx"; +pub const RDI: &'static str = "%rdi"; +pub const RSI: &'static str = "%rsi"; +pub const R: [&'static str; 8] = ["%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"]; + +macro_rules! reg { + ($a_0:ident, $a_1:ident, $a:ident, $range:expr) => { + let mut $a_0 = Vec::new(); + let mut $a_1 = Vec::new(); + for i in 0..$range { + $a_0.push(format!("{}({})", i * 8, $a)); + } + for i in 0..$range { + $a_1.push(&*$a_0[i]); + } + }; +} diff --git a/algebra-core/mince/Cargo.toml b/algebra-core/mince/Cargo.toml new file mode 100644 index 000000000..7fe5e22ab --- /dev/null +++ b/algebra-core/mince/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "mince" +version = "0.1.0" +authors = ["jon-chuang <9093549+jon-chuang@users.noreply.github.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +quote = "1.0" +syn = {version = "1.0.17", features = ["full"]} + +[lib] +proc-macro = true diff --git a/algebra-core/mince/src/arithmetic.rs b/algebra-core/mince/src/arithmetic.rs new file mode 100644 index 000000000..dae7af3c2 --- /dev/null +++ b/algebra-core/mince/src/arithmetic.rs @@ -0,0 +1,60 @@ +use proc_macro::TokenStream; +use quote::quote; + +pub fn define_arithmetic() -> TokenStream { + (quote! { + { + macro_rules! 
mul_1 { + ($a:expr, $b:ident, $zero:ident, $limbs:expr) => { + movq($a, RDX); + mulxq($b[0], R[0], R[1]); + for j in 1..$limbs-1 { + mulxq($b[j], RAX, R[((j + 1) % $limbs)]); + adcxq(RAX, R[j]); + } + mulxq($b[$limbs-1], RAX, RCX); + movq($zero, RBX); + adcxq(RAX, R[$limbs-1]); + adcxq(RBX, RCX); + } + } + + macro_rules! mul_add_1 { + ($a:ident, $b:ident, $zero:ident, $i:ident, $limbs:expr) => { + movq($a[$i], RDX); + for j in 0..$limbs-1 { + mulxq($b[j], RAX, RBX); + adcxq(RAX, R[(j+$i) % $limbs]); + adoxq(RBX, R[(j+$i+1) % $limbs]); + } + mulxq($b[$limbs-1], RAX, RCX); + movq($zero, RBX); + adcxq(RAX, R[($i+$limbs-1) % $limbs]); + adoxq(RBX, RCX); + adcxq(RBX, RCX); + } + } + + macro_rules! mul_add_shift_1 { + ($a:ident, $mod_prime:ident, $zero:ident, $i:ident, $limbs:expr) => { + movq($mod_prime, RDX); + mulxq(R[$i], RDX, RAX); + mulxq($a[0], RAX, RBX); + adcxq(R[$i % $limbs], RAX); + adoxq(RBX, R[($i+1) % $limbs]); + for j in 1..$limbs-1 { + mulxq($a[j], RAX, RBX); + adcxq(RAX, R[(j+$i) % $limbs]); + adoxq(RBX, R[(j+$i+1) % $limbs]); + } + mulxq($a[$limbs-1], RAX, R[$i % $limbs]); + movq($zero, RBX); + adcxq(RAX, R[($i+$limbs-1) % $limbs]); + adoxq(RCX, R[$i % $limbs]); + adcxq(RBX, R[$i % $limbs]); + } + } + } + }) + .into() +} diff --git a/algebra-core/mince/src/intrinsics.rs b/algebra-core/mince/src/intrinsics.rs new file mode 100644 index 000000000..ba3edd0b0 --- /dev/null +++ b/algebra-core/mince/src/intrinsics.rs @@ -0,0 +1,47 @@ +use proc_macro::TokenStream; +use quote::quote; + +pub fn define_intrinsics() -> TokenStream { + (quote! { + { + let mut begin = || { + llvm_asm_string.borrow_mut().push_str("\""); + }; + + let mut end = || { + llvm_asm_string.borrow_mut().push_str(" + \""); + }; + + let mut comment = | comment: &str | { + llvm_asm_string.borrow_mut().push_str(&format!(" // {}", comment)); + }; + + let mut mulxq = | a: &str, b: &str, c: &str | { + llvm_asm_string.borrow_mut().push_str(&format!(" + mulxq {}, {}, {}", a, b, c)); + }; + + let mut adcxq = | a: &str, b: &str| { + llvm_asm_string.borrow_mut().push_str(&format!(" + adcxq {}, {}", a, b)); + }; + + let mut adoxq = | a: &str, b: &str | { + llvm_asm_string.borrow_mut().push_str(&format!(" + adoxq {}, {}", a, b)); + }; + + let mut movq = | a: &str, b: &str | { + llvm_asm_string.borrow_mut().push_str(&format!(" + movq {}, {}", a, b)); + }; + + let mut xorq = | a: &str, b: &str | { + llvm_asm_string.borrow_mut().push_str(&format!(" + xorq {}, {}", a, b)); + }; + } + }) + .into() +} diff --git a/algebra-core/mince/src/lib.rs b/algebra-core/mince/src/lib.rs new file mode 100644 index 000000000..2b9021ce7 --- /dev/null +++ b/algebra-core/mince/src/lib.rs @@ -0,0 +1,62 @@ +#![recursion_limit = "256"] + +extern crate proc_macro; + +mod intrinsics; +use intrinsics::*; + +mod arithmetic; +use arithmetic::*; + +use proc_macro::TokenStream; +use quote::quote; +use syn; + +#[proc_macro_attribute] +pub fn assemble(_meta: TokenStream, input: TokenStream) -> TokenStream { + let ast: syn::ItemFn = syn::parse(input).unwrap(); + let sig = ast.sig; + let block = ast.block; + let attrs = ast.attrs; + + let arithmetic: syn::Block = syn::parse(define_arithmetic()).unwrap(); + let intrinsics: syn::Block = syn::parse(define_intrinsics()).unwrap(); + + let begin: syn::Stmt = syn::parse((quote! { begin(); }).into()).unwrap(); + let end: syn::Stmt = syn::parse((quote! { end(); }).into()).unwrap(); + let ret: syn::Stmt = + syn::parse((quote! 
{ return llvm_asm_string.into_inner(); }).into()).unwrap(); + + let mut new_stmts = Vec::new(); + for stmt in &intrinsics.stmts { + new_stmts.push(stmt.clone()); + } + for stmt in &arithmetic.stmts { + new_stmts.push(stmt.clone()); + } + + new_stmts.push(begin); + + for stmt in block.stmts { + new_stmts.push(stmt); + } + + new_stmts.push(end); + new_stmts.push(ret); + + let new_block = syn::Block { + brace_token: block.brace_token, + stmts: new_stmts, + }; + + let gen = quote! { + #(#attrs) + * + #sig { + let mut llvm_asm_string = RefCell::new(String::new()); + + #new_block + } + }; + gen.into() +} diff --git a/algebra-core/src/fields/arithmetic.rs b/algebra-core/src/fields/arithmetic.rs new file mode 100644 index 000000000..681a4d76d --- /dev/null +++ b/algebra-core/src/fields/arithmetic.rs @@ -0,0 +1,499 @@ +/// This modular multiplication algorithm uses Montgomery +/// reduction for efficient implementation. It also additionally +/// uses the "no-carry optimization" outlined +/// [here](https://hackmd.io/@zkteam/modular_multiplication) if +/// `P::MODULUS` has (a) a non-zero MSB, and (b) at least one +/// zero bit in the rest of the modulus. +macro_rules! impl_field_mul_assign { + ($limbs:expr) => { + #[inline] + #[unroll_for_loops] + fn mul_assign(&mut self, other: &Self) { + // Checking the modulus at compile time + let first_bit_set = P::MODULUS.0[$limbs - 1] >> 63 != 0; + let mut all_bits_set = P::MODULUS.0[$limbs - 1] == !0 - (1 << 63); + for i in 1..$limbs { + all_bits_set &= P::MODULUS.0[$limbs - i - 1] == !0u64; + } + let _no_carry: bool = !(first_bit_set || all_bits_set); + + // No-carry optimisation applied to CIOS + if _no_carry { + #[cfg(all( + feature = "llvm_asm", + target_feature = "bmi2", + target_feature = "adx", + target_arch = "x86_64", + nightly, + ))] + { + if $limbs <= 6 { + #[allow(unsafe_code)] + llvm_asm_mul!($limbs, (self.0).0, (other.0).0, P::MODULUS.0, P::INV); + self.reduce(); + return; + } + } + let mut r = [0u64; $limbs]; + let mut carry1 = 0u64; + let mut carry2 = 0u64; + + for i in 0..$limbs { + r[0] = fa::mac(r[0], (self.0).0[0], (other.0).0[i], &mut carry1); + let k = r[0].wrapping_mul(P::INV); + fa::mac_discard(r[0], k, P::MODULUS.0[0], &mut carry2); + for j in 1..$limbs { + r[j] = fa::mac_with_carry(r[j], (self.0).0[j], (other.0).0[i], &mut carry1); + r[j - 1] = fa::mac_with_carry(r[j], k, P::MODULUS.0[j], &mut carry2); + } + r[$limbs - 1] = carry1 + carry2; + } + (self.0).0 = r; + self.reduce(); + // Alternative implementation + } else { + let mut r = [0u64; $limbs * 2]; + + for i in 0..$limbs { + let mut carry = 0; + for j in 0..$limbs { + r[j + i] = + fa::mac_with_carry(r[j + i], (self.0).0[i], (other.0).0[j], &mut carry); + } + r[$limbs + i] = carry; + } + // Montgomery reduction + let mut _carry2 = 0; + for i in 0..$limbs { + let k = r[i].wrapping_mul(P::INV); + let mut carry = 0; + fa::mac_with_carry(r[i], k, P::MODULUS.0[0], &mut carry); + for j in 1..$limbs { + r[j + i] = fa::mac_with_carry(r[j + i], k, P::MODULUS.0[j], &mut carry); + } + r[$limbs + i] = fa::adc(r[$limbs + i], _carry2, &mut carry); + _carry2 = carry; + } + (self.0).0.copy_from_slice(&r[$limbs..]); + self.reduce(); + } + } + }; +} + +macro_rules! 
impl_field_into_repr { + ($limbs:expr, $BigIntegerType:ty) => { + #[inline] + #[unroll_for_loops] + fn into_repr(&self) -> $BigIntegerType { + let mut tmp = self.0; + let mut r = tmp.0; + // Montgomery Reduction + for i in 0..$limbs { + let k = r[i].wrapping_mul(P::INV); + let mut carry = 0; + + fa::mac_with_carry(r[i], k, P::MODULUS.0[0], &mut carry); + for j in 1..$limbs { + r[(j + i) % $limbs] = + fa::mac_with_carry(r[(j + i) % $limbs], k, P::MODULUS.0[j], &mut carry); + } + r[i % $limbs] = carry; + } + tmp.0 = r; + tmp + } + }; +} + +macro_rules! impl_field_square_in_place { + ($limbs: expr) => { + #[inline] + #[unroll_for_loops] + #[allow(unused_braces)] + fn square_in_place(&mut self) -> &mut Self { + // Checking the modulus at compile time + let first_bit_set = P::MODULUS.0[$limbs - 1] >> 63 != 0; + let mut all_bits_set = P::MODULUS.0[$limbs - 1] == !0 - (1 << 63); + for i in 1..$limbs { + all_bits_set &= P::MODULUS.0[$limbs - i - 1] == !0u64; + } + let _no_carry: bool = !(first_bit_set || all_bits_set); + + #[cfg(all( + feature = "llvm_asm", + target_feature = "bmi2", + target_feature = "adx", + target_arch = "x86_64", + nightly, + ))] + { + if $limbs <= 6 && _no_carry { + #[allow(unsafe_code)] + llvm_asm_square!($limbs, (self.0).0, P::MODULUS.0, P::INV); + self.reduce(); + return self; + } + } + let mut r = [0u64; $limbs * 2]; + + let mut carry = 0; + for i in 0..$limbs { + if i < $limbs - 1 { + for j in 0..$limbs { + if j >= i + 1 { + r[i + j] = fa::mac_with_carry( + r[i + j], + (self.0).0[i], + (self.0).0[j], + &mut carry, + ); + } + } + r[$limbs + i] = carry; + carry = 0; + } + } + r[$limbs * 2 - 1] = r[$limbs * 2 - 2] >> 63; + for i in 0..$limbs { + r[$limbs * 2 - 2 - i] = + (r[$limbs * 2 - 2 - i] << 1) | (r[$limbs * 2 - 3 - i] >> 63); + } + for i in 3..$limbs { + r[$limbs + 1 - i] = (r[$limbs + 1 - i] << 1) | (r[$limbs - i] >> 63); + } + r[1] = r[1] << 1; + + for i in 0..$limbs { + r[2 * i] = fa::mac_with_carry(r[2 * i], (self.0).0[i], (self.0).0[i], &mut carry); + r[2 * i + 1] = fa::adc(r[2 * i + 1], 0, &mut carry); + } + // Montgomery reduction + let mut _carry2 = 0; + for i in 0..$limbs { + let k = r[i].wrapping_mul(P::INV); + let mut carry = 0; + fa::mac_with_carry(r[i], k, P::MODULUS.0[0], &mut carry); + for j in 1..$limbs { + r[j + i] = fa::mac_with_carry(r[j + i], k, P::MODULUS.0[j], &mut carry); + } + r[$limbs + i] = fa::adc(r[$limbs + i], _carry2, &mut carry); + _carry2 = carry; + } + (self.0).0.copy_from_slice(&r[$limbs..]); + self.reduce(); + self + } + }; +} + +macro_rules! impl_field_bigint_conv { + ($field: ident, $bigint: ident, $params: ident) => { + impl Into<$bigint> for $field
<P>
{ + fn into(self) -> $bigint { + self.into_repr() + } + } + + impl From<$bigint> for $field
<P>
{ + fn from(int: $bigint) -> Self { + Self::from_repr(int) + } + } + }; +} + +macro_rules! impl_prime_field_standard_sample { + ($field: ident, $params: ident) => { + impl rand::distributions::Distribution<$field
<P>
> + for rand::distributions::Standard + { + #[inline] + fn sample(&self, rng: &mut R) -> $field
<P>
{ + loop { + let mut tmp = $field(rng.sample(rand::distributions::Standard), PhantomData); + // Mask away the unused bits at the beginning. + tmp.0 + .as_mut() + .last_mut() + .map(|val| *val &= core::u64::MAX >> P::REPR_SHAVE_BITS); + + if tmp.is_valid() { + return tmp; + } + } + } + } + }; +} + +macro_rules! impl_prime_field_from_int { + ($field: ident, u128, $params: ident) => { + impl From for $field
<P>
{ + fn from(other: u128) -> Self { + let upper = (other >> 64) as u64; + let lower = ((other << 64) >> 64) as u64; + let mut default_int = P::BigInt::default(); + default_int.0[0] = lower; + default_int.0[1] = upper; + Self::from_repr(default_int) + } + } + }; + ($field: ident, $int: ident, $params: ident) => { + impl From<$int> for $field
<P>
{ + fn from(other: $int) -> Self { + Self::from_repr(P::BigInt::from(u64::from(other))) + } + } + }; +} + +macro_rules! sqrt_impl { + ($Self:ident, $P:tt, $self:expr) => {{ + use crate::fields::LegendreSymbol::*; + // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5) + // Actually this is just normal Tonelli-Shanks; since `P::Generator` + // is a quadratic non-residue, `P::ROOT_OF_UNITY = P::GENERATOR ^ t` + // is also a quadratic non-residue (since `t` is odd). + match $self.legendre() { + Zero => Some(*$self), + QuadraticNonResidue => None, + QuadraticResidue => { + let mut z = $Self::qnr_to_t(); + let mut w = $self.pow($P::T_MINUS_ONE_DIV_TWO); + let mut x = w * $self; + let mut b = x * &w; + + let mut v = $P::TWO_ADICITY as usize; + // t = self^t + #[cfg(debug_assertions)] + { + let mut check = b; + for _ in 0..(v - 1) { + check.square_in_place(); + } + if !check.is_one() { + panic!("Input is not a square root, but it passed the QR test") + } + } + + while !b.is_one() { + let mut k = 0usize; + + let mut b2k = b; + while !b2k.is_one() { + // invariant: b2k = b^(2^k) after entering this loop + b2k.square_in_place(); + k += 1; + } + + let j = v - k - 1; + w = z; + for _ in 0..j { + w.square_in_place(); + } + + z = w.square(); + b *= &z; + x *= &w; + v = k; + } + + Some(x) + } + } + }}; +} + +// Implements AddAssign on Self by deferring to an implementation on &Self +#[macro_export] +macro_rules! impl_additive_ops_from_ref { + ($type: ident, $params: ident) => { + #[allow(unused_qualifications)] + impl core::ops::Add for $type
<P>
{ + type Output = Self; + + #[inline] + fn add(self, other: Self) -> Self { + let mut result = self; + result.add_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Add<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn add(self, other: &'a mut Self) -> Self { + let mut result = self; + result.add_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl core::ops::Sub for $type
<P>
{ + type Output = Self; + + #[inline] + fn sub(self, other: Self) -> Self { + let mut result = self; + result.sub_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Sub<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn sub(self, other: &'a mut Self) -> Self { + let mut result = self; + result.sub_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl core::iter::Sum for $type
<P>
{ + fn sum>(iter: I) -> Self { + iter.fold(Self::zero(), core::ops::Add::add) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::iter::Sum<&'a Self> for $type
<P>
{ + fn sum>(iter: I) -> Self { + iter.fold(Self::zero(), core::ops::Add::add) + } + } + + #[allow(unused_qualifications)] + impl core::ops::AddAssign for $type
<P>
{ + fn add_assign(&mut self, other: Self) { + self.add_assign(&other) + } + } + + #[allow(unused_qualifications)] + impl core::ops::SubAssign for $type
<P>
{ + fn sub_assign(&mut self, other: Self) { + self.sub_assign(&other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::AddAssign<&'a mut Self> for $type
<P>
{ + fn add_assign(&mut self, other: &'a mut Self) { + self.add_assign(&*other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::SubAssign<&'a mut Self> for $type
<P>
{ + fn sub_assign(&mut self, other: &'a mut Self) { + self.sub_assign(&*other) + } + } + }; +} + +// Implements multiplicative ops (Mul, Div, MulAssign, DivAssign, Product) on Self by deferring to implementations on &Self +#[macro_export] +macro_rules! impl_multiplicative_ops_from_ref { + ($type: ident, $params: ident) => { + #[allow(unused_qualifications)] + impl core::ops::Mul for $type
<P>
{ + type Output = Self; + + #[inline] + fn mul(self, other: Self) -> Self { + let mut result = self; + result.mul_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl core::ops::Div for $type
<P>
{ + type Output = Self; + + #[inline] + fn div(self, other: Self) -> Self { + let mut result = self; + result.div_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Mul<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn mul(self, other: &'a mut Self) -> Self { + let mut result = self; + result.mul_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Div<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn div(self, other: &'a mut Self) -> Self { + let mut result = self; + result.div_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl core::iter::Product for $type
<P>
{ + fn product>(iter: I) -> Self { + iter.fold(Self::one(), core::ops::Mul::mul) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::iter::Product<&'a Self> for $type
<P>
{ + fn product>(iter: I) -> Self { + iter.fold(Self::one(), Mul::mul) + } + } + + #[allow(unused_qualifications)] + impl core::ops::MulAssign for $type
<P>
{ + fn mul_assign(&mut self, other: Self) { + self.mul_assign(&other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::DivAssign<&'a mut Self> for $type
<P>
{ + fn div_assign(&mut self, other: &'a mut Self) { + self.div_assign(&*other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::MulAssign<&'a mut Self> for $type
<P>
{ + fn mul_assign(&mut self, other: &'a mut Self) { + self.mul_assign(&*other) + } + } + + #[allow(unused_qualifications)] + impl core::ops::DivAssign for $type
<P>
{ + fn div_assign(&mut self, other: Self) { + self.div_assign(&other) + } + } + }; +} diff --git a/algebra-core/src/fields/macros.rs b/algebra-core/src/fields/macros.rs index 7975c12a3..eaf92cdec 100644 --- a/algebra-core/src/fields/macros.rs +++ b/algebra-core/src/fields/macros.rs @@ -1,7 +1,6 @@ macro_rules! impl_Fp { - ($Fp:ident, $FpParameters:ident, $limbs:expr) => { - use $crate::serialize::CanonicalDeserialize; - pub trait $FpParameters: FpParameters {} + ($Fp:ident, $FpParameters:ident, $BigInteger:ident, $BigIntegerType:ty, $limbs:expr) => { + pub trait $FpParameters: FpParameters {} #[derive(Derivative)] #[derivative( @@ -14,7 +13,7 @@ macro_rules! impl_Fp { Eq(bound = "") )] pub struct $Fp
<P>
( - pub BigInteger, + pub $BigIntegerType, #[derivative(Debug = "ignore")] #[doc(hidden)] pub PhantomData
<P>
, @@ -22,7 +21,7 @@ macro_rules! impl_Fp { impl
<P: $FpParameters>
$Fp
<P>
{ #[inline] - pub const fn new(element: BigInteger) -> Self { + pub const fn new(element: $BigIntegerType) -> Self { Self(element, PhantomData) } } @@ -44,7 +43,7 @@ macro_rules! impl_Fp { impl Zero for $Fp
<P>
{ #[inline] fn zero() -> Self { - $Fp::
<P>
(BigInteger::from(0), PhantomData) + $Fp::
<P>
($BigInteger::from(0), PhantomData) } #[inline] @@ -130,7 +129,7 @@ macro_rules! impl_Fp { // Cryptography // Algorithm 16 (BEA for Inversion in Fp) - let one = BigInteger::from(1); + let one = $BigInteger::from(1); let mut u = self.0; let mut v = P::MODULUS; @@ -194,10 +193,10 @@ macro_rules! impl_Fp { impl PrimeField for $Fp
<P>
{ type Params = P; - type BigInt = BigInteger; + type BigInt = $BigIntegerType; #[inline] - fn from_repr(r: BigInteger) -> Self { + fn from_repr(r: $BigIntegerType) -> Self { let mut r = $Fp(r, PhantomData); if r.is_valid() { r.mul_assign(&$Fp(P::R2, PhantomData)); @@ -207,7 +206,7 @@ macro_rules! impl_Fp { } } - impl_field_into_repr!($limbs); + impl_field_into_repr!($limbs, $BigIntegerType); } impl FftField for $Fp
<P>
{ @@ -290,7 +289,7 @@ macro_rules! impl_Fp { impl FromBytes for $Fp
<P>
{ #[inline] fn read(reader: R) -> IoResult { - BigInteger::read(reader).and_then( |b| + $BigInteger::read(reader).and_then( |b| if b.is_zero() { Ok($Fp::zero()) } else { @@ -456,466 +455,3 @@ macro_rules! impl_Fp { } } } - -/// This modular multiplication algorithm uses Montgomery -/// reduction for efficient implementation. It also additionally -/// uses the "no-carry optimization" outlined -/// [here](https://hackmd.io/@zkteam/modular_multiplication) if -/// `P::MODULUS` has (a) a non-zero MSB, and (b) at least one -/// zero bit in the rest of the modulus. -macro_rules! impl_field_mul_assign { - ($limbs:expr) => { - #[inline] - #[unroll_for_loops] - fn mul_assign(&mut self, other: &Self) { - // Checking the modulus at compile time - let first_bit_set = P::MODULUS.0[$limbs - 1] >> 63 != 0; - let mut all_bits_set = P::MODULUS.0[$limbs - 1] == !0 - (1 << 63); - for i in 1..$limbs { - all_bits_set &= P::MODULUS.0[$limbs - i - 1] == !0u64; - } - let no_carry: bool = !(first_bit_set || all_bits_set); - - // No-carry optimisation applied to CIOS - if no_carry { - let mut r = [0u64; $limbs]; - let mut carry1 = 0u64; - let mut carry2 = 0u64; - - for i in 0..$limbs { - r[0] = fa::mac(r[0], (self.0).0[0], (other.0).0[i], &mut carry1); - let k = r[0].wrapping_mul(P::INV); - fa::mac_discard(r[0], k, P::MODULUS.0[0], &mut carry2); - for j in 1..$limbs { - r[j] = fa::mac_with_carry(r[j], (self.0).0[j], (other.0).0[i], &mut carry1); - r[j - 1] = fa::mac_with_carry(r[j], k, P::MODULUS.0[j], &mut carry2); - } - r[$limbs - 1] = carry1 + carry2; - } - (self.0).0 = r; - self.reduce(); - // Alternative implementation - } else { - let mut r = [0u64; $limbs * 2]; - - for i in 0..$limbs { - let mut carry = 0; - for j in 0..$limbs { - r[j + i] = - fa::mac_with_carry(r[j + i], (self.0).0[i], (other.0).0[j], &mut carry); - } - r[$limbs + i] = carry; - } - // Montgomery reduction - let mut _carry2 = 0; - for i in 0..$limbs { - let k = r[i].wrapping_mul(P::INV); - let mut carry = 0; - fa::mac_with_carry(r[i], k, P::MODULUS.0[0], &mut carry); - for j in 1..$limbs { - r[j + i] = fa::mac_with_carry(r[j + i], k, P::MODULUS.0[j], &mut carry); - } - r[$limbs + i] = fa::adc(r[$limbs + i], _carry2, &mut carry); - _carry2 = carry; - } - (self.0).0.copy_from_slice(&r[$limbs..]); - self.reduce(); - } - } - }; -} - -macro_rules! impl_field_into_repr { - ($limbs:expr) => { - #[inline] - #[unroll_for_loops] - fn into_repr(&self) -> BigInteger { - let mut tmp = self.0; - let mut r = tmp.0; - // Montgomery Reduction - for i in 0..$limbs { - let k = r[i].wrapping_mul(P::INV); - let mut carry = 0; - - fa::mac_with_carry(r[i], k, P::MODULUS.0[0], &mut carry); - for j in 1..$limbs { - r[(j + i) % $limbs] = - fa::mac_with_carry(r[(j + i) % $limbs], k, P::MODULUS.0[j], &mut carry); - } - r[i % $limbs] = carry; - } - tmp.0 = r; - tmp - } - }; -} - -macro_rules! 
impl_field_square_in_place { - ($limbs: expr) => { - #[inline] - #[unroll_for_loops] - fn square_in_place(&mut self) -> &mut Self { - let mut r = [0u64; $limbs * 2]; - - let mut carry = 0; - for i in 0..$limbs { - if i < $limbs - 1 { - for j in 0..$limbs { - if j >= (i + 1) { - r[i + j] = fa::mac_with_carry( - r[i + j], - (self.0).0[i], - (self.0).0[j], - &mut carry, - ); - } - } - r[$limbs + i] = carry; - carry = 0; - } - } - - r[$limbs * 2 - 1] = r[$limbs * 2 - 2] >> 63; - for i in 0..$limbs { - r[$limbs * 2 - 2 - i] = - (r[$limbs * 2 - 2 - i] << 1) | (r[$limbs * 2 - 3 - i] >> 63); - } - for i in 3..$limbs { - r[$limbs + 1 - i] = (r[$limbs + 1 - i] << 1) | (r[$limbs - i] >> 63); - } - r[1] = r[1] << 1; - - for i in 0..$limbs { - r[2 * i] = fa::mac_with_carry(r[2 * i], (self.0).0[i], (self.0).0[i], &mut carry); - r[2 * i + 1] = fa::adc(r[2 * i + 1], 0, &mut carry); - } - // Montgomery reduction - let mut _carry2 = 0; - for i in 0..$limbs { - let k = r[i].wrapping_mul(P::INV); - let mut carry = 0; - fa::mac_with_carry(r[i], k, P::MODULUS.0[0], &mut carry); - for j in 1..$limbs { - r[j + i] = fa::mac_with_carry(r[j + i], k, P::MODULUS.0[j], &mut carry); - } - r[$limbs + i] = fa::adc(r[$limbs + i], _carry2, &mut carry); - _carry2 = carry; - } - - (self.0).0.copy_from_slice(&r[$limbs..]); - self.reduce(); - self - } - }; -} - -macro_rules! impl_field_bigint_conv { - ($field: ident, $bigint: ident, $params: ident) => { - impl Into<$bigint> for $field
<P>
{ - fn into(self) -> $bigint { - self.into_repr() - } - } - - impl From<$bigint> for $field
<P>
{ - fn from(int: $bigint) -> Self { - Self::from_repr(int) - } - } - }; -} - -macro_rules! impl_prime_field_standard_sample { - ($field: ident, $params: ident) => { - impl rand::distributions::Distribution<$field
<P>
> - for rand::distributions::Standard - { - #[inline] - fn sample(&self, rng: &mut R) -> $field
<P>
{ - loop { - let mut tmp = $field(rng.sample(rand::distributions::Standard), PhantomData); - // Mask away the unused bits at the beginning. - tmp.0 - .as_mut() - .last_mut() - .map(|val| *val &= core::u64::MAX >> P::REPR_SHAVE_BITS); - - if tmp.is_valid() { - return tmp; - } - } - } - } - }; -} - -macro_rules! impl_prime_field_from_int { - ($field: ident, u128, $params: ident) => { - impl From for $field
<P>
{ - fn from(other: u128) -> Self { - let upper = (other >> 64) as u64; - let lower = ((other << 64) >> 64) as u64; - let mut default_int = P::BigInt::default(); - default_int.0[0] = lower; - default_int.0[1] = upper; - Self::from_repr(default_int) - } - } - }; - ($field: ident, $int: ident, $params: ident) => { - impl From<$int> for $field
<P>
{ - fn from(other: $int) -> Self { - Self::from_repr(P::BigInt::from(u64::from(other))) - } - } - }; -} - -macro_rules! sqrt_impl { - ($Self:ident, $P:tt, $self:expr) => {{ - use crate::fields::LegendreSymbol::*; - // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5) - // Actually this is just normal Tonelli-Shanks; since `P::Generator` - // is a quadratic non-residue, `P::TWO_ADIC_ROOT_OF_UNITY = P::GENERATOR ^ t` - // is also a quadratic non-residue (since `t` is odd). - match $self.legendre() { - Zero => Some(*$self), - QuadraticNonResidue => None, - QuadraticResidue => { - let mut z = $Self::qnr_to_t(); - let mut w = $self.pow($P::T_MINUS_ONE_DIV_TWO); - let mut x = w * $self; - let mut b = x * &w; - - let mut v = $P::TWO_ADICITY as usize; - // t = self^t - #[cfg(debug_assertions)] - { - let mut check = b; - for _ in 0..(v - 1) { - check.square_in_place(); - } - if !check.is_one() { - panic!("Input is not a square root, but it passed the QR test") - } - } - - while !b.is_one() { - let mut k = 0usize; - - let mut b2k = b; - while !b2k.is_one() { - // invariant: b2k = b^(2^k) after entering this loop - b2k.square_in_place(); - k += 1; - } - - let j = v - k - 1; - w = z; - for _ in 0..j { - w.square_in_place(); - } - - z = w.square(); - b *= &z; - x *= &w; - v = k; - } - - Some(x) - } - } - }}; -} - -// Implements AddAssign on Self by deferring to an implementation on &Self -#[macro_export] -macro_rules! impl_additive_ops_from_ref { - ($type: ident, $params: ident) => { - #[allow(unused_qualifications)] - impl core::ops::Add for $type
<P>
{ - type Output = Self; - - #[inline] - fn add(self, other: Self) -> Self { - let mut result = self; - result.add_assign(&other); - result - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::Add<&'a mut Self> for $type
<P>
{ - type Output = Self; - - #[inline] - fn add(self, other: &'a mut Self) -> Self { - let mut result = self; - result.add_assign(&*other); - result - } - } - - #[allow(unused_qualifications)] - impl core::ops::Sub for $type
<P>
{ - type Output = Self; - - #[inline] - fn sub(self, other: Self) -> Self { - let mut result = self; - result.sub_assign(&other); - result - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::Sub<&'a mut Self> for $type
<P>
{ - type Output = Self; - - #[inline] - fn sub(self, other: &'a mut Self) -> Self { - let mut result = self; - result.sub_assign(&*other); - result - } - } - - #[allow(unused_qualifications)] - impl core::iter::Sum for $type
<P>
{ - fn sum>(iter: I) -> Self { - iter.fold(Self::zero(), core::ops::Add::add) - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::iter::Sum<&'a Self> for $type
<P>
{ - fn sum>(iter: I) -> Self { - iter.fold(Self::zero(), core::ops::Add::add) - } - } - - #[allow(unused_qualifications)] - impl core::ops::AddAssign for $type
<P>
{ - fn add_assign(&mut self, other: Self) { - self.add_assign(&other) - } - } - - #[allow(unused_qualifications)] - impl core::ops::SubAssign for $type
<P>
{ - fn sub_assign(&mut self, other: Self) { - self.sub_assign(&other) - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::AddAssign<&'a mut Self> for $type
<P>
{ - fn add_assign(&mut self, other: &'a mut Self) { - self.add_assign(&*other) - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::SubAssign<&'a mut Self> for $type
<P>
{ - fn sub_assign(&mut self, other: &'a mut Self) { - self.sub_assign(&*other) - } - } - }; -} - -// Implements AddAssign on Self by deferring to an implementation on &Self -#[macro_export] -macro_rules! impl_multiplicative_ops_from_ref { - ($type: ident, $params: ident) => { - #[allow(unused_qualifications)] - impl core::ops::Mul for $type
<P>
{ - type Output = Self; - - #[inline] - fn mul(self, other: Self) -> Self { - let mut result = self; - result.mul_assign(&other); - result - } - } - - #[allow(unused_qualifications)] - impl core::ops::Div for $type
<P>
{ - type Output = Self; - - #[inline] - fn div(self, other: Self) -> Self { - let mut result = self; - result.div_assign(&other); - result - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::Mul<&'a mut Self> for $type
<P>
{ - type Output = Self; - - #[inline] - fn mul(self, other: &'a mut Self) -> Self { - let mut result = self; - result.mul_assign(&*other); - result - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::Div<&'a mut Self> for $type
<P>
{ - type Output = Self; - - #[inline] - fn div(self, other: &'a mut Self) -> Self { - let mut result = self; - result.div_assign(&*other); - result - } - } - - #[allow(unused_qualifications)] - impl core::iter::Product for $type
<P>
{ - fn product>(iter: I) -> Self { - iter.fold(Self::one(), core::ops::Mul::mul) - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::iter::Product<&'a Self> for $type
<P>
{ - fn product>(iter: I) -> Self { - iter.fold(Self::one(), Mul::mul) - } - } - - #[allow(unused_qualifications)] - impl core::ops::MulAssign for $type
<P>
{ - fn mul_assign(&mut self, other: Self) { - self.mul_assign(&other) - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::DivAssign<&'a mut Self> for $type
<P>
{ - fn div_assign(&mut self, other: &'a mut Self) { - self.div_assign(&*other) - } - } - - #[allow(unused_qualifications)] - impl<'a, P: $params> core::ops::MulAssign<&'a mut Self> for $type
<P>
{ - fn mul_assign(&mut self, other: &'a mut Self) { - self.mul_assign(&*other) - } - } - - #[allow(unused_qualifications)] - impl core::ops::DivAssign for $type
<P>
{ - fn div_assign(&mut self, other: Self) { - self.div_assign(&other) - } - } - }; -} diff --git a/algebra-core/src/fields/mod.rs b/algebra-core/src/fields/mod.rs index 97ec577cb..e3a12bed0 100644 --- a/algebra-core/src/fields/mod.rs +++ b/algebra-core/src/fields/mod.rs @@ -16,9 +16,12 @@ use num_traits::{One, Zero}; #[macro_use] pub mod macros; -pub mod models; pub mod utils; +#[macro_use] +pub mod arithmetic; + +pub mod models; pub use self::models::*; #[macro_export] diff --git a/algebra-core/src/fields/models/fp_256.rs b/algebra-core/src/fields/models/fp_256.rs deleted file mode 100644 index 3b2c539b0..000000000 --- a/algebra-core/src/fields/models/fp_256.rs +++ /dev/null @@ -1,18 +0,0 @@ -use core::{ - cmp::{Ord, Ordering, PartialOrd}, - fmt::{Display, Formatter, Result as FmtResult}, - marker::PhantomData, - ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, - str::FromStr, -}; -use num_traits::{One, Zero}; -use unroll::unroll_for_loops; - -use crate::{ - biginteger::{arithmetic as fa, BigInteger as _BigInteger, BigInteger256 as BigInteger}, - bytes::{FromBytes, ToBytes}, - fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, - io::{Read, Result as IoResult, Write}, -}; - -impl_Fp!(Fp256, Fp256Parameters, 4); diff --git a/algebra-core/src/fields/models/fp_320.rs b/algebra-core/src/fields/models/fp_320.rs deleted file mode 100644 index 3123c28aa..000000000 --- a/algebra-core/src/fields/models/fp_320.rs +++ /dev/null @@ -1,18 +0,0 @@ -use core::{ - cmp::{Ord, Ordering, PartialOrd}, - fmt::{Display, Formatter, Result as FmtResult}, - marker::PhantomData, - ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, - str::FromStr, -}; -use num_traits::{One, Zero}; -use unroll::unroll_for_loops; - -use crate::{ - biginteger::{arithmetic as fa, BigInteger as _BigInteger, BigInteger320 as BigInteger}, - bytes::{FromBytes, ToBytes}, - fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, - io::{Read, Result as IoResult, Write}, -}; - -impl_Fp!(Fp320, Fp320Parameters, 5); diff --git a/algebra-core/src/fields/models/fp_384.rs b/algebra-core/src/fields/models/fp_384.rs deleted file mode 100644 index adf1ad83a..000000000 --- a/algebra-core/src/fields/models/fp_384.rs +++ /dev/null @@ -1,18 +0,0 @@ -use core::{ - cmp::{Ord, Ordering, PartialOrd}, - fmt::{Display, Formatter, Result as FmtResult}, - marker::PhantomData, - ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, - str::FromStr, -}; -use num_traits::{One, Zero}; -use unroll::unroll_for_loops; - -use crate::{ - biginteger::{arithmetic as fa, BigInteger as _BigInteger, BigInteger384 as BigInteger}, - bytes::{FromBytes, ToBytes}, - fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, - io::{Read, Result as IoResult, Write}, -}; - -impl_Fp!(Fp384, Fp384Parameters, 6); diff --git a/algebra-core/src/fields/models/fp_768.rs b/algebra-core/src/fields/models/fp_768.rs deleted file mode 100644 index 9f04c90bb..000000000 --- a/algebra-core/src/fields/models/fp_768.rs +++ /dev/null @@ -1,18 +0,0 @@ -use core::{ - cmp::{Ord, Ordering, PartialOrd}, - fmt::{Display, Formatter, Result as FmtResult}, - marker::PhantomData, - ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, - str::FromStr, -}; -use num_traits::{One, Zero}; -use unroll::unroll_for_loops; - -use crate::{ - biginteger::{arithmetic as fa, BigInteger as _BigInteger, BigInteger768 as BigInteger}, - 
bytes::{FromBytes, ToBytes}, - fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, - io::{Read, Result as IoResult, Write}, -}; - -impl_Fp!(Fp768, Fp768Parameters, 12); diff --git a/algebra-core/src/fields/models/fp_832.rs b/algebra-core/src/fields/models/fp_832.rs deleted file mode 100644 index 8ce10a18f..000000000 --- a/algebra-core/src/fields/models/fp_832.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::{ - biginteger::{arithmetic as fa, BigInteger as _BigInteger, BigInteger832 as BigInteger}, - bytes::{FromBytes, ToBytes}, - fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, - io::{Read, Result as IoResult, Write}, -}; -use core::{ - cmp::{Ord, Ordering, PartialOrd}, - fmt::{Display, Formatter, Result as FmtResult}, - marker::PhantomData, - ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, - str::FromStr, -}; -use num_traits::{One, Zero}; -use unroll::unroll_for_loops; - -impl_Fp!(Fp832, Fp832Parameters, 13); diff --git a/algebra-core/src/fields/models/mod.rs b/algebra-core/src/fields/models/mod.rs index 2c591b00a..4e45d9f00 100644 --- a/algebra-core/src/fields/models/mod.rs +++ b/algebra-core/src/fields/models/mod.rs @@ -1,17 +1,57 @@ -pub mod fp_256; -pub use self::fp_256::*; +use core::{ + cmp::{Ord, Ordering, PartialOrd}, + fmt::{Display, Formatter, Result as FmtResult}, + marker::PhantomData, + ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + str::FromStr, +}; +use num_traits::{One, Zero}; +use unroll::unroll_for_loops; -pub mod fp_320; -pub use self::fp_320::*; +use crate::{ + biginteger::{ + arithmetic as fa, BigInteger as _BigInteger, BigInteger256, BigInteger320, BigInteger384, + BigInteger768, BigInteger832, + }, + bytes::{FromBytes, ToBytes}, + fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, + io::{Read, Result as IoResult, Write}, + serialize::CanonicalDeserialize, +}; -pub mod fp_384; -pub use self::fp_384::*; +#[cfg(all( + feature = "llvm_asm", + target_arch = "x86_64", + target_feature = "bmi2", + target_feature = "adx", + nightly, +))] +use std::mem::MaybeUninit; -pub mod fp_768; -pub use self::fp_768::*; +#[cfg(all( + feature = "llvm_asm", + target_arch = "x86_64", + target_feature = "bmi2", + target_feature = "adx", + nightly, +))] +#[cfg_attr( + all( + feature = "llvm_asm", + target_arch = "x86_64", + target_feature = "bmi2", + target_feature = "adx", + nightly, + ), + allow(unsafe_code) +)] +include!(concat!(env!("OUT_DIR"), "/field_assembly.rs")); -pub mod fp_832; -pub use self::fp_832::*; +impl_Fp!(Fp256, Fp256Parameters, BigInteger256, BigInteger256, 4); +impl_Fp!(Fp320, Fp320Parameters, BigInteger320, BigInteger320, 5); +impl_Fp!(Fp384, Fp384Parameters, BigInteger384, BigInteger384, 6); +impl_Fp!(Fp768, Fp768Parameters, BigInteger768, BigInteger768, 12); +impl_Fp!(Fp832, Fp832Parameters, BigInteger832, BigInteger832, 13); pub mod fp2; pub use self::fp2::*; diff --git a/algebra-core/src/lib.rs b/algebra-core/src/lib.rs index 612300f03..8463ed735 100644 --- a/algebra-core/src/lib.rs +++ b/algebra-core/src/lib.rs @@ -4,8 +4,28 @@ #![deny(non_shorthand_field_patterns, unused_attributes, unused_imports)] #![deny(unused_extern_crates, renamed_and_removed_lints, unused_allocation)] #![deny(unused_comparisons, bare_trait_objects, const_err, unused_must_use)] -#![deny(unused_mut, unused_unsafe, private_in_public, unsafe_code)] -#![forbid(unsafe_code)] +#![deny(unused_mut, unused_unsafe, private_in_public)] 
+#![cfg_attr( + all( + feature = "llvm_asm", + target_arch = "x86_64", + target_feature = "bmi2", + target_feature = "adx", + nightly, + ), + deny(unsafe_code) +)] +#![cfg_attr( + all( + feature = "llvm_asm", + target_arch = "x86_64", + target_feature = "bmi2", + target_feature = "adx", + nightly, + ), + feature(llvm_asm) +)] +#![cfg_attr(not(feature = "llvm_asm"), forbid(unsafe_code))] #[cfg(all(test, not(feature = "std")))] #[macro_use] diff --git a/algebra/Cargo.toml b/algebra/Cargo.toml index c88220a80..59531412c 100644 --- a/algebra/Cargo.toml +++ b/algebra/Cargo.toml @@ -47,3 +47,4 @@ mnt6_753 = [] std = [ "algebra-core/std" ] parallel = [ "std", "algebra-core/parallel" ] derive = [ "algebra-core/derive" ] +asm = [ "algebra-core/asm" ]
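
For context on the `impl_field_mul_assign!`, `from_repr` and `into_repr` code added in `algebra-core/src/fields/arithmetic.rs` above: all of it is limb-wise Montgomery arithmetic. The sketch below shows the same idea for a single `u64` limb (so `R = 2^64`), using plain `u128` intermediates instead of the `fa::mac_with_carry`/CIOS machinery. It is a minimal illustration, not crate API: `compute_inv` and `mont_mul` are assumed helper names, and the modulus is assumed odd and below `2^63` so the `u128` sums cannot overflow.

```rust
/// -MODULUS^{-1} mod 2^64, computed by Newton iteration (the role played by `P::INV`
/// in the macros). Requires an odd modulus.
fn compute_inv(modulus: u64) -> u64 {
    let mut inv = 1u64;
    for _ in 0..63 {
        inv = inv.wrapping_mul(inv);
        inv = inv.wrapping_mul(modulus);
    }
    inv.wrapping_neg()
}

/// One Montgomery multiplication: returns a * b * R^{-1} mod p, with R = 2^64.
/// Assumes p is odd and p < 2^63 so the u128 sum below cannot overflow.
fn mont_mul(a: u64, b: u64, p: u64, inv: u64) -> u64 {
    let t = (a as u128) * (b as u128);
    // m is chosen so that t + m * p is divisible by R.
    let m = (t as u64).wrapping_mul(inv);
    let u = ((t + (m as u128) * (p as u128)) >> 64) as u64;
    if u >= p { u - p } else { u }
}

fn main() {
    let p: u64 = (1 << 61) - 1; // a Mersenne prime, comfortably below 2^63
    let inv = compute_inv(p);
    let r = ((1u128 << 64) % (p as u128)) as u64; // R mod p
    let r2 = ((r as u128 * r as u128) % (p as u128)) as u64; // R^2 mod p (P::R2 in the macros)

    let (a, b) = (123_456_789_123_456_789 % p, 987_654_321_987_654_321 % p);

    // Enter Montgomery form (what `from_repr` does): a_bar = a * R mod p.
    let a_bar = mont_mul(a, r2, p, inv);
    let b_bar = mont_mul(b, r2, p, inv);
    // Multiply while staying in Montgomery form (what `mul_assign` does).
    let ab_bar = mont_mul(a_bar, b_bar, p, inv);
    // Leave Montgomery form (what `into_repr` does): one more reduction, by 1.
    let ab = mont_mul(ab_bar, 1, p, inv);

    assert_eq!(ab, ((a as u128 * b as u128) % (p as u128)) as u64);
}
```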
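
`sqrt_impl!` above is plain Tonelli-Shanks driven by the precomputed constants `TWO_ADICITY`, `T_MINUS_ONE_DIV_TWO` and `qnr_to_t()`. For reference, a self-contained single-`u64` version of the same algorithm; the function names here are illustrative and the modulus is assumed to be an odd prime.

```rust
/// b^e mod p using u128 intermediates.
fn pow_mod(mut b: u64, mut e: u64, p: u64) -> u64 {
    let mut acc = 1u64;
    b %= p;
    while e > 0 {
        if e & 1 == 1 {
            acc = ((acc as u128 * b as u128) % p as u128) as u64;
        }
        b = ((b as u128 * b as u128) % p as u128) as u64;
        e >>= 1;
    }
    acc
}

fn mul_mod(a: u64, b: u64, p: u64) -> u64 {
    ((a as u128 * b as u128) % p as u128) as u64
}

/// Tonelli-Shanks square root modulo an odd prime p; returns None for non-residues.
fn sqrt_mod(n: u64, p: u64) -> Option<u64> {
    let n = n % p;
    if n == 0 {
        return Some(0);
    }
    // Euler's criterion: the `legendre()` check in the macro.
    if pow_mod(n, (p - 1) / 2, p) != 1 {
        return None;
    }
    // Write p - 1 = q * 2^s with q odd (s plays the role of `TWO_ADICITY`).
    let mut q = p - 1;
    let mut s = 0u32;
    while q & 1 == 0 {
        q >>= 1;
        s += 1;
    }
    // Find a quadratic non-residue z (the macro instead uses the precomputed `qnr_to_t`).
    let mut z = 2u64;
    while pow_mod(z, (p - 1) / 2, p) != p - 1 {
        z += 1;
    }
    let mut m = s;
    let mut c = pow_mod(z, q, p);
    let mut t = pow_mod(n, q, p);
    let mut r = pow_mod(n, (q + 1) / 2, p);
    while t != 1 {
        // Least i with t^(2^i) == 1 (the inner `b2k` loop in the macro).
        let mut i = 0u32;
        let mut t2 = t;
        while t2 != 1 {
            t2 = mul_mod(t2, t2, p);
            i += 1;
        }
        let b = pow_mod(c, 1u64 << (m - i - 1), p);
        r = mul_mod(r, b, p);
        c = mul_mod(b, b, p);
        t = mul_mod(t, c, p);
        m = i;
    }
    Some(r)
}

fn main() {
    let p = 104_729u64; // the 10_000th prime; p - 1 has 2-adicity 3
    let x = 12_345u64 % p;
    let n = mul_mod(x, x, p);
    let r = sqrt_mod(n, p).expect("n is a square by construction");
    assert_eq!(mul_mod(r, r, p), n);
}
```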
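
Finally, the `impl_additive_ops_from_ref!` and `impl_multiplicative_ops_from_ref!` macros exist so that each field type only hand-implements the by-reference `*Assign` operations, and every owned/borrowed operator variant is derived from those. A minimal sketch of that pattern on a toy wrapper type (illustrative only, not taken from the crate):

```rust
use core::ops::{Add, AddAssign};

#[derive(Clone, Copy, Debug, PartialEq)]
struct Toy(u64);

// The one implementation that does the real work: add-assign from a reference.
impl<'a> AddAssign<&'a Toy> for Toy {
    fn add_assign(&mut self, other: &'a Toy) {
        self.0 = self.0.wrapping_add(other.0);
    }
}

// Everything else defers to it, mirroring the macros above.
impl AddAssign<Toy> for Toy {
    fn add_assign(&mut self, other: Toy) {
        *self += &other;
    }
}

impl Add<Toy> for Toy {
    type Output = Toy;
    fn add(mut self, other: Toy) -> Toy {
        self += &other;
        self
    }
}

impl<'a> Add<&'a Toy> for Toy {
    type Output = Toy;
    fn add(mut self, other: &'a Toy) -> Toy {
        self += other;
        self
    }
}

fn main() {
    let (a, b) = (Toy(2), Toy(3));
    assert_eq!(a + b, Toy(5));
    assert_eq!(a + &b, Toy(5));
}
```

The impls generated by the macros follow the same shape, just parameterised over the field type and its parameter trait.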