Skip to content

Commit

Permalink
fix: tests and benches (#17)
Browse files Browse the repository at this point in the history
Recent performance improvements require additional `Send + Sync`
constraints on the RNG, among other things. This update fills in those
requirements for the tests!
  • Loading branch information
alexander-camuto committed Sep 2, 2024
1 parent d9d470c commit 930970a
Show file tree
Hide file tree
Showing 22 changed files with 69 additions and 52 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/lints-stable.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ on: pull_request

jobs:
clippy:
name: Clippy (1.56.1)
name: Clippy
timeout-minutes: 30
runs-on: ubuntu-latest

Expand All @@ -18,6 +18,6 @@ jobs:
- name: Run clippy
uses: actions-rs/clippy-check@v1
with:
name: Clippy (1.56.1)
name: Clippy
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --all-targets -- -D warnings
2 changes: 1 addition & 1 deletion halo2_gadgets/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

#![cfg_attr(docsrs, feature(doc_cfg))]
// Temporary until we have more of the crate implemented.
#![allow(dead_code)]
#![allow(dead_code, clippy::single_range_in_vec_init)]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_debug_implementations)]
Expand Down
2 changes: 2 additions & 0 deletions halo2_proofs/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@ gumdrop = "0.8"
proptest = "1"
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
serde_json = "1"
ark-std = "0.4.0"

[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies]
getrandom = { version = "0.2", features = ["js"] }
Expand Down Expand Up @@ -112,6 +113,7 @@ cost-estimator = ["serde_derive"]
derive_serde = ["halo2curves/derive_serde"]
parallel-poly-read = []
precompute-coset = []
multicore = []

[lib]
bench = false
Expand Down
4 changes: 2 additions & 2 deletions halo2_proofs/benches/arithmetic.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#[macro_use]
extern crate criterion;

use crate::arithmetic::small_multiexp;
use crate::arithmetic::best_multiexp_cpu;
use crate::halo2curves::pasta::{EqAffine, Fp};
use group::ff::Field;
use halo2_proofs::*;
Expand All @@ -27,7 +27,7 @@ fn criterion_benchmark(c: &mut Criterion) {
c.bench_function("double-and-add", |b| {
b.iter(|| {
for (g_lo, g_hi) in g_lo.iter().zip(g_hi.iter()) {
small_multiexp(&[black_box(coeff_1), black_box(coeff_2)], &[*g_lo, *g_hi]);
best_multiexp_cpu(&[black_box(coeff_1), black_box(coeff_2)], &[*g_lo, *g_hi]);
}
})
});
Expand Down
12 changes: 10 additions & 2 deletions halo2_proofs/benches/fft.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,24 @@ use halo2_proofs::*;
use halo2curves::pasta::Fp;

use criterion::{BenchmarkId, Criterion};
use poly::EvaluationDomain;
use rand_core::OsRng;

fn criterion_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("fft");
for k in 3..19 {
group.bench_function(BenchmarkId::new("k", k), |b| {
let mut a = (0..(1 << k)).map(|_| Fp::random(OsRng)).collect::<Vec<_>>();
let domain = EvaluationDomain::<Fp>::new(1, k);
let n = domain.get_n() as usize;

let input = vec![Fp::random(OsRng); n];

let mut a = input.clone();
let l_a = a.len();

let omega = Fp::random(OsRng); // would be weird if this mattered
b.iter(|| {
best_fft(&mut a, omega, k as u32);
best_fft(&mut a, omega, k, domain.get_fft_data(l_a), false);
});
});
}
Expand Down
1 change: 1 addition & 0 deletions halo2_proofs/benches/plonk.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ use criterion::{BenchmarkId, Criterion};
fn criterion_benchmark(c: &mut Criterion) {
/// This represents an advice column at a certain row in the ConstraintSystem
#[derive(Copy, Clone, Debug)]
#[allow(dead_code)]
pub struct Variable(Column<Advice>, usize);

#[derive(Clone)]
Expand Down
1 change: 0 additions & 1 deletion halo2_proofs/examples/simple-lookup-unblinded.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
#[macro_use]
extern crate criterion;

use ff::PrimeField;
Expand Down
1 change: 0 additions & 1 deletion halo2_proofs/examples/simple-lookup.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
#[macro_use]
extern crate criterion;

use ff::PrimeField;
Expand Down
8 changes: 0 additions & 8 deletions halo2_proofs/src/circuit/floor_planner/v1/strategy.rs
Original file line number Diff line number Diff line change
Expand Up @@ -217,16 +217,8 @@ pub fn slot_in_biggest_advice_first(
// We now use `sort_by_cached_key` with non-unique keys, and rely on `region_shapes`
// being sorted by region index (which we also rely on below to return `RegionStart`s
// in the correct order).
#[cfg(not(feature = "floor-planner-v1-legacy-pdqsort"))]
sorted_regions.sort_by_cached_key(sort_key);

// To preserve compatibility, when the "floor-planner-v1-legacy-pdqsort" feature is enabled,
// we use a copy of the pdqsort implementation from the Rust 1.56.1 standard library, fixed
// to its behaviour on 64-bit platforms.
// https://github.com/rust-lang/rust/blob/1.56.1/library/core/src/slice/mod.rs#L2365-L2402
#[cfg(feature = "floor-planner-v1-legacy-pdqsort")]
halo2_legacy_pdqsort::sort::quicksort(&mut sorted_regions, |a, b| sort_key(a).lt(&sort_key(b)));

sorted_regions.reverse();

// Lay out the sorted regions.
Expand Down
4 changes: 3 additions & 1 deletion halo2_proofs/src/dev.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,11 @@ use crate::{

use maybe_rayon::prelude::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,

Check failure on line 24 in halo2_proofs/src/dev.rs

View workflow job for this annotation

GitHub Actions / Build target wasm32-wasi

unresolved imports `maybe_rayon::prelude::IndexedParallelIterator`, `maybe_rayon::prelude::IntoParallelRefIterator`

Check failure on line 24 in halo2_proofs/src/dev.rs

View workflow job for this annotation

GitHub Actions / Build target wasm32-unknown-unknown

unresolved imports `maybe_rayon::prelude::IndexedParallelIterator`, `maybe_rayon::prelude::IntoParallelRefIterator`
ParallelSliceMut,
};

#[cfg(not(feature = "mv-lookup"))]
use maybe_rayon::prelude::ParallelSliceMut;

Check failure on line 28 in halo2_proofs/src/dev.rs

View workflow job for this annotation

GitHub Actions / Build target wasm32-wasi

unresolved import `maybe_rayon::prelude::ParallelSliceMut`

Check failure on line 28 in halo2_proofs/src/dev.rs

View workflow job for this annotation

GitHub Actions / Build target wasm32-unknown-unknown

unresolved import `maybe_rayon::prelude::ParallelSliceMut`

pub mod metadata;
use metadata::Column as ColumnMetadata;
mod util;
Expand Down
2 changes: 1 addition & 1 deletion halo2_proofs/src/fft.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ mod tests {

let input = vec![Scalar::random(OsRng); n];

let num_threads = multicore::current_num_threads();
let _num_threads = multicore::current_num_threads();

let mut a = input.clone();
let l_a = a.len();
Expand Down
2 changes: 1 addition & 1 deletion halo2_proofs/src/helpers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ pub trait SerdePrimeField: PrimeField + SerdeObject {

/// Writes a field element as bytes to the buffer according to the `format`:
/// - `Processed`: Writes a field element in standard form, with endianness specified by the
/// `PrimeField` implementation.
/// `PrimeField` implementation.
/// - Otherwise: Writes a field element into raw bytes in its internal Montgomery representation,
/// WITHOUT performing the expensive Montgomery reduction.
fn write<W: io::Write>(&self, writer: &mut W, format: SerdeFormat) -> io::Result<()> {
Expand Down
8 changes: 7 additions & 1 deletion halo2_proofs/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,13 @@

#![cfg_attr(docsrs, feature(doc_cfg))]
// The actual lints we want to disable.
#![allow(clippy::op_ref, clippy::many_single_char_names)]
#![allow(
clippy::op_ref,
clippy::many_single_char_names,
clippy::empty_docs,
clippy::doc_lazy_continuation,
clippy::single_range_in_vec_init
)]
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
Expand Down
2 changes: 2 additions & 0 deletions halo2_proofs/src/plonk.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@ mod circuit;
mod error;
mod evaluation;
mod keygen;
#[cfg(not(feature = "mv-lookup"))]
mod lookup;
#[cfg(feature = "mv-lookup")]
mod mv_lookup;
pub mod permutation;
mod shuffle;
Expand Down
6 changes: 1 addition & 5 deletions halo2_proofs/src/plonk/circuit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1374,11 +1374,6 @@ impl<F: Field> Product<Self> for Expression<F> {
}
}

/// Represents an index into a vector where each entry corresponds to a distinct
/// point that polynomials are queried at.
#[derive(Copy, Clone, Debug)]
pub(crate) struct PointIndex(pub usize);

/// A "virtual cell" is a PLONK cell that has been queried at a particular relative offset
/// within a custom gate.
#[derive(Clone, Debug)]
Expand Down Expand Up @@ -1554,6 +1549,7 @@ impl<F: Field> Gate<F> {

/// TODO doc
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub struct LookupTracker<F: Field> {
pub(crate) table: Vec<Expression<F>>,
pub(crate) inputs: Vec<Vec<Expression<F>>>,
Expand Down
1 change: 1 addition & 0 deletions halo2_proofs/src/plonk/evaluation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ use crate::{
};

use group::ff::{Field, PrimeField, WithSmallOrderMulGroup};
#[cfg(feature = "mv-lookup")]
use maybe_rayon::iter::IndexedParallelIterator;
use maybe_rayon::iter::IntoParallelRefIterator;

Check failure on line 20 in halo2_proofs/src/plonk/evaluation.rs

View workflow job for this annotation

GitHub Actions / Build target wasm32-wasi

unresolved import `maybe_rayon::iter::IntoParallelRefIterator`

Check failure on line 20 in halo2_proofs/src/plonk/evaluation.rs

View workflow job for this annotation

GitHub Actions / Build target wasm32-unknown-unknown

unresolved import `maybe_rayon::iter::IntoParallelRefIterator`
use maybe_rayon::iter::ParallelIterator;

Check warning on line 21 in halo2_proofs/src/plonk/evaluation.rs

View workflow job for this annotation

GitHub Actions / Build target wasm32-unknown-unknown

unused import: `maybe_rayon::iter::ParallelIterator`
Expand Down
3 changes: 3 additions & 0 deletions halo2_proofs/src/plonk/lookup/prover.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ pub(in crate::plonk) struct Permuted<C: CurveAffine> {
}

#[derive(Debug)]
#[allow(dead_code)]
pub(in crate::plonk) struct Committed<C: CurveAffine> {
pub(in crate::plonk) permuted_input_poly: Polynomial<C::Scalar, Coeff>,
permuted_input_blind: Blind<C::Scalar>,
Expand All @@ -56,6 +57,7 @@ pub(in crate::plonk) struct Committed<C: CurveAffine> {
}

impl<C: SerdeCurveAffine> Committed<C> {
#[allow(dead_code)]
pub fn write<W: std::io::Write>(
&self,
writer: &mut W,
Expand All @@ -74,6 +76,7 @@ impl<C: SerdeCurveAffine> Committed<C> {
self.commitment.write(writer, format)
}

#[allow(dead_code)]
pub fn read<R: std::io::Read>(reader: &mut R, format: SerdeFormat) -> std::io::Result<Self>
where
<C as CurveAffine>::ScalarExt: crate::helpers::SerdePrimeField,
Expand Down
2 changes: 2 additions & 0 deletions halo2_proofs/src/plonk/mv_lookup/prover.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ pub(in crate::plonk) struct Committed<C: CurveAffine> {
}

impl<C: SerdeCurveAffine> Committed<C> {
#[allow(dead_code)]
pub fn write<W: std::io::Write>(
&self,
writer: &mut W,
Expand All @@ -56,6 +57,7 @@ impl<C: SerdeCurveAffine> Committed<C> {
self.commitment.write(writer, format)
}

#[allow(dead_code)]
pub fn read<R: std::io::Read>(reader: &mut R, format: SerdeFormat) -> std::io::Result<Self>
where
<C as CurveAffine>::ScalarExt: crate::helpers::SerdePrimeField,
Expand Down
37 changes: 18 additions & 19 deletions halo2_proofs/src/plonk/prover.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
use ff::{Field, FromUniformBytes, WithSmallOrderMulGroup};
use group::Curve;
use halo2curves::serde::SerdeObject;

use instant::Instant;
use rand_core::RngCore;
use rustc_hash::FxBuildHasher;
Expand All @@ -20,18 +18,16 @@ use super::{
permutation, shuffle, vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX,
ChallengeY, Error, ProvingKey,
};
use maybe_rayon::iter::IndexedParallelIterator;
use maybe_rayon::iter::ParallelIterator;
#[cfg(feature = "mv-lookup")]
use maybe_rayon::iter::{IndexedParallelIterator, ParallelIterator};

#[cfg(not(feature = "mv-lookup"))]
use super::lookup;
#[cfg(feature = "mv-lookup")]
use super::mv_lookup as lookup;

#[cfg(feature = "mv-lookup")]
use maybe_rayon::iter::{
IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator,
};
use maybe_rayon::iter::{IntoParallelIterator, IntoParallelRefIterator};

use crate::{
arithmetic::{eval_polynomial, CurveAffine},
Expand Down Expand Up @@ -69,8 +65,7 @@ pub fn create_proof<
transcript: &mut T,
) -> Result<(), Error>
where
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64> + SerdeObject,
Scheme::Curve: SerdeObject,
Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
Scheme::ParamsProver: Send + Sync,
{
#[cfg(feature = "counter")]
Expand Down Expand Up @@ -498,11 +493,13 @@ where
.collect::<Result<Vec<_>, _>>()?;

#[cfg(feature = "mv-lookup")]
lookups.iter().for_each(|lookups| {
lookups.iter().for_each(|lookup| {
transcript.write_point(lookup.commitment);
});
});
{
for lookups_ in &lookups {
for lookup in lookups_.iter() {
transcript.write_point(lookup.commitment)?;
}
}
}

#[cfg(not(feature = "mv-lookup"))]
let lookups: Vec<Vec<lookup::prover::Permuted<Scheme::Curve>>> = instance
Expand Down Expand Up @@ -603,11 +600,13 @@ where
let lookups = commit_lookups()?;

#[cfg(feature = "mv-lookup")]
lookups.iter().for_each(|lookups| {
lookups.iter().for_each(|lookup| {
transcript.write_point(lookup.commitment);
});
});
{
for lookups_ in &lookups {
for lookup in lookups_.iter() {
transcript.write_point(lookup.commitment)?;
}
}
}

log::trace!("Lookup commitment: {:?}", start.elapsed());

Expand Down
6 changes: 4 additions & 2 deletions halo2_proofs/src/poly/commitment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,8 @@ impl<F: Field> Default for Blind<F> {
}

impl<F: Field + SerdePrimeField> Blind<F> {
/// Writes polynomial to buffer using `SerdePrimeField::write`.
/// Writes blind to buffer using `SerdePrimeField::write`.
#[allow(dead_code)]
pub(crate) fn write<W: io::Write>(
&self,
writer: &mut W,
Expand All @@ -209,7 +210,8 @@ impl<F: Field + SerdePrimeField> Blind<F> {
Ok(())
}

/// Reads polynomial from buffer using `SerdePrimeField::read`.
/// Reads blind from buffer using `SerdePrimeField::read`.
#[allow(dead_code)]
pub(crate) fn read<R: io::Read>(
reader: &mut R,
format: crate::SerdeFormat,
Expand Down
8 changes: 4 additions & 4 deletions halo2_proofs/src/poly/domain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -597,7 +597,7 @@ impl<F: WithSmallOrderMulGroup<3>> EvaluationDomain<F> {
});
}

fn ifft(&self, a: &mut Vec<F>, omega_inv: F, log_n: u32, divisor: F) {
fn ifft(&self, a: &mut [F], omega_inv: F, log_n: u32, divisor: F) {
let fft_data = self.get_fft_data(a.len());
crate::fft::parallel::fft(a, omega_inv, log_n, fft_data, true);
// self.fft_inner(a, omega_inv, log_n, true);
Expand All @@ -609,7 +609,7 @@ impl<F: WithSmallOrderMulGroup<3>> EvaluationDomain<F> {
});
}

fn fft_inner(&self, a: &mut Vec<F>, omega: F, log_n: u32, inverse: bool) {
fn fft_inner(&self, a: &mut [F], omega: F, log_n: u32, inverse: bool) {
let fft_data = self.get_fft_data(a.len());
best_fft(a, omega, log_n, fft_data, inverse)
}
Expand Down Expand Up @@ -846,8 +846,8 @@ fn test_coeff_to_extended_part() {
#[test]
fn bench_coeff_to_extended_parts() {
use halo2curves::pasta::pallas::Scalar;
use rand_core::OsRng;
use instant::Instant;
use rand_core::OsRng;

let k = 20;
let domain = EvaluationDomain::<Scalar>::new(3, k);
Expand Down Expand Up @@ -934,8 +934,8 @@ fn test_lagrange_vecs_to_extended() {
#[test]
fn bench_lagrange_vecs_to_extended() {
use halo2curves::pasta::pallas::Scalar;
use rand_core::OsRng;
use instant::Instant;
use rand_core::OsRng;

let rng = OsRng;
let domain = EvaluationDomain::<Scalar>::new(8, 10);
Expand Down
Loading

0 comments on commit 930970a

Please sign in to comment.