Commit

Merge remote-tracking branch 'origin/main' into commonprefix-patch
alxiong committed Sep 3, 2024
2 parents 5c61e3e + 3195eb0 commit 3b5a182
Showing 16 changed files with 1,184 additions and 791 deletions.
66 changes: 16 additions & 50 deletions flake.lock


2 changes: 1 addition & 1 deletion flake.nix
@@ -63,7 +63,7 @@
clangStdenv
llvm_15
typos
grcov
# grcov # TODO uncomment this line after https://github.com/mozilla/grcov/issues/1187#issuecomment-2252214718
] ++ lib.optionals stdenv.isDarwin
[ darwin.apple_sdk.frameworks.Security ];

8 changes: 6 additions & 2 deletions plonk/src/circuit/plonk_verifier/gadgets.rs
@@ -34,16 +34,20 @@ use jf_utils::{bytes_to_field_elements, field_switching};

/// Aggregate polynomial commitments into a single commitment (in the
/// ScalarsAndBases form). Useful in batch opening.
///
/// The verification key type is guaranteed to match the Plonk proof type.
///
/// The returned commitment is a generalization of `[F]1` described
/// in Sec 8.3, step 10 of https://eprint.iacr.org/2019/953.pdf
/// input
///
/// Input:
/// - vks: verification key variable
/// - challenges: challenge variable in FpElemVar form
/// - poly_evals: zeta^n, zeta^n-1 and Lagrange evaluated at 1
/// - batch_proof: batched proof inputs
/// - non_native_field_info: aux information for non-native field
/// Output
///
/// Output:
/// - scalar and bases prepared for MSM
/// - buffer info for u and v powers
pub(super) fn aggregate_poly_commitments_circuit<E, F>(
2 changes: 2 additions & 0 deletions plonk/src/proof_system/prover.rs
@@ -68,8 +68,10 @@ impl<E: Pairing> Prover<E> {
}

/// Round 1:
///
/// 1. Compute and commit wire witness polynomials.
/// 2. Compute public input polynomial.
///
/// Return the wire witness polynomials and their commitments,
/// also return the public input polynomial.
pub(crate) fn run_1st_round<C: Arithmetization<E::ScalarField>, R: CryptoRng + RngCore>(
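For intuition on step 2 of this round, here is a minimal, hypothetical sketch (not the Jellyfish prover's code) of how a public input polynomial can be obtained by interpolating the public inputs over the first slots of the circuit's evaluation domain using arkworks; the real prover's ordering and sign conventions may differ.

```rust
use ark_bn254::Fr;
use ark_poly::{
    univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Polynomial,
    Radix2EvaluationDomain,
};

fn main() {
    // Hypothetical 8-slot evaluation domain with two public inputs.
    let domain = Radix2EvaluationDomain::<Fr>::new(8).unwrap();
    let public_inputs = [Fr::from(3u64), Fr::from(5u64)];

    // Place the public inputs in the first evaluation slots, zero elsewhere.
    let mut evals = vec![Fr::from(0u64); domain.size()];
    evals[..public_inputs.len()].copy_from_slice(&public_inputs);

    // Interpolate: an IFFT over the domain yields the coefficient form.
    let pi_poly = DensePolynomial::from_coefficients_vec(domain.ifft(&evals));

    // The polynomial evaluates to the public inputs on the first domain elements.
    assert_eq!(pi_poly.evaluate(&domain.element(1)), Fr::from(5u64));
}
```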
3 changes: 2 additions & 1 deletion plonk/src/proof_system/verifier.rs
@@ -190,11 +190,12 @@

/// Batch-verify multiple (aggregated) PCS opening proofs.
///
/// We need to verify that
/// We need to verify that:
/// - `e(Ai, [x]2) = e(Bi, [1]2) for i \in {0, .., m-1}`, where
/// - `Ai = [open_proof_i] + u_i * [shifted_open_proof_i]` and
/// - `Bi = eval_point_i * [open_proof_i] + u_i * next_eval_point_i *
/// [shifted_open_proof_i] + comm_i - eval_i * [1]1`.
///
/// By Schwartz-Zippel lemma, it's equivalent to check that for a random r:
/// - `e(A0 + ... + r^{m-1} * Am, [x]2) = e(B0 + ... + r^{m-1} * Bm, [1]2)`.
pub(crate) fn batch_verify_opening_proofs<T>(
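To illustrate the combined check, here is a minimal sketch assuming the `Ai` and `Bi` G1 elements have already been assembled as described above; `batch_pairing_check`, its signature, and the variable names are hypothetical illustrations, not the library's API.

```rust
use ark_ec::{pairing::Pairing, CurveGroup};
use ark_ff::{Field, Zero};

/// Hypothetical helper: check that
/// e(A0 + r*A1 + ... + r^{m-1}*A_{m-1}, [x]2) == e(B0 + r*B1 + ... + r^{m-1}*B_{m-1}, [1]2).
fn batch_pairing_check<E: Pairing>(
    a: &[E::G1],         // the A_i
    b: &[E::G1],         // the B_i
    beta_h: E::G2Affine, // [x]2 from the SRS
    h: E::G2Affine,      // [1]2 from the SRS
    r: E::ScalarField,   // random challenge
) -> bool {
    let (mut lhs, mut rhs) = (E::G1::zero(), E::G1::zero());
    let mut r_pow = E::ScalarField::ONE;
    for (ai, bi) in a.iter().zip(b) {
        // accumulate the random linear combinations
        lhs += *ai * r_pow;
        rhs += *bi * r_pow;
        r_pow *= r;
    }
    // a single pairing equation replaces m separate ones
    E::pairing(lhs.into_affine(), beta_h) == E::pairing(rhs.into_affine(), h)
}
```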
4 changes: 2 additions & 2 deletions relation/src/constraint_system.rs
@@ -822,10 +822,10 @@ impl<F: FftField> PlonkCircuit<F> {
self.eval_domain.size() != 1
}

/// Re-arrange the order of the gates so that
/// Re-arrange the order of the gates so that:
/// 1. io gates are in the front.
/// 2. variable table lookup gates are at the rear so that they do not affect
/// the range gates when merging the lookup tables.
/// the range gates when merging the lookup tables.
///
/// Remember to pad gates before calling the method.
fn rearrange_gates(&mut self) -> Result<(), CircuitError> {
28 changes: 18 additions & 10 deletions relation/src/gadgets/ecc/emulated/short_weierstrass.rs
@@ -153,28 +153,36 @@ impl<F: PrimeField> PlonkCircuit<F> {
/// Constrain variable `p2` to be the point addition of `p0` and
/// `p1` over an elliptic curve.
/// Let p0 = (x0, y0, inf0), p1 = (x1, y1, inf1), p2 = (x2, y2, inf2)
/// The addition formula for affine points of sw curve is
/// If either p0 or p1 is infinity, then p2 equals to another point.
/// The addition formula for affine points of sw curve is as follows:
///
/// If either p0 or p1 is infinity, then p2 equals the other point.
/// 1. if p0 == p1
/// - if y0 == 0 then inf2 = 1
/// - Calculate s = (3 * x0^2 + a) / (2 * y0)
/// - x2 = s^2 - x0 - x1
/// - y2 = s(x0 - x2) - y0
/// - if y0 == 0 then inf2 = 1
/// - Calculate s = (3 * x0^2 + a) / (2 * y0)
/// - x2 = s^2 - x0 - x1
/// - y2 = s(x0 - x2) - y0
/// 2. Otherwise
/// - if x0 == x1 then inf2 = 1
/// - Calculate s = (y0 - y1) / (x0 - x1)
/// - x2 = s^2 - x0 - x1
/// - y2 = s(x0 - x2) - y0
/// - if x0 == x1 then inf2 = 1
/// - Calculate s = (y0 - y1) / (x0 - x1)
/// - x2 = s^2 - x0 - x1
/// - y2 = s(x0 - x2) - y0
///
/// The first case is equivalent to the following:
///
/// - inf0 == 1 || inf1 == 1 || x0 != x1 || y0 != y1 || y0 != 0 || inf2 == 0
/// - (x0 + x1 + x2) * (y0 + y0)^2 == (3 * x0^2 + a)^2
/// - (y2 + y0) * (y0 + y0) == (3 * x0^2 + a) (x0 - x2)
///
/// The second case is equivalent to the following:
///
/// - inf0 == 1 || inf1 == 1 || x0 != x1 || y0 == y1 || inf2 == 0
/// - (x0 - x1)^2 (x0 + x1 + x2) == (y0 - y1)^2
/// - (x0 - x2) (y0 - y1) == (y0 + y2) (x0 - x1)
///
/// First check in both cases can be combined into the following:
///
/// inf0 == 1 || inf1 == 1 || inf2 == 0 || x0 != x1 || (y0 == y1 && y0 != 0)
///
/// For the remaining equality checks,
/// - Both LHS and RHS must be multiplied with an indicator variable
/// (!inf0 && !inf1). So that if either p0 or p1 is infinity, those
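For reference, here is a minimal out-of-circuit sketch of the affine formulas listed above, assuming the arkworks `Field` trait; `sw_affine_add` is a hypothetical illustration only and says nothing about how the gadget encodes these equations with the infinity indicator variables.

```rust
use ark_ff::Field;

/// Hypothetical reference for the affine addition rules above on a short
/// Weierstrass curve y^2 = x^3 + a*x + b. Infinity inputs are omitted;
/// `None` means the result is the point at infinity (inf2 = 1).
fn sw_affine_add<F: Field>(a: F, (x0, y0): (F, F), (x1, y1): (F, F)) -> Option<(F, F)> {
    let s = if x0 == x1 && y0 == y1 {
        // case 1 (doubling): s = (3 * x0^2 + a) / (2 * y0)
        if y0 == F::ZERO {
            return None; // y0 == 0 => inf2 = 1
        }
        let x0_sq = x0.square();
        (x0_sq.double() + x0_sq + a) * y0.double().inverse()?
    } else {
        // case 2: s = (y0 - y1) / (x0 - x1)
        if x0 == x1 {
            return None; // x0 == x1 => inf2 = 1
        }
        (y0 - y1) * (x0 - x1).inverse()?
    };
    let x2 = s.square() - x0 - x1;
    let y2 = s * (x0 - x2) - y0;
    Some((x2, y2))
}
```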
1 change: 1 addition & 0 deletions vid/.gitignore
@@ -0,0 +1 @@
/target
5 changes: 5 additions & 0 deletions vid/Cargo.toml
@@ -46,6 +46,11 @@ name = "advz"
harness = false
required-features = ["test-srs"]

[[bench]]
name = "advz_multiplicity"
harness = false
required-features = ["test-srs"]

[features]
default = ["parallel"]
std = [
11 changes: 5 additions & 6 deletions vid/benches/advz.rs
@@ -27,16 +27,15 @@
{
// play with these items
//
// CODE_RATE is merely a convenient way to automatically choose polynomial
// degree as a function of storage node count.
// If desired, you could set polynomial degrees independent of storage node
// count.
const CODE_RATE: u32 = 4; // ratio of num_storage_nodes : polynomial_degree
// INVERSE_CODE_RATE is merely a convenient way to automatically choose
// polynomial degree as a function of storage node count. If desired, you
// could set polynomial degrees independent of storage node count.
const INVERSE_CODE_RATE: u32 = 4; // ratio of num_storage_nodes : polynomial_degree
let storage_node_counts = [512, 1024];
let payload_byte_lens = [1 * MB];

// more items as a function of the above
let poly_degrees_iter = storage_node_counts.iter().map(|c| c / CODE_RATE);
let poly_degrees_iter = storage_node_counts.iter().map(|c| c / INVERSE_CODE_RATE);
let supported_degree = poly_degrees_iter.clone().max().unwrap();
let vid_sizes_iter = poly_degrees_iter.zip(storage_node_counts);
let mut rng = jf_utils::test_rng();
105 changes: 105 additions & 0 deletions vid/benches/advz_multiplicity.rs
@@ -0,0 +1,105 @@
// Copyright (c) 2024 Espresso Systems (espressosys.com)
// This file is part of the Jellyfish library.

// You should have received a copy of the MIT License
// along with the Jellyfish library. If not, see <https://mit-license.org/>.

//! Benchmarks demonstrating performance improvement in [`Advz::verify_share`]
//! from use of parallelism over `multiplicity`.
//!
//! Run
//! ```
//! cargo bench --bench=advz_multiplicity --features="test-srs"
//! ```
//!
//! By
//! [default](https://github.com/rayon-rs/rayon/blob/main/FAQ.md#how-many-threads-will-rayon-spawn)
//! the number of threads = number of available CPU cores. You can override this
//! choice by prefixing the above command with `RAYON_NUM_THREADS=N `. Example:
//! set `N=1` to eliminate parallelism.

use ark_bn254::Bn254;
use ark_ec::pairing::Pairing;
use ark_serialize::Write;
use ark_std::rand::RngCore;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use digest::{crypto_common::generic_array::ArrayLength, Digest, DynDigest, OutputSizeUser};
use jf_pcs::{checked_fft_size, prelude::UnivariateKzgPCS, PolynomialCommitmentScheme};
use jf_utils::field_byte_len;
use jf_vid::{advz::Advz, VidScheme};
use sha2::Sha256;

const KB: usize = 1 << 10;
// const MB: usize = KB << 10;

fn advz<E, H>(c: &mut Criterion)
where
E: Pairing,
// TODO(Gus) clean up nasty trait bounds upstream
H: Digest + DynDigest + Default + Clone + Write + Send + Sync,
<<H as OutputSizeUser>::OutputSize as ArrayLength<u8>>::ArrayType: Copy,
{
// play with these items
//
// INVERSE_CODE_RATE is merely a convenient way to automatically choose
// recovery threshold as a function of storage node count. If desired, you
// could set recovery thresholds independent of storage node counts.
let multiplicities = [1, 256];
let num_storage_nodes = 128;
const INVERSE_CODE_RATE: usize = 4; // ratio of num_storage_nodes : recovery_threshold

// more items as a function of the above
let recovery_threshold = num_storage_nodes / INVERSE_CODE_RATE;
let max_multiplicity = multiplicities.iter().max().unwrap();
let max_degree = recovery_threshold * max_multiplicity;
let coeff_byte_len = field_byte_len::<E::ScalarField>();
let payload_byte_len = {
// ensure payload is large enough to fill at least 1 polynomial at
// maximum multiplicity.
max_degree * coeff_byte_len
};
let mut rng = jf_utils::test_rng();
let payload_bytes = {
// random payload data
let mut payload_bytes = vec![0u8; payload_byte_len];
rng.fill_bytes(&mut payload_bytes);
payload_bytes
};
let srs =
UnivariateKzgPCS::<E>::gen_srs_for_testing(&mut rng, checked_fft_size(max_degree).unwrap())
.unwrap();

let benchmark_group_name = format!(
"advz_verify_payload_{}KB_multiplicity",
payload_byte_len / KB
);
let mut grp = c.benchmark_group(benchmark_group_name);
for multiplicity in multiplicities {
let mut advz = Advz::<E, H>::with_multiplicity(
num_storage_nodes.try_into().unwrap(),
recovery_threshold.try_into().unwrap(),
multiplicity.try_into().unwrap(),
&srs,
)
.unwrap();
let disperse = advz.disperse(&payload_bytes).unwrap();
let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit);
grp.bench_function(BenchmarkId::from_parameter(multiplicity), |b| {
// verify only the 0th share
b.iter(|| {
advz.verify_share(&shares[0], &common, &commit)
.unwrap()
.unwrap()
});
});
}
grp.finish();
}

fn advz_main(c: &mut Criterion) {
advz::<Bn254, Sha256>(c);
}

criterion_group!(name = benches; config = Criterion::default().sample_size(10); targets = advz_main);

criterion_main!(benches);
