Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement FRI Prover #20

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 34 additions & 9 deletions commitment_scheme/src/table_prover.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use crate::make_commitment_scheme_prover;
use crate::table_utils::{all_query_rows, elements_to_be_transmitted, RowCol};
use crate::CommitmentSchemeProver;
use crate::{CommitmentHashes, CommitmentSchemeProver};
use ark_ff::{BigInteger, PrimeField};
use channel::fs_prover_channel::FSProverChannel;
use channel::ProverChannel;
Expand All @@ -13,19 +14,38 @@ pub struct TableProver<F: PrimeField, P: Prng, W: Digest> {
data_queries: BTreeSet<RowCol>,
integrity_queries: BTreeSet<RowCol>,
all_query_rows: BTreeSet<usize>,
mont_r: F,
}

impl<F: PrimeField, P: Prng, W: Digest> TableProver<F, P, W> {
impl<F: PrimeField, P: Prng + Clone + 'static, W: Digest + Clone + 'static> TableProver<F, P, W> {
pub fn new(
n_segments: usize,
n_rows_per_segment: usize,
n_columns: usize,
commitment_scheme: Box<dyn CommitmentSchemeProver<F, P, W>>,
field_element_size_in_bytes: usize,
n_verifier_friendly_commitment_layers: usize,
commitment_hashes: CommitmentHashes,
mont_r: F,
) -> Self {
let size_of_row = field_element_size_in_bytes * n_columns;

let commitment_scheme: Box<dyn CommitmentSchemeProver<F, P, W>> =
make_commitment_scheme_prover(
size_of_row,
n_rows_per_segment,
n_segments,
n_verifier_friendly_commitment_layers,
commitment_hashes,
n_columns,
);

Self {
n_columns,
commitment_scheme,
data_queries: BTreeSet::new(),
integrity_queries: BTreeSet::new(),
all_query_rows: BTreeSet::new(),
mont_r,
}
}

Expand All @@ -41,9 +61,10 @@ impl<F: PrimeField, P: Prng, W: Digest> TableProver<F, P, W> {
"segment length is expected to be equal to the number of columns"
);

let serialised_segment = serialize_field_columns(segment, self.mont_r);
let _ = &self
.commitment_scheme
.add_segment_for_commitment(&serialize_field_columns(segment), segment_idx);
.add_segment_for_commitment(&serialised_segment, segment_idx);
}

pub fn commit(&mut self, channel: &mut FSProverChannel<F, P, W>) -> Result<(), anyhow::Error> {
Expand Down Expand Up @@ -120,14 +141,17 @@ impl<F: PrimeField, P: Prng, W: Digest> TableProver<F, P, W> {

if let Some(&to_transmit_loc) = to_transmit_it.next() {
assert!(to_transmit_loc == query_loc);
channel.send_felts(&[data[i]])?;
let data_mont = data[i] * self.mont_r;
channel.send_felts(&[data_mont])?;
}
}
}
}

self.commitment_scheme
.decommit(&serialize_field_columns(&elements_data_last_rows), channel)?;
self.commitment_scheme.decommit(
&serialize_field_columns(&elements_data_last_rows, self.mont_r),
channel,
)?;

Ok(())
}
Expand All @@ -146,7 +170,7 @@ fn verify_all_columns_same_length<FieldElementT>(columns: &[Vec<FieldElementT>])
columns.iter().all(|column| column.len() == n_rows)
}

fn serialize_field_columns<F: PrimeField>(segment: &[Vec<F>]) -> Vec<u8> {
pub fn serialize_field_columns<F: PrimeField>(segment: &[Vec<F>], mont_r: F) -> Vec<u8> {
let columns = segment;

assert!(
Expand All @@ -163,7 +187,8 @@ fn serialize_field_columns<F: PrimeField>(segment: &[Vec<F>]) -> Vec<u8> {

for row in 0..n_rows {
for col_data in columns.iter().take(n_columns) {
serialization.extend_from_slice(&col_data[row].into_bigint().to_bytes_be());
let data_mont = col_data[row] * mont_r;
serialization.extend_from_slice(&data_mont.into_bigint().to_bytes_be());
}
}

Expand Down
296 changes: 88 additions & 208 deletions commitment_scheme/src/table_verifier.rs

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions fri/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ serde_json.workspace = true
serde.workspace = true
commitment_scheme = { path = "../commitment_scheme" }
anyhow.workspace = true
num-bigint.workspace = true

[dev-dependencies]
rand.workspace = true
101 changes: 101 additions & 0 deletions fri/src/committed_layers.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
use crate::{
details::next_layer_data_and_integrity_queries, layers::FriLayer, parameters::FriParameters,
};
use anyhow::Ok;
use ark_ff::PrimeField;
use ark_poly::EvaluationDomain;
use channel::fs_prover_channel::FSProverChannel;
use commitment_scheme::table_prover::TableProver;
use commitment_scheme::CommitmentHashes;
use randomness::Prng;
use sha3::Digest;
use std::sync::Arc;

/// A single FRI layer bundled with the table commitment the prover has
/// made to its evaluations.
///
/// Construction (see `FriCommittedLayer::new`) commits to the layer's
/// evaluations arranged as a table of `2^fri_step` columns and sends the
/// commitment over the prover channel; `decommit` later opens the queried
/// positions against that commitment.
#[allow(dead_code)]
pub struct FriCommittedLayer<F: PrimeField, E: EvaluationDomain<F>, P: Prng, W: Digest> {
    // FRI folding step for this layer; each table row packs a coset of
    // 2^fri_step evaluations.
    fri_step: usize,
    // The underlying layer providing evaluations (shared, dynamically typed).
    layer: Arc<dyn FriLayer<F, E>>,
    // Full FRI parameters; `params.fri_step_list[layer_num]` is consulted
    // when evaluating at query points.
    params: FriParameters<F, E>,
    // Index of this layer within the FRI layer sequence.
    layer_num: usize,
    // Table prover holding the commitment to this layer's evaluations.
    table_prover: TableProver<F, P, W>,
}

#[allow(dead_code)]
impl<
        F: PrimeField,
        E: EvaluationDomain<F>,
        P: Prng + Clone + 'static,
        W: Digest + Clone + 'static,
    > FriCommittedLayer<F, E, P, W>
{
    /// Commits to `layer` and sends the commitment over `channel`.
    ///
    /// The layer's `layer_size` evaluations are laid out as a single-segment
    /// table with `2^fri_step` columns (one coset per row) and committed via
    /// [`TableProver`]; the resulting commitment is written to the prover
    /// channel before `Self` is returned.
    ///
    /// # Panics
    /// Panics if the layer's evaluations cannot be materialized
    /// (`layer.get_layer()` fails) or if sending the commitment fails.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        fri_step: usize,
        layer: Arc<dyn FriLayer<F, E>>,
        params: FriParameters<F, E>,
        layer_num: usize,
        field_element_size_in_bytes: usize,
        n_verifier_friendly_commitment_layers: usize,
        commitment_hashes: CommitmentHashes,
        channel: &mut FSProverChannel<F, P, W>,
        mont_r: F,
    ) -> Self {
        let layer_size = layer.get_layer_size();

        // One segment; each of the layer_size / 2^fri_step rows holds a
        // whole coset of 2^fri_step field elements.
        let mut table_prover = TableProver::new(
            1,
            layer_size / (1 << fri_step),
            1 << fri_step,
            field_element_size_in_bytes,
            n_verifier_friendly_commitment_layers,
            commitment_hashes,
            mont_r,
        );

        let segment = layer.get_layer().unwrap();
        table_prover.add_segment_for_commitment(&[segment], 0, 1 << fri_step);
        table_prover.commit(channel).unwrap();

        Self {
            fri_step,
            layer,
            params,
            layer_num,
            table_prover,
        }
    }

    /// Evaluates the layer at every cell of the requested table rows.
    ///
    /// Returns one vector per column (`coset_size = 2^fri_step_list[layer_num]`
    /// columns in total); entry `[col][i]` is the layer evaluation at flat
    /// index `required_row_indices[i] * coset_size + col`, matching the
    /// row-major coset layout used when committing.
    pub fn eval_at_points(&self, required_row_indices: &[usize]) -> Vec<Vec<F>> {
        let coset_size = 1 << self.params.fri_step_list[self.layer_num];
        let mut elements_data_vectors = Vec::with_capacity(coset_size);

        for col in 0..coset_size {
            // Flat indices of this column's cells for each requested row.
            let required_indices: Vec<usize> = required_row_indices
                .iter()
                .map(|&row| row * coset_size + col)
                .collect();

            let eval_result = self.layer.eval_at_points(&required_indices);
            elements_data_vectors.push(eval_result);
        }

        elements_data_vectors
    }

    /// Opens the commitment at the positions implied by `queries`.
    ///
    /// Derives this layer's data/integrity queries, asks the table prover
    /// which rows must be revealed, evaluates the layer at those rows and
    /// streams the decommitment over `channel`.
    ///
    /// # Errors
    /// Propagates any error from the table prover's decommitment.
    pub fn decommit(
        &mut self,
        queries: &[u64],
        channel: &mut FSProverChannel<F, P, W>,
    ) -> Result<(), anyhow::Error> {
        let (data_queries, integrity_queries) =
            next_layer_data_and_integrity_queries(&self.params, queries, self.layer_num);
        // The query sets are not needed afterwards, so move them instead of
        // cloning (the previous `.clone()` calls were redundant).
        let required_row_indices = self
            .table_prover
            .start_decommitment_phase(data_queries, integrity_queries);

        let elements_data = self.eval_at_points(&required_row_indices);
        self.table_prover.decommit(channel, &elements_data)?;
        Ok(())
    }
}
6 changes: 3 additions & 3 deletions fri/src/folder.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use anyhow::Error;
use ark_ff::{FftField, PrimeField};
use ark_poly::Radix2EvaluationDomain;
use ark_poly::EvaluationDomain;
use std::sync::Arc;

use crate::stone_domain::get_field_element_at_index;
Expand All @@ -11,11 +11,11 @@ pub struct MultiplicativeFriFolder;
impl MultiplicativeFriFolder {
// Computes the values of the next FRI layer given the values and domain of the current layer.
pub fn compute_next_fri_layer<F: FftField + PrimeField>(
domain: Radix2EvaluationDomain<F>,
domain: impl EvaluationDomain<F>,
input_layer: &[F],
eval_point: &F,
) -> Result<Vec<F>, Error> {
assert_eq!(input_layer.len(), domain.size as usize);
assert_eq!(input_layer.len(), domain.size());

let mut next_layer = Vec::with_capacity(input_layer.len() / 2);
for j in (0..input_layer.len()).step_by(2) {
Expand Down
Loading