diff --git a/crates/prover/benches/pcs.rs b/crates/prover/benches/pcs.rs
index 760f001db..0dd7f3a74 100644
--- a/crates/prover/benches/pcs.rs
+++ b/crates/prover/benches/pcs.rs
@@ -14,7 +14,7 @@ use binius_prover::{
 use binius_transcript::{ProverTranscript, VerifierTranscript};
 use binius_verifier::{
     config::{B1, B128, StdChallenger},
-    fri::FRIParams,
+    fri::{ConstantArityStrategy, FRIParams, calculate_n_test_queries},
     hash::{StdCompression, StdDigest},
     pcs,
 };
@@ -46,12 +46,15 @@ fn bench_pcs(c: &mut Criterion) {
     let log_num_shares = binius_utils::rayon::current_num_threads().ilog2() as usize;
     let ntt = NeighborsLastMultiThread::new(domain_context, log_num_shares);

-    let fri_params = FRIParams::choose_with_constant_fold_arity(
+    let n_test_queries = calculate_n_test_queries(SECURITY_BITS, LOG_INV_RATE);
+    let fri_params = FRIParams::with_strategy(
         &ntt,
+        merkle_prover.scheme(),
         log_len,
-        SECURITY_BITS,
+        None,
         LOG_INV_RATE,
-        ARITY,
+        n_test_queries,
+        &ConstantArityStrategy::new(ARITY),
     )
     .expect("Failed to create FRI params");
diff --git a/crates/prover/src/protocols/basefold.rs b/crates/prover/src/protocols/basefold.rs
index 4863cedc6..9bd1b5355 100644
--- a/crates/prover/src/protocols/basefold.rs
+++ b/crates/prover/src/protocols/basefold.rs
@@ -171,7 +171,7 @@ mod test {
     use binius_transcript::ProverTranscript;
     use binius_verifier::{
         config::StdChallenger,
-        fri::FRIParams,
+        fri::{ConstantArityStrategy, FRIParams, calculate_n_test_queries},
         hash::{StdCompression, StdDigest},
         protocols::basefold,
     };
@@ -206,12 +206,15 @@ mod test {
         let domain_context = GenericOnTheFly::generate_from_subspace(&subspace);
         let ntt = NeighborsLastSingleThread::new(domain_context);

-        let fri_params = FRIParams::choose_with_constant_fold_arity(
+        let n_test_queries = calculate_n_test_queries(SECURITY_BITS, LOG_INV_RATE);
+        let fri_params = FRIParams::with_strategy(
             &ntt,
+            merkle_prover.scheme(),
             multilinear.log_len(),
-            SECURITY_BITS,
+            None,
             LOG_INV_RATE,
-            2,
+            n_test_queries,
+            &ConstantArityStrategy::new(2),
         )?;

         let CommitOutput {
diff --git a/crates/spartan-prover/src/pcs.rs b/crates/spartan-prover/src/pcs.rs
index 39b644bab..1dcac6d6f 100644
--- a/crates/spartan-prover/src/pcs.rs
+++ b/crates/spartan-prover/src/pcs.rs
@@ -148,7 +148,7 @@ mod tests {
     };
     use binius_transcript::ProverTranscript;
     use binius_verifier::{
-        fri::FRIParams,
+        fri::{ConstantArityStrategy, FRIParams, calculate_n_test_queries},
         hash::{StdCompression, StdDigest},
     };
     use rand::{SeedableRng, rngs::StdRng};
@@ -174,12 +174,15 @@ mod tests {
         let domain_context = GenericOnTheFly::generate_from_subspace(&subspace);
         let ntt = NeighborsLastSingleThread::new(domain_context);

-        let fri_params = FRIParams::choose_with_constant_fold_arity(
+        let n_test_queries = calculate_n_test_queries(SECURITY_BITS, LOG_INV_RATE);
+        let fri_params = FRIParams::with_strategy(
             &ntt,
+            merkle_prover.scheme(),
             multilinear.log_len(),
-            SECURITY_BITS,
+            None,
             LOG_INV_RATE,
-            2,
+            n_test_queries,
+            &ConstantArityStrategy::new(2),
         )?;

         let pcs_prover = PCSProver::new(&ntt, &merkle_prover, &fri_params);
diff --git a/crates/spartan-prover/src/wiring.rs b/crates/spartan-prover/src/wiring.rs
index 0b97f94c8..684664397 100644
--- a/crates/spartan-prover/src/wiring.rs
+++ b/crates/spartan-prover/src/wiring.rs
@@ -337,7 +337,7 @@ mod tests {
     };
     use binius_transcript::ProverTranscript;
     use binius_verifier::{
-        fri::FRIParams,
+        fri::{ConstantArityStrategy, FRIParams, calculate_n_test_queries},
         hash::{StdCompression, StdDigest},
     };
     use rand::{Rng, SeedableRng, rngs::StdRng};
@@ -513,12 +513,15 @@ mod tests {
         let domain_context = GenericOnTheFly::generate_from_subspace(&subspace);
         let ntt = NeighborsLastSingleThread::new(domain_context);

-        let fri_params = FRIParams::choose_with_constant_fold_arity(
+        let n_test_queries = calculate_n_test_queries(SECURITY_BITS, LOG_INV_RATE);
+        let fri_params = FRIParams::with_strategy(
             &ntt,
+            merkle_prover.scheme(),
             log_witness_size,
-            SECURITY_BITS,
+            None,
             LOG_INV_RATE,
-            2,
+            n_test_queries,
+            &ConstantArityStrategy::new(2),
         )
         .expect("FRI params creation should succeed");
diff --git a/crates/spartan-verifier/src/lib.rs b/crates/spartan-verifier/src/lib.rs
index 4494aee86..806b94271 100644
--- a/crates/spartan-verifier/src/lib.rs
+++ b/crates/spartan-verifier/src/lib.rs
@@ -17,7 +17,7 @@ use binius_transcript::{
 };
 use binius_utils::{DeserializeBytes, checked_arithmetics::checked_log_2};
 use binius_verifier::{
-    fri::{self, FRIParams, estimate_optimal_arity},
+    fri::{self, ConstantArityStrategy, FRIParams, calculate_n_test_queries},
     hash::PseudoCompressionFunction,
     merkle_tree::BinaryMerkleTreeScheme,
     protocols::{mlecheck, sumcheck, sumcheck::SumcheckOutput},
@@ -54,22 +54,24 @@ where
     ) -> Result<Self, Error> {
         let log_witness_len = constraint_system.log_size() as usize;
         let log_code_len = log_witness_len + log_inv_rate;
+        let merkle_scheme = BinaryMerkleTreeScheme::new(compression);
         let fri_arity =
-            estimate_optimal_arity(log_code_len, size_of::<Output<StdDigest>>(), size_of::<B128>());
+            ConstantArityStrategy::with_optimal_arity::<B128, _>(&merkle_scheme, log_code_len).arity;

         let subspace = BinarySubspace::with_dim(log_code_len)?;
         let domain_context = GenericOnTheFly::generate_from_subspace(&subspace);
         let ntt = NeighborsLastSingleThread::new(domain_context);
-        let fri_params = FRIParams::choose_with_constant_fold_arity(
+        let n_test_queries = calculate_n_test_queries(SECURITY_BITS, log_inv_rate);
+        let fri_params = FRIParams::with_strategy(
             &ntt,
+            &merkle_scheme,
             log_witness_len,
-            SECURITY_BITS,
+            None,
             log_inv_rate,
-            fri_arity,
+            n_test_queries,
+            &ConstantArityStrategy::new(fri_arity),
         )?;

-        let merkle_scheme = BinaryMerkleTreeScheme::new(compression);
-
         Ok(Self {
             constraint_system,
             fri_params,
diff --git a/crates/verifier/src/fri/common.rs b/crates/verifier/src/fri/common.rs
index e95a0944c..e462e1818 100644
--- a/crates/verifier/src/fri/common.rs
+++ b/crates/verifier/src/fri/common.rs
@@ -4,7 +4,6 @@ use std::iter;

 use binius_field::{BinaryField, Field};
 use binius_math::{ntt::AdditiveNTT, reed_solomon::ReedSolomonCode};
-use binius_utils::checked_arithmetics::log2_ceil_usize;
 use getset::{CopyGetters, Getters};

 use super::error::Error;
@@ -53,10 +52,7 @@ where
         })
     }

-    /// Create parameters that minimize proof size.
-    ///
-    /// This uses dynamic programming to find the optimal sequence of fold arities that minimizes
-    /// the total proof size, accounting for both leaf data and Merkle proof overhead.
+    /// Create parameters using the given arity selection strategy.
     ///
     /// ## Arguments
     ///
@@ -67,6 +63,7 @@ where
     /// optimally along with the fold arities.
     /// * `log_inv_rate` - the binary logarithm of the inverse Reed–Solomon code rate.
     /// * `n_test_queries` - the number of test queries for the FRI protocol.
+    /// * `strategy` - the strategy for selecting fold arities.
     ///
     /// ## Preconditions
     ///
@@ -77,22 +74,24 @@ where
     /// parameters should guarantee that the codeword length is not so large as to violate the
     /// required soundness error probability. In other words, this means bounding the maximum
     /// codeword length based on the field size and security bits.
-    pub fn with_min_proof_size<NTT, MerkleScheme>(
+    pub fn with_strategy<NTT, MerkleScheme, Strategy>(
         ntt: &NTT,
         merkle_scheme: &MerkleScheme,
         log_msg_len: usize,
         log_batch_size: Option<usize>,
         log_inv_rate: usize,
         n_test_queries: usize,
+        strategy: &Strategy,
     ) -> Result<Self, Error>
     where
         NTT: AdditiveNTT<Field = F>,
         MerkleScheme: MerkleTreeScheme<F>,
+        Strategy: AritySelectionStrategy,
     {
         let (log_batch_size, fold_arities) = match log_batch_size {
             Some(log_batch_size) => {
                 assert!(log_batch_size <= log_msg_len); // precondition
-                let fold_arities = find_fold_arities_for_min_proof_size(
+                let fold_arities = strategy.choose_arities::<F, _>(
                     merkle_scheme,
                     log_msg_len - log_batch_size,
                     log_inv_rate,
@@ -101,7 +100,7 @@ where
                 (log_batch_size, fold_arities)
             }
             None => {
-                let mut fold_arities = find_fold_arities_for_min_proof_size(
+                let mut fold_arities = strategy.choose_arities::<F, _>(
                     merkle_scheme,
                     log_msg_len,
                     log_inv_rate,
@@ -122,67 +121,6 @@ where
         Self::new(rs_code, log_batch_size, fold_arities, n_test_queries)
     }

-    /// Choose commit parameters based on protocol parameters, using a constant fold arity.
-    ///
-    /// ## Arguments
-    ///
-    /// * `log_msg_len` - the binary logarithm of the length of the message to commit.
-    /// * `security_bits` - the target security level in bits.
-    /// * `log_inv_rate` - the binary logarithm of the inverse Reed–Solomon code rate.
-    /// * `arity` - the folding arity.
-    pub fn choose_with_constant_fold_arity(
-        ntt: &impl AdditiveNTT<Field = F>,
-        log_msg_len: usize,
-        security_bits: usize,
-        log_inv_rate: usize,
-        arity: usize,
-    ) -> Result<Self, Error> {
-        assert!(arity > 0);
-
-        let log_batch_size = log_msg_len.min(arity);
-        let log_dim = log_msg_len - log_batch_size;
-        let rs_code = ReedSolomonCode::with_ntt_subspace(ntt, log_dim, log_inv_rate)?;
-        let n_test_queries = calculate_n_test_queries(security_bits, log_inv_rate);
-
-        // TODO: Use BinaryMerkleTreeScheme to estimate instead of log2_ceil_usize
-        let cap_height = log2_ceil_usize(n_test_queries);
-        let log_terminal_len = cap_height.clamp(log_inv_rate, rs_code.log_len());
-
-        let quotient = (rs_code.log_len() - log_terminal_len) / arity;
-        let remainder = (rs_code.log_len() - log_terminal_len) % arity;
-        let mut fold_arities = vec![arity; quotient];
-        if remainder != 0 {
-            fold_arities.push(remainder);
-        }
-
-        // here is the down-to-earth explanation of what we're doing: we want the terminal
-        // codeword's log-length to be at least as large as the Merkle cap height. note that
-        // `total_vars + log_inv_rate - sum(fold_arities)` is exactly the log-length of the
-        // terminal codeword; we want this number to be ≥ cap height. so fold_arities will repeat
-        // `arity` the maximal number of times possible, while maintaining that `total_vars +
-        // log_inv_rate - sum(fold_arities) ≥ cap_height` stays true. this arity-selection
-        // strategy can be characterized as: "terminate as late as you can, while maintaining that
-        // no Merkle cap is strictly smaller than `cap_height`." this strategy does attain that
-        // property: the Merkle path height of the last non-terminal codeword will equal the
-        // log-length of the terminal codeword, which is ≥ cap height by fiat. moreover, if we
-        // terminated later than we are above, then this would stop being true. imagine what would
-        // happen if we took the above terminal codeword and continued folding. in that case, we
-        // would Merklize this word, again with the coset-bundling trick; the post-bundling path
-        // height would thus be `total_vars + log_inv_rate - sum(fold_arities) - arity`. but we
-        // already agreed (by the maximality of the number of times we subtracted `arity`) that
-        // the above number will be < cap_height. in other words, its Merkle cap will be
-        // short. equivalently: this is the latest termination for which the `min` in
-        // `optimal_verify_layer` will never trigger; i.e., we will have log2_ceil_usize(n_queries)
-        // ≤ tree_depth there. it can be shown that this strategy beats any strategy which
-        // terminates later than it does (in other words, by doing this, we are NOT terminating
-        // TOO early!). this doesn't mean that we should't terminate EVEN earlier (maybe we
-        // should). but this approach is conservative and simple; and it's easy to show that you
-        // won't lose by doing this.
-
-        // see https://github.com/IrreducibleOSS/binius/pull/300 for proof of this fact
-        Self::new(rs_code, log_batch_size, fold_arities, n_test_queries)
-    }
-
     pub fn n_fold_rounds(&self) -> usize {
         self.log_msg_len()
     }
@@ -254,145 +192,221 @@ pub fn calculate_n_test_queries(security_bits: usize, log_inv_rate: usize) -> us
     (security_bits as f64 / -per_query_err.log2()).ceil() as usize
 }

-/// Heuristic for estimating the optimal FRI folding arity that minimizes proof size.
-///
-/// `log_block_length` is the binary logarithm of the block length of the Reed–Solomon code.
-pub fn estimate_optimal_arity(
-    log_block_length: usize,
-    digest_size: usize,
-    field_size: usize,
-) -> usize {
-    (1..=log_block_length)
-        .map(|arity| {
-            (
-                // for given arity, return a tuple (arity, estimate of query_proof_size).
-                // this estimate is basd on the following approximation of a single
-                // query_proof_size, where $\vartheta$ is the arity: $\big((n-\vartheta) +
-                // (n-2\vartheta) + \ldots\big)\text{digest_size} +
-                // \frac{n-\vartheta}{\vartheta}2^{\vartheta}\text{field_size}.$
-                arity,
-                ((log_block_length) / 2 * digest_size + (1 << arity) * field_size)
-                    * (log_block_length - arity)
-                    / arity,
-            )
-        })
-        // now scan and terminate the iterator when query_proof_size increases.
-        .scan(None, |old: &mut Option<(usize, usize)>, new| {
-            let should_continue = !matches!(*old, Some(ref old) if new.1 > old.1);
-            *old = Some(new);
-            should_continue.then_some(new)
-        })
-        .last()
-        .map(|(arity, _)| arity)
-        .unwrap_or(1)
+/// Strategy for selecting fold arities in the FRI protocol.
+pub trait AritySelectionStrategy {
+    fn choose_arities<F, MerkleScheme>(
+        &self,
+        merkle_scheme: &MerkleScheme,
+        log_msg_len: usize,
+        log_inv_rate: usize,
+        n_test_queries: usize,
+    ) -> Vec<usize>
+    where
+        F: Field,
+        MerkleScheme: MerkleTreeScheme<F>;
 }

-fn find_fold_arities_for_min_proof_size<F, MerkleScheme>(
-    merkle_scheme: &MerkleScheme,
-    log_msg_len: usize,
-    log_inv_rate: usize,
-    n_test_queries: usize,
-) -> Vec<usize>
-where
-    F: Field,
-    MerkleScheme: MerkleTreeScheme<F>,
-{
-    // This algorithm uses a dynamic programming approach to determine the sequence of arities that
-    // minimizes proof size. For each i in [0, log_msg_len], we determine the minimum proof size
-    // attainable when for a batched codeword with message size 2^i. This is determined by
-    // minimizing over the first reduction arity, using the values already determined for the
-    // smaller values of i.
-
-    // This vec maps log_msg_len values to the minimum proof size attainable for a batched FRI
-    // protocol committing a message with that length.
-    let mut min_sizes = Vec::<Entry>::with_capacity(log_msg_len);
-
-    #[derive(Debug)]
-    struct Entry {
-        // The minimum proof size attainable for the indexed value of i.
-        proof_size: usize,
-        // The first reduction arity to achieve the minimum proof size. If the value is none, then
-        // the best reduction sequence is to skip all folding and send the full codeword.
-        arity: Option<usize>,
-    }
-
-    // The byte-size of an element.
-    let value_size = {
-        let mut buf = Vec::new();
-        F::default()
-            .serialize(&mut buf)
-            .expect("default element can be serialized to a resizable buffer");
-        buf.len()
-    };
-
-    for i in 0..=log_msg_len {
-        // Length of the batched codeword.
-        let log_code_len = i + log_inv_rate;
-
-        let mut last_entry = None;
-        for arity in 1..=i {
-            // The additional proof bytes for the reduction by arity.
-            let reduction_proof_size = {
-                // Each queried coset contains 2^arity values.
-                let leaf_size = value_size << arity;
-                // One coset per test query.
-                let leaves_size = leaf_size * n_test_queries;
-
-                // Size of the Merkle multi-proof.
-                let optimal_layer =
-                    merkle_scheme.optimal_verify_layer(n_test_queries, log_code_len);
-                let merkle_size = merkle_scheme
-                    .proof_size(1 << log_code_len, n_test_queries, optimal_layer)
-                    .expect("layer computed with optimal_layer must be valid");
-                leaves_size + merkle_size
-            };
-
-            let reduced_proof_size = min_sizes[i - arity].proof_size;
-            let proof_size = reduction_proof_size + reduced_proof_size;
-            let replace = last_entry
-                .as_ref()
-                .is_none_or(|last_entry: &Entry| proof_size <= last_entry.proof_size);
-            if replace {
-                last_entry = Some(Entry {
-                    proof_size,
-                    arity: Some(arity),
-                });
-            } else {
-                // The proof size function is concave with respect to arity. Break as soon is it
-                // ascends.
-                break;
-            }
-        }
-
-        // Determine the proof size if this is the terminal codeword. In that case, the proof simply
-        // consists of the 2^(i + log_inv_rate) leaf values.
-        let terminal_proof_size = value_size << log_code_len;
-        let terminal_entry = Entry {
-            proof_size: terminal_proof_size,
-            arity: None,
-        };
-
-        let optimal_entry = if let Some(last_entry) = last_entry
-            && last_entry.proof_size < terminal_entry.proof_size
-        {
-            last_entry
-        } else {
-            terminal_entry
-        };
-
-        min_sizes.push(optimal_entry);
-    }
-
-    let mut fold_arities = Vec::with_capacity(log_msg_len);
-
-    let mut i = log_msg_len;
-    let mut entry = &min_sizes[i];
-    while let Some(arity) = entry.arity {
-        fold_arities.push(arity);
-        i -= arity;
-        entry = &min_sizes[i];
-    }
-    fold_arities
+/// Strategy that minimizes proof size using dynamic programming.
+#[derive(Debug, Clone, Copy, Default)]
+pub struct MinProofSizeStrategy;
+
+impl AritySelectionStrategy for MinProofSizeStrategy {
+    fn choose_arities<F, MerkleScheme>(
+        &self,
+        merkle_scheme: &MerkleScheme,
+        log_msg_len: usize,
+        log_inv_rate: usize,
+        n_test_queries: usize,
+    ) -> Vec<usize>
+    where
+        F: Field,
+        MerkleScheme: MerkleTreeScheme<F>,
+    {
+        // This algorithm uses a dynamic programming approach to determine the sequence of arities
+        // that minimizes proof size. For each i in [0, log_msg_len], we determine the minimum
+        // proof size attainable for a batched codeword with message size 2^i. This is
+        // determined by minimizing over the first reduction arity, using the values already
+        // determined for the smaller values of i.
+
+        // This vec maps log_msg_len values to the minimum proof size attainable for a batched FRI
+        // protocol committing a message with that length.
+        let mut min_sizes = Vec::<Entry>::with_capacity(log_msg_len);
+
+        #[derive(Debug)]
+        struct Entry {
+            // The minimum proof size attainable for the indexed value of i.
+            proof_size: usize,
+            // The first reduction arity to achieve the minimum proof size. If the value is none,
+            // then the best reduction sequence is to skip all folding and send the full codeword.
+            arity: Option<usize>,
+        }
+
+        // The byte-size of an element.
+        let value_size = {
+            let mut buf = Vec::new();
+            F::default()
+                .serialize(&mut buf)
+                .expect("default element can be serialized to a resizable buffer");
+            buf.len()
+        };
+
+        for i in 0..=log_msg_len {
+            // Length of the batched codeword.
+            let log_code_len = i + log_inv_rate;
+
+            let mut last_entry = None;
+            for arity in 1..=i {
+                // The additional proof bytes for the reduction by arity.
+                let reduction_proof_size = {
+                    // Each queried coset contains 2^arity values.
+                    let leaf_size = value_size << arity;
+                    // One coset per test query.
+                    let leaves_size = leaf_size * n_test_queries;
+
+                    // Size of the Merkle multi-proof.
+                    let optimal_layer =
+                        merkle_scheme.optimal_verify_layer(n_test_queries, log_code_len);
+                    let merkle_size = merkle_scheme
+                        .proof_size(1 << log_code_len, n_test_queries, optimal_layer)
+                        .expect("layer computed with optimal_layer must be valid");
+                    leaves_size + merkle_size
+                };
+
+                let reduced_proof_size = min_sizes[i - arity].proof_size;
+                let proof_size = reduction_proof_size + reduced_proof_size;
+                let replace = last_entry
+                    .as_ref()
+                    .is_none_or(|last_entry: &Entry| proof_size <= last_entry.proof_size);
+                if replace {
+                    last_entry = Some(Entry {
+                        proof_size,
+                        arity: Some(arity),
+                    });
+                } else {
+                    // The proof size function is concave with respect to arity. Break as soon as
+                    // it ascends.
+                    break;
+                }
+            }
+
+            // Determine the proof size if this is the terminal codeword. In that case, the proof
+            // simply consists of the 2^(i + log_inv_rate) leaf values.
+            let terminal_proof_size = value_size << log_code_len;
+            let terminal_entry = Entry {
+                proof_size: terminal_proof_size,
+                arity: None,
+            };
+
+            let optimal_entry = if let Some(last_entry) = last_entry
+                && last_entry.proof_size < terminal_entry.proof_size
+            {
+                last_entry
+            } else {
+                terminal_entry
+            };
+
+            min_sizes.push(optimal_entry);
+        }
+
+        let mut fold_arities = Vec::with_capacity(log_msg_len);
+
+        let mut i = log_msg_len;
+        let mut entry = &min_sizes[i];
+        while let Some(arity) = entry.arity {
+            fold_arities.push(arity);
+            i -= arity;
+            entry = &min_sizes[i];
+        }
+        fold_arities
+    }
+}
+
+/// Strategy that uses a constant fold arity.
+#[derive(Debug, Clone, Copy)]
+pub struct ConstantArityStrategy {
+    /// The fold arity to use for each reduction step.
+    pub arity: usize,
+}
+
+impl ConstantArityStrategy {
+    /// Creates a new strategy with the given arity.
+    pub fn new(arity: usize) -> Self {
+        Self { arity }
+    }
+
+    /// Creates a strategy with an estimated optimal arity.
+    ///
+    /// Uses a heuristic to estimate the optimal FRI folding arity that minimizes proof size.
+    ///
+    /// ## Arguments
+    ///
+    /// * `_merkle_scheme` - the Merkle tree scheme (used to infer digest size)
+    /// * `approx_log_code_len` - approximate log2 of the codeword length
+    pub fn with_optimal_arity<F, MerkleScheme>(
+        _merkle_scheme: &MerkleScheme,
+        approx_log_code_len: usize,
+    ) -> Self
+    where
+        F: Field,
+        MerkleScheme: MerkleTreeScheme<F>,
+    {
+        let digest_size = std::mem::size_of::<MerkleScheme::Digest>() * 8;
+        let field_size = std::mem::size_of::<F>() * 8;
+
+        // Estimate optimal arity using a heuristic based on the approximation of a single
+        // query_proof_size, where θ is the arity:
+        // ((n-θ) + (n-2θ) + ...) * digest_size + ((n-θ)/θ) * 2^θ * field_size
+        let arity = (1..=approx_log_code_len)
+            .map(|arity| {
+                (
+                    arity,
+                    ((approx_log_code_len) / 2 * digest_size + (1 << arity) * field_size)
+                        * (approx_log_code_len - arity)
+                        / arity,
+                )
+            })
+            // Scan and terminate when query_proof_size increases.
+            .scan(None, |old: &mut Option<(usize, usize)>, new| {
+                let should_continue = !matches!(*old, Some(ref old) if new.1 > old.1);
+                *old = Some(new);
+                should_continue.then_some(new)
+            })
+            .last()
+            .map(|(arity, _)| arity)
+            .unwrap_or(1);
+
+        Self { arity }
+    }
+}
+
+impl AritySelectionStrategy for ConstantArityStrategy {
+    fn choose_arities<F, MerkleScheme>(
+        &self,
+        merkle_scheme: &MerkleScheme,
+        log_msg_len: usize,
+        log_inv_rate: usize,
+        n_test_queries: usize,
+    ) -> Vec<usize>
+    where
+        F: Field,
+        MerkleScheme: MerkleTreeScheme<F>,
+    {
+        let log_code_len = log_msg_len + log_inv_rate;
+        let cap_height = merkle_scheme.optimal_verify_layer(n_test_queries, log_code_len);
+        let log_terminal_len = cap_height.max(log_inv_rate);
+
+        let mut fold_arities = Vec::new();
+        let mut i = log_code_len;
+        while i > log_terminal_len {
+            if let Some(next_i) = i.checked_sub(self.arity) {
+                fold_arities.push(self.arity);
+                i = next_i;
+            } else {
+                break;
+            }
+        }
+        fold_arities
+    }
 }

 #[cfg(test)]
@@ -420,55 +434,30 @@ mod tests {
     }

     #[test]
-    fn test_estimate_optimal_arity() {
-        let field_size = 128;
-        for log_block_length in 22..35 {
-            let digest_size = 256;
-            assert_eq!(estimate_optimal_arity(log_block_length, digest_size, field_size), 4);
-        }
-
-        for log_block_length in 22..28 {
-            let digest_size = 1024;
-            assert_eq!(estimate_optimal_arity(log_block_length, digest_size, field_size), 6);
-        }
-    }
-
-    #[test]
-    fn test_find_fold_arities_for_min_proof_size() {
+    fn test_min_proof_size_strategy() {
         let merkle_scheme = test_merkle_scheme();
         let log_inv_rate = 2;
         let n_test_queries = 128;
+        let strategy = MinProofSizeStrategy;

         // log_msg_len = 0: no folding needed, terminal codeword is optimal
-        let arities = find_fold_arities_for_min_proof_size::<B128, _>(
-            &merkle_scheme,
-            0,
-            log_inv_rate,
-            n_test_queries,
-        );
+        let arities =
+            strategy.choose_arities::<B128, _>(&merkle_scheme, 0, log_inv_rate, n_test_queries);
        assert_eq!(arities, vec![]);

         // log_msg_len = 3: no folding needed, terminal codeword is optimal
-        let arities = find_fold_arities_for_min_proof_size::<B128, _>(
-            &merkle_scheme,
-            3,
-            log_inv_rate,
-            n_test_queries,
-        );
+        let arities =
+            strategy.choose_arities::<B128, _>(&merkle_scheme, 3, log_inv_rate, n_test_queries);
         assert_eq!(arities, vec![]);

         // log_msg_len = 24
-        let arities = find_fold_arities_for_min_proof_size::<B128, _>(
-            &merkle_scheme,
-            24,
-            log_inv_rate,
-            n_test_queries,
-        );
+        let arities =
+            strategy.choose_arities::<B128, _>(&merkle_scheme, 24, log_inv_rate, n_test_queries);
         assert_eq!(arities, vec![4, 4, 4, 4]);
     }

     #[test]
-    fn test_with_min_proof_size() {
+    fn test_with_strategy_min_proof_size() {
         let merkle_scheme = test_merkle_scheme();
         let log_inv_rate = 2;
         let n_test_queries = 128;
@@ -479,13 +468,14 @@ mod tests {

         // log_msg_len = 0
         {
-            let fri_params = FRIParams::with_min_proof_size(
+            let fri_params = FRIParams::with_strategy(
                 &ntt,
                 &merkle_scheme,
                 0,
                 None,
                 log_inv_rate,
                 n_test_queries,
+                &MinProofSizeStrategy,
             )
             .unwrap();
             assert_eq!(fri_params.fold_arities(), &[]);
@@ -494,13 +484,14 @@ mod tests {

         // log_msg_len = 3
         {
-            let fri_params = FRIParams::with_min_proof_size(
+            let fri_params = FRIParams::with_strategy(
                 &ntt,
                 &merkle_scheme,
                 3,
                 None,
                 log_inv_rate,
                 n_test_queries,
+                &MinProofSizeStrategy,
             )
             .unwrap();
             assert_eq!(fri_params.fold_arities(), &[]);
@@ -509,13 +500,14 @@ mod tests {

         // log_msg_len = 24
         {
-            let fri_params = FRIParams::with_min_proof_size(
+            let fri_params = FRIParams::with_strategy(
                 &ntt,
                 &merkle_scheme,
                 24,
                 None,
                 log_inv_rate,
                 n_test_queries,
+                &MinProofSizeStrategy,
             )
             .unwrap();
             assert_eq!(fri_params.fold_arities(), &[4, 4, 4]);
@@ -524,7 +516,7 @@ mod tests {
     }

     #[test]
-    fn test_with_min_proof_size_fixed_batch_size() {
+    fn test_with_strategy_fixed_batch_size() {
         let merkle_scheme = test_merkle_scheme();
         let log_inv_rate = 2;
         let n_test_queries = 128;
@@ -535,13 +527,14 @@ mod tests {

         // log_msg_len = 3
         {
-            let fri_params = FRIParams::with_min_proof_size(
+            let fri_params = FRIParams::with_strategy(
                 &ntt,
                 &merkle_scheme,
                 3,
                 Some(1),
                 log_inv_rate,
                 n_test_queries,
+                &MinProofSizeStrategy,
             )
             .unwrap();
             assert_eq!(fri_params.fold_arities(), &[]);
@@ -550,13 +543,14 @@ mod tests {

         // log_msg_len = 24
         {
-            let fri_params = FRIParams::with_min_proof_size(
+            let fri_params = FRIParams::with_strategy(
                 &ntt,
                 &merkle_scheme,
                 24,
                 Some(1),
                 log_inv_rate,
                 n_test_queries,
+                &MinProofSizeStrategy,
             )
             .unwrap();
             assert_eq!(fri_params.fold_arities(), &[4, 4, 4, 3]);
diff --git a/crates/verifier/src/verify.rs b/crates/verifier/src/verify.rs
index 3439036c5..7d77696f4 100644
--- a/crates/verifier/src/verify.rs
+++ b/crates/verifier/src/verify.rs
@@ -25,7 +25,7 @@ use crate::{
     config::{
         B128, LOG_WORD_SIZE_BITS, LOG_WORDS_PER_ELEM, PROVER_SMALL_FIELD_ZEROCHECK_CHALLENGES,
     },
-    fri::{FRIParams, estimate_optimal_arity},
+    fri::{ConstantArityStrategy, FRIParams, calculate_n_test_queries},
     hash::PseudoCompressionFunction,
     merkle_tree::BinaryMerkleTreeScheme,
     pcs,
@@ -78,25 +78,25 @@ where
         let log_witness_elems = log_witness_words - LOG_WORDS_PER_ELEM;
         let log_code_len = log_witness_elems + log_inv_rate;

-        let fri_arity = estimate_optimal_arity(
-            log_code_len,
-            size_of::<Output<StdDigest>>(),
-            size_of::<B128>(),
-        );
+        let merkle_scheme = BinaryMerkleTreeScheme::new(compression);
+        let fri_arity =
+            ConstantArityStrategy::with_optimal_arity::<B128, _>(&merkle_scheme, log_code_len)
+                .arity;

         let subspace = BinarySubspace::with_dim(log_code_len)?;
         let domain_context = GenericOnTheFly::generate_from_subspace(&subspace);
         let ntt = NeighborsLastSingleThread::new(domain_context);

-        let fri_params = FRIParams::choose_with_constant_fold_arity(
+        let n_test_queries = calculate_n_test_queries(SECURITY_BITS, log_inv_rate);
+        let fri_params = FRIParams::with_strategy(
             &ntt,
+            &merkle_scheme,
             log_witness_elems,
-            SECURITY_BITS,
+            None,
             log_inv_rate,
-            fri_arity,
+            n_test_queries,
+            &ConstantArityStrategy::new(fri_arity),
         )?;

-        let merkle_scheme = BinaryMerkleTreeScheme::new(compression);
-
         Ok(Self {
             constraint_system,
             fri_params,
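
The `AritySelectionStrategy` trait introduced in `crates/verifier/src/fri/common.rs` is the extension point of this change: a caller can plug in its own fold-arity schedule without touching `FRIParams`. A minimal sketch of a custom strategy follows; the re-export paths (`binius_verifier::fri::AritySelectionStrategy`, `binius_verifier::merkle_tree::MerkleTreeScheme`) and the generic bounds are mirrored from the diff above rather than confirmed against the published crate, and `FixedScheduleStrategy` is a hypothetical name.

```rust
use binius_field::Field;
use binius_verifier::{fri::AritySelectionStrategy, merkle_tree::MerkleTreeScheme};

/// Hypothetical strategy that replays a fixed, caller-provided arity schedule,
/// truncated so the selected arities never exceed the message length.
struct FixedScheduleStrategy {
    schedule: Vec<usize>,
}

impl AritySelectionStrategy for FixedScheduleStrategy {
    fn choose_arities<F, MerkleScheme>(
        &self,
        _merkle_scheme: &MerkleScheme,
        log_msg_len: usize,
        _log_inv_rate: usize,
        _n_test_queries: usize,
    ) -> Vec<usize>
    where
        F: Field,
        MerkleScheme: MerkleTreeScheme<F>,
    {
        // Consume the schedule until it would over-fold the message, mirroring the
        // invariant the built-in strategies maintain (sum of arities <= log_msg_len).
        let mut remaining = log_msg_len;
        let mut arities = Vec::new();
        for &arity in &self.schedule {
            if arity == 0 || arity > remaining {
                break;
            }
            arities.push(arity);
            remaining -= arity;
        }
        arities
    }
}
```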
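The call-site change repeated in each file above follows one pattern: derive `n_test_queries` from the security target with `calculate_n_test_queries`, then hand a strategy to `FRIParams::with_strategy`. That pattern could be wrapped once, roughly as below; the single `F` parameter on `FRIParams`, the `AdditiveNTT<Field = F>` bound, and the `fri::Error` re-export are assumptions inferred from this diff, not confirmed API, and `fri_params_for_security` is a hypothetical helper.

```rust
use binius_field::BinaryField;
use binius_math::ntt::AdditiveNTT;
use binius_verifier::{
    fri::{AritySelectionStrategy, Error, FRIParams, calculate_n_test_queries},
    merkle_tree::MerkleTreeScheme,
};

/// Hypothetical helper: pick the FRI query count from a security target, then let the
/// supplied strategy choose the fold arities (batch size left to the strategy via `None`).
fn fri_params_for_security<F, NTT, MerkleScheme, Strategy>(
    ntt: &NTT,
    merkle_scheme: &MerkleScheme,
    log_msg_len: usize,
    security_bits: usize,
    log_inv_rate: usize,
    strategy: &Strategy,
) -> Result<FRIParams<F>, Error>
where
    F: BinaryField,
    NTT: AdditiveNTT<Field = F>,
    MerkleScheme: MerkleTreeScheme<F>,
    Strategy: AritySelectionStrategy,
{
    // The removed `choose_with_constant_fold_arity` computed the query count internally;
    // with `with_strategy` the caller derives it explicitly and passes it in.
    let n_test_queries = calculate_n_test_queries(security_bits, log_inv_rate);
    FRIParams::with_strategy(
        ntt,
        merkle_scheme,
        log_msg_len,
        None,
        log_inv_rate,
        n_test_queries,
        strategy,
    )
}
```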