diff --git a/crates/air/src/prove.rs b/crates/air/src/prove.rs
index f271d883..dede1fdb 100644
--- a/crates/air/src/prove.rs
+++ b/crates/air/src/prove.rs
@@ -121,7 +121,7 @@ impl>, A: NormalAir, AP: PackedAir> AirTable>,
     ) -> Vec> {
-        prove_air::, EF, A, AP>(prover_state, univariate_skips, &self, witness)
+        prove_air::, EF, A, AP>(prover_state, univariate_skips, self, witness)
     }
 
     #[instrument(name = "air: prove in extension", skip_all)]
@@ -131,7 +131,7 @@ impl>, A: NormalAir, AP: PackedAir> AirTable,
     ) -> Vec> {
-        prove_air::(prover_state, univariate_skips, &self, witness)
+        prove_air::(prover_state, univariate_skips, self, witness)
     }
 }
@@ -226,9 +226,13 @@ fn open_structured_columns<'a, EF: ExtensionField> + ExtensionField,
     let mut column_scalars = vec![];
     let mut index = 0;
     for group in &witness.column_groups {
-        for i in index..index + group.len() {
-            column_scalars.push(poly_eq_batching_scalars[i]);
-        }
+        column_scalars.extend(
+            poly_eq_batching_scalars
+                .iter()
+                .skip(index)
+                .take(group.len())
+                .cloned(),
+        );
         index += witness.max_columns_per_group().next_power_of_two();
     }
diff --git a/crates/air/src/table.rs b/crates/air/src/table.rs
index e7891928..fdf4bfb5 100644
--- a/crates/air/src/table.rs
+++ b/crates/air/src/table.rs
@@ -81,18 +81,18 @@ impl>, A: NormalAir, AP: PackedAir> AirTable() == TypeId::of::() {
                 unsafe {
-                    self.air
-                        .eval(transmute::<_, &mut ConstraintChecker<'_, EF, EF>>(
-                            &mut constraints_checker,
-                        ));
+                    self.air.eval(transmute::<
+                        &mut ConstraintChecker<'_, IF, EF>,
+                        &mut ConstraintChecker<'_, EF, EF>,
+                    >(&mut constraints_checker));
                 }
             } else {
                 assert_eq!(TypeId::of::(), TypeId::of::>());
                 unsafe {
-                    self.air
-                        .eval(transmute::<_, &mut ConstraintChecker<'_, PF, EF>>(
-                            &mut constraints_checker,
-                        ));
+                    self.air.eval(transmute::<
+                        &mut ConstraintChecker<'_, IF, EF>,
+                        &mut ConstraintChecker<'_, PF, EF>,
+                    >(&mut constraints_checker));
                 }
             }
             handle_errors(row, &mut constraints_checker)?;
@@ -110,18 +110,18 @@ impl>, A: NormalAir, AP: PackedAir> AirTable() == TypeId::of::() {
                 unsafe {
-                    self.air
-                        .eval(transmute::<_, &mut ConstraintChecker<'_, EF, EF>>(
-                            &mut constraints_checker,
-                        ));
+                    self.air.eval(transmute::<
+                        &mut ConstraintChecker<'_, IF, EF>,
+                        &mut ConstraintChecker<'_, EF, EF>,
+                    >(&mut constraints_checker));
                 }
             } else {
                 assert_eq!(TypeId::of::(), TypeId::of::>());
                 unsafe {
-                    self.air
-                        .eval(transmute::<_, &mut ConstraintChecker<'_, PF, EF>>(
-                            &mut constraints_checker,
-                        ));
+                    self.air.eval(transmute::<
+                        &mut ConstraintChecker<'_, IF, EF>,
+                        &mut ConstraintChecker<'_, PF, EF>,
+                    >(&mut constraints_checker));
                 }
             }
             handle_errors(row, &mut constraints_checker)?;
diff --git a/crates/air/src/test.rs b/crates/air/src/test.rs
index 15a02bb3..831698a9 100644
--- a/crates/air/src/test.rs
+++ b/crates/air/src/test.rs
@@ -106,9 +106,9 @@ fn generate_structured_trace>, A: NormalAir, AP: PackedAir>(
         table.n_columns(),
         univariate_skips,
         &inner_sums,
-        &column_groups,
+        column_groups,
         &Evaluation {
             point: MultilinearPoint(
                 outer_statement.point[1..log_length - univariate_skips + 1].to_vec(),
@@ -185,6 +185,7 @@ fn verify_many_unstructured_columns>>(
     Ok(evaluations_remaining_to_verify)
 }
 
+#[allow(clippy::too_many_arguments)]
 fn verify_structured_columns>>(
     verifier_state: &mut FSVerifier>,
     n_columns: usize,
@@ -205,9 +206,7 @@ fn verify_structured_columns>>(
     let mut column_scalars = vec![];
     let mut index = 0;
     for group in column_groups {
-        for i in index..index + group.len() {
-            column_scalars.push(poly_eq_batching_scalars[i]);
-        }
+        column_scalars.extend_from_slice(&poly_eq_batching_scalars[index..index + group.len()]);
         index += max_columns_per_group.next_power_of_two();
     }
diff --git a/crates/lean_compiler/src/a_simplify_lang.rs b/crates/lean_compiler/src/a_simplify_lang.rs
index 193b52df..ee19a5e0 100644
--- a/crates/lean_compiler/src/a_simplify_lang.rs
+++ b/crates/lean_compiler/src/a_simplify_lang.rs
@@ -460,8 +460,10 @@ fn simplify_lines(
             unimplemented!("Reverse for non-unrolled loops are not implemented yet");
         }
 
-        let mut loop_const_malloc = ConstMalloc::default();
-        loop_const_malloc.counter = const_malloc.counter;
+        let mut loop_const_malloc = ConstMalloc {
+            counter: const_malloc.counter,
+            ..ConstMalloc::default()
+        };
         let valid_aux_vars_in_array_manager_before = array_manager.valid.clone();
         array_manager.valid.clear();
         let simplified_body = simplify_lines(
@@ -678,16 +680,15 @@ fn simplify_expr(
     match expr {
         Expression::Value(value) => value.simplify_if_const(),
         Expression::ArrayAccess { array, index } => {
-            if let SimpleExpr::Var(array_var) = array {
-                if let Some(label) = const_malloc.map.get(array_var) {
-                    if let Ok(mut offset) = ConstExpression::try_from(*index.clone()) {
-                        offset = offset.try_naive_simplification();
-                        return SimpleExpr::ConstMallocAccess {
-                            malloc_label: *label,
-                            offset,
-                        };
-                    }
-                }
+            if let SimpleExpr::Var(array_var) = array
+                && let Some(label) = const_malloc.map.get(array_var)
+                && let Ok(mut offset) = ConstExpression::try_from(*index.clone())
+            {
+                offset = offset.try_naive_simplification();
+                return SimpleExpr::ConstMallocAccess {
+                    malloc_label: *label,
+                    offset,
+                };
             }
             let aux_arr = array_manager.get_aux_var(array, index); // auxiliary var to store m[array + index]
@@ -1082,30 +1083,27 @@ fn handle_array_assignment(
 ) {
     let simplified_index = simplify_expr(index, res, counters, array_manager, const_malloc);
 
-    if let SimpleExpr::Constant(offset) = simplified_index.clone() {
-        if let SimpleExpr::Var(array_var) = &array {
-            if let Some(label) = const_malloc.map.get(array_var) {
-                if let ArrayAccessType::ArrayIsAssigned(Expression::Binary { left, operation, right, }) = access_type
-                {
-                    let arg0 = simplify_expr(&left, res, counters, array_manager, const_malloc);
-                    let arg1 = simplify_expr(&right, res, counters, array_manager, const_malloc);
-                    res.push(SimpleLine::Assignment {
-                        var: VarOrConstMallocAccess::ConstMallocAccess {
-                            malloc_label: *label,
-                            offset,
-                        },
-                        operation,
-                        arg0,
-                        arg1,
-                    });
-                    return;
-                }
-            }
-        }
+    if let SimpleExpr::Constant(offset) = simplified_index.clone()
+        && let SimpleExpr::Var(array_var) = &array
+        && let Some(label) = const_malloc.map.get(array_var)
+        && let ArrayAccessType::ArrayIsAssigned(Expression::Binary {
+            left,
+            operation,
+            right,
+        }) = access_type
+    {
+        let arg0 = simplify_expr(&left, res, counters, array_manager, const_malloc);
+        let arg1 = simplify_expr(&right, res, counters, array_manager, const_malloc);
+        res.push(SimpleLine::Assignment {
+            var: VarOrConstMallocAccess::ConstMallocAccess {
+                malloc_label: *label,
+                offset,
+            },
+            operation,
+            arg0,
+            arg1,
+        });
+        return;
     }
 
     let value_simplified = match access_type {
diff --git a/crates/lean_compiler/src/b_compile_intermediate.rs b/crates/lean_compiler/src/b_compile_intermediate.rs
index ea49e88d..8b80f685 100644
--- a/crates/lean_compiler/src/b_compile_intermediate.rs
+++ b/crates/lean_compiler/src/b_compile_intermediate.rs
@@ -47,7 +47,7 @@ impl Compiler {
 }
 
 impl SimpleExpr {
-    fn into_mem_after_fp_or_constant(&self, compiler: &Compiler) -> IntermediaryMemOrFpOrConstant {
+    fn to_mem_after_fp_or_constant(&self, compiler: &Compiler) -> IntermediaryMemOrFpOrConstant {
         match self {
             Self::Var(var) => IntermediaryMemOrFpOrConstant::MemoryAfterFp {
                 offset: compiler.get_offset(&var.clone().into()),
@@ -368,7 +368,7 @@ fn compile_lines(
             }
 
             SimpleLine::RawAccess { res, index, shift } => {
-                validate_vars_declared(&[index.clone()], declared_vars)?;
+                validate_vars_declared(std::slice::from_ref(index), declared_vars)?;
                 if let SimpleExpr::Var(var) = res {
                     declared_vars.insert(var.clone());
                 }
@@ -379,7 +379,7 @@ fn compile_lines(
                 instructions.push(IntermediateInstruction::Deref {
                     shift_0,
                     shift_1: shift.clone(),
-                    res: res.into_mem_after_fp_or_constant(compiler),
+                    res: res.to_mem_after_fp_or_constant(compiler),
                 });
             }
 
@@ -623,10 +623,10 @@ fn validate_vars_declared>(
     declared: &BTreeSet,
 ) -> Result<(), String> {
     for voc in vocs {
-        if let SimpleExpr::Var(v) = voc.borrow() {
-            if !declared.contains(v) {
-                return Err(format!("Variable {v} not declared"));
-            }
+        if let SimpleExpr::Var(v) = voc.borrow()
+            && !declared.contains(v)
+        {
+            return Err(format!("Variable {v} not declared"));
         }
     }
     Ok(())
 }
@@ -665,7 +665,7 @@ fn setup_function_call(
         instructions.push(IntermediateInstruction::Deref {
             shift_0: new_fp_pos.into(),
             shift_1: (2 + i).into(),
-            res: arg.into_mem_after_fp_or_constant(compiler),
+            res: arg.to_mem_after_fp_or_constant(compiler),
         });
     }
diff --git a/crates/lean_compiler/src/c_compile_final.rs b/crates/lean_compiler/src/c_compile_final.rs
index b59035e5..8b4787bc 100644
--- a/crates/lean_compiler/src/c_compile_final.rs
+++ b/crates/lean_compiler/src/c_compile_final.rs
@@ -152,23 +152,23 @@ fn compile_block(
                 mut arg_c,
                 res,
             } => {
-                if let Some(arg_a_cst) = try_as_constant(&arg_a, compiler) {
-                    if let Some(arg_b_cst) = try_as_constant(&arg_c, compiler) {
-                        // res = constant +/x constant
+                if let Some(arg_a_cst) = try_as_constant(&arg_a, compiler)
+                    && let Some(arg_b_cst) = try_as_constant(&arg_c, compiler)
+                {
+                    // res = constant +/x constant
 
-                        let op_res = operation.compute(arg_a_cst, arg_b_cst);
+                    let op_res = operation.compute(arg_a_cst, arg_b_cst);
 
-                        let res: MemOrFp = res.try_into_mem_or_fp(compiler).unwrap();
+                    let res: MemOrFp = res.try_into_mem_or_fp(compiler).unwrap();
 
-                        low_level_bytecode.push(Instruction::Computation {
-                            operation: Operation::Add,
-                            arg_a: MemOrConstant::zero(),
-                            arg_c: res,
-                            res: MemOrConstant::Constant(op_res),
-                        });
-                        pc += 1;
-                        continue;
-                    }
+                    low_level_bytecode.push(Instruction::Computation {
+                        operation: Operation::Add,
+                        arg_a: MemOrConstant::zero(),
+                        arg_c: res,
+                        res: MemOrConstant::Constant(op_res),
+                    });
+                    pc += 1;
+                    continue;
                 }
 
                 if arg_c.is_constant() {
diff --git a/crates/lean_prover/src/common.rs b/crates/lean_prover/src/common.rs
index aac2882b..cacfcb78 100644
--- a/crates/lean_prover/src/common.rs
+++ b/crates/lean_prover/src/common.rs
@@ -12,6 +12,7 @@ use whir_p3::poly::{evals::fold_multilinear, multilinear::MultilinearPoint};
 use crate::*;
 use lean_vm::*;
 
+#[allow(clippy::too_many_arguments)]
 pub fn get_base_dims(
     n_cycles: usize,
     log_public_memory: usize,
diff --git a/crates/lean_prover/src/prove_execution.rs b/crates/lean_prover/src/prove_execution.rs
index 31dbfffc..b02db088 100644
--- a/crates/lean_prover/src/prove_execution.rs
+++ b/crates/lean_prover/src/prove_execution.rs
@@ -833,12 +833,11 @@ pub fn prove_execution(
         let index_a: F = dot_product_columns[2][i].as_base().unwrap();
         let index_b: F = dot_product_columns[3][i].as_base().unwrap();
         let index_res: F = dot_product_columns[4][i].as_base().unwrap();
-        for j in 0..DIMENSION {
-            dot_product_indexes_spread[j][i] = index_a + F::from_usize(j);
-            dot_product_indexes_spread[j][i + dot_product_table_length] =
-                index_b + F::from_usize(j);
-            dot_product_indexes_spread[j][i + 2 * dot_product_table_length] =
-                index_res + F::from_usize(j);
+        for (j, slice) in dot_product_indexes_spread.iter_mut().enumerate() {
+            let offset = F::from_usize(j);
+            slice[i] = index_a + offset;
+            slice[i + dot_product_table_length] = index_b + offset;
+            slice[i + 2 * dot_product_table_length] = index_res + offset;
         }
     }
     let dot_product_values_spread = dot_product_indexes_spread
@@ -1020,7 +1019,7 @@ pub fn prove_execution(
     let packed_pcs_witness_extension = packed_pcs_commit(
         &pcs.pcs_b(
             log2_strict_usize(packed_pcs_witness_base.packed_polynomial.len()),
-            num_packed_vars_for_dims::(&extension_dims, LOG_SMALLEST_DECOMPOSITION_CHUNK),
+            num_packed_vars_for_dims::(&extension_dims, LOG_SMALLEST_DECOMPOSITION_CHUNK),
         ),
         &extension_pols,
         &extension_dims,
diff --git a/crates/lean_prover/src/verify_execution.rs b/crates/lean_prover/src/verify_execution.rs
index c492f31e..2ddebf88 100644
--- a/crates/lean_prover/src/verify_execution.rs
+++ b/crates/lean_prover/src/verify_execution.rs
@@ -562,7 +562,7 @@ pub fn verify_execution(
     let parsed_commitment_extension = packed_pcs_parse_commitment(
         &pcs.pcs_b(
             parsed_commitment_base.num_variables(),
-            num_packed_vars_for_dims::(&extension_dims, LOG_SMALLEST_DECOMPOSITION_CHUNK),
+            num_packed_vars_for_dims::(&extension_dims, LOG_SMALLEST_DECOMPOSITION_CHUNK),
         ),
         &mut verifier_state,
         &extension_dims,
@@ -893,8 +893,12 @@ pub fn verify_execution(
     );
     let mut dot_product_indexes_inner_evals_incr = vec![EF::ZERO; 8];
-    for i in 0..DIMENSION {
-        dot_product_indexes_inner_evals_incr[i] = dot_product_logup_star_indexes_inner_value
+    for (i, value) in dot_product_indexes_inner_evals_incr
+        .iter_mut()
+        .enumerate()
+        .take(DIMENSION)
+    {
+        *value = dot_product_logup_star_indexes_inner_value
             + EF::from_usize(i)
                 * [F::ONE, F::ONE, F::ONE, F::ZERO].evaluate(&MultilinearPoint(
                     mem_lookup_eval_indexes_partial_point.0[3 + index_diff..5 + index_diff]
diff --git a/crates/lean_prover/witness_generation/src/execution_trace.rs b/crates/lean_prover/witness_generation/src/execution_trace.rs
index 02d651b4..2b99e488 100644
--- a/crates/lean_prover/witness_generation/src/execution_trace.rs
+++ b/crates/lean_prover/witness_generation/src/execution_trace.rs
@@ -267,12 +267,18 @@ pub fn get_execution_trace(
     }
 
     // repeat the last row to get to a power of two
-    for j in 0..N_INSTRUCTION_COLUMNS + N_EXEC_COLUMNS {
-        let last_value = trace[j][n_cycles - 1];
-        for i in n_cycles..(1 << log_n_cycles_rounded_up) {
-            trace[j][i] = last_value;
-        }
-    }
+    let padded_len = 1 << log_n_cycles_rounded_up;
+    trace
+        .iter_mut()
+        .take(N_INSTRUCTION_COLUMNS + N_EXEC_COLUMNS)
+        .for_each(|column| {
+            let last_value = column[n_cycles - 1];
+            column
+                .iter_mut()
+                .take(padded_len)
+                .skip(n_cycles)
+                .for_each(|value| *value = last_value);
+        });
 
     let memory = memory
         .0
diff --git a/crates/lean_vm/src/memory.rs b/crates/lean_vm/src/memory.rs
index 91a72f5b..a528244c 100644
--- a/crates/lean_vm/src/memory.rs
+++ b/crates/lean_vm/src/memory.rs
@@ -51,8 +51,8 @@ impl Memory {
     pub fn get_ef_element(&self, index: usize) -> Result {
         // index: non vectorized pointer
         let mut coeffs = [F::ZERO; DIMENSION];
-        for i in 0..DIMENSION {
-            coeffs[i] = self.get(index + i)?;
+        for (offset, coeff) in coeffs.iter_mut().enumerate() {
+            *coeff = self.get(index + offset)?;
         }
         Ok(EF::from_basis_coefficients_slice(&coeffs).unwrap())
     }
diff --git a/crates/lean_vm/src/runner.rs b/crates/lean_vm/src/runner.rs
index ccfacb6a..a5cb6ad8 100644
--- a/crates/lean_vm/src/runner.rs
+++ b/crates/lean_vm/src/runner.rs
@@ -157,15 +157,15 @@ pub fn build_public_memory(public_input: &[F]) -> Vec {
     public_memory[PUBLIC_INPUT_START..][..public_input.len()].copy_from_slice(public_input);
 
     // "zero" vector
-    for i in ZERO_VEC_PTR * VECTOR_LEN..(ZERO_VEC_PTR + 2) * VECTOR_LEN {
-        public_memory[i] = F::ZERO;
-    }
+    let zero_start = ZERO_VEC_PTR * VECTOR_LEN;
+    let zero_end = (ZERO_VEC_PTR + 2) * VECTOR_LEN;
+    public_memory[zero_start..zero_end].fill(F::ZERO);
 
     // "one" vector
     public_memory[ONE_VEC_PTR * VECTOR_LEN] = F::ONE;
-    for i in ONE_VEC_PTR * VECTOR_LEN + 1..(ONE_VEC_PTR + 1) * VECTOR_LEN {
-        public_memory[i] = F::ZERO;
-    }
+    let one_zero_start = ONE_VEC_PTR * VECTOR_LEN + 1;
+    let one_zero_end = (ONE_VEC_PTR + 1) * VECTOR_LEN;
+    public_memory[one_zero_start..one_zero_end].fill(F::ZERO);
 
     public_memory
         [POSEIDON_16_NULL_HASH_PTR * VECTOR_LEN..(POSEIDON_16_NULL_HASH_PTR + 2) * VECTOR_LEN]
         .copy_from_slice(
@@ -176,6 +176,7 @@ pub fn build_public_memory(public_input: &[F]) -> Vec {
     public_memory
 }
 
+#[allow(clippy::too_many_arguments)]
 fn execute_bytecode_helper(
     bytecode: &Bytecode,
     public_input: &[F],
diff --git a/crates/lookup/src/quotient_gkr.rs b/crates/lookup/src/quotient_gkr.rs
index e984d4e5..ae0f1919 100644
--- a/crates/lookup/src/quotient_gkr.rs
+++ b/crates/lookup/src/quotient_gkr.rs
@@ -217,7 +217,7 @@ where
 fn prove_gkr_quotient_step_packed(
     prover_state: &mut FSProver>,
-    up_layer_packed: &Vec>,
+    up_layer_packed: &[EFPacking],
     claim: &Evaluation,
 ) -> (Evaluation, EF, EF)
 where
diff --git a/crates/pcs/src/batch_pcs.rs b/crates/pcs/src/batch_pcs.rs
index 54610f5c..c1e677ea 100644
--- a/crates/pcs/src/batch_pcs.rs
+++ b/crates/pcs/src/batch_pcs.rs
@@ -23,6 +23,7 @@ pub trait BatchPCS + ExtensionField
     fn pcs_b(&self, num_variables_a: usize, num_variables_b: usize) -> Self::PcsB;
 
+    #[allow(clippy::too_many_arguments)]
     fn batch_open(
         &self,
         dft: &EvalsDft>,
diff --git a/crates/pcs/src/packed_pcs.rs b/crates/pcs/src/packed_pcs.rs
index ceed2467..ad253c72 100644
--- a/crates/pcs/src/packed_pcs.rs
+++ b/crates/pcs/src/packed_pcs.rs
@@ -144,7 +144,7 @@ fn split_in_chunks(
     }
 }
 
-fn compute_chunks>(
+fn compute_chunks(
     dims: &[ColDims],
     log_smallest_decomposition_chunk: usize,
 ) -> (BTreeMap>, usize) {
@@ -176,11 +176,11 @@ fn compute_chunks>(
     (chunks_decomposition, packed_n_vars)
 }
 
-pub fn num_packed_vars_for_dims>(
+pub fn num_packed_vars_for_dims(
     dims: &[ColDims],
     log_smallest_decomposition_chunk: usize,
 ) -> usize {
-    let (_, packed_n_vars) = compute_chunks::(dims, log_smallest_decomposition_chunk);
+    let (_, packed_n_vars) = compute_chunks::(dims, log_smallest_decomposition_chunk);
     packed_n_vars
 }
@@ -211,7 +211,7 @@ pub fn packed_pcs_commit, Pcs: PCS>(
         );
     }
     let (chunks_decomposition, packed_n_vars) =
-        compute_chunks::(dims, log_smallest_decomposition_chunk);
+        compute_chunks::(dims, log_smallest_decomposition_chunk);
 
     {
         // logging
@@ -278,17 +278,16 @@ pub fn packed_pcs_global_statements_for_prover<
     // - current packing is not optimal in the end: can lead to [16][4][2][2] (instead of [16][8])
 
     let (chunks_decomposition, packed_vars) =
-        compute_chunks::(dims, log_smallest_decomposition_chunk);
+        compute_chunks::(dims, log_smallest_decomposition_chunk);
 
     let statements_flattened = statements_per_polynomial
         .iter()
         .enumerate()
-        .map(|(poly_index, poly_statements)| {
+        .flat_map(|(poly_index, poly_statements)| {
             poly_statements
                 .iter()
                 .map(move |statement| (poly_index, statement))
         })
-        .flatten()
         .collect::>();
 
     let sub_packed_statements_and_evals_to_send = statements_flattened
@@ -297,7 +296,7 @@
             let dim = &dims[*poly_index];
             let pol = polynomials[*poly_index];
 
-            let chunks = &chunks_decomposition[&poly_index];
+            let chunks = &chunks_decomposition[poly_index];
             assert!(!chunks.is_empty());
             let mut sub_packed_statements = Vec::new();
             let mut evals_to_send = Vec::new();
@@ -338,14 +337,15 @@
                 if !initial_booleans.is_empty()
                     && initial_booleans.len() < offset_in_original_booleans.len()
-                    && &initial_booleans
-                        == &offset_in_original_booleans[..initial_booleans.len()]
+                    && initial_booleans
+                        == offset_in_original_booleans[..initial_booleans.len()]
                 {
                     tracing::warn!("TODO: sparse statement accroos mutiple chunks");
                 }
 
                 if initial_booleans.len() >= offset_in_original_booleans.len() {
-                    if &initial_booleans[..missing_vars] != &offset_in_original_booleans {
+                    let offset_slice = offset_in_original_booleans.as_slice();
+                    if &initial_booleans[..missing_vars] != offset_slice {
                         // this chunk is not concerned by this sparse evaluation
                         return (None, EF::ZERO);
                     } else {
@@ -426,7 +426,7 @@ pub fn packed_pcs_parse_commitment, Pcs: PCS
     ],
     log_smallest_decomposition_chunk: usize,
 ) -> Result {
-    let (_, packed_n_vars) = compute_chunks::(&dims, log_smallest_decomposition_chunk);
+    let (_, packed_n_vars) = compute_chunks::(dims, log_smallest_decomposition_chunk);
     pcs.parse_commitment(verifier_state, packed_n_vars)
 }
@@ -442,7 +442,7 @@ pub fn packed_pcs_global_statements_for_verifier<
 ) -> Result>, ProofError> {
     assert_eq!(dims.len(), statements_per_polynomial.len());
     let (chunks_decomposition, packed_n_vars) =
-        compute_chunks::(dims, log_smallest_decomposition_chunk);
+        compute_chunks::(dims, log_smallest_decomposition_chunk);
     let mut packed_statements = Vec::new();
     for (poly_index, statements) in statements_per_polynomial.iter().enumerate() {
         let dim = &dims[poly_index];
@@ -480,7 +480,8 @@
                     to_big_endian_bits(chunk.offset_in_original >> chunk.n_vars, missing_vars);
 
                 if initial_booleans.len() >= offset_in_original_booleans.len() {
-                    if &initial_booleans[..missing_vars] != &offset_in_original_booleans {
+                    let offset_slice = offset_in_original_booleans.as_slice();
+                    if &initial_booleans[..missing_vars] != offset_slice {
                         // this chunk is not concerned by this sparse evaluation
                         sub_values.push(EF::ZERO);
                     } else {
diff --git a/crates/rec_aggregation/src/xmss_aggregate.rs b/crates/rec_aggregation/src/xmss_aggregate.rs
index a1847699..26e95f44 100644
--- a/crates/rec_aggregation/src/xmss_aggregate.rs
+++ b/crates/rec_aggregation/src/xmss_aggregate.rs
@@ -225,9 +225,7 @@ fn test_xmss_aggregate() {
         &xmss_signature_size_padded.to_string(),
     );
 
-    let bitfield = (0..n_public_keys)
-        .map(|i| i % INV_BITFIELD_DENSITY == 0)
-        .collect::>();
+    let bitfield = vec![true; n_public_keys];
 
     let mut rng = StdRng::seed_from_u64(0);
     let message_hash: [F; 8] = rng.random();
diff --git a/crates/sumcheck/src/mle.rs b/crates/sumcheck/src/mle.rs
index 42ad758c..cc6c7f2b 100644
--- a/crates/sumcheck/src/mle.rs
+++ b/crates/sumcheck/src/mle.rs
@@ -306,6 +306,7 @@ impl<'a, EF: ExtensionField>> MleGroupRef<'a, EF> {
         }
     }
 
+    #[allow(clippy::too_many_arguments)]
     pub fn sumcheck_compute(
         &self,
         zs: &[usize],
@@ -469,6 +470,7 @@ impl<'a, EF: ExtensionField>> MleGroupRef<'a, EF> {
     }
 }
 
+#[allow(clippy::too_many_arguments)]
 pub fn sumcheck_compute_not_packed<
     EF: ExtensionField> + ExtensionField,
     IF: ExtensionField>,
diff --git a/crates/utils/src/multilinear.rs b/crates/utils/src/multilinear.rs
index 6365ca22..c3866946 100644
--- a/crates/utils/src/multilinear.rs
+++ b/crates/utils/src/multilinear.rs
@@ -20,8 +20,12 @@ pub fn fold_multilinear_in_small_field, D>(
     let dim = >::DIMENSION;
 
-    let m_transmuted: &[F] =
-        unsafe { std::slice::from_raw_parts(std::mem::transmute(m.as_ptr()), m.len() * dim) };
+    let m_transmuted: &[F] = unsafe {
+        std::slice::from_raw_parts(
+            std::mem::transmute::<*const D, *const F>(m.as_ptr()),
+            m.len() * dim,
+        )
+    };
 
     let res_transmuted = {
         let new_size = m.len() * dim / scalars.len();
@@ -56,7 +60,8 @@ pub fn fold_multilinear_in_small_field, D>(
             })
             .collect::>();
 
-        let mut unpacked: Vec = unsafe { std::mem::transmute(packed_res) };
+        let mut unpacked: Vec =
+            unsafe { std::mem::transmute::, Vec>(packed_res) };
         unsafe {
             unpacked.set_len(new_size);
         }
@@ -65,7 +70,7 @@ pub fn fold_multilinear_in_small_field, D>(
         }
     };
     let res: Vec = unsafe {
-        let mut res: Vec = std::mem::transmute(res_transmuted);
+        let mut res: Vec = std::mem::transmute::, Vec>(res_transmuted);
         res.set_len(new_size);
         res
     };
@@ -183,19 +188,17 @@ pub fn multilinear_eval_constants_at_right(limit: usize, point: &[F])
         return F::ZERO;
     }
 
-    if point.len() == 0 {
+    if point.is_empty() {
         assert!(limit <= 1);
         if limit == 1 { F::ZERO } else { F::ONE }
     } else {
         let main_bit = limit >> (n_vars - 1);
         if main_bit == 1 {
             // limit is at the right half
-            return point[0]
-                * multilinear_eval_constants_at_right(limit - (1 << (n_vars - 1)), &point[1..]);
+            point[0] * multilinear_eval_constants_at_right(limit - (1 << (n_vars - 1)), &point[1..])
         } else {
             // limit is at left half
-            return point[0]
-                + (F::ONE - point[0]) * multilinear_eval_constants_at_right(limit, &point[1..]);
+            point[0] + (F::ONE - point[0]) * multilinear_eval_constants_at_right(limit, &point[1..])
         }
     }
 }
@@ -279,9 +282,9 @@ mod tests {
         let n_point_vars = 7;
         let mut rng = StdRng::seed_from_u64(0);
         let mut pol = F::zero_vec(1 << n_point_vars);
-        for i in 0..(1 << n_vars) {
-            pol[i] = rng.random();
-        }
+        pol.iter_mut()
+            .take(1 << n_vars)
+            .for_each(|coeff| *coeff = rng.random());
         let point = (0..n_point_vars).map(|_| rng.random()).collect::>();
         assert_eq!(
            evaluate_as_larger_multilinear_pol(&pol[..1 << n_vars], &point),
@@ -297,9 +300,10 @@
        for limit in [0, 1, 2, 45, 74, 451, 741, 1022, 1023] {
            let eval = multilinear_eval_constants_at_right(limit, &point);
            let mut pol = F::zero_vec(1 << n_vars);
-            for i in limit..(1 << n_vars) {
-                pol[i] = F::ONE;
-            }
+            pol.iter_mut()
+                .take(1 << n_vars)
+                .skip(limit)
+                .for_each(|coeff| *coeff = F::ONE);
            assert_eq!(eval, pol.evaluate(&MultilinearPoint(point.clone())));
        }
    }
diff --git a/crates/utils/src/univariate.rs b/crates/utils/src/univariate.rs
index 9a6f95d8..13049ff9 100644
--- a/crates/utils/src/univariate.rs
+++ b/crates/utils/src/univariate.rs
@@ -7,10 +7,11 @@ use std::collections::HashMap;
 use std::sync::{Arc, Mutex, OnceLock};
 
 type CacheKey = (TypeId, usize);
+type AnySelector = Arc;
+type SelectorCell = Arc>;
+type SelectorsCache = Mutex>;
 
-static SELECTORS_CACHE: OnceLock<
-    Mutex>>>>,
-> = OnceLock::new();
+static SELECTORS_CACHE: OnceLock = OnceLock::new();
 
 pub fn univariate_selectors(n: usize) -> Arc>> {
     let key = (TypeId::of::(), n);
diff --git a/src/examples/prove_poseidon2.rs b/src/examples/prove_poseidon2.rs
index 7306e232..7cf3f947 100644
--- a/src/examples/prove_poseidon2.rs
+++ b/src/examples/prove_poseidon2.rs
@@ -42,7 +42,7 @@ impl fmt::Display for Poseidon2Benchmark {
             1 << self.log_n_poseidons_16,
             1 << self.log_n_poseidons_24,
             self.prover_time.as_millis() as f64 / 1000.0,
-            (((1 << self.log_n_poseidons_16) + (1 << self.log_n_poseidons_24)) as f64
+            (f64::from((1 << self.log_n_poseidons_16) + (1 << self.log_n_poseidons_24))
                 / self.prover_time.as_secs_f64())
             .round() as usize
         )?;
@@ -55,6 +55,9 @@ impl fmt::Display for Poseidon2Benchmark {
     }
 }
 
+#[allow(clippy::too_many_arguments)]
+#[allow(clippy::too_many_lines)]
+#[allow(clippy::default_trait_access)]
 pub fn prove_poseidon2(
     log_n_poseidons_16: usize,
     log_n_poseidons_24: usize,
@@ -118,8 +121,8 @@ pub fn prove_poseidon2(
             .to_vec()
         })
         .collect::>();
-    let column_groups_16 = vec![0..n_columns_16];
-    let column_groups_24 = vec![0..n_columns_24];
+    let column_groups_16 = std::iter::once(0..n_columns_16).collect::>();
+    let column_groups_24 = std::iter::once(0..n_columns_24).collect::>();
     let witness_16 = AirWitness::new(&witness_columns_16, &column_groups_16);
     let witness_24 = AirWitness::new(&witness_columns_24, &column_groups_24);
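
Reviewer note on the recurring refactor: several hunks above (a_simplify_lang.rs, b_compile_intermediate.rs, c_compile_final.rs) collapse nested `if let` blocks into a single let-chain, which requires the 2024 edition (let-chains were stabilized in Rust 1.88). The following is a minimal standalone sketch of that before/after shape only; the `resolve` helper and its names are hypothetical and not taken from this repository.

    use std::collections::HashMap;

    // Hypothetical helper, for illustration only: resolve `key`, then parse `raw`,
    // and combine the two results when both succeed.
    fn resolve(map: &HashMap<String, i64>, key: &str, raw: &str) -> Option<i64> {
        // Before: one nested `if let` per condition, each adding an indentation level.
        // if let Some(base) = map.get(key) {
        //     if let Ok(offset) = raw.parse::<i64>() {
        //         return Some(base + offset);
        //     }
        // }

        // After: a single let-chain, matching the style adopted in the hunks above.
        if let Some(base) = map.get(key)
            && let Ok(offset) = raw.parse::<i64>()
        {
            return Some(base + offset);
        }
        None
    }

    fn main() {
        let map = HashMap::from([("fp".to_string(), 16)]);
        assert_eq!(resolve(&map, "fp", "4"), Some(20));
        assert_eq!(resolve(&map, "fp", "x"), None);
    }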