diff --git a/ferveo/Cargo.toml b/ferveo/Cargo.toml index c00a423c..bf31f363 100644 --- a/ferveo/Cargo.toml +++ b/ferveo/Cargo.toml @@ -22,7 +22,7 @@ ark-serialize = "0.4" ark-std = "0.4" bincode = "1.3" ferveo-common = { package = "ferveo-common-pre-release", path = "../ferveo-common", version = "^0.1.1" } -group-threshold-cryptography = { package = "group-threshold-cryptography-pre-release", path = "../tpke", features = ["api"], version = "^0.2.0" } +group-threshold-cryptography = { package = "group-threshold-cryptography-pre-release", path = "../tpke", features = ["api", "test-common"], version = "^0.2.0" } hex = "0.4.3" itertools = "0.10.5" measure_time = "0.8" diff --git a/ferveo/src/dkg.rs b/ferveo/src/dkg.rs index ed183b72..9c95d96a 100644 --- a/ferveo/src/dkg.rs +++ b/ferveo/src/dkg.rs @@ -10,7 +10,7 @@ use serde_with::serde_as; use crate::{ aggregate, utils::is_sorted, AggregatedPvss, Error, EthereumAddress, - PubliclyVerifiableParams, PubliclyVerifiableSS, Pvss, Result, Validator, + PubliclyVerifiableParams, PubliclyVerifiableSS, Result, Validator, }; #[derive(Copy, Clone, Debug, Serialize, Deserialize)] @@ -156,7 +156,7 @@ impl PubliclyVerifiableDkg { rng: &mut R, ) -> Result> { use ark_std::UniformRand; - Pvss::::new(&E::ScalarField::rand(rng), self, rng) + PubliclyVerifiableSS::::new(&E::ScalarField::rand(rng), self, rng) } /// Aggregate all received PVSS messages into a single message, prepared to post on-chain @@ -279,7 +279,7 @@ impl PubliclyVerifiableDkg { pub fn deal( &mut self, sender: &Validator, - pvss: &Pvss, + pvss: &PubliclyVerifiableSS, ) -> Result<()> { // Add the ephemeral public key and pvss transcript let (sender_address, _) = self @@ -306,11 +306,11 @@ pub struct Aggregation { #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(bound( - serialize = "AggregatedPvss: Serialize, Pvss: Serialize", - deserialize = "AggregatedPvss: DeserializeOwned, Pvss: DeserializeOwned" + serialize = "AggregatedPvss: Serialize, PubliclyVerifiableSS: Serialize", + deserialize = "AggregatedPvss: DeserializeOwned, PubliclyVerifiableSS: DeserializeOwned" ))] pub enum Message { - Deal(Pvss), + Deal(PubliclyVerifiableSS), Aggregate(Aggregation), } @@ -355,7 +355,7 @@ pub(crate) mod test_common { my_index: usize, ) -> TestSetup { let keypairs = gen_keypairs(shares_num); - let mut validators = gen_validators(&keypairs); + let mut validators = gen_validators(keypairs.as_slice()); validators.sort(); let me = validators[my_index].clone(); let dkg = PubliclyVerifiableDkg::new( diff --git a/ferveo/src/lib.rs b/ferveo/src/lib.rs index 1ddd54e0..41da3ba7 100644 --- a/ferveo/src/lib.rs +++ b/ferveo/src/lib.rs @@ -17,6 +17,7 @@ pub mod api; pub mod dkg; pub mod primitives; pub mod pvss; +pub mod refresh; pub mod validator; mod utils; @@ -24,6 +25,7 @@ mod utils; pub use dkg::*; pub use primitives::*; pub use pvss::*; +pub use refresh::*; pub use validator::*; #[derive(Debug, thiserror::Error)] @@ -212,7 +214,7 @@ mod test_dkg_full { &dkg, aad, &ciphertext.header().unwrap(), - &validator_keypairs, + validator_keypairs.as_slice(), ); let plaintext = tpke::decrypt_with_shared_secret( @@ -308,7 +310,7 @@ mod test_dkg_full { &dkg, aad, &ciphertext.header().unwrap(), - &validator_keypairs, + validator_keypairs.as_slice(), ); izip!( @@ -355,7 +357,10 @@ mod test_dkg_full { fn test_dkg_simple_tdec_share_recovery() { let rng = &mut test_rng(); - let (dkg, validator_keypairs) = setup_dealt_dkg_with_n_validators(3, 4); + let security_threshold = 3; + let shares_num = 4; + let (dkg, validator_keypairs) = 
+ setup_dealt_dkg_with_n_validators(security_threshold, shares_num); let msg = "my-msg".as_bytes().to_vec(); let aad: &[u8] = "my-aad".as_bytes(); let public_key = &dkg.public_key(); @@ -368,29 +373,33 @@ mod test_dkg_full { &dkg, aad, &ciphertext.header().unwrap(), - &validator_keypairs, + validator_keypairs.as_slice(), ); - // Now, we're going to recover a new share at a random point and check that the shared secret is still the same - - // Our random point - let x_r = Fr::rand(rng); - // Remove one participant from the contexts and all nested structure let removed_validator_addr = dkg.validators.keys().last().unwrap().clone(); let mut remaining_validators = dkg.validators.clone(); - remaining_validators.remove(&removed_validator_addr); + remaining_validators + .remove(&removed_validator_addr) + .unwrap(); + // dkg.vss.remove(&removed_validator_addr); // TODO: Test whether it makes any difference // Remember to remove one domain point too let mut domain_points = dkg.domain.elements().collect::>(); domain_points.pop().unwrap(); + // Now, we're going to recover a new share at a random point, + // and check that the shared secret is still the same. + + // Our random point: + let x_r = Fr::rand(rng); + // Each participant prepares an update for each other participant let share_updates = remaining_validators .keys() .map(|v_addr| { - let deltas_i = tpke::prepare_share_updates_for_recovery::( + let deltas_i = prepare_share_updates_for_recovery::( &domain_points, &dkg.pvss_params.h.into_affine(), &x_r, @@ -402,15 +411,17 @@ mod test_dkg_full { .collect::>(); // Participants share updates and update their shares - let pvss_aggregated = aggregate(&dkg.vss); // Now, every participant separately: + // TODO: Move this logic outside tests (see #162, #163) let updated_shares: Vec<_> = remaining_validators - .iter() - .map(|(validator_address, validator)| { - // Receives updates from other participants - let updates_for_participant = - share_updates.get(validator_address).unwrap(); + .values() + .map(|validator| { + // Current participant receives updates from other participants + let updates_for_participant: Vec<_> = share_updates + .values() + .map(|updates| *updates.get(validator.share_index).unwrap()) + .collect(); // Each validator uses their decryption key to update their share let decryption_key = validator_keypairs @@ -419,28 +430,37 @@ mod test_dkg_full { .decryption_key; // Creates updated private key shares + // TODO: Why not using dkg.aggregate()? + let pvss_aggregated = aggregate(&dkg.vss); pvss_aggregated.update_private_key_share_for_recovery( &decryption_key, validator.share_index, - updates_for_participant, + updates_for_participant.as_slice(), ) }) .collect(); + // TODO: Rename updated_private_shares to something that doesn't imply mutation (see #162, #163) + // Now, we have to combine new share fragments into a new share - let new_private_key_share = - tpke::recover_share_from_updated_private_shares( - &x_r, - &domain_points, - &updated_shares, - ); + let new_private_key_share = recover_share_from_updated_private_shares( + &x_r, + &domain_points, + &updated_shares, + ); // Get decryption shares from remaining participants + let mut remaining_validator_keypairs = validator_keypairs; + remaining_validator_keypairs + .pop() + .expect("Should have a keypair"); let mut decryption_shares: Vec> = - validator_keypairs + remaining_validator_keypairs .iter() .enumerate() .map(|(share_index, validator_keypair)| { + // TODO: Why not using dkg.aggregate()? 
+ let pvss_aggregated = aggregate(&dkg.vss); pvss_aggregated .make_decryption_share_simple( &ciphertext.header().unwrap(), @@ -466,73 +486,120 @@ mod test_dkg_full { .unwrap(), ); - let lagrange = tpke::prepare_combine_simple::(&domain_points); + domain_points.push(x_r); + assert_eq!(domain_points.len(), shares_num as usize); + assert_eq!(decryption_shares.len(), shares_num as usize); + + // Maybe parametrize this test with [1..] and [..threshold] + let domain_points = &domain_points[1..]; + let decryption_shares = &decryption_shares[1..]; + assert_eq!(domain_points.len(), security_threshold as usize); + assert_eq!(decryption_shares.len(), security_threshold as usize); + + let lagrange = tpke::prepare_combine_simple::(domain_points); let new_shared_secret = - tpke::share_combine_simple::(&decryption_shares, &lagrange); + tpke::share_combine_simple::(decryption_shares, &lagrange); - assert_eq!(old_shared_secret, new_shared_secret); + assert_eq!( + old_shared_secret, new_shared_secret, + "Shared secret reconstruction failed" + ); } #[test] - fn simple_tdec_share_refreshing() { + fn test_dkg_simple_tdec_share_refreshing() { let rng = &mut test_rng(); - let (dkg, validator_keypairs) = setup_dealt_dkg_with_n_validators(3, 4); + let security_threshold = 3; + let shares_num = 4; + let (dkg, validator_keypairs) = + setup_dealt_dkg_with_n_validators(security_threshold, shares_num); let msg = "my-msg".as_bytes().to_vec(); let aad: &[u8] = "my-aad".as_bytes(); - let public_key = dkg.public_key(); + let public_key = &dkg.public_key(); let ciphertext = - tpke::encrypt::(SecretBox::new(msg), aad, &public_key, rng) + tpke::encrypt::(SecretBox::new(msg), aad, public_key, rng) .unwrap(); - let pvss_aggregated = aggregate(&dkg.vss); - // Create an initial shared secret let (_, _, old_shared_secret) = make_shared_secret_simple_tdec( &dkg, aad, &ciphertext.header().unwrap(), - &validator_keypairs, + validator_keypairs.as_slice(), ); - // Now, we're going to refresh the shares and check that the shared secret is the same + let domain_points = dkg.domain.elements().collect::>(); - // Dealer computes a new random polynomial with constant term x_r = 0 - let polynomial = tpke::make_random_polynomial_at::( - dkg.dkg_params.security_threshold as usize, - &Fr::zero(), - rng, - ); + // Each participant prepares an update for each other participant + let share_updates = dkg + .validators + .keys() + .map(|v_addr| { + let deltas_i = prepare_share_updates_for_refresh::( + &domain_points, + &dkg.pvss_params.h.into_affine(), + dkg.dkg_params.security_threshold as usize, + rng, + ); + (v_addr.clone(), deltas_i) + }) + .collect::>(); - // Dealer shares the polynomial with participants + // Participants share updates and update their shares + + // Now, every participant separately: + // TODO: Move this logic outside tests (see #162, #163) + let updated_shares: Vec<_> = dkg + .validators + .values() + .map(|validator| { + // Current participant receives updates from other participants + let updates_for_participant: Vec<_> = share_updates + .values() + .map(|updates| *updates.get(validator.share_index).unwrap()) + .collect(); + + // Each validator uses their decryption key to update their share + let decryption_key = validator_keypairs + .get(validator.share_index) + .unwrap() + .decryption_key; + + // Creates updated private key shares + // TODO: Why not using dkg.aggregate()? 
+ let pvss_aggregated = aggregate(&dkg.vss); + pvss_aggregated.update_private_key_share_for_recovery( + &decryption_key, + validator.share_index, + updates_for_participant.as_slice(), + ) + }) + .collect(); - // Participants computes new decryption shares - let new_decryption_shares: Vec> = + // Get decryption shares, now with refreshed private shares: + let decryption_shares: Vec> = validator_keypairs .iter() .enumerate() - .map(|(validator_address, validator_keypair)| { - pvss_aggregated - .refresh_decryption_share( - &ciphertext.header().unwrap(), - aad, - &validator_keypair.decryption_key, - validator_address, - &polynomial, - &dkg, - ) - .unwrap() + .map(|(share_index, validator_keypair)| { + DecryptionShareSimple::create( + &validator_keypair.decryption_key, + updated_shares.get(share_index).unwrap(), + &ciphertext.header().unwrap(), + aad, + &dkg.pvss_params.g_inv(), + ) + .unwrap() }) .collect(); - // Create a new shared secret - let domain = &dkg.domain.elements().collect::>(); - // TODO: Combine `tpke::prepare_combine_simple` and `tpke::share_combine_simple` into - // one function and expose it in the tpke::api? - let lagrange_coeffs = tpke::prepare_combine_simple::(domain); + let lagrange = tpke::prepare_combine_simple::( + &domain_points[..security_threshold as usize], + ); let new_shared_secret = tpke::share_combine_simple::( - &new_decryption_shares, - &lagrange_coeffs, + &decryption_shares[..security_threshold as usize], + &lagrange, ); assert_eq!(old_shared_secret, new_shared_secret); diff --git a/ferveo/src/pvss.rs b/ferveo/src/pvss.rs index e4bf1c7c..91976ee9 100644 --- a/ferveo/src/pvss.rs +++ b/ferveo/src/pvss.rs @@ -13,15 +13,15 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; use subproductdomain::fast_multiexp; use tpke::{ - prepare_combine_simple, refresh_private_key_share, - update_share_for_recovery, CiphertextHeader, DecryptionSharePrecomputed, + prepare_combine_simple, CiphertextHeader, DecryptionSharePrecomputed, DecryptionShareSimple, PrivateKeyShare, }; use zeroize::{self, Zeroize, ZeroizeOnDrop}; use crate::{ - batch_to_projective_g1, batch_to_projective_g2, utils::is_sorted, Error, - PVSSMap, PubliclyVerifiableDkg, Result, Validator, + apply_updates_to_private_share, batch_to_projective_g1, + batch_to_projective_g2, utils::is_sorted, Error, PVSSMap, + PubliclyVerifiableDkg, Result, Validator, }; /// These are the blinded evaluations of shares of a single random polynomial @@ -41,8 +41,8 @@ pub trait Aggregate {} /// Apply trait gate to Aggregated marker struct impl Aggregate for Aggregated {} -/// Type alias for non aggregated PVSS transcripts -pub type Pvss = PubliclyVerifiableSS; +// /// Type alias for non aggregated PVSS transcripts +// pub type Pvss = PubliclyVerifiableSS; /// Type alias for aggregated PVSS transcripts pub type AggregatedPvss = PubliclyVerifiableSS; @@ -374,35 +374,7 @@ impl PubliclyVerifiableSS { .map_err(|e| e.into()) } - pub fn refresh_decryption_share( - &self, - ciphertext_header: &CiphertextHeader, - aad: &[u8], - validator_decryption_key: &E::ScalarField, - share_index: usize, - polynomial: &DensePolynomial, - dkg: &PubliclyVerifiableDkg, - ) -> Result> { - let validator_private_key_share = self - .decrypt_private_key_share(validator_decryption_key, share_index); - let h = dkg.pvss_params.h; - let domain_point = dkg.domain.element(share_index); - let refreshed_private_key_share = refresh_private_key_share( - &h, - &domain_point, - polynomial, - &validator_private_key_share, - ); - DecryptionShareSimple::create( - 
validator_decryption_key, - &refreshed_private_key_share, - ciphertext_header, - aad, - &dkg.pvss_params.g_inv(), - ) - .map_err(|e| e.into()) - } - + // TODO: Consider relocate to different place, maybe PrivateKeyShare? (see #162, #163) pub fn update_private_key_share_for_recovery( &self, validator_decryption_key: &E::ScalarField, @@ -414,7 +386,7 @@ impl PubliclyVerifiableSS { .decrypt_private_key_share(validator_decryption_key, share_index); // And updates their share - update_share_for_recovery::(&private_key_share, share_updates) + apply_updates_to_private_share::(&private_key_share, share_updates) } } @@ -504,8 +476,8 @@ mod test_pvss { let rng = &mut ark_std::test_rng(); let (dkg, _) = setup_dkg(0); let s = ScalarField::rand(rng); - let pvss = - Pvss::::new(&s, &dkg, rng).expect("Test failed"); + let pvss = PubliclyVerifiableSS::::new(&s, &dkg, rng) + .expect("Test failed"); // Check that the chosen secret coefficient is correct assert_eq!(pvss.coeffs[0], G1::generator().mul(s)); // Check that a polynomial of the correct degree was created @@ -548,7 +520,8 @@ mod test_pvss { let rng = &mut ark_std::test_rng(); let (dkg, _) = setup_dkg(0); let s = ScalarField::rand(rng); - let pvss = Pvss::::new(&s, &dkg, rng).unwrap(); + let pvss = + PubliclyVerifiableSS::::new(&s, &dkg, rng).unwrap(); // So far, everything works assert!(pvss.verify_optimistic()); diff --git a/ferveo/src/refresh.rs b/ferveo/src/refresh.rs new file mode 100644 index 00000000..ce87c81c --- /dev/null +++ b/ferveo/src/refresh.rs @@ -0,0 +1,385 @@ +use std::{ops::Mul, usize}; + +use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; +use ark_ff::Zero; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; +use group_threshold_cryptography as tpke; +use itertools::zip_eq; +use rand_core::RngCore; +use tpke::{lagrange_basis_at, PrivateKeyShare}; + +// SHARE UPDATE FUNCTIONS: + +/// From PSS paper, section 4.2.1, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) +pub fn prepare_share_updates_for_recovery( + domain_points: &[E::ScalarField], + h: &E::G2Affine, + x_r: &E::ScalarField, + threshold: usize, + rng: &mut impl RngCore, +) -> Vec { + // Update polynomial has root at x_r + prepare_share_updates_with_root::(domain_points, h, x_r, threshold, rng) +} + +// TODO: Consider relocating to PrivateKeyShare (see #162, #163) +/// From PSS paper, section 4.2.3, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) +pub fn apply_updates_to_private_share( + private_key_share: &PrivateKeyShare, + share_updates: &[E::G2], +) -> PrivateKeyShare { + let private_key_share = share_updates + .iter() + .fold( + private_key_share.private_key_share.into_group(), + |acc, delta| acc + delta, + ) + .into_affine(); + PrivateKeyShare { private_key_share } +} + +/// From the PSS paper, section 4.2.4, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) +pub fn recover_share_from_updated_private_shares( + x_r: &E::ScalarField, + domain_points: &[E::ScalarField], + updated_private_shares: &[PrivateKeyShare], +) -> PrivateKeyShare { + // Interpolate new shares to recover y_r + let lagrange = lagrange_basis_at::(domain_points, x_r); + let prods = zip_eq(updated_private_shares, lagrange) + .map(|(y_j, l)| y_j.private_key_share.mul(l)); + let y_r = prods.fold(E::G2::zero(), |acc, y_j| acc + y_j); + + PrivateKeyShare { + private_key_share: y_r.into_affine(), + } +} + +// SHARE REFRESH FUNCTIONS: + +pub fn prepare_share_updates_for_refresh( + domain_points: &[E::ScalarField], + h: 
&E::G2Affine, + threshold: usize, + rng: &mut impl RngCore, +) -> Vec { + // Update polynomial has root at 0 + prepare_share_updates_with_root::( + domain_points, + h, + &E::ScalarField::zero(), + threshold, + rng, + ) +} + +// UTILS: + +fn prepare_share_updates_with_root( + domain_points: &[E::ScalarField], + h: &E::G2Affine, + root: &E::ScalarField, + threshold: usize, + rng: &mut impl RngCore, +) -> Vec { + // Generate a new random polynomial with defined root + let d_i = make_random_polynomial_with_root::(threshold - 1, root, rng); + + // Now, we need to evaluate the polynomial at each of participants' indices + domain_points + .iter() + .map(|x_i| { + let eval = d_i.evaluate(x_i); + h.mul(eval) + }) + .collect() +} + +pub fn make_random_polynomial_with_root( + degree: usize, + root: &E::ScalarField, + rng: &mut impl RngCore, +) -> DensePolynomial { + // [c_0, c_1, ..., c_{degree}] (Random polynomial) + let mut poly = DensePolynomial::::rand(degree, rng); + + // [0, c_1, ... , c_{degree}] (We zeroize the free term) + poly[0] = E::ScalarField::zero(); + + // Now, we calculate a new free term so that `poly(root) = 0` + let new_c_0 = E::ScalarField::zero() - poly.evaluate(root); + poly[0] = new_c_0; + + // Evaluating the polynomial at the root should result in 0 + debug_assert!(poly.evaluate(root) == E::ScalarField::zero()); + debug_assert!(poly.coeffs.len() == degree + 1); + + poly +} + +#[cfg(test)] +mod tests_refresh { + + use std::collections::HashMap; + + use ark_bls12_381::Fr; + use ark_ec::pairing::Pairing; + use ark_std::{test_rng, UniformRand, Zero}; + use rand_core::RngCore; + + type E = ark_bls12_381::Bls12_381; + type ScalarField = ::ScalarField; + + use group_threshold_cryptography::{ + test_common::setup_simple, PrivateDecryptionContextSimple, + PrivateKeyShare, + }; + + use crate::{ + apply_updates_to_private_share, prepare_share_updates_for_recovery, + prepare_share_updates_for_refresh, + recover_share_from_updated_private_shares, + }; + + fn make_new_share_fragments_for_recovery( + rng: &mut R, + threshold: usize, + x_r: &Fr, + remaining_participants: &[PrivateDecryptionContextSimple], + ) -> Vec> { + // Each participant prepares an update for each other participant + // TODO: Extract as parameter + let domain_points = remaining_participants[0] + .public_decryption_contexts + .iter() + .map(|c| c.domain) + .collect::>(); + let h = remaining_participants[0].public_decryption_contexts[0].h; + let share_updates = remaining_participants + .iter() + .map(|p| { + let deltas_i = prepare_share_updates_for_recovery::( + &domain_points, + &h, + x_r, + threshold, + rng, + ); + (p.index, deltas_i) + }) + .collect::>(); + + // Participants share updates and update their shares + let new_share_fragments: Vec<_> = remaining_participants + .iter() + .map(|p| { + // Current participant receives updates from other participants + let updates_for_participant: Vec<_> = share_updates + .values() + .map(|updates| *updates.get(p.index).unwrap()) + .collect(); + + // And updates their share + apply_updates_to_private_share::( + &p.private_key_share, + &updates_for_participant, + ) + }) + .collect(); + + new_share_fragments + } + + /// Ñ parties (where t <= Ñ <= N) jointly execute a "share recovery" algorithm, and the output is 1 new share. + /// The new share is intended to restore a previously existing share, e.g., due to loss or corruption. 
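For reference, the interpolation step behind recover_share_from_updated_private_shares is ordinary Lagrange evaluation at the target point, only lifted into G2. Below is a minimal sketch over the scalar field, assuming arkworks 0.4; the helper lagrange_at is hypothetical and merely mirrors what tpke's lagrange_basis_at computes, it is not part of this patch:

    use ark_bls12_381::Fr;
    use ark_ff::{Field, UniformRand};
    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
    use ark_std::test_rng;

    // Hypothetical stand-in for tpke::lagrange_basis_at: the j-th Lagrange basis
    // polynomial over `domain`, evaluated at `x_r`.
    fn lagrange_at(domain: &[Fr], x_r: &Fr) -> Vec<Fr> {
        (0..domain.len())
            .map(|j| {
                (0..domain.len())
                    .filter(|k| *k != j)
                    .map(|k| (*x_r - domain[k]) * (domain[j] - domain[k]).inverse().unwrap())
                    .product::<Fr>()
            })
            .collect()
    }

    fn main() {
        let rng = &mut test_rng();
        let threshold: usize = 3;

        // Sharing polynomial of degree threshold - 1; shares are its evaluations.
        let poly = DensePolynomial::<Fr>::rand(threshold - 1, rng);
        let domain: Vec<Fr> = (0..threshold).map(|_| Fr::rand(rng)).collect();
        let shares: Vec<Fr> = domain.iter().map(|x| poly.evaluate(x)).collect();

        // Recombine `threshold` shares at a fresh point x_r: the same weighted sum that
        // recover_share_from_updated_private_shares takes, except there the y_j live in G2.
        let x_r = Fr::rand(rng);
        let basis = lagrange_at(&domain, &x_r);
        let recovered: Fr = shares.iter().zip(basis.iter()).map(|(y, l)| *y * l).sum();
        assert_eq!(recovered, poly.evaluate(&x_r));
    }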
+ #[test] + fn tdec_simple_variant_share_recovery_at_selected_point() { + let rng = &mut test_rng(); + let shares_num = 16; + let threshold = shares_num * 2 / 3; + + let (_, _, mut contexts) = + setup_simple::(threshold, shares_num, rng); + + // Prepare participants + + // First, save the soon-to-be-removed participant + let selected_participant = contexts.pop().unwrap(); + let x_r = selected_participant + .public_decryption_contexts + .last() + .unwrap() + .domain; + let original_private_key_share = selected_participant.private_key_share; + + // Remove one participant from the contexts and all nested structures + let mut remaining_participants = contexts; + for p in &mut remaining_participants { + p.public_decryption_contexts.pop().unwrap(); + } + + // Each participant prepares an update for each other participant, and uses it to create a new share fragment + let new_share_fragments = make_new_share_fragments_for_recovery( + rng, + threshold, + &x_r, + &remaining_participants, + ); + + // Now, we have to combine new share fragments into a new share + let domain_points = &remaining_participants[0] + .public_decryption_contexts + .iter() + .map(|ctxt| ctxt.domain) + .collect::>(); + let new_private_key_share = recover_share_from_updated_private_shares( + &x_r, + &domain_points[..threshold], + &new_share_fragments[..threshold], + ); + + assert_eq!(new_private_key_share, original_private_key_share); + + // If we don't have enough private share updates, the resulting private share will be incorrect + assert_eq!(domain_points.len(), new_share_fragments.len()); + let incorrect_private_key_share = + recover_share_from_updated_private_shares( + &x_r, + &domain_points[..(threshold - 1)], + &new_share_fragments[..(threshold - 1)], + ); + + assert_ne!(incorrect_private_key_share, original_private_key_share); + } + + /// Ñ parties (where t <= Ñ <= N) jointly execute a "share recovery" algorithm, and the output is 1 new share. + /// The new share is independent from the previously existing shares. We can use this to on-board a new participant into an existing cohort. 
+ #[test] + fn tdec_simple_variant_share_recovery_at_random_point() { + let rng = &mut test_rng(); + let shares_num = 16; + let threshold = shares_num * 2 / 3; + + let (_, shared_private_key, mut contexts) = + setup_simple::(threshold, shares_num, rng); + + // Prepare participants + + // Remove one participant from the contexts and all nested structures + contexts.pop().unwrap(); + let mut remaining_participants = contexts.clone(); + for p in &mut remaining_participants { + p.public_decryption_contexts.pop().unwrap(); + } + + // Now, we're going to recover a new share at a random point and check that the shared secret is still the same + + // Our random point + let x_r = ScalarField::rand(rng); + + // Each participant prepares an update for each other participant, and uses it to create a new share fragment + let new_share_fragments = make_new_share_fragments_for_recovery( + rng, + threshold, + &x_r, + &remaining_participants, + ); + + // Now, we have to combine new share fragments into a new share + let domain_points = &mut remaining_participants[0] + .public_decryption_contexts + .iter() + .map(|ctxt| ctxt.domain) + .collect::>(); + let new_private_key_share = recover_share_from_updated_private_shares( + &x_r, + &domain_points[..threshold], + &new_share_fragments[..threshold], + ); + + let mut private_shares = contexts + .iter() + .cloned() + .map(|ctxt| ctxt.private_key_share) + .collect::>(); + + // Finally, let's recreate the shared private key from some original shares and the recovered one + domain_points.push(x_r); + private_shares.push(new_private_key_share); + let start_from = shares_num - threshold; + let new_shared_private_key = recover_share_from_updated_private_shares( + &ScalarField::zero(), + &domain_points[start_from..], + &private_shares[start_from..], + ); + + assert_eq!( + shared_private_key, + new_shared_private_key.private_key_share + ); + } + + /// Ñ parties (where t <= Ñ <= N) jointly execute a "share refresh" algorithm. + /// The output is M new shares (with M <= Ñ), with each of the M new shares substituting the + /// original share (i.e., the original share is deleted). 
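The refresh case is the recovery construction with the prescribed root fixed at zero. A small sketch of why that leaves the shared secret untouched, again over the scalar field and assuming arkworks 0.4 (the real code applies the same updates as G2 points via h), not part of this patch:

    use ark_bls12_381::Fr;
    use ark_ff::Zero;
    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
    use ark_std::test_rng;

    fn main() {
        let rng = &mut test_rng();
        let threshold: usize = 3;

        // The sharing polynomial; the shared secret is its value at zero.
        let f = DensePolynomial::<Fr>::rand(threshold - 1, rng);
        let secret = f.evaluate(&Fr::zero());

        // Update polynomial with a root at zero. Zeroing the constant term is what
        // make_random_polynomial_with_root(threshold - 1, &Fr::zero(), rng) reduces to
        // when the requested root is zero.
        let mut d = DensePolynomial::<Fr>::rand(threshold - 1, rng);
        d[0] = Fr::zero();
        assert!(d.evaluate(&Fr::zero()).is_zero());

        // Every refreshed share is f(x_i) + d(x_i), i.e. an evaluation of f + d, and
        // (f + d)(0) = f(0): the individual shares change, the secret does not.
        let refreshed = &f + &d;
        assert_eq!(refreshed.evaluate(&Fr::zero()), secret);
    }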
+ #[test] + fn tdec_simple_variant_share_refreshing() { + let rng = &mut test_rng(); + let shares_num = 16; + let threshold = shares_num * 2 / 3; + + let (_, shared_private_key, contexts) = + setup_simple::(threshold, shares_num, rng); + + let domain_points = &contexts[0] + .public_decryption_contexts + .iter() + .map(|ctxt| ctxt.domain) + .collect::>(); + let h = contexts[0].public_decryption_contexts[0].h; + + // Each participant prepares an update for each other participant: + let share_updates = contexts + .iter() + .map(|p| { + let deltas_i = prepare_share_updates_for_refresh::( + domain_points, + &h, + threshold, + rng, + ); + (p.index, deltas_i) + }) + .collect::>(); + + // Participants "refresh" their shares with the updates from each other: + let refreshed_shares: Vec<_> = contexts + .iter() + .map(|p| { + // Current participant receives updates from other participants + let updates_for_participant: Vec<_> = share_updates + .values() + .map(|updates| *updates.get(p.index).unwrap()) + .collect(); + + // And updates their share + apply_updates_to_private_share::( + &p.private_key_share, + &updates_for_participant, + ) + }) + .collect(); + + // Finally, let's recreate the shared private key from the refreshed shares + let new_shared_private_key = recover_share_from_updated_private_shares( + &ScalarField::zero(), + &domain_points[..threshold], + &refreshed_shares[..threshold], + ); + + assert_eq!( + shared_private_key, + new_shared_private_key.private_key_share + ); + } +} diff --git a/tpke/benches/arkworks.rs b/tpke/benches/arkworks.rs index a1f6a50f..953f067c 100644 --- a/tpke/benches/arkworks.rs +++ b/tpke/benches/arkworks.rs @@ -10,14 +10,13 @@ use ark_ec::{ pairing::{prepare_g1, prepare_g2, Pairing}, AffineRepr, CurveGroup, }; -use ark_ff::{BigInteger256, Field, One, UniformRand, Zero}; +use ark_ff::{BigInteger256, Field, UniformRand}; use criterion::{ black_box, criterion_group, criterion_main, BenchmarkId, Criterion, }; -use group_threshold_cryptography_pre_release::make_random_polynomial_at; use itertools::izip; use rand::prelude::StdRng; -use rand_core::{RngCore, SeedableRng}; +use rand_core::SeedableRng; type E = Bls12_381; type G1Prepared = ::G1Prepared; @@ -205,77 +204,6 @@ pub fn bench_product_of_pairings(c: &mut Criterion) { } } -pub fn bench_random_poly(c: &mut Criterion) { - let mut group = c.benchmark_group("random_polynomial_evaluation"); - group.sample_size(10); - - fn evaluate_polynomial(polynomial: &[Fr], x: &Fr) -> Fr { - let mut result = Fr::zero(); - let mut x_power = Fr::one(); - for coeff in polynomial { - result += *coeff * x_power; - x_power *= x; - } - result - } - - pub fn naive_make_random_polynomial_at( - threshold: usize, - root: &Fr, - rng: &mut impl RngCore, - ) -> Vec { - // [][threshold-1] - let mut d_i = (0..threshold - 1) - .map(|_| Fr::rand(rng)) - .collect::>(); - // [0..][threshold] - d_i.insert(0, Fr::zero()); - - // Now, we calculate d_i_0 - // This is the term that will "zero out" the polynomial at x_r, d_i(x_r) = 0 - let d_i_0 = Fr::zero() - evaluate_polynomial::(&d_i, root); - d_i[0] = d_i_0; - assert_eq!(evaluate_polynomial::(&d_i, root), Fr::zero()); - - debug_assert!(d_i.len() == threshold); - debug_assert!(evaluate_polynomial::(&d_i, root) == Fr::zero()); - d_i - } - - // Skipping t=1, because it results in a random polynomial with t-1=0 coefficients - for threshold in [2, 4, 8, 16, 32, 64] { - let rng = &mut StdRng::seed_from_u64(0); - let mut ark = { - let mut rng = rng.clone(); - move || { - black_box(make_random_polynomial_at::( - 
threshold, - &Fr::zero(), - &mut rng, - )) - } - }; - let mut naive = { - let mut rng = rng.clone(); - move || { - black_box(naive_make_random_polynomial_at::( - threshold, - &Fr::zero(), - &mut rng, - )) - } - }; - group.bench_function( - BenchmarkId::new("random_polynomial_ark", threshold), - |b| b.iter(|| ark()), - ); - group.bench_function( - BenchmarkId::new("random_polynomial_naive", threshold), - |b| b.iter(|| naive()), - ); - } -} - pub fn bench_dummy(_c: &mut Criterion) { // Does nothing on purpose, but is required to make criterion happy. } @@ -294,7 +222,6 @@ criterion_group!( // bench_final_exponentiation, // bench_pairing, // bench_product_of_pairings, - // bench_random_poly, ); criterion_main!(benches); diff --git a/tpke/benches/tpke.rs b/tpke/benches/tpke.rs index c6ad85ae..cb553c27 100644 --- a/tpke/benches/tpke.rs +++ b/tpke/benches/tpke.rs @@ -1,10 +1,7 @@ #![allow(clippy::redundant_closure)] -use std::collections::HashMap; - use ark_bls12_381::{Bls12_381, Fr, G1Affine as G1, G2Affine as G2}; -use ark_ec::{pairing::Pairing, AffineRepr}; -use ark_ff::Zero; +use ark_ec::pairing::Pairing; use criterion::{ black_box, criterion_group, criterion_main, BenchmarkId, Criterion, }; @@ -470,105 +467,110 @@ pub fn bench_decryption_share_validity_checks(c: &mut Criterion) { } } -pub fn bench_recover_share_at_point(c: &mut Criterion) { - let mut group = c.benchmark_group("RECOVER SHARE"); - let rng = &mut StdRng::seed_from_u64(0); - let msg_size = MSG_SIZE_CASES[0]; - - for &shares_num in NUM_SHARES_CASES.iter() { - let mut setup = SetupSimple::new(shares_num, msg_size, rng); - let threshold = setup.shared.threshold; - let selected_participant = setup.contexts.pop().unwrap(); - let x_r = selected_participant - .public_decryption_contexts - .last() - .unwrap() - .domain; - let mut remaining_participants = setup.contexts; - for p in &mut remaining_participants { - p.public_decryption_contexts.pop(); - } - let domain_points = &remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); - let h = remaining_participants[0].public_decryption_contexts[0].h; - let share_updates = remaining_participants - .iter() - .map(|p| { - let deltas_i = prepare_share_updates_for_recovery::( - domain_points, - &h, - &x_r, - threshold, - rng, - ); - (p.index, deltas_i) - }) - .collect::>(); - let new_share_fragments: Vec<_> = remaining_participants - .iter() - .map(|p| { - // Current participant receives updates from other participants - let updates_for_participant: Vec<_> = share_updates - .values() - .map(|updates| *updates.get(p.index).unwrap()) - .collect(); - - // And updates their share - update_share_for_recovery::( - &p.private_key_share, - &updates_for_participant, - ) - }) - .collect(); - group.bench_function( - BenchmarkId::new( - "recover_share_from_updated_private_shares", - shares_num, - ), - |b| { - b.iter(|| { - let _ = black_box( - recover_share_from_updated_private_shares::( - &x_r, - domain_points, - &new_share_fragments, - ), - ); - }); - }, - ); - } -} - -pub fn bench_refresh_shares(c: &mut Criterion) { - let mut group = c.benchmark_group("REFRESH SHARES"); - let rng = &mut StdRng::seed_from_u64(0); - let msg_size = MSG_SIZE_CASES[0]; - - for &shares_num in NUM_SHARES_CASES.iter() { - let setup = SetupSimple::new(shares_num, msg_size, rng); - let threshold = setup.shared.threshold; - let polynomial = - make_random_polynomial_at::(threshold, &Fr::zero(), rng); - let p = setup.contexts[0].clone(); - group.bench_function( - 
BenchmarkId::new("refresh_private_key_share", shares_num), - |b| { - b.iter(|| { - black_box(refresh_private_key_share::( - &p.setup_params.h.into_group(), - &p.public_decryption_contexts[0].domain, - &polynomial, - &p.private_key_share, - )); - }); - }, - ); - } -} +// TODO: Relocate benchmark to ferveo/benches as part of #162, #163 +// pub fn bench_recover_share_at_point(c: &mut Criterion) { +// let mut group = c.benchmark_group("RECOVER SHARE"); +// let rng = &mut StdRng::seed_from_u64(0); +// let msg_size = MSG_SIZE_CASES[0]; + +// for &shares_num in NUM_SHARES_CASES.iter() { +// let mut setup = SetupSimple::new(shares_num, msg_size, rng); +// let threshold = setup.shared.threshold; +// let selected_participant = setup.contexts.pop().unwrap(); +// let x_r = selected_participant +// .public_decryption_contexts +// .last() +// .unwrap() +// .domain; +// let mut remaining_participants = setup.contexts; +// for p in &mut remaining_participants { +// p.public_decryption_contexts.pop(); +// } +// let domain_points = &remaining_participants[0] +// .public_decryption_contexts +// .iter() +// .map(|ctxt| ctxt.domain) +// .collect::>(); +// let h = remaining_participants[0].public_decryption_contexts[0].h; +// let share_updates = remaining_participants +// .iter() +// .map(|p| { +// let deltas_i = prepare_share_updates_for_recovery::( +// domain_points, +// &h, +// &x_r, +// threshold, +// rng, +// ); +// (p.index, deltas_i) +// }) +// .collect::>(); +// let new_share_fragments: Vec<_> = remaining_participants +// .iter() +// .map(|p| { +// // Current participant receives updates from other participants +// let updates_for_participant: Vec<_> = share_updates +// .values() +// .map(|updates| *updates.get(p.index).unwrap()) +// .collect(); + +// // And updates their share +// apply_updates_to_private_share::( +// &p.private_key_share, +// &updates_for_participant, +// ) +// }) +// .collect(); +// group.bench_function( +// BenchmarkId::new( +// "recover_share_from_updated_private_shares", +// shares_num, +// ), +// |b| { +// b.iter(|| { +// let _ = black_box( +// recover_share_from_updated_private_shares::( +// &x_r, +// domain_points, +// &new_share_fragments, +// ), +// ); +// }); +// }, +// ); +// } +// } + +// TODO: Relocate benchmark to ferveo/benches as part of #162, #163 +// pub fn bench_refresh_shares(c: &mut Criterion) { +// let mut group = c.benchmark_group("REFRESH SHARES"); +// let rng = &mut StdRng::seed_from_u64(0); +// let msg_size = MSG_SIZE_CASES[0]; + +// for &shares_num in NUM_SHARES_CASES.iter() { +// let setup = SetupSimple::new(shares_num, msg_size, rng); +// let threshold = setup.shared.threshold; +// let polynomial = make_random_polynomial_with_root::( +// threshold - 1, +// &Fr::zero(), +// rng, +// ); +// let p = setup.contexts[0].clone(); +// group.bench_function( +// BenchmarkId::new("refresh_private_key_share", shares_num), +// |b| { +// b.iter(|| { +// black_box(refresh_private_key_share::( +// &p.setup_params.h.into_group(), +// &p.public_decryption_contexts[0].domain, +// &polynomial, +// &p.private_key_share, +// )); +// }); +// }, +// ); +// } +// } criterion_group!( benches, @@ -578,8 +580,8 @@ criterion_group!( bench_share_encrypt_decrypt, bench_ciphertext_validity_checks, bench_decryption_share_validity_checks, - bench_recover_share_at_point, - bench_refresh_shares, + // bench_recover_share_at_point, + // bench_refresh_shares, ); criterion_main!(benches); diff --git a/tpke/src/combine.rs b/tpke/src/combine.rs index a46477fb..f9d8ddbb 100644 --- 
a/tpke/src/combine.rs +++ b/tpke/src/combine.rs @@ -56,6 +56,8 @@ pub fn prepare_combine_fast( .collect::>() } +// TODO: Combine `tpke::prepare_combine_simple` and `tpke::share_combine_simple` into +// one function and expose it in the tpke::api? pub fn prepare_combine_simple( domain: &[E::ScalarField], ) -> Vec { diff --git a/tpke/src/decryption.rs b/tpke/src/decryption.rs index 01ae5df7..0622e6a8 100644 --- a/tpke/src/decryption.rs +++ b/tpke/src/decryption.rs @@ -56,11 +56,13 @@ impl ValidatorShareChecksum { h: &E::G2, ciphertext: &Ciphertext, ) -> bool { + // See https://github.com/nucypher/ferveo/issues/42#issuecomment-1398953777 // D_i == e(C_i, Y_i) if *decryption_share != E::pairing(self.checksum, *share_aggregate).0 { return false; } + // TODO: use multipairing here (h_inv) // e(C_i, ek_i) == e(U, H) if E::pairing(self.checksum, *validator_public_key) != E::pairing(ciphertext.commitment, *h) diff --git a/tpke/src/lib.rs b/tpke/src/lib.rs index 46a78f24..a5c1b302 100644 --- a/tpke/src/lib.rs +++ b/tpke/src/lib.rs @@ -6,7 +6,6 @@ pub mod context; pub mod decryption; pub mod hash_to_curve; pub mod key_share; -pub mod refresh; pub mod secret_box; // TODO: Only show the public API, tpke::api @@ -24,7 +23,6 @@ pub use context::*; pub use decryption::*; pub use hash_to_curve::*; pub use key_share::*; -pub use refresh::*; pub use secret_box::*; #[cfg(feature = "api")] @@ -276,49 +274,31 @@ pub mod test_common { // In precomputed variant, the security threshold is equal to the number of shares setup_simple::(shares_num, shares_num, rng) } + + pub fn make_shared_secret( + pub_contexts: &[PublicDecryptionContextSimple], + decryption_shares: &[DecryptionShareSimple], + ) -> SharedSecret { + let domain = pub_contexts.iter().map(|c| c.domain).collect::>(); + let lagrange_coeffs = prepare_combine_simple::(&domain); + share_combine_simple::(decryption_shares, &lagrange_coeffs) + } } #[cfg(test)] mod tests { - use std::{collections::HashMap, ops::Mul}; + use std::ops::Mul; - use ark_bls12_381::Fr; use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; - use ark_ff::Zero; use ark_std::{test_rng, UniformRand}; use ferveo_common::{FromBytes, ToBytes}; - use rand_core::RngCore; - use crate::{ - refresh::{ - make_random_polynomial_at, prepare_share_updates_for_recovery, - recover_share_from_updated_private_shares, - refresh_private_key_share, update_share_for_recovery, - }, - test_common::{setup_simple, *}, - }; + use crate::test_common::{make_shared_secret, setup_simple, *}; type E = ark_bls12_381::Bls12_381; type TargetField = ::TargetField; type ScalarField = ::ScalarField; - fn make_shared_secret_from_contexts( - contexts: &[PrivateDecryptionContextSimple], - ciphertext: &Ciphertext, - aad: &[u8], - ) -> SharedSecret { - let decryption_shares: Vec<_> = contexts - .iter() - .map(|c| { - c.create_share(&ciphertext.header().unwrap(), aad).unwrap() - }) - .collect(); - make_shared_secret( - &contexts[0].public_decryption_contexts, - &decryption_shares, - ) - } - #[test] fn ciphertext_serialization() { let rng = &mut test_rng(); @@ -374,63 +354,6 @@ mod tests { .is_err()); } - fn make_new_share_fragments( - rng: &mut R, - threshold: usize, - x_r: &Fr, - remaining_participants: &[PrivateDecryptionContextSimple], - ) -> Vec> { - // Each participant prepares an update for each other participant - let domain_points = remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|c| c.domain) - .collect::>(); - let h = remaining_participants[0].public_decryption_contexts[0].h; - let share_updates 
= remaining_participants - .iter() - .map(|p| { - let deltas_i = prepare_share_updates_for_recovery::( - &domain_points, - &h, - x_r, - threshold, - rng, - ); - (p.index, deltas_i) - }) - .collect::>(); - - // Participants share updates and update their shares - let new_share_fragments: Vec<_> = remaining_participants - .iter() - .map(|p| { - // Current participant receives updates from other participants - let updates_for_participant: Vec<_> = share_updates - .values() - .map(|updates| *updates.get(p.index).unwrap()) - .collect(); - - // And updates their share - update_share_for_recovery::( - &p.private_key_share, - &updates_for_participant, - ) - }) - .collect(); - - new_share_fragments - } - - fn make_shared_secret( - pub_contexts: &[PublicDecryptionContextSimple], - decryption_shares: &[DecryptionShareSimple], - ) -> SharedSecret { - let domain = pub_contexts.iter().map(|c| c.domain).collect::>(); - let lagrange_coeffs = prepare_combine_simple::(&domain); - share_combine_simple::(decryption_shares, &lagrange_coeffs) - } - #[test] fn tdec_fast_variant_share_validation() { let rng = &mut test_rng(); @@ -679,198 +602,4 @@ mod tests { &ciphertext, )); } - - /// Ñ parties (where t <= Ñ <= N) jointly execute a "share recovery" algorithm, and the output is 1 new share. - /// The new share is intended to restore a previously existing share, e.g., due to loss or corruption. - #[test] - fn tdec_simple_variant_share_recovery_at_selected_point() { - let rng = &mut test_rng(); - let shares_num = 16; - let threshold = shares_num * 2 / 3; - - let (_, _, mut contexts) = - setup_simple::(threshold, shares_num, rng); - - // Prepare participants - - // First, save the soon-to-be-removed participant - let selected_participant = contexts.pop().unwrap(); - let x_r = selected_participant - .public_decryption_contexts - .last() - .unwrap() - .domain; - let original_private_key_share = selected_participant.private_key_share; - - // Remove one participant from the contexts and all nested structures - let mut remaining_participants = contexts; - for p in &mut remaining_participants { - p.public_decryption_contexts.pop().unwrap(); - } - - // Each participant prepares an update for each other participant, and uses it to create a new share fragment - let new_share_fragments = make_new_share_fragments( - rng, - threshold, - &x_r, - &remaining_participants, - ); - - // Now, we have to combine new share fragments into a new share - let domain_points = &remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); - let new_private_key_share = recover_share_from_updated_private_shares( - &x_r, - domain_points, - &new_share_fragments, - ); - - assert_eq!(new_private_key_share, original_private_key_share); - } - - /// Ñ parties (where t <= Ñ <= N) jointly execute a "share recovery" algorithm, and the output is 1 new share. - /// The new share is independent from the previously existing shares. We can use this to on-board a new participant into an existing cohort. 
- #[test] - fn tdec_simple_variant_share_recovery_at_random_point() { - let rng = &mut test_rng(); - let shares_num = 16; - let threshold = shares_num * 2 / 3; - let msg = "my-msg".as_bytes().to_vec(); - let aad: &[u8] = "my-aad".as_bytes(); - - let (pubkey, _, contexts) = - setup_simple::(threshold, shares_num, rng); - let g_inv = &contexts[0].setup_params.g_inv; - let ciphertext = - encrypt::(SecretBox::new(msg), aad, &pubkey, rng).unwrap(); - - // Create an initial shared secret - let old_shared_secret = - make_shared_secret_from_contexts(&contexts, &ciphertext, aad); - - // Now, we're going to recover a new share at a random point and check that the shared secret is still the same - - // Our random point - let x_r = ScalarField::rand(rng); - - // Remove one participant from the contexts and all nested structures - let mut remaining_participants = contexts.clone(); - remaining_participants.pop().unwrap(); - for p in &mut remaining_participants { - p.public_decryption_contexts.pop().unwrap(); - } - - let new_share_fragments = make_new_share_fragments( - rng, - threshold, - &x_r, - &remaining_participants, - ); - - // Now, we have to combine new share fragments into a new share - let domain_points = &remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); - let new_private_key_share = recover_share_from_updated_private_shares( - &x_r, - domain_points, - &new_share_fragments, - ); - - // Get decryption shares from remaining participants - let mut decryption_shares: Vec<_> = remaining_participants - .iter() - .map(|c| { - c.create_share(&ciphertext.header().unwrap(), aad).unwrap() - }) - .collect(); - - // Create a decryption share from a recovered private key share - let new_validator_decryption_key = ScalarField::rand(rng); - decryption_shares.push( - DecryptionShareSimple::create( - &new_validator_decryption_key, - &new_private_key_share, - &ciphertext.header().unwrap(), - aad, - g_inv, - ) - .unwrap(), - ); - - // Creating a shared secret from remaining shares and the recovered one - let new_shared_secret = make_shared_secret( - &remaining_participants[0].public_decryption_contexts, - &decryption_shares, - ); - - assert_eq!(old_shared_secret, new_shared_secret); - } - - /// Ñ parties (where t <= Ñ <= N) jointly execute a "share refresh" algorithm. - /// The output is M new shares (with M <= Ñ), with each of the M new shares substituting the - /// original share (i.e., the original share is deleted). 
- #[test] - fn tdec_simple_variant_share_refreshing() { - let rng = &mut test_rng(); - let shares_num = 16; - let threshold = shares_num * 2 / 3; - let msg = "my-msg".as_bytes().to_vec(); - let aad: &[u8] = "my-aad".as_bytes(); - - let (pubkey, _, contexts) = - setup_simple::(threshold, shares_num, rng); - let g_inv = &contexts[0].setup_params.g_inv; - let pub_contexts = contexts[0].public_decryption_contexts.clone(); - let ciphertext = - encrypt::(SecretBox::new(msg), aad, &pubkey, rng).unwrap(); - - // Create an initial shared secret - let old_shared_secret = - make_shared_secret_from_contexts(&contexts, &ciphertext, aad); - - // Now, we're going to refresh the shares and check that the shared secret is the same - - // Dealer computes a new random polynomial with constant term x_r - let polynomial = make_random_polynomial_at::( - threshold, - &ScalarField::zero(), - rng, - ); - - // Dealer shares the polynomial with participants - - // Participants computes new decryption shares - let new_decryption_shares: Vec<_> = contexts - .iter() - .enumerate() - .map(|(i, p)| { - // Participant computes share updates and update their private key shares - let private_key_share = refresh_private_key_share::( - &p.setup_params.h.into_group(), - &p.public_decryption_contexts[i].domain, - &polynomial, - &p.private_key_share, - ); - DecryptionShareSimple::create( - &p.validator_private_key, - &private_key_share, - &ciphertext.header().unwrap(), - aad, - g_inv, - ) - .unwrap() - }) - .collect(); - - let new_shared_secret = - make_shared_secret(&pub_contexts, &new_decryption_shares); - - assert_eq!(old_shared_secret, new_shared_secret); - } } diff --git a/tpke/src/refresh.rs b/tpke/src/refresh.rs deleted file mode 100644 index d7ebfa87..00000000 --- a/tpke/src/refresh.rs +++ /dev/null @@ -1,102 +0,0 @@ -use std::{ops::Mul, usize}; - -use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; -use ark_ff::Zero; -use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; -use itertools::zip_eq; -use rand_core::RngCore; - -use crate::{lagrange_basis_at, PrivateKeyShare}; - -/// From PSS paper, section 4.2.1, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) -pub fn prepare_share_updates_for_recovery( - domain_points: &[E::ScalarField], - h: &E::G2Affine, - x_r: &E::ScalarField, - threshold: usize, - rng: &mut impl RngCore, -) -> Vec { - // Generate a new random polynomial with constant term x_r - let d_i = make_random_polynomial_at::(threshold, x_r, rng); - - // Now, we need to evaluate the polynomial at each of participants' indices - domain_points - .iter() - .map(|x_i| { - let eval = d_i.evaluate(x_i); - h.mul(eval) - }) - .collect() -} - -/// From PSS paper, section 4.2.3, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) -pub fn update_share_for_recovery( - private_key_share: &PrivateKeyShare, - share_updates: &[E::G2], -) -> PrivateKeyShare { - let private_key_share = share_updates - .iter() - .fold( - private_key_share.private_key_share.into_group(), - |acc, delta| acc + delta, - ) - .into_affine(); - PrivateKeyShare { private_key_share } -} - -/// From the PSS paper, section 4.2.4, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) -pub fn recover_share_from_updated_private_shares( - x_r: &E::ScalarField, - domain_points: &[E::ScalarField], - updated_private_shares: &[PrivateKeyShare], -) -> PrivateKeyShare { - // Interpolate new shares to recover y_r - let lagrange = lagrange_basis_at::(domain_points, x_r); - let prods = 
zip_eq(updated_private_shares, lagrange) - .map(|(y_j, l)| y_j.private_key_share.mul(l)); - let y_r = prods.fold(E::G2::zero(), |acc, y_j| acc + y_j); - - PrivateKeyShare { - private_key_share: y_r.into_affine(), - } -} - -pub fn make_random_polynomial_at( - threshold: usize, - root: &E::ScalarField, - rng: &mut impl RngCore, -) -> DensePolynomial { - // [][threshold-1] - let mut threshold_poly = - DensePolynomial::::rand(threshold - 1, rng); - - // [0..][threshold] - threshold_poly[0] = E::ScalarField::zero(); - - // Now, we calculate d_i_0 - // This is the term that will "zero out" the polynomial at x_r, d_i(x_r) = 0 - let d_i_0 = E::ScalarField::zero() - threshold_poly.evaluate(root); - threshold_poly[0] = d_i_0; - - debug_assert!(threshold_poly.evaluate(root) == E::ScalarField::zero()); - debug_assert!(threshold_poly.coeffs.len() == threshold); - - threshold_poly -} - -// TODO: Expose a method to create a proper decryption share after refreshing -pub fn refresh_private_key_share( - h: &E::G2, - domain_point: &E::ScalarField, - polynomial: &DensePolynomial, - validator_private_key_share: &PrivateKeyShare, -) -> PrivateKeyShare { - let evaluated_polynomial = polynomial.evaluate(domain_point); - let share_update = h.mul(evaluated_polynomial); - let updated_share = - validator_private_key_share.private_key_share.into_group() - + share_update; - PrivateKeyShare { - private_key_share: updated_share.into_affine(), - } -}
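One behavioural detail worth calling out about the rename from make_random_polynomial_at to make_random_polynomial_with_root: the deleted helper took the number of coefficients (threshold) and built a polynomial of degree threshold - 1, while the new one takes the degree itself, which is why callers in this patch now pass threshold - 1. A quick sketch of the shared root-forcing trick and the coefficient count, assuming arkworks 0.4 and not part of this patch:

    use ark_bls12_381::Fr;
    use ark_ff::Zero;
    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
    use ark_std::test_rng;

    fn main() {
        let rng = &mut test_rng();
        let threshold: usize = 4;
        let root = Fr::from(42u64);

        // New convention: pass the degree (threshold - 1), get degree + 1 coefficients,
        // the same polynomial the old helper produced when handed `threshold`.
        let degree = threshold - 1;
        let mut poly = DensePolynomial::<Fr>::rand(degree, rng);
        poly[0] = Fr::zero();
        poly[0] = Fr::zero() - poly.evaluate(&root); // force poly(root) = 0
        assert!(poly.evaluate(&root).is_zero());
        assert_eq!(poly.coeffs.len(), threshold);
    }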