From f0e61feb1bb0ad0e57cf19740b6302bcf3e3b2f8 Mon Sep 17 00:00:00 2001 From: Piotr Roslaniec Date: Mon, 26 Feb 2024 19:25:29 +0100 Subject: [PATCH] wip --- ferveo-tdec/src/context.rs | 2 +- ferveo-tdec/src/decryption.rs | 14 +- ferveo/src/api.rs | 131 +++++++++------- ferveo/src/dkg.rs | 15 +- ferveo/src/lib.rs | 236 ++++++++++++++++------------ ferveo/src/refresh.rs | 283 +++++++++++++++++++++------------- ferveo/src/test_common.rs | 3 +- 7 files changed, 406 insertions(+), 278 deletions(-) diff --git a/ferveo-tdec/src/context.rs b/ferveo-tdec/src/context.rs index 6e565188..ed7faee0 100644 --- a/ferveo-tdec/src/context.rs +++ b/ferveo-tdec/src/context.rs @@ -100,7 +100,7 @@ impl PrivateDecryptionContextSimple { .collect::>(); let lagrange_coeffs = prepare_combine_simple::(&domain); - DecryptionSharePrecomputed::new( + DecryptionSharePrecomputed::create( self.index, &self.setup_params.b, &self.private_key_share, diff --git a/ferveo-tdec/src/decryption.rs b/ferveo-tdec/src/decryption.rs index 316d82f1..dec3ed78 100644 --- a/ferveo-tdec/src/decryption.rs +++ b/ferveo-tdec/src/decryption.rs @@ -72,6 +72,9 @@ impl ValidatorShareChecksum { } } +/// A decryption share for a simple variant of the threshold decryption scheme. +/// In this variant, the decryption share requires additional computation on the +/// client side in order to be combined. #[serde_as] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct DecryptionShareSimple { @@ -141,6 +144,11 @@ impl DecryptionShareSimple { } } +/// A decryption share for a precomputed variant of the threshold decryption scheme. +/// In this variant, the decryption share is precomputed and can be combined +/// without additional computation on the client side. +/// The downside is that the threshold of decryption shares required to decrypt +/// is equal to the number of private key shares in the scheme. #[serde_as] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct DecryptionSharePrecomputed { @@ -155,7 +163,9 @@ pub struct DecryptionSharePrecomputed { } impl DecryptionSharePrecomputed { - pub fn new( + /// Create a decryption share from the given parameters. + /// This function checks that the ciphertext is valid. + pub fn create( validator_index: usize, validator_decryption_key: &E::ScalarField, private_key_share: &PrivateKeyShare, @@ -174,6 +184,8 @@ impl DecryptionSharePrecomputed { ) } + /// Create a decryption share from the given parameters. + /// This function does not check that the ciphertext is valid.
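+ /// Prefer [`Self::create`] unless the ciphertext has already been validated by the caller.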
pub fn create_unchecked( validator_index: usize, validator_decryption_key: &E::ScalarField, diff --git a/ferveo/src/api.rs b/ferveo/src/api.rs index 4eeb4e58..3306e7c1 100644 --- a/ferveo/src/api.rs +++ b/ferveo/src/api.rs @@ -1,4 +1,4 @@ -use std::{fmt, io}; +use std::{collections::HashMap, fmt, io}; use ark_ec::CurveGroup; use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; @@ -25,7 +25,7 @@ use crate::bindings_python; use crate::bindings_wasm; pub use crate::EthereumAddress; use crate::{ - do_verify_aggregation, DomainPoint, Error, PubliclyVerifiableParams, + do_verify_aggregation, Error, PubliclyVerifiableParams, PubliclyVerifiableSS, Result, }; @@ -34,6 +34,7 @@ pub type Keypair = ferveo_common::Keypair; pub type Validator = crate::Validator; pub type Transcript = PubliclyVerifiableSS; pub type ValidatorMessage = (Validator, Transcript); +pub type DomainPoint = crate::DomainPoint; // Normally, we would use a custom trait for this, but we can't because // the `arkworks` will not let us create a blanket implementation for G1Affine @@ -239,7 +240,7 @@ impl Dkg { &self.0.me } - pub fn domain_points(&self) -> Vec> { + pub fn domain_points(&self) -> Vec { self.0.domain_points() } } @@ -369,11 +370,10 @@ impl AggregatedTranscript { pub struct DecryptionShareSimple { share: ferveo_tdec::api::DecryptionShareSimple, #[serde_as(as = "serialization::SerdeAs")] - domain_point: DomainPoint, + domain_point: DomainPoint, } pub fn combine_shares_simple(shares: &[DecryptionShareSimple]) -> SharedSecret { - // Pick domain points that are corresponding to the shares we have. let domain_points: Vec<_> = shares.iter().map(|s| s.domain_point).collect(); let lagrange_coefficients = prepare_combine_simple::(&domain_points); @@ -387,6 +387,7 @@ pub fn combine_shares_simple(shares: &[DecryptionShareSimple]) -> SharedSecret { pub struct SharedSecret(pub ferveo_tdec::api::SharedSecret); #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +// TODO: Use refresh::ShareRecoveryUpdate instead of ferveo_tdec::PrivateKeyShare pub struct ShareRecoveryUpdate(pub ferveo_tdec::PrivateKeyShare); impl ShareRecoveryUpdate { @@ -395,21 +396,23 @@ impl ShareRecoveryUpdate { pub fn create_share_updates( // TODO: Decouple from Dkg? 
We don't need any specific Dkg instance here, just some params etc dkg: &Dkg, - x_r: &DomainPoint, - ) -> Result> { + x_r: &DomainPoint, + ) -> Result> { let rng = &mut thread_rng(); - let updates = + let update_map = crate::refresh::ShareRecoveryUpdate::create_share_updates( - &dkg.0.domain_points(), + &dkg.0.domain_point_map(), &dkg.0.pvss_params.h.into_affine(), x_r, dkg.0.dkg_params.security_threshold(), rng, ) - .iter() - .map(|update| ShareRecoveryUpdate(update.0.clone())) + .into_iter() + .map(|(share_index, share_update)| { + (share_index, ShareRecoveryUpdate(share_update.0.clone())) + }) .collect(); - Ok(updates) + Ok(update_map) } pub fn to_bytes(&self) -> Result> { @@ -426,17 +429,21 @@ impl ShareRecoveryUpdate { pub struct ShareRefreshUpdate(pub crate::ShareRefreshUpdate); impl ShareRefreshUpdate { - pub fn create_share_updates(dkg: &Dkg) -> Result> { + pub fn create_share_updates( + dkg: &Dkg, + ) -> Result> { let rng = &mut thread_rng(); let updates = crate::refresh::ShareRefreshUpdate::create_share_updates( - &dkg.0.domain_points(), + &dkg.0.domain_point_map(), &dkg.0.pvss_params.h.into_affine(), dkg.0.dkg_params.security_threshold(), rng, ) .into_iter() - .map(ShareRefreshUpdate) - .collect(); + .map(|(share_index, share_update)| { + (share_index, ShareRefreshUpdate(share_update)) + }) + .collect::>(); Ok(updates) } @@ -499,21 +506,20 @@ impl PrivateKeyShare { /// Recover a private key share from updated private key shares pub fn recover_share_from_updated_private_shares( - x_r: &DomainPoint, - domain_points: &[DomainPoint], - updated_shares: &[UpdatedPrivateKeyShare], + x_r: &DomainPoint, + domain_points: &HashMap, + updated_shares: &HashMap, ) -> Result { - let updated_shares: Vec<_> = updated_shares + let updated_shares = updated_shares .iter() - .cloned() - .map(|updated| updated.0) - .collect(); + .map(|(k, v)| (*k, v.0.clone())) + .collect::>(); let share = crate::PrivateKeyShare::recover_share_from_updated_private_shares( x_r, domain_points, - &updated_shares[..], - ); + &updated_shares, + )?; Ok(PrivateKeyShare(share)) } @@ -544,7 +550,7 @@ impl PrivateKeyShare { aad: &[u8], validator_keypair: &Keypair, share_index: u32, - domain_points: &[DomainPoint], + domain_points: &[DomainPoint], ) -> Result { let share = self.0.create_decryption_share_simple_precomputed( &ciphertext_header.0, @@ -618,9 +624,7 @@ mod test_ferveo_api { (sender.clone(), dkg.0.generate_transcript(rng).unwrap()) }) .collect(); - messages.shuffle(rng); - (messages, validators, validator_keypairs) } @@ -641,7 +645,6 @@ mod test_ferveo_api { // In precomputed variant, the security threshold is equal to the number of shares let security_threshold = shares_num; - let (messages, validators, validator_keypairs) = make_test_inputs( rng, TAU, @@ -649,14 +652,16 @@ mod test_ferveo_api { shares_num, validators_num, ); + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; // Every validator can aggregate the transcripts let me = validators[0].clone(); let dkg = Dkg::new(TAU, shares_num, security_threshold, &validators, &me) .unwrap(); - let pvss_aggregated = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(pvss_aggregated.verify(validators_num, &messages).unwrap()); + let pvss_aggregated = dkg.aggregate_transcripts(messages).unwrap(); + assert!(pvss_aggregated.verify(validators_num, messages).unwrap()); // At this point, any given validator should be able to provide a DKG public key let dkg_public_key = pvss_aggregated.public_key(); @@ -680,9 +685,9 
@@ mod test_ferveo_api { ) .unwrap(); let aggregate = - dkg.aggregate_transcripts(&messages).unwrap(); + dkg.aggregate_transcripts(messages).unwrap(); assert!(pvss_aggregated - .verify(validators_num, &messages) + .verify(validators_num, messages) .unwrap()); // And then each validator creates their own decryption share @@ -712,7 +717,6 @@ mod test_ferveo_api { // Since we're using a precomputed variant, we need all the shares to be able to decrypt // So if we remove one share, we should not be able to decrypt - let decryption_shares = decryption_shares[..shares_num as usize - 1].to_vec(); let shared_secret = share_combine_precomputed(&decryption_shares); @@ -729,8 +733,8 @@ mod test_ferveo_api { #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_server_api_tdec_simple(shares_num: u32, validators_num: u32) { let rng = &mut StdRng::seed_from_u64(0); - let security_threshold = shares_num / 2 + 1; + let security_threshold = shares_num / 2 + 1; let (messages, validators, validator_keypairs) = make_test_inputs( rng, TAU, @@ -738,6 +742,8 @@ mod test_ferveo_api { shares_num, validators_num, ); + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; // Now that every validator holds a dkg instance and a transcript for every other validator, // every validator can aggregate the transcripts @@ -749,8 +755,8 @@ mod test_ferveo_api { &validators[0], ) .unwrap(); - let pvss_aggregated = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(pvss_aggregated.verify(validators_num, &messages).unwrap()); + let pvss_aggregated = dkg.aggregate_transcripts(messages).unwrap(); + assert!(pvss_aggregated.verify(validators_num, messages).unwrap()); // At this point, any given validator should be able to provide a DKG public key let public_key = pvss_aggregated.public_key(); @@ -773,9 +779,9 @@ mod test_ferveo_api { ) .unwrap(); let aggregate = - dkg.aggregate_transcripts(&messages).unwrap(); + dkg.aggregate_transcripts(messages).unwrap(); assert!(aggregate - .verify(validators_num, &messages) + .verify(validators_num, messages) .unwrap()); aggregate .create_decryption_share_simple( @@ -831,6 +837,8 @@ mod test_ferveo_api { shares_num, validators_num, ); + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; // Now that every validator holds a dkg instance and a transcript for every other validator, // every validator can aggregate the transcripts @@ -838,8 +846,8 @@ mod test_ferveo_api { let dkg = Dkg::new(TAU, shares_num, security_threshold, &validators, &me) .unwrap(); - let good_aggregate = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(good_aggregate.verify(validators_num, &messages).is_ok()); + let good_aggregate = dkg.aggregate_transcripts(messages).unwrap(); + assert!(good_aggregate.verify(validators_num, messages).is_ok()); // Test negative cases @@ -848,7 +856,7 @@ mod test_ferveo_api { // Should fail if the number of validators is less than the number of messages assert!(good_aggregate - .verify(messages.len() as u32 - 1, &messages) + .verify(messages.len() as u32 - 1, messages) .is_err()); // Should fail if no transcripts are provided @@ -866,7 +874,7 @@ mod test_ferveo_api { assert!(not_enough_messages.len() < security_threshold as usize); let insufficient_aggregate = dkg.aggregate_transcripts(not_enough_messages).unwrap(); - let result = insufficient_aggregate.verify(validators_num, &messages); + let result = 
insufficient_aggregate.verify(validators_num, messages); assert!(result.is_err()); // Duplicated transcripts @@ -915,7 +923,7 @@ mod test_ferveo_api { .concat(); assert_eq!(mixed_messages.len(), security_threshold as usize); let bad_aggregate = dkg.aggregate_transcripts(&mixed_messages).unwrap(); - let result = bad_aggregate.verify(validators_num, &messages); + let result = bad_aggregate.verify(validators_num, messages); assert!(result.is_err()); } @@ -934,8 +942,8 @@ mod test_ferveo_api { validators_num, ); - // We only need `security_threshold` transcripts to aggregate - let messages = &messages[..security_threshold as usize]; + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; // Create an aggregated transcript on the client side let good_aggregate = AggregatedTranscript::new(messages).unwrap(); @@ -1087,8 +1095,10 @@ mod test_ferveo_api { .unwrap()); // We need to save this domain point to be user in the recovery testing scenario - let mut domain_points = dkgs[0].domain_points(); - let removed_domain_point = domain_points.pop().unwrap(); + let mut domain_points = dkgs[0].0.domain_point_map(); + let removed_domain_point = domain_points + .remove(&validators.last().unwrap().share_index) + .unwrap(); // Remove one participant from the contexts and all nested structure // to simulate off-boarding a validator @@ -1101,7 +1111,7 @@ mod test_ferveo_api { // and check that the shared secret is still the same. let x_r = if recover_at_random_point { // Onboarding a validator with a completely new private key share - DomainPoint::::rand(rng) + DomainPoint::rand(rng) } else { // Onboarding a validator with a private key share recovered from the removed validator removed_domain_point @@ -1123,16 +1133,14 @@ mod test_ferveo_api { // Participants share updates and update their shares // Now, every participant separately: - let updated_shares: Vec<_> = dkgs + let updated_shares: HashMap = dkgs .iter() .map(|validator_dkg| { // Current participant receives updates from other participants let updates_for_participant: Vec<_> = share_updates .values() .map(|updates| { - updates - .get(validator_dkg.me().share_index as usize) - .unwrap() + updates.get(&validator_dkg.me().share_index).unwrap() }) .cloned() .collect(); @@ -1143,7 +1151,7 @@ mod test_ferveo_api { .unwrap(); // And creates updated private key shares - aggregated_transcript + let updated_key_share = aggregated_transcript .get_private_key_share( validator_keypair, validator_dkg.me().share_index, @@ -1152,7 +1160,8 @@ mod test_ferveo_api { .create_updated_private_key_share_for_recovery( &updates_for_participant, ) - .unwrap() + .unwrap(); + (validator_dkg.me().share_index, updated_key_share) }) .collect(); @@ -1215,11 +1224,15 @@ mod test_ferveo_api { ) .unwrap(); decryption_shares.push(new_decryption_share); - domain_points.push(x_r); + domain_points.insert(new_validator_share_index, x_r); assert_eq!(domain_points.len(), validators_num as usize); assert_eq!(decryption_shares.len(), validators_num as usize); - let domain_points = &domain_points[..security_threshold as usize]; + let domain_points = domain_points + .values() + .take(security_threshold as usize) + .cloned() + .collect::>(); let decryption_shares = &decryption_shares[..security_threshold as usize]; assert_eq!(domain_points.len(), security_threshold as usize); @@ -1277,9 +1290,7 @@ mod test_ferveo_api { let updates_for_participant: Vec<_> = share_updates .values() .map(|updates| { - updates - .get(validator_dkg.me().share_index as 
usize) - .unwrap() + updates.get(&validator_dkg.me().share_index).unwrap() }) .cloned() .collect(); diff --git a/ferveo/src/dkg.rs b/ferveo/src/dkg.rs index 087b3069..7612084b 100644 --- a/ferveo/src/dkg.rs +++ b/ferveo/src/dkg.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use ark_ec::pairing::Pairing; use ark_poly::EvaluationDomain; @@ -155,8 +155,8 @@ impl PubliclyVerifiableDkg { /// Return a domain point for the share_index pub fn get_domain_point(&self, share_index: u32) -> Result> { - self.domain_points() - .get(share_index as usize) + self.domain_point_map() + .get(&share_index) .ok_or_else(|| Error::InvalidShareIndex(share_index)) .copied() } @@ -167,6 +167,15 @@ impl PubliclyVerifiableDkg { self.domain.elements().take(self.validators.len()).collect() } + /// Return a map of domain points for the DKG + pub fn domain_point_map(&self) -> HashMap> { + self.domain_points() + .iter() + .enumerate() + .map(|(i, point)| (i as u32, *point)) + .collect::>() + } + /// Verify PVSS transcripts against the set of validators in the DKG fn verify_transcripts( &self, diff --git a/ferveo/src/lib.rs b/ferveo/src/lib.rs index 832f0564..67e501fe 100644 --- a/ferveo/src/lib.rs +++ b/ferveo/src/lib.rs @@ -114,15 +114,14 @@ mod test_dkg_full { use ark_bls12_381::{Bls12_381 as E, Fr, G1Affine}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{UniformRand, Zero}; - use ark_poly::EvaluationDomain; use ark_std::test_rng; use ferveo_common::Keypair; use ferveo_tdec::{ self, DecryptionSharePrecomputed, DecryptionShareSimple, SecretBox, SharedSecret, }; - use itertools::izip; - use rand::seq::SliceRandom; + use itertools::{izip, Itertools}; + use rand::{seq::SliceRandom, Rng}; use test_case::test_case; use super::*; @@ -144,7 +143,7 @@ mod test_dkg_full { assert!(pvss_aggregated .aggregate .verify_aggregation(dkg, transcripts) - .is_ok()); + .unwrap()); let decryption_shares: Vec> = validator_keypairs @@ -163,13 +162,11 @@ mod test_dkg_full { ) .unwrap() }) + // We take only the first `security_threshold` decryption shares + .take(dkg.dkg_params.security_threshold() as usize) .collect(); - let domain_points = &dkg - .domain - .elements() - .take(decryption_shares.len()) - .collect::>(); + let domain_points = &dkg.domain_points()[..decryption_shares.len()]; assert_eq!(domain_points.len(), decryption_shares.len()); let lagrange_coeffs = @@ -178,7 +175,6 @@ mod test_dkg_full { &decryption_shares, &lagrange_coeffs, ); - (pvss_aggregated, decryption_shares, shared_secret) } @@ -195,8 +191,11 @@ mod test_dkg_full { shares_num, validators_num, ); - let transcripts = - messages.iter().map(|m| m.1.clone()).collect::>(); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); let public_key = AggregatedTranscript::from_transcripts(&transcripts) .unwrap() .public_key; @@ -228,26 +227,30 @@ mod test_dkg_full { #[test_case(4, 4; "number of shares (validators) is a power of 2")] #[test_case(7, 7; "number of shares (validators) is not a power of 2")] - #[test_case(4, 6; "number of validators greater than the number of shares")] + // TODO: This test fails: + // #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_dkg_simple_tdec_precomputed(shares_num: u32, validators_num: u32) { let rng = &mut test_rng(); // In precomputed variant, threshold must be equal to shares_num let security_threshold = shares_num; - let (dkg, validator_keypairs, messangers) = + let (dkg, 
validator_keypairs, messages) = setup_dealt_dkg_with_n_validators( security_threshold, shares_num, validators_num, ); - let transcripts = - messangers.iter().map(|m| m.1.clone()).collect::>(); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); let pvss_aggregated = AggregatedTranscript::from_transcripts(&transcripts).unwrap(); - pvss_aggregated + assert!(pvss_aggregated .aggregate .verify_aggregation(&dkg, &transcripts) - .unwrap(); + .unwrap()); let public_key = pvss_aggregated.public_key; let ciphertext = ferveo_tdec::encrypt::( SecretBox::new(MSG.to_vec()), @@ -257,12 +260,6 @@ mod test_dkg_full { ) .unwrap(); - let domain_points = dkg - .domain - .elements() - .take(validator_keypairs.len()) - .collect::>(); - let mut decryption_shares: Vec> = validator_keypairs .iter() @@ -277,18 +274,20 @@ mod test_dkg_full { AAD, validator_keypair, validator.share_index, - &domain_points, + &dkg.domain_points(), ) .unwrap() }) + // We take only the first `security_threshold` decryption shares + .take(dkg.dkg_params.security_threshold() as usize) .collect(); + + // Order of decryption shares is not important in the precomputed variant decryption_shares.shuffle(rng); - assert_eq!(domain_points.len(), decryption_shares.len()); + // Decrypt with precomputed variant let shared_secret = ferveo_tdec::share_combine_precomputed::(&decryption_shares); - - // Combination works, let's decrypt let plaintext = ferveo_tdec::decrypt_with_shared_secret( &ciphertext, AAD, @@ -315,8 +314,11 @@ mod test_dkg_full { shares_num, validators_num, ); - let transcripts = - messages.iter().map(|m| m.1.clone()).collect::>(); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); let public_key = AggregatedTranscript::from_transcripts(&transcripts) .unwrap() .public_key; @@ -393,8 +395,11 @@ mod test_dkg_full { shares_num, validators_num, ); - let transcripts = - messages.iter().map(|m| m.1.clone()).collect::>(); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); let public_key = AggregatedTranscript::from_transcripts(&transcripts) .unwrap() .public_key; @@ -415,22 +420,27 @@ mod test_dkg_full { &transcripts, ); + // TODO: Rewrite this test so that the offboarding of validator + // is done by recreating a DKG instance with a new set of + // validators from the Coordinator, rather than modifying the + // existing DKG instance. + // Remove one participant from the contexts and all nested structure - let removed_validator_addr = - dkg.validators.keys().last().unwrap().clone(); + let removed_validator_index = rng.gen_range(0..validators_num); + let removed_validator_addr = dkg + .validators + .iter() + .find(|(_, v)| v.share_index == removed_validator_index) + .unwrap() + .1 + .address + .clone(); let mut remaining_validators = dkg.validators.clone(); - remaining_validators - .remove(&removed_validator_addr) - .unwrap(); - - let mut remaining_validator_keypairs = validator_keypairs.clone(); - remaining_validator_keypairs - .pop() - .expect("Should have a keypair"); + remaining_validators.remove(&removed_validator_addr); // Remember to remove one domain point too - let mut domain_points = dkg.domain_points(); - domain_points.pop().unwrap(); + let mut domain_points = dkg.domain_point_map(); + domain_points.remove(&removed_validator_index); // Now, we're going to recover a new share at a random point, // and check that the shared secret is still the same. 
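The recovery performed below is ordinary Lagrange interpolation of the updated shares at the new point x_r (this is what `recover_share_from_updated_private_shares` does). A minimal illustrative sketch over f64 — ferveo does the same over the BLS12-381 scalar field, scaling G2 key shares by the Lagrange coefficients; the helper name here is made up for the example and is not part of the patch:

fn lagrange_interpolate_at(points: &[(f64, f64)], x_r: f64) -> f64 {
    // Evaluate the unique interpolating polynomial through `points` at `x_r`:
    // sum_j y_j * prod_{k != j} (x_r - x_k) / (x_j - x_k)
    points
        .iter()
        .enumerate()
        .map(|(j, &(x_j, y_j))| {
            let l_j: f64 = points
                .iter()
                .enumerate()
                .filter(|&(k, _)| k != j)
                .map(|(_, &(x_k, _))| (x_r - x_k) / (x_j - x_k))
                .product();
            y_j * l_j
        })
        .sum()
}

fn main() {
    // Shares of f(x) = 3 + 2x taken at x = 1, 2, 3 recover f(5) = 13 at a new point.
    let shares = [(1.0, 5.0), (2.0, 7.0), (3.0, 9.0)];
    assert!((lagrange_interpolate_at(&shares, 5.0) - 13.0).abs() < 1e-9);
}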
@@ -438,7 +448,7 @@ mod test_dkg_full { // Our random point: let x_r = Fr::rand(rng); - // Each participant prepares an update for each other participant + // Each participant prepares an update for every other participant let share_updates = remaining_validators .keys() .map(|v_addr| { @@ -456,15 +466,13 @@ mod test_dkg_full { // Participants share updates and update their shares // Now, every participant separately: - let updated_shares: Vec<_> = remaining_validators + let updated_shares: HashMap = remaining_validators .values() .map(|validator| { // Current participant receives updates from other participants - let updates_for_participant: Vec<_> = share_updates + let updates_for_validator: Vec<_> = share_updates .values() - .map(|updates| { - updates.get(validator.share_index as usize).unwrap() - }) + .map(|updates| updates.get(&validator.share_index).unwrap()) .cloned() .collect(); @@ -474,15 +482,17 @@ mod test_dkg_full { .unwrap(); // Creates updated private key shares - AggregatedTranscript::from_transcripts(&transcripts) - .unwrap() - .aggregate - .create_updated_private_key_share( - validator_keypair, - validator.share_index, - updates_for_participant.as_slice(), - ) - .unwrap() + let updated_key_share = + AggregatedTranscript::from_transcripts(&transcripts) + .unwrap() + .aggregate + .create_updated_private_key_share( + validator_keypair, + validator.share_index, + updates_for_validator.as_slice(), + ) + .unwrap(); + (validator.share_index, updated_key_share) }) .collect(); @@ -492,14 +502,17 @@ mod test_dkg_full { &x_r, &domain_points, &updated_shares, - ); + ) + .unwrap(); // Get decryption shares from remaining participants - let mut decryption_shares: Vec> = - remaining_validator_keypairs - .iter() - .enumerate() - .map(|(share_index, validator_keypair)| { + let mut decryption_shares = remaining_validators + .values() + .map(|validator| { + let validator_keypair = validator_keypairs + .get(validator.share_index as usize) + .unwrap(); + let decryption_share = AggregatedTranscript::from_transcripts(&transcripts) .unwrap() .aggregate @@ -507,42 +520,57 @@ mod test_dkg_full { &ciphertext.header().unwrap(), AAD, validator_keypair, - share_index as u32, + validator.share_index, ) - .unwrap() - }) - .collect(); + .unwrap(); + (validator.share_index, decryption_share) + }) + // We take only the first `security_threshold - 1` decryption shares + .take((dkg.dkg_params.security_threshold() - 1) as usize) + .collect::>(); // Create a decryption share from a recovered private key share let new_validator_decryption_key = Fr::rand(rng); - decryption_shares.push( - DecryptionShareSimple::create( - &new_validator_decryption_key, - &recovered_key_share.0, - &ciphertext.header().unwrap(), - AAD, - &dkg.pvss_params.g_inv(), - ) - .unwrap(), - ); - - domain_points.push(x_r); - assert_eq!(domain_points.len(), validators_num as usize); - assert_eq!(decryption_shares.len(), validators_num as usize); - - // TODO: Maybe parametrize this test with [1..] 
and [..threshold] - let domain_points = &domain_points[..security_threshold as usize]; - let decryption_shares = - &decryption_shares[..security_threshold as usize]; - assert_eq!(domain_points.len(), security_threshold as usize); - assert_eq!(decryption_shares.len(), security_threshold as usize); + let new_decryption_share = DecryptionShareSimple::create( + &new_validator_decryption_key, + &recovered_key_share.0, + &ciphertext.header().unwrap(), + AAD, + &dkg.pvss_params.g_inv(), + ) + .unwrap(); + decryption_shares.insert(removed_validator_index, new_decryption_share); + domain_points.insert(removed_validator_index, x_r); + + // We need to make sure that the domain points and decryption shares are ordered + // by the share index, so that the lagrange basis is calculated correctly + + let mut domain_points_ = vec![]; + let mut decryption_shares_ = vec![]; + for share_index in decryption_shares.keys().sorted() { + domain_points_.push( + *domain_points + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index)) + .unwrap(), + ); + decryption_shares_.push( + decryption_shares + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index)) + .unwrap() + .clone(), + ); + } + assert_eq!(domain_points_.len(), security_threshold as usize); + assert_eq!(decryption_shares_.len(), security_threshold as usize); - let lagrange = ferveo_tdec::prepare_combine_simple::(domain_points); + let lagrange = + ferveo_tdec::prepare_combine_simple::(&domain_points_); let new_shared_secret = ferveo_tdec::share_combine_simple::( - decryption_shares, + &decryption_shares_, &lagrange, ); - assert_eq!( old_shared_secret, new_shared_secret, "Shared secret reconstruction failed" @@ -565,8 +593,11 @@ mod test_dkg_full { shares_num, validators_num, ); - let transcripts = - messages.iter().map(|m| m.1.clone()).collect::>(); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); let public_key = AggregatedTranscript::from_transcripts(&transcripts) .unwrap() .public_key; @@ -593,7 +624,7 @@ mod test_dkg_full { .keys() .map(|v_addr| { let deltas_i = ShareRefreshUpdate::create_share_updates( - &dkg.domain_points(), + &dkg.domain_point_map(), &dkg.pvss_params.h.into_affine(), dkg.dkg_params.security_threshold(), rng, @@ -613,10 +644,7 @@ mod test_dkg_full { let updates_for_participant: Vec<_> = share_updates .values() .map(|updates| { - updates - .get(validator.share_index as usize) - .cloned() - .unwrap() + updates.get(&validator.share_index).cloned().unwrap() }) .collect(); @@ -659,8 +687,15 @@ mod test_dkg_full { ) .unwrap() }) + // We take only the first `security_threshold` decryption shares + .take(dkg.dkg_params.security_threshold() as usize) .collect(); + // Order of decryption shares is not important, but since we are using low-level + // API here to performa a refresh for testing purpose, we will not shuffle + // the shares this time + // decryption_shares.shuffle(rng); + let lagrange = ferveo_tdec::prepare_combine_simple::( &dkg.domain_points()[..security_threshold as usize], ); @@ -668,7 +703,6 @@ mod test_dkg_full { &decryption_shares[..security_threshold as usize], &lagrange, ); - assert_eq!(old_shared_secret, new_shared_secret); } } diff --git a/ferveo/src/refresh.rs b/ferveo/src/refresh.rs index bf633fa2..daee4f69 100644 --- a/ferveo/src/refresh.rs +++ b/ferveo/src/refresh.rs @@ -1,4 +1,4 @@ -use std::{ops::Mul, usize}; +use std::{collections::HashMap, ops::Mul, usize}; use ark_ec::{pairing::Pairing, CurveGroup}; use ark_ff::Zero; @@ -8,7 +8,7 @@ 
use ferveo_tdec::{ lagrange_basis_at, prepare_combine_simple, CiphertextHeader, DecryptionSharePrecomputed, DecryptionShareSimple, }; -use itertools::zip_eq; +use itertools::{zip_eq, Itertools}; use rand_core::RngCore; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use zeroize::ZeroizeOnDrop; @@ -54,15 +54,35 @@ impl PrivateKeyShare { /// `x_r` is the point at which the share is to be recovered pub fn recover_share_from_updated_private_shares( x_r: &DomainPoint, - domain_points: &[DomainPoint], - updated_private_shares: &[UpdatedPrivateKeyShare], - ) -> PrivateKeyShare { + domain_points: &HashMap>, + updated_shares: &HashMap>, + ) -> Result> { + // Pick the domain points and updated shares according to share index + let mut domain_points_ = vec![]; + let mut updated_shares_ = vec![]; + for share_index in updated_shares.keys().sorted() { + domain_points_.push( + *domain_points + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index))?, + ); + updated_shares_.push( + updated_shares + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index))? + .0 + .clone(), + ); + } + // Interpolate new shares to recover y_r - let lagrange = lagrange_basis_at::(domain_points, x_r); - let prods = zip_eq(updated_private_shares, lagrange) - .map(|(y_j, l)| y_j.0 .0.mul(l)); + let lagrange = lagrange_basis_at::(&domain_points_, x_r); + let prods = + zip_eq(updated_shares_, lagrange).map(|(y_j, l)| y_j.0.mul(l)); let y_r = prods.fold(E::G2::zero(), |acc, y_j| acc + y_j); - PrivateKeyShare(ferveo_tdec::PrivateKeyShare(y_r.into_affine())) + Ok(PrivateKeyShare(ferveo_tdec::PrivateKeyShare( + y_r.into_affine(), + ))) } pub fn create_decryption_share_simple( @@ -97,7 +117,7 @@ impl PrivateKeyShare { let lagrange_coeff = &lagrange_coeffs .get(share_index as usize) .ok_or(Error::InvalidShareIndex(share_index))?; - DecryptionSharePrecomputed::new( + DecryptionSharePrecomputed::create( share_index as usize, &validator_keypair.decryption_key, &self.0, @@ -154,12 +174,12 @@ impl PrivateKeyShareUpdate for ShareRecoveryUpdate { impl ShareRecoveryUpdate { /// From PSS paper, section 4.2.1, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) pub fn create_share_updates( - domain_points: &[DomainPoint], + domain_points: &HashMap>, h: &E::G2Affine, x_r: &DomainPoint, threshold: u32, rng: &mut impl RngCore, - ) -> Vec> { + ) -> HashMap> { // Update polynomial has root at x_r prepare_share_updates_with_root::( domain_points, @@ -168,8 +188,8 @@ impl ShareRecoveryUpdate { threshold, rng, ) - .iter() - .map(|p| Self(p.clone())) + .into_iter() + .map(|(share_index, share_update)| (share_index, Self(share_update))) .collect() } } @@ -195,11 +215,11 @@ impl PrivateKeyShareUpdate for ShareRefreshUpdate { impl ShareRefreshUpdate { /// From PSS paper, section 4.2.1, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) pub fn create_share_updates( - domain_points: &[DomainPoint], + domain_points: &HashMap>, h: &E::G2Affine, threshold: u32, rng: &mut impl RngCore, - ) -> Vec> { + ) -> HashMap> { // Update polynomial has root at 0 prepare_share_updates_with_root::( domain_points, @@ -208,9 +228,10 @@ impl ShareRefreshUpdate { threshold, rng, ) - .iter() - .cloned() - .map(|p| ShareRefreshUpdate(p)) + .into_iter() + .map(|(share_index, share_update)| { + (share_index, ShareRefreshUpdate(share_update)) + }) .collect() } } @@ -221,24 +242,25 @@ impl ShareRefreshUpdate { /// The result is a list of share updates. 
/// We represent the share updates as `InnerPrivateKeyShare` to avoid dependency on the concrete implementation of `PrivateKeyShareUpdate`. fn prepare_share_updates_with_root( - domain_points: &[DomainPoint], + domain_points: &HashMap>, h: &E::G2Affine, root: &DomainPoint, threshold: u32, rng: &mut impl RngCore, -) -> Vec> { - // Generate a new random polynomial with defined root +) -> HashMap> { + // Generate a new random polynomial with a defined root let d_i = make_random_polynomial_with_root::(threshold - 1, root, rng); // Now, we need to evaluate the polynomial at each of participants' indices domain_points .iter() - .map(|x_i| { + .map(|(share_index, x_i)| { let eval = d_i.evaluate(x_i); - h.mul(eval).into_affine() + let share_update = + ferveo_tdec::PrivateKeyShare(h.mul(eval).into_affine()); + (*share_index, share_update) }) - .map(ferveo_tdec::PrivateKeyShare) - .collect() + .collect::>() } /// Generate a random polynomial with a given root @@ -288,13 +310,14 @@ mod tests_refresh { threshold: u32, x_r: &Fr, remaining_participants: &[PrivateDecryptionContextSimple], - ) -> Vec> { + ) -> HashMap> { // Each participant prepares an update for each other participant - let domain_points = remaining_participants[0] - .public_decryption_contexts + let domain_points = remaining_participants .iter() - .map(|c| c.domain) - .collect::>(); + .map(|c| { + (c.index as u32, c.public_decryption_contexts[c.index].domain) + }) + .collect::>(); let h = remaining_participants[0].public_decryption_contexts[0].h; let share_updates = remaining_participants .iter() @@ -306,25 +329,29 @@ mod tests_refresh { threshold, rng, ); - (p.index, share_updates) + (p.index as u32, share_updates) }) - .collect::>(); + .collect::>(); // Participants share updates and update their shares - let updated_private_key_shares: Vec<_> = remaining_participants + let updated_private_key_shares = remaining_participants .iter() .map(|p| { // Current participant receives updates from other participants let updates_for_participant: Vec<_> = share_updates .values() - .map(|updates| updates.get(p.index).cloned().unwrap()) + .map(|updates| { + updates.get(&(p.index as u32)).cloned().unwrap() + }) .collect(); // And updates their share - PrivateKeyShare(p.private_key_share.clone()) - .create_updated_key_share(&updates_for_participant) + let updated_share = + PrivateKeyShare(p.private_key_share.clone()) + .create_updated_key_share(&updates_for_participant); + (p.index as u32, updated_share) }) - .collect(); + .collect::>(); updated_private_key_shares } @@ -371,53 +398,65 @@ mod tests_refresh { &x_r, &remaining_participants, ); + // We only need `security_threshold` updates to recover the original share + let updated_private_key_shares = updated_private_key_shares + .into_iter() + .take(security_threshold as usize) + .collect::>(); // Now, we have to combine new share fragments into a new share - let domain_points = &remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); + let domain_points = remaining_participants + .into_iter() + .map(|ctxt| { + ( + ctxt.index as u32, + ctxt.public_decryption_contexts[ctxt.index].domain, + ) + }) + .collect::>(); let new_private_key_share = PrivateKeyShare::recover_share_from_updated_private_shares( &x_r, - &domain_points[..security_threshold as usize], - &updated_private_key_shares[..security_threshold as usize], - ); - + &domain_points, + &updated_private_key_shares, + ) + .unwrap(); assert_eq!(new_private_key_share, original_private_key_share); 
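        // Why the threshold matters: each share update comes from a polynomial of degree
        // `security_threshold - 1`, so any `security_threshold` evaluations determine it exactly,
        // while interpolating from fewer points reconstructs a different polynomial and hence a
        // wrong value at `x_r` — which is what the negative case below demonstrates.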
// If we don't have enough private share updates, the resulting private share will be incorrect - assert_eq!(domain_points.len(), updated_private_key_shares.len()); + let updated_private_key_shares = updated_private_key_shares + .into_iter() + .take(security_threshold as usize - 1) + .collect::>(); + let incorrect_private_key_share = PrivateKeyShare::recover_share_from_updated_private_shares( &x_r, - &domain_points[..(security_threshold - 1) as usize], - &updated_private_key_shares - [..(security_threshold - 1) as usize], - ); - + &domain_points, + &updated_private_key_shares, + ) + .unwrap(); assert_ne!(incorrect_private_key_share, original_private_key_share); } /// Ñ parties (where t <= Ñ <= N) jointly execute a "share recovery" algorithm, and the output is 1 new share. /// The new share is independent of the previously existing shares. We can use this to on-board a new participant into an existing cohort. - #[test_case(4, 4; "number of shares (validators) is a power of 2")] - #[test_case(7, 7; "number of shares (validators) is not a power of 2")] - fn tdec_simple_variant_share_recovery_at_random_point( - shares_num: u32, - _validators_num: u32, - ) { + #[test_case(4; "number of shares (validators) is a power of 2")] + #[test_case(7; "number of shares (validators) is not a power of 2")] + fn tdec_simple_variant_share_recovery_at_random_point(shares_num: u32) { let rng = &mut test_rng(); - let threshold = shares_num * 2 / 3; + let security_threshold = shares_num * 2 / 3; - let (_, shared_private_key, mut contexts) = - setup_simple::(threshold as usize, shares_num as usize, rng); + let (_, shared_private_key, mut contexts) = setup_simple::( + security_threshold as usize, + shares_num as usize, + rng, + ); // Prepare participants // Remove one participant from the contexts and all nested structures - contexts.pop().unwrap(); + let removed_participant = contexts.pop().unwrap(); let mut remaining_participants = contexts.clone(); for p in &mut remaining_participants { p.public_decryption_contexts.pop().unwrap(); @@ -428,52 +467,65 @@ mod tests_refresh { // Our random point let x_r = ScalarField::rand(rng); - // Each participant prepares an update for each other participant, and uses it to create a new share fragment - let share_recovery_fragmetns = create_updated_private_key_shares( + // Each remaining participant prepares an update for every other participant, and uses it to create a new share fragment + let share_recovery_updates = create_updated_private_key_shares( rng, - threshold, + security_threshold, &x_r, &remaining_participants, ); + // We only need `threshold` updates to recover the original share + let share_recovery_updates = share_recovery_updates + .into_iter() + .take(security_threshold as usize) + .collect::>(); + let domain_points = &mut remaining_participants + .into_iter() + .map(|ctxt| { + ( + ctxt.index as u32, + ctxt.public_decryption_contexts[ctxt.index].domain, + ) + }) + .collect::>(); // Now, we have to combine new share fragments into a new share - let domain_points = &mut remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); let recovered_private_key_share = PrivateKeyShare::recover_share_from_updated_private_shares( &x_r, - &domain_points[..threshold as usize], - &share_recovery_fragmetns[..threshold as usize], - ); - - let mut private_shares = contexts - .iter() - .cloned() - .map(|ctxt| ctxt.private_key_share) - .collect::>(); + domain_points, + &share_recovery_updates, + ) + .unwrap(); // Finally, let's 
recreate the shared private key from some original shares and the recovered one - domain_points.push(x_r); - private_shares.push(recovered_private_key_share.0.clone()); + let mut private_shares = contexts + .into_iter() + .map(|ctxt| (ctxt.index as u32, ctxt.private_key_share)) + .collect::>(); + + // Need to update these to account for recovered private key share + domain_points.insert(removed_participant.index as u32, x_r); + private_shares.insert( + removed_participant.index as u32, + recovered_private_key_share.0.clone(), + ); // This is a workaround for a type mismatch - We need to convert the private shares to updated private shares // This is just to test that we are able to recover the shared private key from the updated private shares let updated_private_key_shares = private_shares - .iter() - .cloned() - .map(UpdatedPrivateKeyShare::new) - .collect::>(); - let start_from = shares_num - threshold; + .into_iter() + .map(|(share_index, share)| { + (share_index, UpdatedPrivateKeyShare(share)) + }) + .collect::>(); let new_shared_private_key = PrivateKeyShare::recover_share_from_updated_private_shares( &ScalarField::zero(), - &domain_points[start_from as usize..], - &updated_private_key_shares[start_from as usize..], - ); - + domain_points, + &updated_private_key_shares, + ) + .unwrap(); assert_eq!(shared_private_key, new_shared_private_key.0); } @@ -483,16 +535,19 @@ mod tests_refresh { #[test_matrix([4, 7, 11, 16])] fn tdec_simple_variant_share_refreshing(shares_num: usize) { let rng = &mut test_rng(); - let threshold = shares_num * 2 / 3; + let security_threshold = shares_num * 2 / 3; let (_, private_key_share, contexts) = - setup_simple::(threshold, shares_num, rng); - - let domain_points = &contexts[0] - .public_decryption_contexts + setup_simple::(security_threshold, shares_num, rng); + let domain_points = &contexts .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); + .map(|ctxt| { + ( + ctxt.index as u32, + ctxt.public_decryption_contexts[ctxt.index].domain, + ) + }) + .collect::>(); let h = contexts[0].public_decryption_contexts[0].h; // Each participant prepares an update for each other participant: @@ -503,37 +558,43 @@ mod tests_refresh { ShareRefreshUpdate::::create_share_updates( domain_points, &h, - threshold as u32, + security_threshold as u32, rng, ); - (p.index, share_updates) + (p.index as u32, share_updates) }) - .collect::>(); + .collect::>(); - // Participants "refresh" their shares with the updates from each other: - let refreshed_shares: Vec<_> = contexts + // Participants refresh their shares with the updates from each other: + let refreshed_shares = contexts .iter() .map(|p| { // Current participant receives updates from other participants let updates_for_participant: Vec<_> = share_updates .values() - .map(|updates| updates.get(p.index).cloned().unwrap()) + .map(|updates| { + updates.get(&(p.index as u32)).cloned().unwrap() + }) .collect(); // And creates a new, refreshed share - PrivateKeyShare(p.private_key_share.clone()) - .create_updated_key_share(&updates_for_participant) + let updated_share = + PrivateKeyShare(p.private_key_share.clone()) + .create_updated_key_share(&updates_for_participant); + (p.index as u32, updated_share) }) - .collect(); + // We only need `threshold` refreshed shares to recover the original share + .take(security_threshold) + .collect::>>(); // Finally, let's recreate the shared private key from the refreshed shares let new_shared_private_key = PrivateKeyShare::recover_share_from_updated_private_shares( &ScalarField::zero(), - 
&domain_points[..threshold], - &refreshed_shares[..threshold], - ); - + domain_points, + &refreshed_shares, + ) + .unwrap(); assert_eq!(private_key_share, new_shared_private_key.0); } } diff --git a/ferveo/src/test_common.rs b/ferveo/src/test_common.rs index df28f553..eea3a7da 100644 --- a/ferveo/src/test_common.rs +++ b/ferveo/src/test_common.rs @@ -127,6 +127,7 @@ pub fn make_messages( let sender = dkg.me.clone(); messages.push((sender, transcript)); } + messages.shuffle(rng); messages } @@ -139,7 +140,7 @@ pub fn setup_dealt_dkg_with_n_transcript_dealt( let rng = &mut ark_std::test_rng(); // Gather everyone's transcripts - // Use only the first `transcripts_to_use` transcripts + // We only need the first `transcripts_to_use` transcripts let mut transcripts: Vec<_> = (0..transcripts_to_use) .map(|my_index| { let (dkg, _) = setup_dkg_for_n_validators(