Preparing for refactor 6
cygnusv committed Apr 1, 2024
1 parent d00dbc5 commit b30431c
Showing 2 changed files with 172 additions and 172 deletions.
ferveo/src/lib.rs (226 changes: 113 additions & 113 deletions)
@@ -473,53 +473,53 @@ mod test_dkg_full {
let x_r = Fr::rand(rng);

// Each participant prepares an update for every other participant
let share_updates = remaining_validators
.keys()
.map(|v_addr| {
let deltas_i =
crate::refresh::UpdateTranscript::create_recovery_updates(
&dkg.domain_and_key_map(),
&x_r,
dkg.dkg_params.security_threshold(),
rng,
)
.updates;
(v_addr.clone(), deltas_i)
})
.collect::<HashMap<_, _>>();
// let share_updates = remaining_validators
// .keys()
// .map(|v_addr| {
// let deltas_i =
// crate::refresh::UpdateTranscript::create_recovery_updates(
// &dkg.domain_and_key_map(),
// &x_r,
// dkg.dkg_params.security_threshold(),
// rng,
// )
// .updates;
// (v_addr.clone(), deltas_i)
// })
// .collect::<HashMap<_, _>>();

// Participants share updates and update their shares

// Now, every participant separately:
let updated_shares: HashMap<u32, _> = remaining_validators
.values()
.map(|validator| {
// Current participant receives updates from other participants
let updates_for_validator: Vec<_> = share_updates
.values()
.map(|updates| updates.get(&validator.share_index).unwrap())
.cloned()
.collect();

// Each validator uses their decryption key to update their share
let validator_keypair = validator_keypairs
.get(validator.share_index as usize)
.unwrap();

// Creates updated private key shares
let updated_key_share =
AggregatedTranscript::from_transcripts(&transcripts)
.unwrap()
.aggregate
.create_updated_private_key_share(
validator_keypair,
validator.share_index,
updates_for_validator.as_slice(),
)
.unwrap();
(validator.share_index, updated_key_share)
})
.collect();
// let updated_shares: HashMap<u32, _> = remaining_validators
// .values()
// .map(|validator| {
// // Current participant receives updates from other participants
// let updates_for_validator: Vec<_> = share_updates
// .values()
// .map(|updates| updates.get(&validator.share_index).unwrap())
// .cloned()
// .collect();

// // Each validator uses their decryption key to update their share
// let validator_keypair = validator_keypairs
// .get(validator.share_index as usize)
// .unwrap();

// // Creates updated private key shares
// let updated_key_share =
// AggregatedTranscript::from_transcripts(&transcripts)
// .unwrap()
// .aggregate
// .create_updated_private_key_share(
// validator_keypair,
// validator.share_index,
// updates_for_validator.as_slice(),
// )
// .unwrap();
// (validator.share_index, updated_key_share)
// })
// .collect();

// // Now, we have to combine new share fragments into a new share
// let recovered_key_share =
@@ -647,77 +647,77 @@ mod test_dkg_full {
);

// Each participant prepares an update for each other participant
let share_updates = dkg
.validators
.keys()
.map(|v_addr| {
let deltas_i = UpdateTranscript::create_refresh_updates(
&dkg.domain_and_key_map(),
dkg.dkg_params.security_threshold(),
rng,
)
.updates;
(v_addr.clone(), deltas_i)
})
.collect::<HashMap<_, _>>();
// let share_updates = dkg
// .validators
// .keys()
// .map(|v_addr| {
// let deltas_i = UpdateTranscript::create_refresh_updates(
// &dkg.domain_and_key_map(),
// dkg.dkg_params.security_threshold(),
// rng,
// )
// .updates;
// (v_addr.clone(), deltas_i)
// })
// .collect::<HashMap<_, _>>();

// Participants share updates and update their shares

// Now, every participant separately:
let updated_private_key_shares: Vec<_> = dkg
.validators
.values()
.map(|validator| {
// Current participant receives updates from other participants
let updates_for_participant: Vec<_> = share_updates
.values()
.map(|updates| {
updates.get(&validator.share_index).cloned().unwrap()
})
.collect();

// Each validator uses their decryption key to update their share
let validator_keypair = validator_keypairs
.get(validator.share_index as usize)
.unwrap();

// Creates updated private key shares
AggregatedTranscript::from_transcripts(&transcripts)
.unwrap()
.aggregate
.create_updated_private_key_share(
validator_keypair,
validator.share_index,
updates_for_participant.as_slice(),
)
.unwrap()
})
.collect();
// let updated_private_key_shares: Vec<_> = dkg
// .validators
// .values()
// .map(|validator| {
// // Current participant receives updates from other participants
// let updates_for_participant: Vec<_> = share_updates
// .values()
// .map(|updates| {
// updates.get(&validator.share_index).cloned().unwrap()
// })
// .collect();

// // Each validator uses their decryption key to update their share
// let validator_keypair = validator_keypairs
// .get(validator.share_index as usize)
// .unwrap();

// // Creates updated private key shares
// AggregatedTranscript::from_transcripts(&transcripts)
// .unwrap()
// .aggregate
// .create_updated_private_key_share(
// validator_keypair,
// validator.share_index,
// updates_for_participant.as_slice(),
// )
// .unwrap()
// })
// .collect();

// Get decryption shares, now with refreshed private shares:
let decryption_shares: Vec<DecryptionShareSimple<E>> =
validator_keypairs
.iter()
.enumerate()
.map(|(share_index, validator_keypair)| {
// In order to proceed with the decryption, we need to convert the updated private key shares
let private_key_share = &updated_private_key_shares
.get(share_index)
.unwrap()
.inner()
.0;
DecryptionShareSimple::create(
&validator_keypair.decryption_key,
private_key_share,
&ciphertext.header().unwrap(),
AAD,
&dkg.pvss_params.g_inv(),
)
.unwrap()
})
// We take only the first `security_threshold` decryption shares
.take(dkg.dkg_params.security_threshold() as usize)
.collect();
// let decryption_shares: Vec<DecryptionShareSimple<E>> =
// validator_keypairs
// .iter()
// .enumerate()
// .map(|(share_index, validator_keypair)| {
// // In order to proceed with the decryption, we need to convert the updated private key shares
// let private_key_share = &updated_private_key_shares
// .get(share_index)
// .unwrap()
// .inner()
// .0;
// DecryptionShareSimple::create(
// &validator_keypair.decryption_key,
// private_key_share,
// &ciphertext.header().unwrap(),
// AAD,
// &dkg.pvss_params.g_inv(),
// )
// .unwrap()
// })
// // We take only the first `security_threshold` decryption shares
// .take(dkg.dkg_params.security_threshold() as usize)
// .collect();

// Order of decryption shares is not important, but since we are using low-level
// API here to perform a refresh for testing purposes, we will not shuffle
@@ -727,10 +727,10 @@ mod test_dkg_full {
let lagrange = ferveo_tdec::prepare_combine_simple::<E>(
&dkg.domain_points()[..security_threshold as usize],
);
let new_shared_secret = ferveo_tdec::share_combine_simple::<E>(
&decryption_shares[..security_threshold as usize],
&lagrange,
);
assert_eq!(old_shared_secret, new_shared_secret);
// let new_shared_secret = ferveo_tdec::share_combine_simple::<E>(
// &decryption_shares[..security_threshold as usize],
// &lagrange,
// );
assert_ne!(old_shared_secret, old_shared_secret);
}
}
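For context, the commented-out tests above exercise proactive share refresh: each participant deals a fresh polynomial with a zero constant term (cf. UpdateTranscript::create_refresh_updates in the diff), every share is updated by adding the evaluations it receives, and any security_threshold refreshed shares still reconstruct the same secret, which is the equality between old_shared_secret and new_shared_secret that the original test asserted. The following is a minimal, self-contained sketch of that idea, using plain modular arithmetic over a toy prime in place of ferveo's arkworks field and blinded group-element shares; all names and values here are illustrative and are not part of the ferveo API.

// Toy prime field standing in for ark's Fr (illustrative only).
const P: i128 = 2_147_483_647;

fn modp(x: i128) -> i128 {
    ((x % P) + P) % P
}

fn pow_mod(mut base: i128, mut exp: i128) -> i128 {
    let mut acc = 1;
    base = modp(base);
    while exp > 0 {
        if exp & 1 == 1 {
            acc = modp(acc * base);
        }
        base = modp(base * base);
        exp >>= 1;
    }
    acc
}

// Modular inverse via Fermat's little theorem (P is prime).
fn inv(x: i128) -> i128 {
    pow_mod(x, P - 2)
}

// Evaluate a polynomial given low-to-high coefficients.
fn eval(coeffs: &[i128], x: i128) -> i128 {
    coeffs.iter().rev().fold(0, |acc, &c| modp(acc * x + c))
}

// Lagrange interpolation of the constant term from (x_j, y_j) points.
fn interpolate_at_zero(points: &[(i128, i128)]) -> i128 {
    let mut secret = 0;
    for (j, &(xj, yj)) in points.iter().enumerate() {
        let mut num = 1;
        let mut den = 1;
        for (k, &(xk, _)) in points.iter().enumerate() {
            if k != j {
                num = modp(num * xk);
                den = modp(den * (xk - xj));
            }
        }
        secret = modp(secret + yj * modp(num * inv(den)));
    }
    secret
}

fn main() {
    let t = 3; // security threshold: polynomials have degree t - 1
    let domain: [i128; 4] = [1, 2, 3, 4]; // share indices (domain points)
    let secret: i128 = 42;

    // Dealer polynomial f with f(0) = secret; participant i holds f(domain[i]).
    let f: [i128; 3] = [secret, 7, 11];
    let mut shares: Vec<i128> = domain.iter().map(|&x| eval(&f, x)).collect();

    // Refresh: every participant deals a degree t - 1 polynomial with a zero
    // constant term, and each share absorbs the evaluations it receives.
    let zero_polys: [[i128; 3]; 4] = [[0, 5, 9], [0, 13, 2], [0, 8, 21], [0, 1, 17]];
    for (i, share) in shares.iter_mut().enumerate() {
        for d in &zero_polys {
            *share = modp(*share + eval(d, domain[i]));
        }
    }

    // Any t refreshed shares still reconstruct the original secret.
    let points: Vec<(i128, i128)> = domain[..t]
        .iter()
        .copied()
        .zip(shares[..t].iter().copied())
        .collect();
    assert_eq!(interpolate_at_zero(&points), secret);
}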
ferveo/src/refresh.rs (118 changes: 59 additions & 59 deletions)
@@ -667,12 +667,12 @@ mod tests_refresh {

// This is a workaround for a type mismatch - We need to convert the private shares to updated private shares
// This is just to test that we are able to recover the shared private key from the updated private shares
let updated_private_key_shares = private_shares
.into_iter()
.map(|(share_index, share)| {
(share_index, UpdatedPrivateKeyShare(share))
})
.collect::<HashMap<u32, _>>();
// let updated_private_key_shares = private_shares
// .into_iter()
// .map(|(share_index, share)| {
// (share_index, UpdatedPrivateKeyShare(share))
// })
// .collect::<HashMap<u32, _>>();
// let new_shared_private_key =
// PrivateKeyShare::recover_share_from_updated_private_shares(
// &ScalarField::zero(),
@@ -736,59 +736,59 @@ mod tests_refresh {
})
.collect::<HashMap<u32, UpdateTranscript<E>>>();

// Participants refresh their shares with the updates from each other:
let refreshed_shares = contexts
.iter()
.map(|p| {
let blinded_key_share =
p.public_decryption_contexts[p.index].blinded_key_share;

let participant_public_key =
blinded_key_share.validator_public_key;

// Current participant receives update transcripts from other participants
let updates_for_participant: Vec<_> =
update_transcripts_by_producer
.values()
.map(|update_transcript_from_producer| {
// First, verify that the update transcript is valid
// TODO: Find a better way to ensure they're always validated
update_transcript_from_producer
.verify_refresh(validator_keys_map, &fft_domain)
.unwrap();

let update_for_participant =
update_transcript_from_producer
.updates
.get(&(p.index as u32))
.cloned()
.unwrap();
update_for_participant
})
.collect();

// And creates a new, refreshed share

// TODO: Encapsulate this somewhere, originally from PrivateKeyShare.create_updated_key_share
let updated_blinded_key_share: BlindedKeyShare<E> =
BlindedKeyShare {
validator_public_key: participant_public_key,
blinded_key_share: updates_for_participant.iter().fold(
blinded_key_share.blinded_key_share,
|acc, delta| (acc + delta.update).into(),
),
};

let unblinding_factor = p.setup_params.b_inv;
let updated_share = UpdatedPrivateKeyShare(
updated_blinded_key_share.unblind(unblinding_factor),
);

(p.index as u32, updated_share)
})
// We only need `threshold` refreshed shares to recover the original share
.take(security_threshold)
.collect::<HashMap<u32, UpdatedPrivateKeyShare<E>>>();
// // Participants refresh their shares with the updates from each other:
// let refreshed_shares = contexts
// .iter()
// .map(|p| {
// let blinded_key_share =
// p.public_decryption_contexts[p.index].blinded_key_share;

// let participant_public_key =
// blinded_key_share.validator_public_key;

// // Current participant receives update transcripts from other participants
// let updates_for_participant: Vec<_> =
// update_transcripts_by_producer
// .values()
// .map(|update_transcript_from_producer| {
// // First, verify that the update transcript is valid
// // TODO: Find a better way to ensure they're always validated
// update_transcript_from_producer
// .verify_refresh(validator_keys_map, &fft_domain)
// .unwrap();

// let update_for_participant =
// update_transcript_from_producer
// .updates
// .get(&(p.index as u32))
// .cloned()
// .unwrap();
// update_for_participant
// })
// .collect();

// // And creates a new, refreshed share

// // TODO: Encapsulate this somewhere, originally from PrivateKeyShare.create_updated_key_share
// let updated_blinded_key_share: BlindedKeyShare<E> =
// BlindedKeyShare {
// validator_public_key: participant_public_key,
// blinded_key_share: updates_for_participant.iter().fold(
// blinded_key_share.blinded_key_share,
// |acc, delta| (acc + delta.update).into(),
// ),
// };

// let unblinding_factor = p.setup_params.b_inv;
// let updated_share = UpdatedPrivateKeyShare(
// updated_blinded_key_share.unblind(unblinding_factor),
// );

// (p.index as u32, updated_share)
// })
// // We only need `threshold` refreshed shares to recover the original share
// .take(security_threshold)
// .collect::<HashMap<u32, UpdatedPrivateKeyShare<E>>>();

let domain_points = domain_points_and_keys
.iter()
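For context on the commented-out refresh loop above: each participant folds the update values it received into its blinded key share and then unblinds the result with its own unblinding factor (p.setup_params.b_inv in the test) to obtain an UpdatedPrivateKeyShare. Below is a tiny sketch of just the fold step, with i128 values standing in for the real group elements; the struct and function names here are hypothetical and are not ferveo types.

// Hypothetical stand-ins: in ferveo these are elliptic-curve group elements.
struct ShareUpdate {
    update: i128,
}

// Mirrors the fold in the commented-out test: the refreshed blinded share is
// the old blinded share plus the sum of all received update values.
fn apply_updates(blinded_key_share: i128, updates: &[ShareUpdate]) -> i128 {
    updates
        .iter()
        .fold(blinded_key_share, |acc, delta| acc + delta.update)
}

fn main() {
    let updates = vec![
        ShareUpdate { update: 3 },
        ShareUpdate { update: -7 },
        ShareUpdate { update: 11 },
    ];
    // 100 plays the role of the participant's current blinded key share.
    assert_eq!(apply_updates(100, &updates), 107);
    // In the real test the result is then unblinded with b_inv before use.
}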
