Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 14 additions & 7 deletions common/dkg/benches/benchmarks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ use nym_dkg::bte::{
};
use nym_dkg::interpolation::polynomial::Polynomial;
use nym_dkg::{combine_shares, Dealing, NodeIndex, Share, Threshold};
use rand::CryptoRng;
use rand_core::{RngCore, SeedableRng};
use std::collections::BTreeMap;

Expand All @@ -31,7 +32,7 @@ pub fn precomputing_g2_generator_for_miller_loop(c: &mut Criterion) {
}

fn prepare_keys(
mut rng: impl RngCore,
mut rng: impl RngCore + CryptoRng,
nodes: usize,
) -> (BTreeMap<NodeIndex, PublicKey>, Vec<DecryptionKey>) {
let params = setup();
Expand All @@ -50,7 +51,7 @@ fn prepare_keys(
}

fn prepare_resharing(
mut rng: impl RngCore,
mut rng: impl RngCore + CryptoRng,
params: &Params,
nodes: usize,
threshold: Threshold,
Expand All @@ -68,7 +69,7 @@ fn prepare_resharing(
for (i, ref mut dk) in dks.iter_mut().enumerate() {
let shares = first_dealings
.iter()
.map(|dealing| decrypt_share(dk, i, &dealing.ciphertexts, None).unwrap())
.map(|dealing| decrypt_share(params, dk, i, &dealing.ciphertexts, None).unwrap())
.collect();

let recovered_secret =
Expand Down Expand Up @@ -154,7 +155,9 @@ pub fn verifying_dealing_made_for_3_parties_and_recovering_share(c: &mut Criteri
|b| {
b.iter(|| {
assert!(dealing.verify(&params, threshold, &receivers, None).is_ok());
black_box(decrypt_share(first_key, 0, &dealing.ciphertexts, None).unwrap());
black_box(
decrypt_share(&params, first_key, 0, &dealing.ciphertexts, None).unwrap(),
);
})
},
);
Expand Down Expand Up @@ -237,7 +240,9 @@ pub fn verifying_dealing_made_for_20_parties_and_recovering_share(c: &mut Criter
|b| {
b.iter(|| {
assert!(dealing.verify(&params, threshold, &receivers, None).is_ok());
black_box(decrypt_share(first_key, 0, &dealing.ciphertexts, None).unwrap());
black_box(
decrypt_share(&params, first_key, 0, &dealing.ciphertexts, None).unwrap(),
);
})
},
);
Expand Down Expand Up @@ -320,7 +325,9 @@ pub fn verifying_dealing_made_for_100_parties_and_recovering_share(c: &mut Crite
|b| {
b.iter(|| {
assert!(dealing.verify(&params, threshold, &receivers, None).is_ok());
black_box(decrypt_share(first_key, 0, &dealing.ciphertexts, None).unwrap());
black_box(
decrypt_share(&params, first_key, 0, &dealing.ciphertexts, None).unwrap(),
);
})
},
);
Expand Down Expand Up @@ -547,7 +554,7 @@ pub fn share_decryption(c: &mut Criterion) {
let (ciphertexts, _) = encrypt_shares(&[(&share, pk.public_key())], &params, &mut rng);

c.bench_function("single share decryption", |b| {
b.iter(|| black_box(decrypt_share(&dk, 0, &ciphertexts, None)))
b.iter(|| black_box(decrypt_share(&params, &dk, 0, &ciphertexts, None)))
});
}

Expand Down
53 changes: 43 additions & 10 deletions common/dkg/src/bte/encryption.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ use crate::{Chunk, ChunkedShare, Share};
use bls12_381::{G1Affine, G1Projective, G2Prepared, G2Projective, Gt, Scalar};
use ff::Field;
use group::{Curve, Group, GroupEncoding};
use rand::CryptoRng;
use rand_core::RngCore;
use std::collections::HashMap;
use std::ops::Neg;
Expand Down Expand Up @@ -191,7 +192,7 @@ impl HazmatRandomness {
pub fn encrypt_shares(
shares: &[(&Share, &PublicKey)],
params: &Params,
mut rng: impl RngCore,
mut rng: impl RngCore + CryptoRng,
) -> (Ciphertexts, HazmatRandomness) {
let g1 = G1Projective::generator();

Expand Down Expand Up @@ -262,6 +263,7 @@ pub fn encrypt_shares(
}

pub fn decrypt_share(
params: &Params,
dk: &DecryptionKey,
// in the case of multiple receivers, specifies which index of ciphertext chunks should be used
i: usize,
Expand All @@ -270,6 +272,10 @@ pub fn decrypt_share(
) -> Result<Share, DkgError> {
let mut plaintext = ChunkedShare::default();

if !ciphertext.verify_integrity(params) {
return Err(DkgError::FailedCiphertextIntegrityCheck);
}

if i >= ciphertext.ciphertext_chunks.len() {
return Err(DkgError::UnavailableCiphertext(i));
}
Expand Down Expand Up @@ -461,10 +467,22 @@ mod tests {
let (ciphertext, hazmat) = encrypt_shares(shares, &params, &mut rng);
verify_hazmat_rand(&ciphertext, &hazmat);

let recovered1 =
decrypt_share(&decryption_key1, 0, &ciphertext, Some(lookup_table)).unwrap();
let recovered2 =
decrypt_share(&decryption_key2, 1, &ciphertext, Some(lookup_table)).unwrap();
let recovered1 = decrypt_share(
&params,
&decryption_key1,
0,
&ciphertext,
Some(lookup_table),
)
.unwrap();
let recovered2 = decrypt_share(
&params,
&decryption_key2,
1,
&ciphertext,
Some(lookup_table),
)
.unwrap();
assert_eq!(m1, recovered1);
assert_eq!(m2, recovered2);
}
Expand All @@ -490,10 +508,22 @@ mod tests {
let (ciphertext, hazmat) = encrypt_shares(shares, &params, &mut rng);
verify_hazmat_rand(&ciphertext, &hazmat);

let recovered1 =
decrypt_share(&decryption_key1, 0, &ciphertext, Some(lookup_table)).unwrap();
let recovered2 =
decrypt_share(&decryption_key2, 1, &ciphertext, Some(lookup_table)).unwrap();
let recovered1 = decrypt_share(
&params,
&decryption_key1,
0,
&ciphertext,
Some(lookup_table),
)
.unwrap();
let recovered2 = decrypt_share(
&params,
&decryption_key2,
1,
&ciphertext,
Some(lookup_table),
)
.unwrap();
assert_eq!(m1, recovered1);
assert_eq!(m2, recovered2);
}
Expand Down Expand Up @@ -574,7 +604,10 @@ mod tests {

#[test]
fn ciphertexts_roundtrip() {
fn random_ciphertexts(mut rng: impl RngCore, num_receivers: usize) -> Ciphertexts {
fn random_ciphertexts(
mut rng: impl RngCore + CryptoRng,
num_receivers: usize,
) -> Ciphertexts {
Ciphertexts {
rr: (0..NUM_CHUNKS)
.map(|_| G1Projective::random(&mut rng))
Expand Down
8 changes: 6 additions & 2 deletions common/dkg/src/bte/keys.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,15 @@ use bls12_381::{G1Projective, G2Projective, Scalar};
use ff::Field;
use group::GroupEncoding;
use nym_pemstore::traits::{PemStorableKey, PemStorableKeyPair};
use rand::CryptoRng;
use rand_core::RngCore;
use zeroize::Zeroize;

// produces public key and a decryption key for the root of the tree
pub fn keygen(params: &Params, mut rng: impl RngCore) -> (DecryptionKey, PublicKeyWithProof) {
pub fn keygen(
params: &Params,
mut rng: impl RngCore + CryptoRng,
) -> (DecryptionKey, PublicKeyWithProof) {
let g1 = G1Projective::generator();
let g2 = G2Projective::generator();

Expand Down Expand Up @@ -244,7 +248,7 @@ pub struct KeyPair {
}

impl KeyPair {
pub fn new(params: &Params, rng: impl RngCore) -> Self {
pub fn new(params: &Params, rng: impl RngCore + CryptoRng) -> Self {
let (dk, pk) = keygen(params, rng);
Self {
private_key: dk,
Expand Down
110 changes: 90 additions & 20 deletions common/dkg/src/bte/proof_chunking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use crate::utils::{deserialize_scalar, RandomOracleBuilder};
use bls12_381::{G1Projective, Scalar};
use ff::Field;
use group::{Group, GroupEncoding};
use rand::Rng;
use rand::{CryptoRng, Rng};
use rand_core::{RngCore, SeedableRng};

const CHUNKING_ORACLE_DOMAIN: &[u8] =
Expand All @@ -28,6 +28,7 @@ const SECURITY_PARAMETER: usize = 256;

/// ceil(SECURITY_PARAMETER / PARALLEL_RUNS) in the paper
const NUM_CHALLENGE_BITS: usize = SECURITY_PARAMETER.div_ceil(PARALLEL_RUNS);
const EE: usize = 1 << NUM_CHALLENGE_BITS;

// type alias for ease of use
type FirstChallenge = Vec<Vec<Vec<u64>>>;
Expand Down Expand Up @@ -94,7 +95,7 @@ impl ProofOfChunking {
// Scalar(-1) would in reality be Scalar(q - 1), which is greater than Scalar(1) and opposite to
// what we wanted.
pub fn construct(
mut rng: impl RngCore,
mut rng: impl RngCore + CryptoRng,
instance: Instance,
witness_r: &[Scalar; NUM_CHUNKS],
witnesses_s: &[Share],
Expand All @@ -110,21 +111,20 @@ impl ProofOfChunking {
// define bounds for the blinding factors
let n = instance.public_keys.len();
let m = NUM_CHUNKS;
let ee = 1 << NUM_CHALLENGE_BITS;

// CHUNK_MAX corresponds to paper's B
let ss = (n * m * (CHUNK_SIZE - 1) * (ee - 1)) as u64;
let zz = (2 * (PARALLEL_RUNS as u64))
.checked_mul(ss)
.expect("overflow in Z = 2 * l * S");
// ss = (n * m * (CHUNK_SIZE - 1) * (ee - 1))
// Z = 2 * l * S
let (ss, zz): (u64, u64) = compute_ss_zz(n, m)?;

let ss_scalar = Scalar::from(ss);

// rather than generating blinding factors in [-S, Z-1] directly,
// do it via [0, Z - 1 + S + 1] and deal with the shift later.
let combined_upper_range = (zz - 1)
.checked_add(ss + 1)
.expect("overflow in Z - 1 + S + 1");
// combined_upper_range = Z - 1 + S + 1

let combined_upper_range = zz.checked_add(ss).ok_or(DkgError::ArithmeticOverflow {
info: "ProofOfChunking::construct | Z - 1 + S + 1",
})?;

let mut betas = Vec::with_capacity(PARALLEL_RUNS);
let mut bs = Vec::with_capacity(PARALLEL_RUNS);
Expand Down Expand Up @@ -178,20 +178,42 @@ impl ProofOfChunking {
// I think this part is more readable with a range loop
#[allow(clippy::needless_range_loop)]
for l in 0..PARALLEL_RUNS {
let mut sum = 0;
let mut sum: u64 = 0;

for (i, witness_i) in witnesses_s.iter().enumerate() {
for (j, witness_ij) in witness_i.to_chunks().chunks.iter().enumerate() {
debug_assert!(std::mem::size_of::<Chunk>() <= std::mem::size_of::<u64>());
sum += first_challenge[i][j][l] * (*witness_ij as u64)
// sum += first_challenge[i][j][l] * (*witness_ij as u64)
sum = sum
.checked_add(
first_challenge[i][j][l]
.checked_mul(*witness_ij as u64)
.ok_or(DkgError::ArithmeticOverflow {
info: "ProofOfChunking::construct | first_challenge[i][j][l] * witness_ij",
})?,
)
.ok_or(DkgError::ArithmeticOverflow {
info: "ProofOfChunking::construct | sum + (first_challenge[i][j][l] * witness_ij)",
})?;
}
}

if sum + shifted_blinding_factors[l] < ss {
continue 'retry_loop;
}
// shifted_blinding_factors[l] - ss restores it to "proper" [-S, Z - 1] range
let response = sum + shifted_blinding_factors[l] - ss;
// let response = sum + shifted_blinding_factors[l] - ss;
let response = sum
.checked_add(shifted_blinding_factors[l])
.ok_or(DkgError::ArithmeticOverflow {
info:
"ProofOfChunking::construct | sum + (shifted_blinding_factors[l] - ss)",
})?
.checked_sub(ss)
.ok_or(DkgError::ArithmeticUnderflow {
info: "ProofOfChunking::construct | shifted_blinding_factors[l] - ss",
})?;

if response < zz {
responses_chunks.push(response)
} else {
Expand Down Expand Up @@ -276,11 +298,13 @@ impl ProofOfChunking {
ensure_len!(&self.responses_r, n);
ensure_len!(&self.responses_chunks, PARALLEL_RUNS);

let ee = 1 << NUM_CHALLENGE_BITS;
// ss = (n * m * (CHUNK_SIZE - 1) * (ee - 1))
// Z = 2 * l * S

// CHUNK_MAX corresponds to paper's B
let ss = (n * m * (CHUNK_SIZE - 1) * (ee - 1)) as u64;
let zz = 2 * (PARALLEL_RUNS as u64) * ss;
let zz: u64 = match compute_ss_zz(n, m) {
Ok((_, zz_res)) => zz_res,
_ => return false,
};

for response_chunk in &self.responses_chunks {
if response_chunk >= &zz {
Expand Down Expand Up @@ -411,7 +435,7 @@ impl ProofOfChunking {
random_oracle_builder.update(lambda_e.to_be_bytes());

let mut oracle = rand_chacha::ChaCha20Rng::from_seed(random_oracle_builder.finalize());
let range_max_excl = 1 << NUM_CHALLENGE_BITS;
let range_max_excl = EE as u64;

(0..n)
.map(|_| {
Expand Down Expand Up @@ -637,6 +661,50 @@ impl ProofOfChunking {
}
}

/// Computes the proof bounds from the paper, with every step overflow-checked:
///
///   S = n * m * (CHUNK_SIZE - 1) * (EE - 1)   // CHUNK_MAX corresponds to paper's B
///   Z = 2 * l * S                             // l = PARALLEL_RUNS
///
/// Returns `(S, Z)` as `u64`, or an `ArithmeticOverflow`/`ArithmeticUnderflow`
/// error identifying the exact sub-expression that would have wrapped.
fn compute_ss_zz(n: usize, m: usize) -> Result<(u64, u64), DkgError> {
    // n * m — total number of chunks across all receivers
    let nm = n.checked_mul(m).ok_or(DkgError::ArithmeticOverflow {
        info: "ProofOfChunking::compute_ss_zz | n * m",
    })?;

    // CHUNK_SIZE - 1 — can only underflow if CHUNK_SIZE were 0; keep the
    // checked form so the invariant is enforced rather than assumed
    let chunk_max = CHUNK_SIZE
        .checked_sub(1)
        .ok_or(DkgError::ArithmeticUnderflow {
            info: "ProofOfChunking::compute_ss_zz | (CHUNK_SIZE - 1)",
        })?;

    // EE - 1 — maximum challenge value
    let challenge_max = EE.checked_sub(1).ok_or(DkgError::ArithmeticUnderflow {
        info: "ProofOfChunking::compute_ss_zz | (ee - 1)",
    })?;

    // (CHUNK_SIZE - 1) * (EE - 1)
    let per_chunk_bound = chunk_max
        .checked_mul(challenge_max)
        .ok_or(DkgError::ArithmeticOverflow {
            info: "ProofOfChunking::compute_ss_zz | (CHUNK_SIZE - 1) * (ee - 1)",
        })?;

    // S = (n * m) * ((CHUNK_SIZE - 1) * (EE - 1)); the cast is lossless since
    // the product already fits in usize and usize <= 64 bits here
    let ss = nm
        .checked_mul(per_chunk_bound)
        .ok_or(DkgError::ArithmeticOverflow {
            info: "ProofOfChunking::compute_ss_zz | ss_lhs * ss_rhs",
        })? as u64;

    // Z = 2 * l * S, computed as (2 * l) first to match the paper's grouping
    let two_l = 2u64
        .checked_mul(PARALLEL_RUNS as u64)
        .ok_or(DkgError::ArithmeticOverflow {
            info: "ProofOfChunking::compute_ss_zz | 2 * l",
        })?;
    let zz = two_l.checked_mul(ss).ok_or(DkgError::ArithmeticOverflow {
        info: "ProofOfChunking::compute_ss_zz | (2 * l) * S",
    })?;

    Ok((ss, zz))
}

#[cfg(test)]
mod tests {
use super::*;
Expand All @@ -652,7 +720,9 @@ mod tests {
ciphertext_chunks: Vec<[G1Projective; NUM_CHUNKS]>,
}

fn setup(mut rng: impl RngCore) -> (OwnedInstance, [Scalar; NUM_CHUNKS], Vec<Share>) {
fn setup(
mut rng: impl RngCore + CryptoRng,
) -> (OwnedInstance, [Scalar; NUM_CHUNKS], Vec<Share>) {
let g1 = G1Projective::generator();

let mut pks = Vec::with_capacity(NODES);
Expand Down
Loading
Loading