diff --git a/Cargo.lock b/Cargo.lock index 685f931f..49d8037b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,12 +58,6 @@ dependencies = [ "libc", ] -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" - [[package]] name = "anyhow" version = "1.0.71" @@ -323,33 +317,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "ciborium" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" - -[[package]] -name = "ciborium-ll" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" -dependencies = [ - "ciborium-io", - "half", -] - [[package]] name = "cipher" version = "0.4.4" @@ -368,31 +335,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "bitflags", - "textwrap 0.11.0", + "textwrap", "unicode-width", ] -[[package]] -name = "clap" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "bitflags", - "clap_lex", - "indexmap", - "textwrap 0.16.0", -] - -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "codespan-reporting" version = "0.11.1" @@ -445,8 +391,8 @@ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", - "clap 2.34.0", - "criterion-plot 0.4.5", + "clap", + "criterion-plot", "csv", "itertools", "lazy_static", @@ -463,32 +409,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "criterion" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" -dependencies = [ - "anes", - "atty", - "cast", - "ciborium", - "clap 3.2.25", - "criterion-plot 0.5.0", - "itertools", - "lazy_static", - "num-traits", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - [[package]] name = "criterion-plot" version = "0.4.5" @@ -499,16 +419,6 @@ dependencies = [ "itertools", ] -[[package]] -name = "criterion-plot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" -dependencies = [ - "cast", - "itertools", -] - [[package]] name = "crossbeam-channel" version = "0.5.8" @@ -767,7 +677,7 @@ dependencies = [ "ark-std", "bincode", "console_error_panic_hook", - "criterion 0.3.6", + "criterion", "derive_more", "digest", "ferveo-common-pre-release", @@ -814,7 +724,7 @@ dependencies = [ "ark-std", "bincode", "chacha20poly1305", - "criterion 0.4.0", + "criterion", "ferveo-common-pre-release", "hex", "itertools", @@ 
-1283,12 +1193,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "os_str_bytes" -version = "6.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" - [[package]] name = "parking_lot" version = "0.11.2" @@ -1367,7 +1271,7 @@ checksum = "55f35f865aa964be21fcde114cbd1cfbd9bf8a471460ed965b0f84f96c711401" dependencies = [ "backtrace", "cfg-if", - "criterion 0.3.6", + "criterion", "findshlibs", "inferno", "lazy_static", @@ -1924,12 +1828,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.40" diff --git a/Cargo.toml b/Cargo.toml index 3af8d039..1d42253e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,47 @@ members = [ "subproductdomain", ] +[workspace.dependencies] +anyhow = "1.0.47" +ark-bls12-381 = "0.4.0" +ark-ec = "0.4" +ark-ff = "0.4" +ark-poly = "0.4" +ark-serialize = "0.4" +ark-std = "0.4" +bincode = "1.3.3" +chacha20poly1305 = "0.10.1" +criterion = "0.3" +console_error_panic_hook = "0.1.7" +derive_more = { version = "0.99", default-features = false } +digest = "0.10.0" +ferveo-common = { path = "ferveo-common" } +ferveo-tdec = { path = "ferveo-tdec" } +generic-array = "0.14.7" +getrandom = "0.2" +hex = "0.4" +itertools = "0.10.5" +js-sys = "0.3.63" +measure_time = "0.8" +miracl_core = "=2.3.0" +pprof = "0.6" +pyo3 = "0.18.2" +pyo3-build-config = "*" +rand = "0.8" +rand_core = "0.6.4" +rand_old = { package = "rand", version = "0.7" } # TODO: Used by benchmarks/pairing.rs, update to rand = "0.8" when possible +serde = "1.0" +serde_bytes = "0.11.9" +serde_with = "2.2.0" +sha2 = "0.10.6" +subproductdomain = { path = "subproductdomain" } +test-case = "3.3.1" +thiserror = "1.0" +wasm-bindgen = "0.2.86" +wasm-bindgen-derive = "0.2.1" +wasm-bindgen-test = "0.3.28" +zeroize = "1.6.0" + [profile.bench] opt-level = 3 debug = true diff --git a/ferveo-common/Cargo.toml b/ferveo-common/Cargo.toml index cdbbe0ab..23c54a1a 100644 --- a/ferveo-common/Cargo.toml +++ b/ferveo-common/Cargo.toml @@ -7,14 +7,14 @@ authors = ["Heliax AG ", "Piotr Roslaniec PublicKey { } pub fn serialized_size() -> usize { - 96 + U96::to_usize() } } @@ -106,7 +109,6 @@ impl Ord for Keypair { impl Keypair { /// Returns the public session key for the publicly verifiable DKG participant - pub fn public_key(&self) -> PublicKey { PublicKey:: { encryption_key: E::G2Affine::generator() @@ -116,7 +118,6 @@ impl Keypair { } /// Creates a new ephemeral session key for participating in the DKG - pub fn new(rng: &mut R) -> Self { Self { decryption_key: E::ScalarField::rand(rng), diff --git a/ferveo-python/Cargo.toml b/ferveo-python/Cargo.toml index 44a81853..82d533d1 100644 --- a/ferveo-python/Cargo.toml +++ b/ferveo-python/Cargo.toml @@ -15,7 +15,7 @@ default = ["extension-module"] [dependencies] ferveo = { package = "ferveo-pre-release", path = "../ferveo", features = ["bindings-python"] } -pyo3 = "0.18.2" +pyo3 = { workspace = true } [build-dependencies] -pyo3-build-config = "*" +pyo3-build-config = { workspace = true } diff --git a/ferveo-python/examples/server_api_precomputed.py b/ferveo-python/examples/server_api_precomputed.py index a9b98001..4ebed2ea 100644 --- 
a/ferveo-python/examples/server_api_precomputed.py +++ b/ferveo-python/examples/server_api_precomputed.py @@ -39,6 +39,9 @@ def gen_eth_addr(i: int) -> str: ) messages.append(ValidatorMessage(sender, dkg.generate_transcript())) +# We only need `shares_num` messages to aggregate the transcript +messages = messages[:shares_num] + # Every validator can aggregate the transcripts dkg = Dkg( tau=tau, @@ -59,11 +62,15 @@ def gen_eth_addr(i: int) -> str: # In the meantime, the client creates a ciphertext and decryption request msg = "abc".encode() aad = "my-aad".encode() -ciphertext = encrypt(msg, aad, dkg.public_key) +ciphertext = encrypt(msg, aad, client_aggregate.public_key) + +# In precomputed variant, the client selects a subset of validators to use for decryption +selected_validators = validators[:security_threshold] +selected_keypairs = validator_keypairs[:security_threshold] # Having aggregated the transcripts, the validators can now create decryption shares decryption_shares = [] -for validator, validator_keypair in zip(validators, validator_keypairs): +for validator, validator_keypair in zip(selected_validators, selected_keypairs): dkg = Dkg( tau=tau, shares_num=shares_num, @@ -80,13 +87,15 @@ def gen_eth_addr(i: int) -> str: # Create a decryption share for the ciphertext decryption_share = aggregate.create_decryption_share_precomputed( - dkg, ciphertext.header, aad, validator_keypair + dkg, ciphertext.header, aad, validator_keypair, selected_validators ) decryption_shares.append(decryption_share) +# We need at most `security_threshold` decryption shares +decryption_shares = decryption_shares[:security_threshold] + # Now, the decryption share can be used to decrypt the ciphertext # This part is in the client API - shared_secret = combine_decryption_shares_precomputed(decryption_shares) # The client should have access to the public parameters of the DKG diff --git a/ferveo-python/examples/server_api_simple.py b/ferveo-python/examples/server_api_simple.py index 44fb69c4..972f3c45 100644 --- a/ferveo-python/examples/server_api_simple.py +++ b/ferveo-python/examples/server_api_simple.py @@ -40,6 +40,9 @@ def gen_eth_addr(i: int) -> str: ) messages.append(ValidatorMessage(sender, dkg.generate_transcript())) +# We only need `shares_num` messages to aggregate the transcript +messages = messages[:shares_num] + # Now that every validator holds a dkg instance and a transcript for every other validator, # every validator can aggregate the transcripts me = validators[0] @@ -62,7 +65,7 @@ def gen_eth_addr(i: int) -> str: # In the meantime, the client creates a ciphertext and decryption request msg = "abc".encode() aad = "my-aad".encode() -ciphertext = encrypt(msg, aad, dkg.public_key) +ciphertext = encrypt(msg, aad, client_aggregate.public_key) # The client can serialize/deserialize ciphertext for transport ciphertext_ser = bytes(ciphertext) @@ -90,6 +93,9 @@ def gen_eth_addr(i: int) -> str: ) decryption_shares.append(decryption_share) +# We only need `threshold` decryption shares in simple variant +decryption_shares = decryption_shares[:security_threshold] + # Now, the decryption share can be used to decrypt the ciphertext # This part is in the client API diff --git a/ferveo-python/ferveo/__init__.py b/ferveo-python/ferveo/__init__.py index fd906e54..7f63fa66 100644 --- a/ferveo-python/ferveo/__init__.py +++ b/ferveo-python/ferveo/__init__.py @@ -18,10 +18,6 @@ ValidatorMessage, FerveoVariant, ThresholdEncryptionError, - InvalidDkgStateToDeal, - InvalidDkgStateToAggregate, - InvalidDkgStateToVerify, 
- InvalidDkgStateToIngest, DealerNotInValidatorSet, UnknownDealer, DuplicateDealer, @@ -39,4 +35,7 @@ DuplicatedShareIndex, NoTranscriptsToAggregate, InvalidAggregateVerificationParameters, + UnknownValidator, + TooManyTranscripts, + DuplicateTranscript, ) diff --git a/ferveo-python/ferveo/__init__.pyi b/ferveo-python/ferveo/__init__.pyi index ba7e7403..30059f4a 100644 --- a/ferveo-python/ferveo/__init__.pyi +++ b/ferveo-python/ferveo/__init__.pyi @@ -105,6 +105,7 @@ class DecryptionSharePrecomputed: @final class AggregatedTranscript: + public_key: DkgPublicKey def __init__(self, messages: Sequence[ValidatorMessage]): ... def verify( self, validators_num: int, messages: Sequence[ValidatorMessage] @@ -122,6 +123,7 @@ class AggregatedTranscript: ciphertext_header: CiphertextHeader, aad: bytes, validator_keypair: Keypair, + selected_validators: Sequence[Validator], ) -> DecryptionSharePrecomputed: ... @staticmethod def from_bytes(data: bytes) -> AggregatedTranscript: ... @@ -157,18 +159,6 @@ def decrypt_with_shared_secret( class ThresholdEncryptionError(Exception): pass -class InvalidDkgStateToDeal(Exception): - pass - -class InvalidDkgStateToAggregate(Exception): - pass - -class InvalidDkgStateToVerify(Exception): - pass - -class InvalidDkgStateToIngest(Exception): - pass - class DealerNotInValidatorSet(Exception): pass @@ -181,12 +171,6 @@ class DuplicateDealer(Exception): class InvalidPvssTranscript(Exception): pass -class InsufficientTranscriptsForAggregate(Exception): - pass - -class InvalidDkgPublicKey(Exception): - pass - class InsufficientValidators(Exception): pass @@ -219,3 +203,9 @@ class NoTranscriptsToAggregate(Exception): class InvalidAggregateVerificationParameters(Exception): pass + +class TooManyTranscripts(Exception): + pass + +class DuplicateTranscript(Exception): + pass diff --git a/ferveo-python/test/test_ferveo.py b/ferveo-python/test/test_ferveo.py index 51af3867..6c8fb2a4 100644 --- a/ferveo-python/test/test_ferveo.py +++ b/ferveo-python/test/test_ferveo.py @@ -19,16 +19,6 @@ def gen_eth_addr(i: int) -> str: return f"0x{i:040x}" - -def decryption_share_for_variant(v: FerveoVariant, agg_transcript): - if v == FerveoVariant.Simple: - return agg_transcript.create_decryption_share_simple - elif v == FerveoVariant.Precomputed: - return agg_transcript.create_decryption_share_precomputed - else: - raise ValueError("Unknown variant") - - def combine_shares_for_variant(v: FerveoVariant, decryption_shares): if v == FerveoVariant.Simple: return combine_decryption_shares_simple(decryption_shares) @@ -39,7 +29,11 @@ def combine_shares_for_variant(v: FerveoVariant, decryption_shares): def scenario_for_variant( - variant: FerveoVariant, shares_num, validators_num, threshold, shares_to_use + variant: FerveoVariant, + shares_num, + validators_num, + threshold, + dec_shares_to_use ): if variant not in [FerveoVariant.Simple, FerveoVariant.Precomputed]: raise ValueError("Unknown variant: " + variant) @@ -47,10 +41,8 @@ def scenario_for_variant( if validators_num < shares_num: raise ValueError("validators_num must be >= shares_num") - if variant == FerveoVariant.Precomputed and shares_to_use != validators_num: - raise ValueError( - "In precomputed variant, shares_to_use must be equal to validators_num" - ) + if shares_num < threshold: + raise ValueError("shares_num must be >= threshold") tau = 1 validator_keypairs = [Keypair.random() for _ in range(0, validators_num)] @@ -72,6 +64,9 @@ def scenario_for_variant( ) messages.append(ValidatorMessage(sender, dkg.generate_transcript())) + # We 
only need `shares_num` messages to aggregate the transcript + messages = messages[:shares_num] + # Both client and server should be able to verify the aggregated transcript dkg = Dkg( tau=tau, @@ -82,18 +77,27 @@ def scenario_for_variant( ) server_aggregate = dkg.aggregate_transcripts(messages) assert server_aggregate.verify(validators_num, messages) - client_aggregate = AggregatedTranscript(messages) assert client_aggregate.verify(validators_num, messages) + # At this point, DKG is done, and we are proceeding to threshold decryption + # Client creates a ciphertext and requests decryption shares from validators msg = "abc".encode() aad = "my-aad".encode() - ciphertext = encrypt(msg, aad, dkg.public_key) + ciphertext = encrypt(msg, aad, client_aggregate.public_key) + + # In precomputed variant, the client selects a subset of validators to use for decryption + if variant == FerveoVariant.Precomputed: + selected_validators = validators[:threshold] + selected_validator_keypairs = validator_keypairs[:threshold] + else: + selected_validators = validators + selected_validator_keypairs = validator_keypairs # Having aggregated the transcripts, the validators can now create decryption shares decryption_shares = [] - for validator, validator_keypair in zip(validators, validator_keypairs): + for validator, validator_keypair in zip(selected_validators, selected_validator_keypairs): assert validator.public_key == validator_keypair.public_key() print("validator: ", validator.share_index) @@ -104,26 +108,28 @@ def scenario_for_variant( validators=validators, me=validator, ) - pvss_aggregated = dkg.aggregate_transcripts(messages) - assert pvss_aggregated.verify(validators_num, messages) - - decryption_share = decryption_share_for_variant(variant, pvss_aggregated)( - dkg, ciphertext.header, aad, validator_keypair - ) + server_aggregate = dkg.aggregate_transcripts(messages) + assert server_aggregate.verify(validators_num, messages) + + if variant == FerveoVariant.Simple: + decryption_share = server_aggregate.create_decryption_share_simple( + dkg, ciphertext.header, aad, validator_keypair + ) + elif variant == FerveoVariant.Precomputed: + decryption_share = server_aggregate.create_decryption_share_precomputed( + dkg, ciphertext.header, aad, validator_keypair, selected_validators + ) + else: + raise ValueError("Unknown variant") decryption_shares.append(decryption_share) # We are limiting the number of decryption shares to use for testing purposes - # decryption_shares = decryption_shares[:shares_to_use] + decryption_shares = decryption_shares[:dec_shares_to_use] # Client combines the decryption shares and decrypts the ciphertext shared_secret = combine_shares_for_variant(variant, decryption_shares) - if variant == FerveoVariant.Simple and len(decryption_shares) < threshold: - with pytest.raises(ThresholdEncryptionError): - decrypt_with_shared_secret(ciphertext, aad, shared_secret) - return - - if variant == FerveoVariant.Precomputed and len(decryption_shares) < threshold: + if len(decryption_shares) < threshold: with pytest.raises(ThresholdEncryptionError): decrypt_with_shared_secret(ciphertext, aad, shared_secret) return @@ -133,54 +139,57 @@ def scenario_for_variant( def test_simple_tdec_has_enough_messages(): - shares_num = 4 - threshold = shares_num - 1 + shares_num = 8 + threshold = int(shares_num * 2 / 3) for validators_num in [shares_num, shares_num + 2]: scenario_for_variant( FerveoVariant.Simple, shares_num=shares_num, validators_num=validators_num, threshold=threshold, - shares_to_use=threshold, + 
dec_shares_to_use=threshold, ) def test_simple_tdec_doesnt_have_enough_messages(): - shares_num = 4 - threshold = shares_num - 1 + shares_num = 8 + threshold = int(shares_num * 2 / 3) + dec_shares_to_use = threshold - 1 for validators_num in [shares_num, shares_num + 2]: scenario_for_variant( FerveoVariant.Simple, shares_num=shares_num, validators_num=validators_num, threshold=threshold, - shares_to_use=validators_num - 1, + dec_shares_to_use=dec_shares_to_use, ) def test_precomputed_tdec_has_enough_messages(): - shares_num = 4 - threshold = shares_num # in precomputed variant, we need all shares + shares_num = 8 + threshold = int(shares_num * 2 / 3) + dec_shares_to_use = threshold for validators_num in [shares_num, shares_num + 2]: scenario_for_variant( FerveoVariant.Precomputed, shares_num=shares_num, validators_num=validators_num, threshold=threshold, - shares_to_use=validators_num, + dec_shares_to_use=dec_shares_to_use, ) def test_precomputed_tdec_doesnt_have_enough_messages(): - shares_num = 4 - threshold = shares_num # in precomputed variant, we need all shares + shares_num = 8 + threshold = int(shares_num * 2 / 3) + dec_shares_to_use = threshold - 1 for validators_num in [shares_num, shares_num + 2]: scenario_for_variant( FerveoVariant.Simple, shares_num=shares_num, validators_num=validators_num, threshold=threshold, - shares_to_use=threshold - 1, + dec_shares_to_use=dec_shares_to_use, ) diff --git a/ferveo-python/test/test_serialization.py b/ferveo-python/test/test_serialization.py index 6c600771..d188ea4d 100644 --- a/ferveo-python/test/test_serialization.py +++ b/ferveo-python/test/test_serialization.py @@ -5,6 +5,7 @@ DkgPublicKey, FerveoPublicKey, FerveoVariant, + ValidatorMessage ) @@ -32,7 +33,10 @@ def make_dkg_public_key(): validators=validators, me=me, ) - return dkg.public_key + transcripts = [ValidatorMessage(v, dkg.generate_transcript()) for v in validators] + aggregate = dkg.aggregate_transcripts(transcripts) + assert aggregate.verify(shares_num, transcripts) + return aggregate.public_key def make_shared_secret(): diff --git a/ferveo-tdec/Cargo.toml b/ferveo-tdec/Cargo.toml index cfc7e3f1..58bc0583 100644 --- a/ferveo-tdec/Cargo.toml +++ b/ferveo-tdec/Cargo.toml @@ -16,30 +16,30 @@ test-common = [] api = [] [dependencies] -ark-bls12-381 = "0.4" -ark-ec = "0.4" -ark-ff = "0.4" -ark-poly = "0.4" -ark-serialize = "0.4" -ark-std = "0.4" -bincode = "1.3.3" -chacha20poly1305 = "0.10.1" +ark-bls12-381 = { workspace = true } +ark-ec = { workspace = true } +ark-ff = { workspace = true } +ark-poly = { workspace = true } +ark-serialize = { workspace = true } +ark-std = { workspace = true } +bincode = { workspace = true } +chacha20poly1305 = { workspace = true } ferveo-common = { package = "ferveo-common-pre-release", path = "../ferveo-common", version = "^0.1.1" } -itertools = "0.10" -miracl_core = "=2.3.0" -rand = "0.8" -rand_core = "0.6" -serde = { version = "1.0", features = ["derive"] } -serde_bytes = "0.11.9" -serde_with = "2.0.1" -sha2 = "0.10.6" +itertools = { workspace = true } +miracl_core = { workspace = true } +rand = { workspace = true } +rand_core = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_bytes = { workspace = true } +serde_with = { workspace = true } +sha2 = { workspace = true } subproductdomain = { package = "subproductdomain-pre-release", path = "../subproductdomain", version = "^0.1.0" } -thiserror = "1.0" -zeroize = "1.6.0" +thiserror = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] -criterion = { 
version = "0.4", features = ["html_reports"] } -hex = "=0.4.3" +criterion = { workspace = true, features = ["html_reports"] } +hex = { workspace = true } [package.metadata.cargo-machete] ignored = ["serde_bytes"] diff --git a/ferveo-tdec/benches/tpke.rs b/ferveo-tdec/benches/tpke.rs index 420bf869..25672b70 100644 --- a/ferveo-tdec/benches/tpke.rs +++ b/ferveo-tdec/benches/tpke.rs @@ -1,14 +1,10 @@ #![allow(clippy::redundant_closure)] -use ark_bls12_381::{Bls12_381, Fr, G1Affine as G1, G2Affine as G2}; -use ark_ec::pairing::Pairing; +use ark_bls12_381::{Bls12_381, Fr}; use criterion::{ black_box, criterion_group, criterion_main, BenchmarkId, Criterion, }; -use ferveo_tdec::{ - test_common::{setup_fast, setup_simple}, - *, -}; +use ferveo_tdec::{test_common::setup_simple, *}; use rand::prelude::StdRng; use rand_core::{RngCore, SeedableRng}; @@ -16,7 +12,6 @@ const NUM_SHARES_CASES: [usize; 5] = [4, 8, 16, 32, 64]; const MSG_SIZE_CASES: [usize; 7] = [256, 512, 1024, 2048, 4096, 8192, 16384]; type E = Bls12_381; -type G2Prepared = ::G2Prepared; #[allow(dead_code)] #[derive(Clone)] @@ -25,69 +20,12 @@ struct SetupShared { shares_num: usize, msg: Vec, aad: Vec, - pubkey: G1, - privkey: G2, + pubkey: PublicKey, + privkey: PrivateKeyShare, ciphertext: Ciphertext, shared_secret: SharedSecret, } -#[derive(Clone)] -struct SetupFast { - shared: SetupShared, - contexts: Vec>, - pub_contexts: Vec>, - decryption_shares: Vec>, - prepared_key_shares: Vec, -} - -impl SetupFast { - pub fn new(shares_num: usize, msg_size: usize, rng: &mut StdRng) -> Self { - let threshold = shares_num * 2 / 3; - let mut msg: Vec = vec![0u8; msg_size]; - rng.fill_bytes(&mut msg[..]); - let aad: &[u8] = "my-aad".as_bytes(); - - let (pubkey, privkey, contexts) = - setup_fast::(threshold, shares_num, rng); - let ciphertext = - encrypt::(SecretBox::new(msg.clone()), aad, &pubkey, rng) - .unwrap(); - - let mut decryption_shares: Vec> = vec![]; - for context in contexts.iter() { - decryption_shares - .push(context.create_share(&ciphertext, aad).unwrap()); - } - - let pub_contexts = contexts[0].clone().public_decryption_contexts; - let prepared_key_shares = - prepare_combine_fast(&pub_contexts, &decryption_shares); - - let shared_secret = share_combine_fast_unchecked( - &decryption_shares, - &prepared_key_shares, - ); - - let shared = SetupShared { - threshold, - shares_num, - msg: msg.to_vec(), - aad: aad.to_vec(), - pubkey, - privkey, - ciphertext, - shared_secret, - }; - Self { - shared, - contexts, - pub_contexts, - decryption_shares, - prepared_key_shares, - } - } -} - #[derive(Clone)] struct SetupSimple { shared: SetupShared, @@ -105,7 +43,7 @@ impl SetupSimple { let aad: &[u8] = "my-aad".as_bytes(); let (pubkey, privkey, contexts) = - setup_simple::(threshold, shares_num, rng); + setup_simple::(shares_num, threshold, rng); // Ciphertext.commitment is already computed to match U let ciphertext = @@ -124,10 +62,10 @@ impl SetupSimple { let pub_contexts = contexts[0].clone().public_decryption_contexts; let domain: Vec = pub_contexts.iter().map(|c| c.domain).collect(); - let lagrange = prepare_combine_simple::(&domain); + let lagrange_coeffs = prepare_combine_simple::(&domain); let shared_secret = - share_combine_simple::(&decryption_shares, &lagrange); + share_combine_simple::(&decryption_shares, &lagrange_coeffs); let shared = SetupShared { threshold, @@ -144,7 +82,7 @@ impl SetupSimple { contexts, pub_contexts, decryption_shares, - lagrange_coeffs: lagrange, + lagrange_coeffs, } } } @@ -158,25 +96,6 @@ pub fn 
bench_create_decryption_share(c: &mut Criterion) { let msg_size = MSG_SIZE_CASES[0]; for shares_num in NUM_SHARES_CASES { - let fast = { - let setup = SetupFast::new(shares_num, msg_size, rng); - move || { - black_box({ - // TODO: Consider running benchmarks for a single iteration and not for all iterations. - // This way we could test the performance of this method for a single participant. - setup - .contexts - .iter() - .map(|ctx| { - ctx.create_share( - &setup.shared.ciphertext, - &setup.shared.aad, - ) - }) - .collect::>() - }) - } - }; let simple = { let setup = SetupSimple::new(shares_num, msg_size, rng); move || { @@ -200,6 +119,8 @@ pub fn bench_create_decryption_share(c: &mut Criterion) { }; let simple_precomputed = { let setup = SetupSimple::new(shares_num, MSG_SIZE_CASES[0], rng); + let selected_participants = + (0..setup.shared.threshold).collect::>(); move || { black_box( setup @@ -209,17 +130,13 @@ pub fn bench_create_decryption_share(c: &mut Criterion) { context.create_share_precomputed( &setup.shared.ciphertext.header().unwrap(), &setup.shared.aad, + &selected_participants, ) }) .collect::>(), ); } }; - - group.bench_function( - BenchmarkId::new("share_create_fast", shares_num), - |b| b.iter(|| fast()), - ); group.bench_function( BenchmarkId::new("share_create_simple", shares_num), |b| b.iter(|| simple()), @@ -239,26 +156,12 @@ pub fn bench_share_prepare(c: &mut Criterion) { let msg_size = MSG_SIZE_CASES[0]; for shares_num in NUM_SHARES_CASES { - let fast = { - let setup = SetupFast::new(shares_num, msg_size, rng); - move || { - black_box(prepare_combine_fast( - &setup.pub_contexts, - &setup.decryption_shares, - )) - } - }; let simple = { let setup = SetupSimple::new(shares_num, msg_size, rng); let domain: Vec = setup.pub_contexts.iter().map(|c| c.domain).collect(); move || black_box(prepare_combine_simple::(&domain)) }; - - group.bench_function( - BenchmarkId::new("share_prepare_fast", shares_num), - |b| b.iter(|| fast()), - ); group.bench_function( BenchmarkId::new("share_prepare_simple", shares_num), |b| b.iter(|| simple()), @@ -275,15 +178,6 @@ pub fn bench_share_combine(c: &mut Criterion) { let msg_size = MSG_SIZE_CASES[0]; for shares_num in NUM_SHARES_CASES { - let fast = { - let setup = SetupFast::new(shares_num, msg_size, rng); - move || { - black_box(share_combine_fast_unchecked( - &setup.decryption_shares, - &setup.prepared_key_shares, - )); - } - }; let simple = { let setup = SetupSimple::new(shares_num, msg_size, rng); move || { @@ -295,6 +189,8 @@ pub fn bench_share_combine(c: &mut Criterion) { }; let simple_precomputed = { let setup = SetupSimple::new(shares_num, MSG_SIZE_CASES[0], rng); + // TODO: Use threshold instead of shares_num + let selected_participants = (0..shares_num).collect::>(); let decryption_shares: Vec<_> = setup .contexts @@ -304,6 +200,7 @@ pub fn bench_share_combine(c: &mut Criterion) { .create_share_precomputed( &setup.shared.ciphertext.header().unwrap(), &setup.shared.aad, + &selected_participants, ) .unwrap() }) @@ -314,10 +211,6 @@ pub fn bench_share_combine(c: &mut Criterion) { } }; - group.bench_function( - BenchmarkId::new("share_combine_fast", shares_num), - |b| b.iter(|| fast()), - ); group.bench_function( BenchmarkId::new("share_combine_simple", shares_num), |b| b.iter(|| simple()), @@ -339,7 +232,7 @@ pub fn bench_share_encrypt_decrypt(c: &mut Criterion) { for msg_size in MSG_SIZE_CASES { let mut encrypt = { let mut rng = rng.clone(); - let setup = SetupFast::new(shares_num, msg_size, &mut rng); + let setup = 
SetupSimple::new(shares_num, msg_size, &mut rng); move || { let setup = setup.clone(); black_box( @@ -387,7 +280,7 @@ pub fn bench_ciphertext_validity_checks(c: &mut Criterion) { for msg_size in MSG_SIZE_CASES { let ciphertext_verification = { let mut rng = rng.clone(); - let setup = SetupFast::new(shares_num, msg_size, &mut rng); + let setup = SetupSimple::new(shares_num, msg_size, &mut rng); move || { black_box(setup.shared.ciphertext.check( &setup.shared.aad, @@ -411,44 +304,6 @@ pub fn bench_decryption_share_validity_checks(c: &mut Criterion) { let msg_size = MSG_SIZE_CASES[0]; for shares_num in NUM_SHARES_CASES { - let share_fast_verification = { - let mut rng = rng.clone(); - let setup = SetupFast::new(shares_num, msg_size, &mut rng); - move || { - black_box(verify_decryption_shares_fast( - &setup.pub_contexts, - &setup.shared.ciphertext, - &setup.decryption_shares, - )) - } - }; - group.bench_function( - BenchmarkId::new("share_fast_verification", shares_num), - |b| b.iter(|| share_fast_verification()), - ); - - let mut share_fast_batch_verification = { - let mut rng = rng.clone(); - let setup = SetupFast::new(shares_num, msg_size, &mut rng); - // We need to repackage a bunch of variables here to avoid borrowing issues: - let ciphertext = setup.shared.ciphertext.clone(); - let ciphertexts = vec![ciphertext]; - let decryption_shares = setup.decryption_shares.clone(); - let decryption_shares = vec![decryption_shares]; - move || { - black_box(batch_verify_decryption_shares( - &setup.pub_contexts, - &ciphertexts, - &decryption_shares, - &mut rng, - )) - } - }; - group.bench_function( - BenchmarkId::new("share_fast_batch_verification", shares_num), - |b| b.iter(|| share_fast_batch_verification()), - ); - let share_simple_verification = { let mut rng = rng.clone(); let setup = SetupSimple::new(shares_num, msg_size, &mut rng); @@ -550,7 +405,7 @@ pub fn bench_decryption_share_validity_checks(c: &mut Criterion) { // for &shares_num in NUM_SHARES_CASES.iter() { // let setup = SetupSimple::new(shares_num, msg_size, rng); // let threshold = setup.shared.threshold; -// let polynomial = make_random_polynomial_with_root::( +// let polynomial = create_random_polynomial_with_root::( // threshold - 1, // &Fr::zero(), // rng, diff --git a/ferveo-tdec/src/ciphertext.rs b/ferveo-tdec/src/ciphertext.rs index 81f79389..6d33946c 100644 --- a/ferveo-tdec/src/ciphertext.rs +++ b/ferveo-tdec/src/ciphertext.rs @@ -13,7 +13,10 @@ use serde_with::serde_as; use sha2::{digest::Digest, Sha256}; use zeroize::ZeroizeOnDrop; -use crate::{htp_bls12381_g2, Error, Result, SecretBox, SharedSecret}; +use crate::{ + htp_bls12381_g2, Error, PrivateKeyShare, PublicKey, Result, SecretBox, + SharedSecret, +}; #[serde_as] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -95,7 +98,7 @@ impl CiphertextHeader { pub fn encrypt( message: SecretBox>, aad: &[u8], - pubkey: &E::G1Affine, + pubkey: &PublicKey, rng: &mut impl rand::Rng, ) -> Result> { // r @@ -105,7 +108,7 @@ pub fn encrypt( // h let h_gen = E::G2Affine::generator(); - let ry_prep = E::G1Prepared::from(pubkey.mul(rand_element).into()); + let ry_prep = E::G1Prepared::from(pubkey.0.mul(rand_element).into()); // s let product = E::pairing(ry_prep, h_gen).0; // u @@ -140,13 +143,13 @@ pub fn encrypt( pub fn decrypt_symmetric( ciphertext: &Ciphertext, aad: &[u8], - private_key: &E::G2Affine, + private_key: &PrivateKeyShare, g_inv: &E::G1Prepared, ) -> Result> { ciphertext.check(aad, g_inv)?; let shared_secret = E::pairing( 
E::G1Prepared::from(ciphertext.commitment), - E::G2Prepared::from(*private_key), + E::G2Prepared::from(private_key.0), ) .0; let shared_secret = SharedSecret(shared_secret); @@ -258,7 +261,7 @@ mod tests { let aad: &[u8] = "my-aad".as_bytes(); let (pubkey, privkey, contexts) = - setup_fast::(threshold, shares_num, rng); + setup_simple::(threshold, shares_num, rng); let g_inv = &contexts[0].setup_params.g_inv; let ciphertext = @@ -282,7 +285,8 @@ mod tests { let threshold = shares_num * 2 / 3; let msg = "my-msg".as_bytes().to_vec(); let aad: &[u8] = "my-aad".as_bytes(); - let (pubkey, _, contexts) = setup_fast::(threshold, shares_num, rng); + let (pubkey, _, contexts) = + setup_simple::(threshold, shares_num, rng); let g_inv = contexts[0].setup_params.g_inv.clone(); let mut ciphertext = encrypt::(SecretBox::new(msg), aad, &pubkey, rng).unwrap(); diff --git a/ferveo-tdec/src/combine.rs b/ferveo-tdec/src/combine.rs index f9d8ddbb..d04bc55f 100644 --- a/ferveo-tdec/src/combine.rs +++ b/ferveo-tdec/src/combine.rs @@ -1,14 +1,11 @@ #![allow(non_snake_case)] -use std::ops::Mul; - -use ark_ec::{pairing::Pairing, CurveGroup}; +use ark_ec::pairing::Pairing; use ark_ff::{Field, One, PrimeField, Zero}; use ferveo_common::serialization; use itertools::izip; use serde::{Deserialize, Serialize}; use serde_with::serde_as; -use subproductdomain::SubproductDomain; use zeroize::{Zeroize, ZeroizeOnDrop}; #[serde_as] @@ -19,45 +16,8 @@ pub struct SharedSecret( #[serde_as(as = "serialization::SerdeAs")] pub(crate) E::TargetField, ); -use crate::{ - verify_decryption_shares_fast, Ciphertext, DecryptionShareFast, - DecryptionSharePrecomputed, DecryptionShareSimple, Error, - PublicDecryptionContextFast, Result, -}; - -pub fn prepare_combine_fast( - public_decryption_contexts: &[PublicDecryptionContextFast], - shares: &[DecryptionShareFast], -) -> Vec { - let mut domain = vec![]; // omega_i, vector of domain points - let mut n_0 = E::ScalarField::one(); - for d_i in shares.iter() { - domain.push(public_decryption_contexts[d_i.decrypter_index].domain); - // n_0_i = 1 * t^1 * t^2 ... - n_0 *= public_decryption_contexts[d_i.decrypter_index].lagrange_n_0; - } - let s = SubproductDomain::::new(domain); - let mut lagrange = s.inverse_lagrange_coefficients(); // 1/L_i - - // Given a vector of field elements {v_i}, compute the vector {coeff * v_i^(-1)} - ark_ff::batch_inversion_and_mul(&mut lagrange, &n_0); // n_0 * L_i +use crate::{DecryptionSharePrecomputed, DecryptionShareSimple}; - // L_i * [b]Z_i - izip!(shares.iter(), lagrange.iter()) - .map(|(d_i, lambda)| { - let decrypter = &public_decryption_contexts[d_i.decrypter_index]; - let blinded_key_share = - decrypter.blinded_key_share.blinded_key_share; - E::G2Prepared::from( - // [b]Z_i * L_i - blinded_key_share.mul(*lambda).into_affine(), - ) - }) - .collect::>() -} - -// TODO: Combine `tpke::prepare_combine_simple` and `tpke::share_combine_simple` into -// one function and expose it in the tpke::api? pub fn prepare_combine_simple( domain: &[E::ScalarField], ) -> Vec { @@ -84,51 +44,6 @@ pub fn lagrange_basis_at( lagrange_coeffs } -// TODO: Hide this from external users. Currently blocked by usage in benchmarks. 
-pub fn share_combine_fast_unchecked( - shares: &[DecryptionShareFast], - prepared_key_shares: &[E::G2Prepared], -) -> SharedSecret { - let mut pairing_a = vec![]; - let mut pairing_b = vec![]; - - for (d_i, prepared_key_share) in izip!(shares, prepared_key_shares.iter()) { - pairing_a.push( - // D_i - E::G1Prepared::from(d_i.decryption_share), - ); - pairing_b.push( - // Z_{i,omega_i}) = [dk_{i}^{-1}]*\hat{Y}_{i_omega_j}] - // Reference: https://nikkolasg.github.io/ferveo/pvss.html#validator-decryption-of-private-key-shares - // Prepared key share is a sum of L_i * [b]Z_i - prepared_key_share.clone(), - ); - } - // e(D_i, [b*omega_i^-1] Z_{i,omega_i}) - let shared_secret = E::multi_pairing(pairing_a, pairing_b).0; - SharedSecret(shared_secret) -} - -pub fn share_combine_fast( - pub_contexts: &[PublicDecryptionContextFast], - ciphertext: &Ciphertext, - decryption_shares: &[DecryptionShareFast], - prepared_key_shares: &[E::G2Prepared], -) -> Result> { - let is_valid_shares = verify_decryption_shares_fast( - pub_contexts, - ciphertext, - decryption_shares, - ); - if !is_valid_shares { - return Err(Error::DecryptionShareVerificationFailed); - } - Ok(share_combine_fast_unchecked( - decryption_shares, - prepared_key_shares, - )) -} - pub fn share_combine_simple( decryption_shares: &[DecryptionShareSimple], lagrange_coeffs: &[E::ScalarField], diff --git a/ferveo-tdec/src/context.rs b/ferveo-tdec/src/context.rs index 238db71c..4bfb81fb 100644 --- a/ferveo-tdec/src/context.rs +++ b/ferveo-tdec/src/context.rs @@ -1,17 +1,15 @@ -use std::ops::Mul; - -use ark_ec::{pairing::Pairing, CurveGroup}; +use ark_ec::pairing::Pairing; use crate::{ - prepare_combine_simple, BlindedKeyShare, Ciphertext, CiphertextHeader, - DecryptionShareFast, DecryptionSharePrecomputed, DecryptionShareSimple, - PrivateKeyShare, PublicKeyShare, Result, + prepare_combine_simple, BlindedKeyShare, CiphertextHeader, + DecryptionSharePrecomputed, DecryptionShareSimple, PrivateKeyShare, + PublicKey, Result, }; #[derive(Clone, Debug)] pub struct PublicDecryptionContextFast { pub domain: E::ScalarField, - pub public_key_share: PublicKeyShare, + pub public_key: PublicKey, pub blinded_key_share: BlindedKeyShare, // This decrypter's contribution to N(0), namely (-1)^|domain| * \prod_i omega_i pub lagrange_n_0: E::ScalarField, @@ -21,7 +19,7 @@ pub struct PublicDecryptionContextFast { #[derive(Clone, Debug)] pub struct PublicDecryptionContextSimple { pub domain: E::ScalarField, - pub public_key_share: PublicKeyShare, + pub public_key: PublicKey, pub blinded_key_share: BlindedKeyShare, pub h: E::G2Affine, pub validator_public_key: E::G2, @@ -37,34 +35,6 @@ pub struct SetupParams { pub h: E::G2Affine, } -#[derive(Clone, Debug)] -pub struct PrivateDecryptionContextFast { - pub index: usize, - pub setup_params: SetupParams, - pub private_key_share: PrivateKeyShare, - pub public_decryption_contexts: Vec>, -} - -impl PrivateDecryptionContextFast { - pub fn create_share( - &self, - ciphertext: &Ciphertext, - aad: &[u8], - ) -> Result> { - ciphertext.check(aad, &self.setup_params.g_inv)?; - - let decryption_share = ciphertext - .commitment - .mul(self.setup_params.b_inv) - .into_affine(); - - Ok(DecryptionShareFast { - decrypter_index: self.index, - decryption_share, - }) - } -} - #[derive(Clone, Debug)] pub struct PrivateDecryptionContextSimple { pub index: usize, @@ -92,15 +62,16 @@ impl PrivateDecryptionContextSimple { &self, ciphertext_header: &CiphertextHeader, aad: &[u8], + selected_participants: &[usize], ) -> Result> { - let domain = self - 
.public_decryption_contexts + let selected_domain_points = selected_participants .iter() - .map(|c| c.domain) + .map(|i| self.public_decryption_contexts[*i].domain) .collect::>(); - let lagrange_coeffs = prepare_combine_simple::(&domain); + let lagrange_coeffs = + prepare_combine_simple::(&selected_domain_points); - DecryptionSharePrecomputed::new( + DecryptionSharePrecomputed::create( self.index, &self.setup_params.b, &self.private_key_share, diff --git a/ferveo-tdec/src/decryption.rs b/ferveo-tdec/src/decryption.rs index 0622e6a8..7c199fde 100644 --- a/ferveo-tdec/src/decryption.rs +++ b/ferveo-tdec/src/decryption.rs @@ -1,26 +1,17 @@ use std::ops::Mul; use ark_ec::{pairing::Pairing, CurveGroup}; -use ark_ff::{Field, One, Zero}; +use ark_ff::Field; use ferveo_common::serialization; -use itertools::{izip, zip_eq}; -use rand_core::RngCore; +use itertools::izip; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; use crate::{ - generate_random, Ciphertext, CiphertextHeader, PrivateKeyShare, - PublicDecryptionContextFast, PublicDecryptionContextSimple, Result, + Ciphertext, CiphertextHeader, PrivateKeyShare, + PublicDecryptionContextSimple, Result, }; -#[serde_as] -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DecryptionShareFast { - pub decrypter_index: usize, - #[serde_as(as = "serialization::SerdeAs")] - pub decryption_share: E::G1Affine, -} - #[serde_as] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub struct ValidatorShareChecksum { @@ -36,9 +27,6 @@ impl ValidatorShareChecksum { // C_i = dk_i^{-1} * U let checksum = ciphertext_header .commitment - // TODO: Should we panic here? I think we should since that would mean that the decryption key is invalid. - // And so, the validator should not be able to create a decryption share. - // And so, the validator should remake their keypair. .mul( validator_decryption_key .inverse() @@ -74,15 +62,18 @@ impl ValidatorShareChecksum { } } +/// A decryption share for a simple variant of the threshold decryption scheme. +/// In this variant, the decryption share require additional computation on the +/// client side int order to be combined. #[serde_as] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound( - serialize = "ValidatorShareChecksum: Serialize", - deserialize = "ValidatorShareChecksum: DeserializeOwned" -))] pub struct DecryptionShareSimple { #[serde_as(as = "serialization::SerdeAs")] pub decryption_share: E::TargetField, + #[serde(bound( + serialize = "ValidatorShareChecksum: Serialize", + deserialize = "ValidatorShareChecksum: DeserializeOwned" + ))] pub validator_checksum: ValidatorShareChecksum, } @@ -112,11 +103,8 @@ impl DecryptionShareSimple { ciphertext_header: &CiphertextHeader, ) -> Result { // D_i = e(U, Z_i) - let decryption_share = E::pairing( - ciphertext_header.commitment, - private_key_share.private_key_share, - ) - .0; + let decryption_share = + E::pairing(ciphertext_header.commitment, private_key_share.0).0; let validator_checksum = ValidatorShareChecksum::new( validator_decryption_key, @@ -146,21 +134,28 @@ impl DecryptionShareSimple { } } +/// A decryption share for a precomputed variant of the threshold decryption scheme. +/// In this variant, the decryption share is precomputed and can be combined +/// without additional computation on the client side. +/// The downside is that the threshold of decryption shares required to decrypt +/// is equal to the number of private key shares in the scheme. 
#[serde_as] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound( - serialize = "ValidatorShareChecksum: Serialize", - deserialize = "ValidatorShareChecksum: DeserializeOwned" -))] pub struct DecryptionSharePrecomputed { pub decrypter_index: usize, #[serde_as(as = "serialization::SerdeAs")] pub decryption_share: E::TargetField, + #[serde(bound( + serialize = "ValidatorShareChecksum: Serialize", + deserialize = "ValidatorShareChecksum: DeserializeOwned" + ))] pub validator_checksum: ValidatorShareChecksum, } impl DecryptionSharePrecomputed { - pub fn new( + /// Create a decryption share from the given parameters. + /// This function checks that the ciphertext is valid. + pub fn create( validator_index: usize, validator_decryption_key: &E::ScalarField, private_key_share: &PrivateKeyShare, @@ -179,6 +174,8 @@ impl DecryptionSharePrecomputed { ) } + /// Create a decryption share from the given parameters. + /// This function does not check that the ciphertext is valid. pub fn create_unchecked( validator_index: usize, validator_decryption_key: &E::ScalarField, @@ -190,11 +187,8 @@ impl DecryptionSharePrecomputed { let u_to_lagrange_coeff = ciphertext_header.commitment.mul(lagrange_coeff); // C_{λ_i} = e(U_{λ_i}, Z_i) - let decryption_share = E::pairing( - u_to_lagrange_coeff, - private_key_share.private_key_share, - ) - .0; + let decryption_share = + E::pairing(u_to_lagrange_coeff, private_key_share.0).0; let validator_checksum = ValidatorShareChecksum::new( validator_decryption_key, @@ -226,111 +220,6 @@ impl DecryptionSharePrecomputed { } } -// TODO: Remove this code? Currently only used in benchmarks. Move to benchmark suite? -pub fn batch_verify_decryption_shares( - pub_contexts: &[PublicDecryptionContextFast], - ciphertexts: &[Ciphertext], - decryption_shares: &[Vec>], - rng: &mut R, -) -> bool { - let num_ciphertexts = ciphertexts.len(); - let num_shares = decryption_shares[0].len(); - - // Get [b_i] H for each of the decryption shares - let blinding_keys = decryption_shares[0] - .iter() - .map(|d| { - pub_contexts[d.decrypter_index] - .blinded_key_share - .blinding_key_prepared - .clone() - }) - .collect::>(); - - // For each ciphertext, generate num_shares random scalars - let alpha_ij = (0..num_ciphertexts) - .map(|_| generate_random::<_, E>(num_shares, rng)) - .collect::>(); - - let mut pairings_a = Vec::with_capacity(num_shares + 1); - let mut pairings_b = Vec::with_capacity(num_shares + 1); - - // Compute \sum_j \alpha_{i,j} for each ciphertext i - let sum_alpha_i = alpha_ij - .iter() - .map(|alpha_j| alpha_j.iter().sum::()) - .collect::>(); - - // Compute \sum_i [ \sum_j \alpha_{i,j} ] U_i - let sum_u_i = E::G1Prepared::from( - izip!(ciphertexts.iter(), sum_alpha_i.iter()) - .map(|(c, alpha_j)| c.commitment.mul(*alpha_j)) - .sum::() - .into_affine(), - ); - - // e(\sum_i [ \sum_j \alpha_{i,j} ] U_i, -H) - pairings_a.push(sum_u_i); - pairings_b.push(pub_contexts[0].h_inv.clone()); - - let mut sum_d_i = vec![E::G1::zero(); num_shares]; - - // sum_D_i = { [\sum_i \alpha_{i,j} ] D_i } - for (d, alpha_j) in izip!(decryption_shares.iter(), alpha_ij.iter()) { - for (sum_alpha_d_i, d_ij, alpha) in - izip!(sum_d_i.iter_mut(), d.iter(), alpha_j.iter()) - { - *sum_alpha_d_i += d_ij.decryption_share.mul(*alpha); - } - } - - // e([\sum_i \alpha_{i,j} ] D_i, B_i) - for (d_i, b_i) in izip!(sum_d_i.iter(), blinding_keys.iter()) { - pairings_a.push(E::G1Prepared::from(d_i.into_affine())); - pairings_b.push(b_i.clone()); - } - - E::multi_pairing(pairings_a, pairings_b).0 == 
E::TargetField::one() -} - -pub fn verify_decryption_shares_fast( - pub_contexts: &[PublicDecryptionContextFast], - ciphertext: &Ciphertext, - decryption_shares: &[DecryptionShareFast], -) -> bool { - // [b_i] H - let blinding_keys = decryption_shares - .iter() - .map(|d| { - pub_contexts[d.decrypter_index] - .blinded_key_share - .blinding_key_prepared - .clone() - }) - .collect::>(); - - let mut pairing_a: Vec = vec![]; - let mut pairing_b = vec![]; - - // e(U, -H) - pairing_a.push(ciphertext.commitment.into()); - pairing_b.push(pub_contexts[0].h_inv.clone()); - - for (d_i, p_i) in zip_eq(decryption_shares, blinding_keys) { - let mut pairing_a_i = pairing_a.clone(); - let mut pairing_b_i = pairing_b.clone(); - // e(D_i, B_i) - pairing_a_i.push(d_i.decryption_share.into()); - pairing_b_i.push(p_i.clone()); - if E::multi_pairing(pairing_a_i, pairing_b_i).0 != E::TargetField::one() - { - return false; - } - } - - true -} - pub fn verify_decryption_shares_simple( pub_contexts: &Vec>, ciphertext: &Ciphertext, @@ -355,26 +244,3 @@ pub fn verify_decryption_shares_simple( } true } - -#[cfg(test)] -mod tests { - use ark_ec::AffineRepr; - use ferveo_common::{FromBytes, ToBytes}; - - use crate::*; - - type E = ark_bls12_381::Bls12_381; - - #[test] - fn decryption_share_serialization() { - let decryption_share = DecryptionShareFast:: { - decrypter_index: 1, - decryption_share: ark_bls12_381::G1Affine::generator(), - }; - - let serialized = decryption_share.to_bytes().unwrap(); - let deserialized: DecryptionShareFast = - DecryptionShareFast::from_bytes(&serialized).unwrap(); - assert_eq!(serialized, deserialized.to_bytes().unwrap()) - } -} diff --git a/ferveo-tdec/src/key_share.rs b/ferveo-tdec/src/key_share.rs index 2daaae56..cd04c356 100644 --- a/ferveo-tdec/src/key_share.rs +++ b/ferveo-tdec/src/key_share.rs @@ -3,42 +3,35 @@ use std::ops::Mul; use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; use ark_ff::One; use ark_std::UniformRand; +use ferveo_common::serialization; use rand_core::RngCore; -use zeroize::ZeroizeOnDrop; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use zeroize::{Zeroize, ZeroizeOnDrop}; -#[derive(Debug, Clone)] -pub struct PublicKeyShare { - pub public_key_share: E::G1Affine, // A_{i, \omega_i} -} +#[serde_as] +#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct PublicKey( + #[serde_as(as = "serialization::SerdeAs")] pub E::G1Affine, // A_{i, \omega_i} +); #[derive(Debug, Clone)] pub struct BlindedKeyShare { pub blinding_key: E::G2Affine, // [b] H pub blinded_key_share: E::G2Affine, // [b] Z_{i, \omega_i} - pub blinding_key_prepared: E::G2Prepared, -} - -pub fn generate_random( - n: usize, - rng: &mut R, -) -> Vec { - (0..n) - .map(|_| E::ScalarField::rand(rng)) - .collect::>() } impl BlindedKeyShare { pub fn verify_blinding( &self, - public_key_share: &PublicKeyShare, + public_key: &PublicKey, rng: &mut R, ) -> bool { let g = E::G1Affine::generator(); let alpha = E::ScalarField::rand(rng); - let alpha_a = E::G1Prepared::from( - g + public_key_share.public_key_share.mul(alpha).into_affine(), - ); + let alpha_a = + E::G1Prepared::from(g + public_key.0.mul(alpha).into_affine()); // \sum_i(Y_i) let alpha_z = E::G2Prepared::from( @@ -58,18 +51,20 @@ impl BlindedKeyShare { } } -#[derive(Debug, Clone, PartialEq, Eq, ZeroizeOnDrop)] -pub struct PrivateKeyShare { - pub private_key_share: E::G2Affine, -} +#[serde_as] +#[derive( + Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Zeroize, ZeroizeOnDrop, +)] +pub struct 
PrivateKeyShare( + #[serde_as(as = "serialization::SerdeAs")] pub E::G2Affine, +); impl PrivateKeyShare { pub fn blind(&self, b: E::ScalarField) -> BlindedKeyShare { let blinding_key = E::G2Affine::generator().mul(b).into_affine(); BlindedKeyShare:: { blinding_key, - blinding_key_prepared: E::G2Prepared::from(blinding_key), - blinded_key_share: self.private_key_share.mul(b).into_affine(), + blinded_key_share: self.0.mul(b).into_affine(), } } } diff --git a/ferveo-tdec/src/lib.rs b/ferveo-tdec/src/lib.rs index 297b066c..e41ff704 100644 --- a/ferveo-tdec/src/lib.rs +++ b/ferveo-tdec/src/lib.rs @@ -61,130 +61,25 @@ pub mod test_common { pub use ark_bls12_381::Bls12_381 as EllipticCurve; use ark_ec::{pairing::Pairing, AffineRepr}; pub use ark_ff::UniformRand; - use ark_ff::{Field, One, Zero}; + use ark_ff::{Field, Zero}; use ark_poly::{ univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Polynomial, }; use itertools::izip; - use rand_core::RngCore; use subproductdomain::fast_multiexp; pub use super::*; - pub fn setup_fast( - threshold: usize, - shares_num: usize, - rng: &mut impl RngCore, - ) -> ( - E::G1Affine, - E::G2Affine, - Vec>, - ) { - assert!(shares_num >= threshold); - - // Generators G∈G1, H∈G2 - let g = E::G1Affine::generator(); - let h = E::G2Affine::generator(); - - // The dealer chooses a uniformly random polynomial f of degree t-1 - let threshold_poly = - DensePolynomial::::rand(threshold - 1, rng); - // Domain, or omega Ω - let fft_domain = - ark_poly::GeneralEvaluationDomain::::new( - shares_num, - ) - .unwrap(); - // `evals` are evaluations of the polynomial f over the domain, omega: f(ω_j) for ω_j in Ω - let evals = threshold_poly.evaluate_over_domain_by_ref(fft_domain); - - // A - public key shares of participants - let pubkey_shares = fast_multiexp(&evals.evals, g.into_group()); - let pubkey_share = g.mul(evals.evals[0]); - debug_assert!(pubkey_shares[0] == E::G1Affine::from(pubkey_share)); - - // Y, but only when b = 1 - private key shares of participants - let privkey_shares = fast_multiexp(&evals.evals, h.into_group()); - - // a_0 - let x = threshold_poly.coeffs[0]; - - // F_0 - The commitment to the constant term, and is the public key output Y from PVDKG - let pubkey = g.mul(x); - let privkey = h.mul(x); - - let mut domain_points = Vec::with_capacity(shares_num); - let mut point = E::ScalarField::one(); - let mut domain_points_inv = Vec::with_capacity(shares_num); - let mut point_inv = E::ScalarField::one(); - - for _ in 0..shares_num { - domain_points.push(point); // 1, t, t^2, t^3, ...; where t is a scalar generator fft_domain.group_gen - point *= fft_domain.group_gen(); - domain_points_inv.push(point_inv); - point_inv *= fft_domain.group_gen_inv(); - } - - let mut private_contexts = vec![]; - let mut public_contexts = vec![]; - - // (domain, domain_inv, A, Y) - for (index, (domain, domain_inv, public, private)) in izip!( - domain_points.iter(), - domain_points_inv.iter(), - pubkey_shares.iter(), - privkey_shares.iter() - ) - .enumerate() - { - let private_key_share = PrivateKeyShare:: { - private_key_share: *private, - }; - let b = E::ScalarField::rand(rng); - let mut blinded_key_shares = private_key_share.blind(b); - blinded_key_shares.multiply_by_omega_inv(domain_inv); - private_contexts.push(PrivateDecryptionContextFast:: { - index, - setup_params: SetupParams { - b, - b_inv: b.inverse().unwrap(), - g, - h_inv: E::G2Prepared::from(-h.into_group()), - g_inv: E::G1Prepared::from(-g.into_group()), - h, - }, - private_key_share, - 
public_decryption_contexts: vec![], - }); - public_contexts.push(PublicDecryptionContextFast:: { - domain: *domain, - public_key_share: PublicKeyShare:: { - public_key_share: *public, - }, - blinded_key_share: blinded_key_shares, - lagrange_n_0: *domain, - h_inv: E::G2Prepared::from(-h.into_group()), - }); - } - for private in private_contexts.iter_mut() { - private.public_decryption_contexts = public_contexts.clone(); - } - - (pubkey.into(), privkey.into(), private_contexts) - } - pub fn setup_simple( - threshold: usize, shares_num: usize, + threshold: usize, rng: &mut impl rand::Rng, ) -> ( - E::G1Affine, - E::G2Affine, + PublicKey, + PrivateKeyShare, Vec>, ) { - assert!(shares_num >= threshold); - let g = E::G1Affine::generator(); let h = E::G2Affine::generator(); @@ -227,9 +122,7 @@ pub mod test_common { izip!(shares_x.iter(), pubkey_shares.iter(), privkey_shares.iter()) .enumerate() { - let private_key_share = PrivateKeyShare:: { - private_key_share: *private, - }; + let private_key_share = PrivateKeyShare::(*private); let b = E::ScalarField::rand(rng); let blinded_key_share = private_key_share.blind(b); private_contexts.push(PrivateDecryptionContextSimple:: { @@ -247,9 +140,7 @@ pub mod test_common { }); public_contexts.push(PublicDecryptionContextSimple:: { domain: *domain, - public_key_share: PublicKeyShare:: { - public_key_share: *public, - }, + public_key: PublicKey::(*public), blinded_key_share, h, validator_public_key: h.mul(b), @@ -259,22 +150,26 @@ pub mod test_common { private.public_decryption_contexts = public_contexts.clone(); } - (pubkey.into(), privkey.into(), private_contexts) + ( + PublicKey(pubkey.into()), + PrivateKeyShare(privkey.into()), + private_contexts, + ) } pub fn setup_precomputed( shares_num: usize, + threshold: usize, rng: &mut impl rand::Rng, ) -> ( - E::G1Affine, - E::G2Affine, + PublicKey, + PrivateKeyShare, Vec>, ) { - // In precomputed variant, the security threshold is equal to the number of shares - setup_simple::(shares_num, shares_num, rng) + setup_simple::(shares_num, threshold, rng) } - pub fn make_shared_secret( + pub fn create_shared_secret_simple( pub_contexts: &[PublicDecryptionContextSimple], decryption_shares: &[DecryptionShareSimple], ) -> SharedSecret { @@ -291,8 +186,12 @@ mod tests { use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; use ark_std::{test_rng, UniformRand}; use ferveo_common::{FromBytes, ToBytes}; + use rand::seq::IteratorRandom; - use crate::test_common::{make_shared_secret, setup_simple, *}; + use crate::{ + api::DecryptionSharePrecomputed, + test_common::{create_shared_secret_simple, setup_simple, *}, + }; type E = ark_bls12_381::Bls12_381; type TargetField = ::TargetField; @@ -306,7 +205,7 @@ mod tests { let msg = "my-msg".as_bytes().to_vec(); let aad: &[u8] = "my-aad".as_bytes(); - let (pubkey, _, _) = setup_fast::(threshold, shares_num, rng); + let (pubkey, _, _) = setup_simple::(threshold, shares_num, rng); let ciphertext = encrypt::(SecretBox::new(msg), aad, &pubkey, rng).unwrap(); @@ -353,22 +252,6 @@ mod tests { .is_err()); } - #[test] - fn tdec_fast_variant_share_validation() { - let rng = &mut test_rng(); - let shares_num = 16; - let threshold = shares_num * 2 / 3; - let msg = "my-msg".as_bytes().to_vec(); - let aad: &[u8] = "my-aad".as_bytes(); - - let (pubkey, _, contexts) = setup_fast::(threshold, shares_num, rng); - let ciphertext = - encrypt::(SecretBox::new(msg), aad, &pubkey, rng).unwrap(); - - let bad_aad = "bad aad".as_bytes(); - assert!(contexts[0].create_share(&ciphertext, bad_aad).is_err()); 
- } - #[test] fn tdec_simple_variant_share_validation() { let rng = &mut test_rng(); @@ -378,7 +261,7 @@ mod tests { let aad: &[u8] = "my-aad".as_bytes(); let (pubkey, _, contexts) = - setup_simple::(threshold, shares_num, rng); + setup_simple::(shares_num, threshold, rng); let ciphertext = encrypt::(SecretBox::new(msg), aad, &pubkey, rng).unwrap(); @@ -388,56 +271,6 @@ mod tests { .is_err()); } - #[test] - fn tdec_fast_variant_e2e() { - let mut rng = &mut test_rng(); - let shares_num = 16; - let threshold = shares_num * 2 / 3; - let msg = "my-msg".as_bytes().to_vec(); - let aad: &[u8] = "my-aad".as_bytes(); - - let (pubkey, _, contexts) = - setup_fast::(threshold, shares_num, &mut rng); - let ciphertext = - encrypt::(SecretBox::new(msg.clone()), aad, &pubkey, rng) - .unwrap(); - let g_inv = &contexts[0].setup_params.g_inv; - - let mut decryption_shares: Vec> = vec![]; - for context in contexts.iter() { - decryption_shares - .push(context.create_share(&ciphertext, aad).unwrap()); - } - - // TODO: Verify and enable this check - /*for pub_context in contexts[0].public_decryption_contexts.iter() { - assert!(pub_context - .blinded_key_shares - .verify_blinding(&pub_context.public_key_shares, rng)); - }*/ - - let prepared_blinded_key_shares = prepare_combine_fast( - &contexts[0].public_decryption_contexts, - &decryption_shares, - ); - - let shared_secret = share_combine_fast( - &contexts[0].public_decryption_contexts, - &ciphertext, - &decryption_shares, - &prepared_blinded_key_shares, - ) - .unwrap(); - - test_ciphertext_validation_fails( - &msg, - aad, - &ciphertext, - &shared_secret, - g_inv, - ); - } - #[test] fn tdec_simple_variant_e2e() { let mut rng = &mut test_rng(); @@ -447,7 +280,7 @@ mod tests { let aad: &[u8] = "my-aad".as_bytes(); let (pubkey, _, contexts) = - setup_simple::(threshold, shares_num, &mut rng); + setup_simple::(shares_num, threshold, &mut rng); let g_inv = &contexts[0].setup_params.g_inv; let ciphertext = @@ -462,10 +295,10 @@ mod tests { }) .take(threshold) .collect(); - let pub_contexts = + let selected_contexts = contexts[0].public_decryption_contexts[..threshold].to_vec(); let shared_secret = - make_shared_secret(&pub_contexts, &decryption_shares); + create_shared_secret_simple(&selected_contexts, &decryption_shares); test_ciphertext_validation_fails( &msg, @@ -476,13 +309,18 @@ mod tests { ); // If we use less than threshold shares, we should fail - let decryption_shares = decryption_shares[..threshold - 1].to_vec(); - let pub_contexts = pub_contexts[..threshold - 1].to_vec(); - let shared_secret = - make_shared_secret(&pub_contexts, &decryption_shares); - - let result = - decrypt_with_shared_secret(&ciphertext, aad, &shared_secret, g_inv); + let not_enough_dec_shares = decryption_shares[..threshold - 1].to_vec(); + let not_enough_contexts = selected_contexts[..threshold - 1].to_vec(); + let bash_shared_secret = create_shared_secret_simple( + ¬_enough_contexts, + ¬_enough_dec_shares, + ); + let result = decrypt_with_shared_secret( + &ciphertext, + aad, + &bash_shared_secret, + g_inv, + ); assert!(result.is_err()); } @@ -490,30 +328,39 @@ mod tests { fn tdec_precomputed_variant_e2e() { let mut rng = &mut test_rng(); let shares_num = 16; + let threshold = shares_num * 2 / 3; let msg = "my-msg".as_bytes().to_vec(); let aad: &[u8] = "my-aad".as_bytes(); let (pubkey, _, contexts) = - setup_precomputed::(shares_num, &mut rng); + setup_precomputed::(shares_num, threshold, &mut rng); let g_inv = &contexts[0].setup_params.g_inv; let ciphertext = 
encrypt::(SecretBox::new(msg.clone()), aad, &pubkey, rng) .unwrap(); - let decryption_shares: Vec<_> = contexts + let selected_participants = + (0..threshold).choose_multiple(rng, threshold); + let selected_contexts = contexts + .iter() + .filter(|c| selected_participants.contains(&c.index)) + .cloned() + .collect::>(); + + let decryption_shares = selected_contexts .iter() .map(|context| { context .create_share_precomputed( &ciphertext.header().unwrap(), aad, + &selected_participants, ) .unwrap() }) - .collect(); + .collect::>(); let shared_secret = share_combine_precomputed::(&decryption_shares); - test_ciphertext_validation_fails( &msg, aad, @@ -522,19 +369,17 @@ mod tests { g_inv, ); - // Note that in this variant, if we use less than `share_num` shares, we will get a - // decryption error. - - let not_enough_shares = &decryption_shares[0..shares_num - 1]; - let bad_shared_secret = - share_combine_precomputed::(not_enough_shares); - assert!(decrypt_with_shared_secret( + // If we use less than threshold shares, we should fail + let not_enough_dec_shares = decryption_shares[..threshold - 1].to_vec(); + let bash_shared_secret = + share_combine_precomputed(¬_enough_dec_shares); + let result = decrypt_with_shared_secret( &ciphertext, aad, - &bad_shared_secret, + &bash_shared_secret, g_inv, - ) - .is_err()); + ); + assert!(result.is_err()); } #[test] @@ -546,7 +391,7 @@ mod tests { let aad: &[u8] = "my-aad".as_bytes(); let (pubkey, _, contexts) = - setup_simple::(threshold, shares_num, &mut rng); + setup_simple::(shares_num, threshold, &mut rng); let ciphertext = encrypt::(SecretBox::new(msg), aad, &pubkey, rng).unwrap(); diff --git a/ferveo-wasm/Cargo.toml b/ferveo-wasm/Cargo.toml index 591c26a7..520b3251 100644 --- a/ferveo-wasm/Cargo.toml +++ b/ferveo-wasm/Cargo.toml @@ -17,6 +17,6 @@ crate-type = ["cdylib", "rlib"] ferveo = { package = "ferveo-pre-release", path = "../ferveo", features = ["bindings-wasm"] } [dev-dependencies] -wasm-bindgen-test = "0.3.28" -itertools = "0.10.5" +wasm-bindgen-test = { workspace = true } +itertools = { workspace = true } diff --git a/ferveo-wasm/examples/node/src/main.test.ts b/ferveo-wasm/examples/node/src/main.test.ts index 00da665b..ba5361c3 100644 --- a/ferveo-wasm/examples/node/src/main.test.ts +++ b/ferveo-wasm/examples/node/src/main.test.ts @@ -52,17 +52,16 @@ function setupTest( // every validator can aggregate the transcripts const dkg = new Dkg(TAU, sharesNum, threshold, validators, validators[0]); + // Both the server and the client can aggregate the transcripts and verify them const serverAggregate = dkg.aggregateTranscript(messages); expect(serverAggregate.verify(validatorsNum, messages)).toBe(true); - - // Client can also aggregate the transcripts and verify them const clientAggregate = new AggregatedTranscript(messages); expect(clientAggregate.verify(validatorsNum, messages)).toBe(true); // Client creates a ciphertext and requests decryption shares from validators const msg = Buffer.from("my-msg"); const aad = Buffer.from("my-aad"); - const ciphertext = ferveoEncrypt(msg, aad, dkg.publicKey()); + const ciphertext = ferveoEncrypt(msg, aad, clientAggregate.publicKey); return { validatorKeypairs, @@ -81,8 +80,14 @@ describe("ferveo-wasm", () => { const sharesNum = 4; const threshold = sharesNum - 1; [sharesNum, sharesNum + 2].forEach((validatorsNum) => { - const { validatorKeypairs, validators, messages, msg, aad, ciphertext } = - setupTest(sharesNum, validatorsNum, threshold); + const { + validatorKeypairs, + validators, + messages, + msg, + 
aad, + ciphertext + } = setupTest(sharesNum, validatorsNum, threshold); // Having aggregated the transcripts, the validators can now create decryption shares const decryptionShares: DecryptionShareSimple[] = []; @@ -90,11 +95,11 @@ describe("ferveo-wasm", () => { expect(validator.publicKey.equals(keypair.publicKey)).toBe(true); const dkg = new Dkg(TAU, sharesNum, threshold, validators, validator); - const aggregate = dkg.aggregateTranscript(messages); - const isValid = aggregate.verify(validatorsNum, messages); + const serverAggregate = dkg.aggregateTranscript(messages); + const isValid = serverAggregate.verify(validatorsNum, messages); expect(isValid).toBe(true); - const decryptionShare = aggregate.createDecryptionShareSimple( + const decryptionShare = serverAggregate.createDecryptionShareSimple( dkg, ciphertext.header, aad, @@ -105,49 +110,52 @@ describe("ferveo-wasm", () => { // Now, the decryption share can be used to decrypt the ciphertext // This part is in the client API - const sharedSecret = combineDecryptionSharesSimple(decryptionShares); - - // The client should have access to the public parameters of the DKG - const plaintext = decryptWithSharedSecret(ciphertext, aad, sharedSecret); expect(Buffer.from(plaintext)).toEqual(msg); }); }); it("precomputed tdec variant", () => { - const sharesNum = 4; - const threshold = sharesNum; // threshold is equal to sharesNum in precomputed variant + const sharesNum = 8; + const threshold = sharesNum * 2 / 3; [sharesNum, sharesNum + 2].forEach((validatorsNum) => { - const { validatorKeypairs, validators, messages, msg, aad, ciphertext } = - setupTest(sharesNum, validatorsNum, threshold); + const { + validatorKeypairs, + validators, + messages, + msg, + aad, + ciphertext + } = setupTest(sharesNum, validatorsNum, threshold); + + // In precomputed variant, client selects a subset of validators to create decryption shares + const selectedValidators = validators.slice(0, threshold); + const selectedValidatorKeypairs = validatorKeypairs.slice(0, threshold); // Having aggregated the transcripts, the validators can now create decryption shares const decryptionShares: DecryptionSharePrecomputed[] = []; - zip(validators, validatorKeypairs).forEach(([validator, keypair]) => { + zip(selectedValidators, selectedValidatorKeypairs).forEach(([validator, keypair]) => { expect(validator.publicKey.equals(keypair.publicKey)).toBe(true); const dkg = new Dkg(TAU, sharesNum, threshold, validators, validator); - const aggregate = dkg.aggregateTranscript(messages); - const isValid = aggregate.verify(validatorsNum, messages); + const serverAggregate = dkg.aggregateTranscript(messages); + const isValid = serverAggregate.verify(validatorsNum, messages); expect(isValid).toBe(true); - const decryptionShare = aggregate.createDecryptionSharePrecomputed( + const decryptionShare = serverAggregate.createDecryptionSharePrecomputed( dkg, ciphertext.header, aad, - keypair + keypair, + selectedValidators, ); decryptionShares.push(decryptionShare); }); // Now, the decryption share can be used to decrypt the ciphertext // This part is in the client API - const sharedSecret = combineDecryptionSharesPrecomputed(decryptionShares); - - // The client should have access to the public parameters of the DKG - const plaintext = decryptWithSharedSecret(ciphertext, aad, sharedSecret); expect(Buffer.from(plaintext)).toEqual(msg); }); diff --git a/ferveo-wasm/tests/node.rs b/ferveo-wasm/tests/node.rs index 7b35efa5..cddde011 100644 --- a/ferveo-wasm/tests/node.rs +++ b/ferveo-wasm/tests/node.rs 
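For reference, a minimal Rust sketch of the client/server flow that the WASM tests above and below exercise under the API reworked in this diff. It is illustrative only, not shipped code: it uses only `ferveo::api` items visible elsewhere in this change set (`Dkg`, `AggregatedTranscript`, `ValidatorMessage`, `Keypair`, `SecretBox`, `encrypt`, `combine_shares_simple`, `decrypt_with_shared_secret`), and the function name and parameters are assumptions standing in for the usual test setup.

use ferveo::api::*;

// Sketch: one round of simple-variant threshold decryption under the reworked API.
fn simple_tdec_flow_sketch(
    messages: &[ValidatorMessage],  // transcripts published by the validators
    validators_num: u32,
    security_threshold: usize,
    validator_dkgs: &[Dkg],         // one local DKG instance per validator
    validator_keypairs: &[Keypair], // the matching session keypairs, same order
    msg: &[u8],
    aad: &[u8],
) -> Vec<u8> {
    // The client aggregates and verifies the transcripts locally...
    let client_aggregate = AggregatedTranscript::new(messages).unwrap();
    assert!(client_aggregate.verify(validators_num, messages).unwrap());

    // ...and encrypts against the aggregate's public key (the public key
    // accessors on `Dkg` itself are removed by this diff).
    let ciphertext = encrypt(
        SecretBox::new(msg.to_vec()),
        aad,
        &client_aggregate.public_key(),
    )
    .unwrap();

    // Each validator aggregates the same transcripts and creates a simple
    // decryption share; only `security_threshold` of them are needed.
    let decryption_shares: Vec<DecryptionShareSimple> = validator_dkgs
        .iter()
        .zip(validator_keypairs.iter())
        .map(|(dkg, keypair)| {
            let server_aggregate = dkg.aggregate_transcripts(messages).unwrap();
            server_aggregate
                .create_decryption_share_simple(
                    dkg,
                    &ciphertext.header().unwrap(),
                    aad,
                    keypair,
                )
                .unwrap()
        })
        .take(security_threshold)
        .collect();

    // Back on the client: combine the shares and decrypt.
    let shared_secret = combine_shares_simple(&decryption_shares);
    decrypt_with_shared_secret(&ciphertext, aad, &shared_secret).unwrap()
}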
@@ -1,4 +1,4 @@ -//! Test suite for the Nodejs. +//! Test suite for the Node.js. extern crate wasm_bindgen_test; @@ -45,7 +45,6 @@ fn setup_dkg( ) .unwrap(); let transcript = validator_dkg.generate_transcript().unwrap(); - ValidatorMessage::new(sender, &transcript).unwrap() }); @@ -61,6 +60,8 @@ fn setup_dkg( ) .unwrap(); + // We only need `shares_num` messages to aggregate the transcripts + let messages = messages.take(shares_num as usize).collect::>(); let messages_js = into_js_array(messages); // Server can aggregate the transcripts and verify them @@ -80,7 +81,8 @@ fn setup_dkg( // In the meantime, the client creates a ciphertext and decryption request let msg = "my-msg".as_bytes().to_vec(); let aad = "my-aad".as_bytes().to_vec(); - let ciphertext = ferveo_encrypt(&msg, &aad, &dkg.public_key()).unwrap(); + let ciphertext = + ferveo_encrypt(&msg, &aad, &client_aggregate.public_key()).unwrap(); ( validator_keypairs, @@ -124,7 +126,6 @@ fn tdec_simple() { let is_valid = aggregate.verify(validators_num, &messages_js).unwrap(); assert!(is_valid); - aggregate .create_decryption_share_simple( &dkg, @@ -134,16 +135,16 @@ fn tdec_simple() { ) .unwrap() }) + // We only need `security_threshold` decryption shares in simple variant + .take(security_threshold as usize) .collect::>(); - let decryption_shares_js = into_js_array(decryption_shares); - // Now, the decryption share can be used to decrypt the ciphertext - // This part is in the client API + let decryption_shares_js = into_js_array(decryption_shares); + // Now, decryption shares can be used to decrypt the ciphertext + // This part happens in the client API let shared_secret = combine_decryption_shares_simple(&decryption_shares_js).unwrap(); - - // The client should have access to the public parameters of the DKG let plaintext = decrypt_with_shared_secret(&ciphertext, &aad, &shared_secret) .unwrap(); @@ -154,7 +155,7 @@ fn tdec_simple() { #[wasm_bindgen_test] fn tdec_precomputed() { let shares_num = 16; - let security_threshold = shares_num; // Must be equal to shares_num in precomputed variant + let security_threshold = shares_num * 2 / 3; for validators_num in [shares_num, shares_num + 2] { let ( validator_keypairs, @@ -166,6 +167,11 @@ fn tdec_precomputed() { ciphertext, ) = setup_dkg(shares_num, validators_num, security_threshold); + // In precomputed variant, the client selects a subset of validators to create decryption shares + let selected_validators = + validators[..(security_threshold as usize)].to_vec(); + let selected_validators_js = into_js_array(selected_validators); + // Having aggregated the transcripts, the validators can now create decryption shares let decryption_shares = zip_eq(validators, validator_keypairs) .map(|(validator, keypair)| { @@ -177,32 +183,31 @@ fn tdec_precomputed() { &validator, ) .unwrap(); - let aggregate = + let server_aggregate = dkg.aggregate_transcripts(&messages_js).unwrap(); - let is_valid = - aggregate.verify(validators_num, &messages_js).unwrap(); - assert!(is_valid); - - aggregate + assert!(server_aggregate + .verify(validators_num, &messages_js) + .unwrap()); + server_aggregate .create_decryption_share_precomputed( &dkg, &ciphertext.header().unwrap(), &aad, &keypair, + &selected_validators_js, ) .unwrap() }) + // We need `security_threshold` decryption shares to decrypt + .take(security_threshold as usize) .collect::>(); let decryption_shares_js = into_js_array(decryption_shares); - // Now, the decryption share can be used to decrypt the ciphertext - // This part is in the client API - + // 
Now, decryption shares can be used to decrypt the ciphertext + // This part happens in the client API let shared_secret = combine_decryption_shares_precomputed(&decryption_shares_js) .unwrap(); - - // The client should have access to the public parameters of the DKG let plaintext = decrypt_with_shared_secret(&ciphertext, &aad, &shared_secret) .unwrap(); diff --git a/ferveo/Cargo.toml b/ferveo/Cargo.toml index d2a0d0eb..8a9ac7d6 100644 --- a/ferveo/Cargo.toml +++ b/ferveo/Cargo.toml @@ -14,49 +14,43 @@ authors = ["Heliax AG ", "Piotr Roslaniec (PubliclyVerifiableDkg, Message) { +) -> ( + PubliclyVerifiableDkg, + PubliclyVerifiableSS, +) { let mut transcripts = vec![]; for i in 0..shares_num { - let mut dkg = setup_dkg(i as usize, shares_num); - transcripts.push(dkg.share(rng).expect("Test failed")); + let dkg = setup_dkg(i as usize, shares_num); + transcripts.push(dkg.generate_transcript(rng).expect("Test failed")); } let dkg = setup_dkg(0, shares_num); let transcript = transcripts[0].clone(); @@ -78,20 +81,12 @@ pub fn bench_verify_full(c: &mut Criterion) { let pvss_verify_optimistic = { move || { - if let Message::Deal(ss) = transcript { - black_box(ss.verify_optimistic()); - } else { - panic!("Expected Deal"); - } + black_box(transcript.verify_optimistic()); } }; let pvss_verify_full = { move || { - if let Message::Deal(ss) = transcript { - black_box(ss.verify_full(&dkg)); - } else { - panic!("Expected Deal"); - } + black_box(transcript.verify_full(&dkg).unwrap()); } }; diff --git a/ferveo/examples/bench_primitives_size.rs b/ferveo/examples/bench_primitives_size.rs index d44d394c..755d10e1 100644 --- a/ferveo/examples/bench_primitives_size.rs +++ b/ferveo/examples/bench_primitives_size.rs @@ -91,20 +91,18 @@ fn setup( shares_num: u32, security_threshold: u32, rng: &mut StdRng, -) -> PubliclyVerifiableDkg { +) -> ( + PubliclyVerifiableDkg, + Vec>, +) { let mut transcripts = vec![]; for i in 0..shares_num { - let mut dkg = setup_dkg(i as usize, shares_num, security_threshold); - let message = dkg.share(rng).expect("Test failed"); - let sender = dkg.get_validator(&dkg.me.public_key).unwrap(); - transcripts.push((sender.clone(), message.clone())); - } - - let mut dkg = setup_dkg(0, shares_num, security_threshold); - for (sender, pvss) in transcripts.into_iter() { - dkg.apply_message(&sender, &pvss).expect("Setup failed"); + let dkg = setup_dkg(i as usize, shares_num, security_threshold); + let transcript = dkg.generate_transcript(rng).expect("Test failed"); + transcripts.push(transcript.clone()); } - dkg + let dkg = setup_dkg(0, shares_num, security_threshold); + (dkg, transcripts) } fn main() { @@ -128,9 +126,8 @@ fn main() { for (shares_num, threshold) in configs { println!("shares_num: {shares_num}, threshold: {threshold}"); - let dkg = setup(*shares_num as u32, threshold, rng); - let transcript = &dkg.vss.values().next().unwrap(); - let transcript_bytes = bincode::serialize(&transcript).unwrap(); + let (_, transcripts) = setup(*shares_num as u32, threshold, rng); + let transcript_bytes = bincode::serialize(&transcripts[0]).unwrap(); save_data( *shares_num as usize, diff --git a/ferveo/src/api.rs b/ferveo/src/api.rs index 7328515e..97a566fa 100644 --- a/ferveo/src/api.rs +++ b/ferveo/src/api.rs @@ -1,5 +1,6 @@ -use std::{fmt, io}; +use std::{collections::HashMap, fmt, io}; +use ark_ec::CurveGroup; use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::UniformRand; @@ -7,39 +8,37 @@ use bincode; use 
ferveo_common::serialization; pub use ferveo_tdec::api::{ prepare_combine_simple, share_combine_precomputed, share_combine_simple, - Fr, G1Affine, G1Prepared, G2Affine, SecretBox, E, + DecryptionSharePrecomputed, Fr, G1Affine, G1Prepared, G2Affine, SecretBox, + E, }; use generic_array::{ typenum::{Unsigned, U48}, GenericArray, }; -use rand::RngCore; -use serde::{Deserialize, Serialize}; +use rand::{thread_rng, RngCore}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; -pub type PublicKey = ferveo_common::PublicKey; -pub type Keypair = ferveo_common::Keypair; -pub type Validator = crate::Validator; -pub type Transcript = PubliclyVerifiableSS; - -pub type ValidatorMessage = (Validator, Transcript); - #[cfg(feature = "bindings-python")] use crate::bindings_python; #[cfg(feature = "bindings-wasm")] use crate::bindings_wasm; pub use crate::EthereumAddress; use crate::{ - do_verify_aggregation, Error, Message, PVSSMap, PubliclyVerifiableParams, + do_verify_aggregation, Error, PubliclyVerifiableParams, PubliclyVerifiableSS, Result, }; -pub type DecryptionSharePrecomputed = - ferveo_tdec::api::DecryptionSharePrecomputed; +pub type PublicKey = ferveo_common::PublicKey; +pub type Keypair = ferveo_common::Keypair; +pub type Validator = crate::Validator; +pub type Transcript = PubliclyVerifiableSS; +pub type ValidatorMessage = (Validator, Transcript); +pub type DomainPoint = crate::DomainPoint; // Normally, we would use a custom trait for this, but we can't because -// the arkworks will not let us create a blanket implementation for G1Affine -// and Fr types. So instead, we're using this shared utility function: +// the `arkworks` will not let us create a blanket implementation for G1Affine +// and `Fr` types. So instead, we're using this shared utility function: pub fn to_bytes(item: &T) -> Result> { let mut writer = Vec::new(); item.serialize_compressed(&mut writer)?; @@ -55,11 +54,11 @@ pub fn from_bytes(bytes: &[u8]) -> Result { pub fn encrypt( message: SecretBox>, aad: &[u8], - pubkey: &DkgPublicKey, + public_key: &DkgPublicKey, ) -> Result { - let mut rng = rand::thread_rng(); + let mut rng = thread_rng(); let ciphertext = - ferveo_tdec::api::encrypt(message, aad, &pubkey.0, &mut rng)?; + ferveo_tdec::api::encrypt(message, aad, &public_key.0, &mut rng)?; Ok(Ciphertext(ciphertext)) } @@ -68,12 +67,12 @@ pub fn decrypt_with_shared_secret( aad: &[u8], shared_secret: &SharedSecret, ) -> Result> { - let dkg_public_params = DkgPublicParameters::default(); + let g_inv = PubliclyVerifiableParams::::default().g_inv(); ferveo_tdec::api::decrypt_with_shared_secret( &ciphertext.0, aad, &shared_secret.0, - &dkg_public_params.g1_inv, + &g_inv, ) .map_err(Error::from) } @@ -91,7 +90,6 @@ impl Ciphertext { } } -#[serde_as] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct CiphertextHeader(ferveo_tdec::api::CiphertextHeader); @@ -143,15 +141,19 @@ impl From for FerveoVariant { } } -#[serde_as] #[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct DkgPublicKey( - #[serde_as(as = "serialization::SerdeAs")] pub(crate) G1Affine, + #[serde(bound( + serialize = "ferveo_tdec::PublicKey: Serialize", + deserialize = "ferveo_tdec::PublicKey: DeserializeOwned" + ))] + pub(crate) ferveo_tdec::PublicKey, ); +// TODO: Consider moving these implementation details to ferveo_tdec::PublicKey impl DkgPublicKey { pub fn to_bytes(&self) -> Result> { - let as_bytes = to_bytes(&self.0)?; + let as_bytes = to_bytes(&self.0 .0)?; 
Ok(GenericArray::::from_slice(&as_bytes).to_owned()) } @@ -164,7 +166,8 @@ impl DkgPublicKey { bytes.len(), ) })?; - from_bytes(&bytes).map(DkgPublicKey) + let pk: G1Affine = from_bytes(&bytes)?; + Ok(DkgPublicKey(ferveo_tdec::PublicKey(pk))) } pub fn serialized_size() -> usize { @@ -174,9 +177,9 @@ impl DkgPublicKey { /// Generate a random DKG public key. /// Use this for testing only. pub fn random() -> Self { - let mut rng = rand::thread_rng(); + let mut rng = thread_rng(); let g1 = G1Affine::rand(&mut rng); - Self(g1) + Self(ferveo_tdec::PublicKey(g1)) } } @@ -217,73 +220,43 @@ impl Dkg { Ok(Self(dkg)) } - pub fn public_key(&self) -> DkgPublicKey { - DkgPublicKey(self.0.public_key()) - } - pub fn generate_transcript( &mut self, rng: &mut R, - // TODO: Replace with Message::Deal? ) -> Result { - match self.0.share(rng) { - Ok(Message::Deal(transcript)) => Ok(transcript), - Err(e) => Err(e), - _ => Err(Error::InvalidDkgStateToDeal), - } + self.0.generate_transcript(rng) } pub fn aggregate_transcripts( - &mut self, + &self, messages: &[ValidatorMessage], - // TODO: Replace with Message::Aggregate? ) -> Result { - // We must use `deal` here instead of to produce AggregatedTranscript instead of simply - // creating an AggregatedTranscript from the messages, because `deal` also updates the - // internal state of the DKG. - // If we didn't do that, that would cause the DKG to produce incorrect decryption shares - // in the future. - // TODO: Remove this dependency on DKG state - // TODO: Avoid mutating current state here - for (validator, transcript) in messages { - self.0.deal(validator, transcript)?; - } - let pvss = messages - .iter() - .map(|(_, t)| t) - .cloned() - .collect::>>(); - Ok(AggregatedTranscript(crate::pvss::aggregate(&pvss)?)) + self.0 + .aggregate_transcripts(messages) + .map(AggregatedTranscript) } - pub fn public_params(&self) -> DkgPublicParameters { - DkgPublicParameters { - g1_inv: self.0.pvss_params.g_inv(), - } + pub fn me(&self) -> &Validator { + &self.0.me } -} -fn make_pvss_map(messages: &[ValidatorMessage]) -> PVSSMap { - let mut pvss_map: PVSSMap = PVSSMap::new(); - messages.iter().for_each(|(validator, transcript)| { - pvss_map.insert(validator.address.clone(), transcript.clone()); - }); - pvss_map + pub fn domain_points(&self) -> Vec { + self.0.domain_points() + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct AggregatedTranscript( - pub(crate) PubliclyVerifiableSS, -); +pub struct AggregatedTranscript(crate::AggregatedTranscript); impl AggregatedTranscript { pub fn new(messages: &[ValidatorMessage]) -> Result { - let pvss_list = messages + let transcripts: Vec<_> = messages .iter() - .map(|(_, t)| t) - .cloned() - .collect::>>(); - Ok(AggregatedTranscript(crate::pvss::aggregate(&pvss_list)?)) + .map(|(_, transcript)| transcript.clone()) + .collect(); + let aggregated_transcript = + crate::AggregatedTranscript::::from_transcripts(&transcripts)?; + Ok(AggregatedTranscript(aggregated_transcript)) } pub fn verify( @@ -298,61 +271,64 @@ impl AggregatedTranscript { )); } - let pvss_params = PubliclyVerifiableParams::::default(); let domain = GeneralEvaluationDomain::::new(validators_num as usize) .expect("Unable to construct an evaluation domain"); - - let is_valid_optimistic = self.0.verify_optimistic(); + let is_valid_optimistic = self.0.aggregate.verify_optimistic(); if !is_valid_optimistic { return Err(Error::InvalidTranscriptAggregate); } - let pvss_map = make_pvss_map(messages); + let pvss_params = 
PubliclyVerifiableParams::::default(); let validators: Vec<_> = messages .iter() .map(|(validator, _)| validator) .cloned() .collect(); - + let pvss_list = messages + .iter() + .map(|(_validator, transcript)| transcript) + .cloned() + .collect::>(); // This check also includes `verify_full`. See impl. for details. - let is_valid = do_verify_aggregation( - &self.0.coeffs, - &self.0.shares, + do_verify_aggregation( + &self.0.aggregate.coeffs, + &self.0.aggregate.shares, &pvss_params, &validators, &domain, - &pvss_map, - )?; - Ok(is_valid) + &pvss_list, + ) } + // TODO: Consider deprecating in favor of PrivateKeyShare::create_decryption_share_simple pub fn create_decryption_share_precomputed( &self, dkg: &Dkg, ciphertext_header: &CiphertextHeader, aad: &[u8], validator_keypair: &Keypair, + selected_validators: &[Validator], ) -> Result { - // Prevent users from using the precomputed variant with improper DKG parameters - if dkg.0.dkg_params.shares_num() - != dkg.0.dkg_params.security_threshold() - { - return Err(Error::InvalidDkgParametersForPrecomputedVariant( - dkg.0.dkg_params.shares_num(), - dkg.0.dkg_params.security_threshold(), - )); - } - self.0.make_decryption_share_simple_precomputed( + let selected_domain_points = selected_validators + .iter() + .filter_map(|v| { + dkg.0 + .get_domain_point(v.share_index) + .ok() + .map(|domain_point| (v.share_index, domain_point)) + }) + .collect::>>(); + self.0.aggregate.create_decryption_share_precomputed( &ciphertext_header.0, aad, - &validator_keypair.decryption_key, - dkg.0.me.share_index as usize, - &dkg.0.domain_points(), - &dkg.0.pvss_params.g_inv(), + validator_keypair, + dkg.0.me.share_index, + &selected_domain_points, ) } + // TODO: Consider deprecating in favor of PrivateKeyShare::create_decryption_share_simple pub fn create_decryption_share_simple( &self, dkg: &Dkg, @@ -360,12 +336,11 @@ impl AggregatedTranscript { aad: &[u8], validator_keypair: &Keypair, ) -> Result { - let share = self.0.make_decryption_share_simple( + let share = self.0.aggregate.create_decryption_share_simple( &ciphertext_header.0, aad, - &validator_keypair.decryption_key, - dkg.0.me.share_index as usize, - &dkg.0.pvss_params.g_inv(), + validator_keypair, + dkg.0.me.share_index, )?; let domain_point = dkg.0.get_domain_point(dkg.0.me.share_index)?; Ok(DecryptionShareSimple { @@ -373,6 +348,22 @@ impl AggregatedTranscript { domain_point, }) } + + pub fn get_private_key_share( + &self, + validator_keypair: &Keypair, + share_index: u32, + ) -> Result { + Ok(PrivateKeyShare( + self.0 + .aggregate + .decrypt_private_key_share(validator_keypair, share_index)?, + )) + } + + pub fn public_key(&self) -> DkgPublicKey { + DkgPublicKey(self.0.public_key) + } } #[serde_as] @@ -380,56 +371,223 @@ impl AggregatedTranscript { pub struct DecryptionShareSimple { share: ferveo_tdec::api::DecryptionShareSimple, #[serde_as(as = "serialization::SerdeAs")] - domain_point: Fr, + domain_point: DomainPoint, } -#[serde_as] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct DkgPublicParameters { - #[serde_as(as = "serialization::SerdeAs")] - pub(crate) g1_inv: G1Prepared, +pub fn combine_shares_simple(shares: &[DecryptionShareSimple]) -> SharedSecret { + let domain_points: Vec<_> = shares.iter().map(|s| s.domain_point).collect(); + let lagrange_coefficients = prepare_combine_simple::(&domain_points); + + let shares: Vec<_> = shares.iter().cloned().map(|s| s.share).collect(); + let shared_secret = + share_combine_simple(&shares, &lagrange_coefficients[..]); + 
SharedSecret(shared_secret) } -impl Default for DkgPublicParameters { - fn default() -> Self { - DkgPublicParameters { - g1_inv: PubliclyVerifiableParams::::default().g_inv(), - } +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct SharedSecret(pub ferveo_tdec::api::SharedSecret); + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +// TODO: Use refresh::ShareRecoveryUpdate instead of ferveo_tdec::PrivateKeyShare +pub struct ShareRecoveryUpdate(pub ferveo_tdec::PrivateKeyShare); + +impl ShareRecoveryUpdate { + // TODO: There are two recovery scenarios: at random and at a specific point. Do we ever want + // to recover at a specific point? What scenario would that be? Validator rotation? + pub fn create_share_updates( + // TODO: Decouple from Dkg? We don't need any specific Dkg instance here, just some params etc + dkg: &Dkg, + x_r: &DomainPoint, + ) -> Result> { + let rng = &mut thread_rng(); + let update_map = + crate::refresh::ShareRecoveryUpdate::create_share_updates( + &dkg.0.domain_point_map(), + &dkg.0.pvss_params.h.into_affine(), + x_r, + dkg.0.dkg_params.security_threshold(), + rng, + ) + .into_iter() + .map(|(share_index, share_update)| { + (share_index, ShareRecoveryUpdate(share_update.0.clone())) + }) + .collect(); + Ok(update_map) + } + + pub fn to_bytes(&self) -> Result> { + bincode::serialize(self).map_err(|e| e.into()) } -} -impl DkgPublicParameters { pub fn from_bytes(bytes: &[u8]) -> Result { bincode::deserialize(bytes).map_err(|e| e.into()) } +} + +#[serde_as] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct ShareRefreshUpdate(pub crate::ShareRefreshUpdate); + +impl ShareRefreshUpdate { + pub fn create_share_updates( + dkg: &Dkg, + ) -> Result> { + let rng = &mut thread_rng(); + let updates = crate::refresh::ShareRefreshUpdate::create_share_updates( + &dkg.0.domain_point_map(), + &dkg.0.pvss_params.h.into_affine(), + dkg.0.dkg_params.security_threshold(), + rng, + ) + .into_iter() + .map(|(share_index, share_update)| { + (share_index, ShareRefreshUpdate(share_update)) + }) + .collect::>(); + Ok(updates) + } pub fn to_bytes(&self) -> Result> { bincode::serialize(self).map_err(|e| e.into()) } + + pub fn from_bytes(bytes: &[u8]) -> Result { + bincode::deserialize(bytes).map_err(|e| e.into()) + } } -pub fn combine_shares_simple(shares: &[DecryptionShareSimple]) -> SharedSecret { - // Pick domain points that are corresponding to the shares we have. 
- let domain_points: Vec<_> = shares.iter().map(|s| s.domain_point).collect(); - let lagrange_coefficients = prepare_combine_simple::(&domain_points); +#[serde_as] +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct UpdatedPrivateKeyShare(pub crate::UpdatedPrivateKeyShare); - let shares: Vec<_> = shares.iter().cloned().map(|s| s.share).collect(); - let shared_secret = - share_combine_simple(&shares, &lagrange_coefficients[..]); - SharedSecret(shared_secret) +impl UpdatedPrivateKeyShare { + pub fn into_private_key_share(self) -> PrivateKeyShare { + PrivateKeyShare(self.0.inner()) + } + pub fn to_bytes(&self) -> Result> { + bincode::serialize(self).map_err(|e| e.into()) + } + + pub fn from_bytes(bytes: &[u8]) -> Result { + bincode::deserialize(bytes).map_err(|e| e.into()) + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct SharedSecret(pub ferveo_tdec::api::SharedSecret); +pub struct PrivateKeyShare(pub crate::PrivateKeyShare); + +impl PrivateKeyShare { + pub fn create_updated_private_key_share_for_recovery( + &self, + share_updates: &[ShareRecoveryUpdate], + ) -> Result { + let share_updates: Vec<_> = share_updates + .iter() + .cloned() + .map(|update| crate::refresh::ShareRecoveryUpdate(update.0)) + .collect(); + // TODO: Remove this wrapping after figuring out serde_as + let updated_key_share = self.0.create_updated_key_share(&share_updates); + Ok(UpdatedPrivateKeyShare(updated_key_share)) + } + + pub fn create_updated_private_key_share_for_refresh( + &self, + share_updates: &[ShareRefreshUpdate], + ) -> Result { + let share_updates: Vec<_> = share_updates + .iter() + .cloned() + .map(|update| update.0) + .collect(); + let updated_key_share = self.0.create_updated_key_share(&share_updates); + Ok(UpdatedPrivateKeyShare(updated_key_share)) + } + + /// Recover a private key share from updated private key shares + pub fn recover_share_from_updated_private_shares( + x_r: &DomainPoint, + domain_points: &HashMap, + updated_shares: &HashMap, + ) -> Result { + let updated_shares = updated_shares + .iter() + .map(|(k, v)| (*k, v.0.clone())) + .collect::>(); + let share = + crate::PrivateKeyShare::recover_share_from_updated_private_shares( + x_r, + domain_points, + &updated_shares, + )?; + Ok(PrivateKeyShare(share)) + } + + /// Make a decryption share (simple variant) for a given ciphertext + pub fn create_decryption_share_simple( + &self, + dkg: &Dkg, + ciphertext_header: &CiphertextHeader, + validator_keypair: &Keypair, + aad: &[u8], + ) -> Result { + let share = self.0.create_decryption_share_simple( + &ciphertext_header.0, + aad, + validator_keypair, + )?; + let domain_point = dkg.0.get_domain_point(dkg.0.me.share_index)?; + Ok(DecryptionShareSimple { + share, + domain_point, + }) + } + + /// Make a decryption share (precomputed variant) for a given ciphertext + pub fn create_decryption_share_precomputed( + &self, + ciphertext_header: &CiphertextHeader, + aad: &[u8], + validator_keypair: &Keypair, + share_index: u32, + domain_points: &HashMap, + ) -> Result { + self.0.create_decryption_share_precomputed( + &ciphertext_header.0, + aad, + validator_keypair, + share_index, + domain_points, + ) + } + + pub fn to_bytes(&self) -> Result> { + bincode::serialize(self).map_err(|e| e.into()) + } + + pub fn from_bytes(bytes: &[u8]) -> Result { + bincode::deserialize(bytes).map_err(|e| e.into()) + } +} #[cfg(test)] mod test_ferveo_api { + use std::collections::HashMap; + + use ark_std::iterable::Iterable; use ferveo_tdec::SecretBox; - use itertools::izip; 
- use rand::{prelude::StdRng, SeedableRng}; + use itertools::{izip, Itertools}; + use rand::{ + prelude::{SliceRandom, StdRng}, + SeedableRng, + }; use test_case::test_case; - use crate::{api::*, test_common::*}; + use crate::{ + api::*, + test_common::{gen_address, gen_keypairs, AAD, MSG, TAU}, + }; type TestInputs = (Vec, Vec, Vec); @@ -452,11 +610,11 @@ mod test_ferveo_api { .collect::>(); // Each validator holds their own DKG instance and generates a transcript every - // every validator, including themselves - let messages: Vec<_> = validators + // validator, including themselves + let mut messages: Vec<_> = validators .iter() .map(|sender| { - let mut dkg = Dkg::new( + let dkg = Dkg::new( tau, shares_num, security_threshold, @@ -464,10 +622,10 @@ mod test_ferveo_api { sender, ) .unwrap(); - (sender.clone(), dkg.generate_transcript(rng).unwrap()) + (sender.clone(), dkg.0.generate_transcript(rng).unwrap()) }) .collect(); - + messages.shuffle(rng); (messages, validators, validator_keypairs) } @@ -484,11 +642,8 @@ mod test_ferveo_api { #[test_case(7, 7; "number of shares (validators) is not a power of 2")] #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_server_api_tdec_precomputed(shares_num: u32, validators_num: u32) { + let security_threshold = shares_num * 2 / 3; let rng = &mut StdRng::seed_from_u64(0); - - // In precomputed variant, the security threshold is equal to the number of shares - let security_threshold = shares_num; - let (messages, validators, validator_keypairs) = make_test_inputs( rng, TAU, @@ -496,30 +651,42 @@ mod test_ferveo_api { shares_num, validators_num, ); + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; - // Now that every validator holds a dkg instance and a transcript for every other validator, - // every validator can aggregate the transcripts + // Every validator can aggregate the transcripts let me = validators[0].clone(); - let mut dkg = + let dkg = Dkg::new(TAU, shares_num, security_threshold, &validators, &me) .unwrap(); - - let pvss_aggregated = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(pvss_aggregated.verify(validators_num, &messages).unwrap()); + let local_aggregate = dkg.aggregate_transcripts(messages).unwrap(); + assert!(local_aggregate.verify(validators_num, messages).unwrap()); // At this point, any given validator should be able to provide a DKG public key - let dkg_public_key = dkg.public_key(); + let dkg_public_key = local_aggregate.public_key(); // In the meantime, the client creates a ciphertext and decryption request let ciphertext = encrypt(SecretBox::new(MSG.to_vec()), AAD, &dkg_public_key) .unwrap(); + // In precomputed variant, client selects a specific subset of validators to create + // decryption shares + let selected_validators: Vec<_> = validators + .choose_multiple(rng, security_threshold as usize) + .cloned() + .collect(); + // Having aggregated the transcripts, the validators can now create decryption shares - let decryption_shares: Vec<_> = izip!(&validators, &validator_keypairs) - .map(|(validator, validator_keypair)| { + let mut decryption_shares = selected_validators + .iter() + .map(|validator| { + let validator_keypair = validator_keypairs + .iter() + .find(|kp| kp.public_key() == validator.public_key) + .unwrap(); // Each validator holds their own instance of DKG and creates their own aggregate - let mut dkg = Dkg::new( + let dkg = Dkg::new( TAU, shares_num, security_threshold, @@ -527,26 +694,30 @@ mod 
test_ferveo_api { validator, ) .unwrap(); - let aggregate = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(pvss_aggregated - .verify(validators_num, &messages) + let server_aggregate = + dkg.aggregate_transcripts(messages).unwrap(); + assert!(server_aggregate + .verify(validators_num, messages) .unwrap()); // And then each validator creates their own decryption share - aggregate + server_aggregate .create_decryption_share_precomputed( &dkg, &ciphertext.header().unwrap(), AAD, validator_keypair, + &selected_validators, ) .unwrap() }) - .collect(); + // We only need `security_threshold` shares to be able to decrypt + .take(security_threshold as usize) + .collect::>(); + decryption_shares.shuffle(rng); // Now, the decryption share can be used to decrypt the ciphertext // This part is part of the client API - let shared_secret = share_combine_precomputed(&decryption_shares); let plaintext = decrypt_with_shared_secret( &ciphertext, @@ -556,11 +727,13 @@ mod test_ferveo_api { .unwrap(); assert_eq!(plaintext, MSG); - // Since we're using a precomputed variant, we need all the shares to be able to decrypt + // We need `security_threshold` shares to be able to decrypt // So if we remove one share, we should not be able to decrypt - let decryption_shares = - decryption_shares[..shares_num as usize - 1].to_vec(); - + let decryption_shares = decryption_shares + .iter() + .take(security_threshold as usize - 1) + .cloned() + .collect::>(); let shared_secret = share_combine_precomputed(&decryption_shares); let result = decrypt_with_shared_secret( &ciphertext, @@ -575,9 +748,7 @@ mod test_ferveo_api { #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_server_api_tdec_simple(shares_num: u32, validators_num: u32) { let rng = &mut StdRng::seed_from_u64(0); - let security_threshold = shares_num / 2 + 1; - let (messages, validators, validator_keypairs) = make_test_inputs( rng, TAU, @@ -585,57 +756,55 @@ mod test_ferveo_api { shares_num, validators_num, ); + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; // Now that every validator holds a dkg instance and a transcript for every other validator, // every validator can aggregate the transcripts - let mut dkg = Dkg::new( - TAU, - shares_num, - security_threshold, - &validators, - &validators[0], - ) - .unwrap(); - - let pvss_aggregated = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(pvss_aggregated.verify(validators_num, &messages).unwrap()); + let local_aggregate = AggregatedTranscript::new(messages).unwrap(); + assert!(local_aggregate.verify(validators_num, messages).unwrap()); // At this point, any given validator should be able to provide a DKG public key - let public_key = dkg.public_key(); + let public_key = local_aggregate.public_key(); // In the meantime, the client creates a ciphertext and decryption request let ciphertext = encrypt(SecretBox::new(MSG.to_vec()), AAD, &public_key).unwrap(); // Having aggregated the transcripts, the validators can now create decryption shares - let decryption_shares: Vec<_> = izip!(&validators, &validator_keypairs) - .map(|(validator, validator_keypair)| { - // Each validator holds their own instance of DKG and creates their own aggregate - let mut dkg = Dkg::new( - TAU, - shares_num, - security_threshold, - &validators, - validator, - ) - .unwrap(); - let aggregate = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(aggregate.verify(validators_num, &messages).unwrap()); - aggregate - 
.create_decryption_share_simple( - &dkg, - &ciphertext.header().unwrap(), - AAD, - validator_keypair, + let mut decryption_shares: Vec<_> = + izip!(&validators, &validator_keypairs) + .map(|(validator, validator_keypair)| { + // Each validator holds their own instance of DKG and creates their own aggregate + let dkg = Dkg::new( + TAU, + shares_num, + security_threshold, + &validators, + validator, ) - .unwrap() - }) - .collect(); + .unwrap(); + let server_aggregate = + dkg.aggregate_transcripts(messages).unwrap(); + assert!(server_aggregate + .verify(validators_num, messages) + .unwrap()); + server_aggregate + .create_decryption_share_simple( + &dkg, + &ciphertext.header().unwrap(), + AAD, + validator_keypair, + ) + .unwrap() + }) + // We only need `security_threshold` shares to be able to decrypt + .take(security_threshold as usize) + .collect(); + decryption_shares.shuffle(rng); // Now, the decryption share can be used to decrypt the ciphertext // This part is part of the client API - - // In simple variant, we only need `security_threshold` shares to be able to decrypt let decryption_shares = decryption_shares[..security_threshold as usize].to_vec(); @@ -645,8 +814,8 @@ mod test_ferveo_api { .unwrap(); assert_eq!(plaintext, MSG); - // Let's say that we've only received `security_threshold - 1` shares - // In this case, we should not be able to decrypt + // We need `security_threshold` shares to be able to decrypt + // So if we remove one share, we should not be able to decrypt let decryption_shares = decryption_shares[..security_threshold as usize - 1].to_vec(); @@ -656,9 +825,9 @@ mod test_ferveo_api { assert!(result.is_err()); } - // Note that the server and client code are using the same underlying - // implementation for aggregation and aggregate verification. - // Here, we focus on testing user-facing APIs for server and client users. + /// Note that the server and client code are using the same underlying + /// implementation for aggregation and aggregate verification. + /// Here, we focus on testing user-facing APIs for server and client users. 
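The share-refresh API introduced above (`ShareRefreshUpdate`, `UpdatedPrivateKeyShare`, `PrivateKeyShare`) is exercised in full by `test_dkg_simple_tdec_share_refresh` further below. As a compact, illustrative sketch of a single participant's steps, assuming every peer has produced its update map via `ShareRefreshUpdate::create_share_updates` and that all participants hold the same verified aggregate; the function name and its parameters are assumptions, not part of the patch.

use std::collections::HashMap;

use ferveo::api::*;

// Sketch: apply refresh updates received from peers to this validator's share.
fn refresh_one_share_sketch(
    my_dkg: &Dkg,                     // this validator's DKG instance
    my_keypair: &Keypair,             // this validator's session keypair
    aggregate: &AggregatedTranscript, // the verified aggregate everyone holds
    // One map per peer, each keyed by the recipient's share index
    updates_from_peers: &[HashMap<u32, ShareRefreshUpdate>],
) -> PrivateKeyShare {
    let my_index = my_dkg.me().share_index;

    // Collect the update fragments addressed to this validator's share
    let updates_for_me: Vec<ShareRefreshUpdate> = updates_from_peers
        .iter()
        .map(|updates| updates.get(&my_index).unwrap().clone())
        .collect();

    // Decrypt the current private key share from the aggregate, apply the
    // updates, and unwrap the refreshed share. It is then used exactly like
    // the old one, e.g. via `PrivateKeyShare::create_decryption_share_simple`.
    aggregate
        .get_private_key_share(my_keypair, my_index)
        .unwrap()
        .create_updated_private_key_share_for_refresh(&updates_for_me)
        .unwrap()
        .into_private_key_share()
}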
#[test_case(4, 4; "number of shares (validators) is a power of 2")] #[test_case(7, 7; "number of shares (validators) is not a power of 2")] @@ -666,7 +835,6 @@ mod test_ferveo_api { fn server_side_local_verification(shares_num: u32, validators_num: u32) { let rng = &mut StdRng::seed_from_u64(0); let security_threshold = shares_num / 2 + 1; - let (messages, validators, _) = make_test_inputs( rng, TAU, @@ -674,16 +842,17 @@ mod test_ferveo_api { shares_num, validators_num, ); + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; // Now that every validator holds a dkg instance and a transcript for every other validator, // every validator can aggregate the transcripts let me = validators[0].clone(); - let mut dkg = + let dkg = Dkg::new(TAU, shares_num, security_threshold, &validators, &me) .unwrap(); - - let good_aggregate = dkg.aggregate_transcripts(&messages).unwrap(); - assert!(good_aggregate.verify(validators_num, &messages).is_ok()); + let good_aggregate = dkg.aggregate_transcripts(messages).unwrap(); + assert!(good_aggregate.verify(validators_num, messages).is_ok()); // Test negative cases @@ -692,12 +861,12 @@ mod test_ferveo_api { // Should fail if the number of validators is less than the number of messages assert!(matches!( - good_aggregate.verify(messages.len() as u32 - 1, &messages), + good_aggregate.verify(messages.len() as u32 - 1, messages), Err(Error::InvalidAggregateVerificationParameters(_, _)) )); // Should fail if no transcripts are provided - let mut dkg = + let dkg = Dkg::new(TAU, shares_num, security_threshold, &validators, &me) .unwrap(); assert!(matches!( @@ -706,7 +875,7 @@ mod test_ferveo_api { )); // Not enough transcripts - let mut dkg = + let dkg = Dkg::new(TAU, shares_num, security_threshold, &validators, &me) .unwrap(); let not_enough_messages = &messages[..security_threshold as usize - 1]; @@ -714,26 +883,58 @@ mod test_ferveo_api { let insufficient_aggregate = dkg.aggregate_transcripts(not_enough_messages).unwrap(); assert!(matches!( - insufficient_aggregate.verify(validators_num, &messages), + insufficient_aggregate.verify(validators_num, messages), Err(Error::InvalidTranscriptAggregate) )); + // Duplicated transcripts + let messages_with_duplicated_transcript = [ + ( + validators[security_threshold as usize - 1].clone(), + messages[security_threshold as usize - 1].1.clone(), + ), + ( + validators[security_threshold as usize - 1].clone(), + messages[security_threshold as usize - 2].1.clone(), + ), + ]; + assert!(dkg + .aggregate_transcripts(&messages_with_duplicated_transcript) + .is_err()); + + let messages_with_duplicated_transcript = [ + ( + validators[security_threshold as usize - 1].clone(), + messages[security_threshold as usize - 1].1.clone(), + ), + ( + validators[security_threshold as usize - 2].clone(), + messages[security_threshold as usize - 1].1.clone(), + ), + ]; + assert!(dkg + .aggregate_transcripts(&messages_with_duplicated_transcript) + .is_err()); + // Unexpected transcripts in the aggregate or transcripts from a different ritual // Using same DKG parameters, but different DKG instances and validators let mut dkg = Dkg::new(TAU, shares_num, security_threshold, &validators, &me) .unwrap(); - let (bad_messages, _, _) = make_test_inputs( - rng, - TAU, - security_threshold, - shares_num, - validators_num, + let bad_message = ( + // Reusing a good validator, but giving them a bad transcript + messages[security_threshold as usize - 1].0.clone(), + dkg.generate_transcript(rng).unwrap(), ); - 
let mixed_messages = [&messages[..2], &bad_messages[..1]].concat(); + let mixed_messages = [ + &messages[..(security_threshold - 1) as usize], + &[bad_message], + ] + .concat(); + assert_eq!(mixed_messages.len(), security_threshold as usize); let bad_aggregate = dkg.aggregate_transcripts(&mixed_messages).unwrap(); assert!(matches!( - bad_aggregate.verify(validators_num, &messages), + bad_aggregate.verify(validators_num, messages), Err(Error::InvalidTranscriptAggregate) )); } @@ -744,7 +945,6 @@ mod test_ferveo_api { fn client_side_local_verification(shares_num: u32, validators_num: u32) { let rng = &mut StdRng::seed_from_u64(0); let security_threshold = shares_num / 2 + 1; - let (messages, _, _) = make_test_inputs( rng, TAU, @@ -753,8 +953,8 @@ mod test_ferveo_api { validators_num, ); - // We only need `security_threshold` transcripts to aggregate - let messages = &messages[..security_threshold as usize]; + // We only need `shares_num` transcripts to aggregate + let messages = &messages[..shares_num as usize]; // Create an aggregated transcript on the client side let good_aggregate = AggregatedTranscript::new(messages).unwrap(); @@ -808,4 +1008,365 @@ mod test_ferveo_api { Err(Error::InvalidTranscriptAggregate) )); } + + fn make_share_update_test_inputs( + shares_num: u32, + validators_num: u32, + rng: &mut StdRng, + security_threshold: u32, + ) -> ( + Vec, + Vec, + Vec, + Vec, + CiphertextHeader, + SharedSecret, + ) { + let (messages, validators, validator_keypairs) = make_test_inputs( + rng, + TAU, + security_threshold, + shares_num, + validators_num, + ); + let dkgs = validators + .iter() + .map(|validator| { + Dkg::new( + TAU, + shares_num, + security_threshold, + &validators, + validator, + ) + .unwrap() + }) + .collect::>(); + + // Creating a copy to avoiding accidentally changing DKG state + let dkg = dkgs[0].clone(); + let server_aggregate = dkg.aggregate_transcripts(&messages).unwrap(); + assert!(server_aggregate.verify(validators_num, &messages).unwrap()); + + // Create an initial shared secret for testing purposes + let public_key = server_aggregate.public_key(); + let ciphertext = + encrypt(SecretBox::new(MSG.to_vec()), AAD, &public_key).unwrap(); + let ciphertext_header = ciphertext.header().unwrap(); + let transcripts = messages + .iter() + .map(|(_, transcript)| transcript) + .cloned() + .collect::>(); + let (_, _, old_shared_secret) = + crate::test_dkg_full::create_shared_secret_simple_tdec( + &dkg.0, + AAD, + &ciphertext_header.0, + validator_keypairs.as_slice(), + &transcripts, + ); + ( + messages, + validators, + validator_keypairs, + dkgs, + ciphertext_header, + SharedSecret(old_shared_secret), + ) + } + + #[test_case(4, 4, true; "number of shares (validators) is a power of 2")] + #[test_case(7, 7, true; "number of shares (validators) is not a power of 2")] + #[test_case(4, 6, true; "number of validators greater than the number of shares")] + #[test_case(4, 6, false; "recovery at a specific point")] + fn test_dkg_simple_tdec_share_recovery( + shares_num: u32, + validators_num: u32, + recover_at_random_point: bool, + ) { + let rng = &mut StdRng::seed_from_u64(0); + let security_threshold = shares_num / 2 + 1; + let ( + mut messages, + mut validators, + mut validator_keypairs, + mut dkgs, + ciphertext_header, + old_shared_secret, + ) = make_share_update_test_inputs( + shares_num, + validators_num, + rng, + security_threshold, + ); + + // We assume that all participants have the same aggregate, and that participants created + // their own aggregates before the off-boarding 
of the validator + // If we didn't create this aggregate here, we risk having a "dangling validator message" + // later when we off-board the validator + let aggregated_transcript = + dkgs[0].clone().aggregate_transcripts(&messages).unwrap(); + assert!(aggregated_transcript + .verify(validators_num, &messages) + .unwrap()); + + // We need to save this domain point to be user in the recovery testing scenario + let mut domain_points = dkgs[0].0.domain_point_map(); + let removed_domain_point = domain_points + .remove(&validators.last().unwrap().share_index) + .unwrap(); + + // Remove one participant from the contexts and all nested structure + // to simulate off-boarding a validator + messages.pop().unwrap(); + dkgs.pop(); + validator_keypairs.pop().unwrap(); + let removed_validator = validators.pop().unwrap(); + + // Now, we're going to recover a new share at a random point or at a specific point + // and check that the shared secret is still the same. + let x_r = if recover_at_random_point { + // Onboarding a validator with a completely new private key share + DomainPoint::rand(rng) + } else { + // Onboarding a validator with a private key share recovered from the removed validator + removed_domain_point + }; + + // Each participant prepares an update for each other participant + let share_updates = dkgs + .iter() + .map(|validator_dkg| { + let share_update = ShareRecoveryUpdate::create_share_updates( + validator_dkg, + &x_r, + ) + .unwrap(); + (validator_dkg.me().address.clone(), share_update) + }) + .collect::>(); + + // Participants share updates and update their shares + + // Now, every participant separately: + let updated_shares: HashMap = dkgs + .iter() + .map(|validator_dkg| { + // Current participant receives updates from other participants + let updates_for_participant: Vec<_> = share_updates + .values() + .map(|updates| { + updates.get(&validator_dkg.me().share_index).unwrap() + }) + .cloned() + .collect(); + + // Each validator uses their decryption key to update their share + let validator_keypair = validator_keypairs + .get(validator_dkg.me().share_index as usize) + .unwrap(); + + // And creates updated private key shares + let updated_key_share = aggregated_transcript + .get_private_key_share( + validator_keypair, + validator_dkg.me().share_index, + ) + .unwrap() + .create_updated_private_key_share_for_recovery( + &updates_for_participant, + ) + .unwrap(); + (validator_dkg.me().share_index, updated_key_share) + }) + .collect(); + + // Now, we have to combine new share fragments into a new share + let recovered_key_share = + PrivateKeyShare::recover_share_from_updated_private_shares( + &x_r, + &domain_points, + &updated_shares, + ) + .unwrap(); + + // Get decryption shares from remaining participants + let mut decryption_shares: Vec = + validator_keypairs + .iter() + .zip_eq(dkgs.iter()) + .map(|(validator_keypair, validator_dkg)| { + aggregated_transcript + .create_decryption_share_simple( + validator_dkg, + &ciphertext_header, + AAD, + validator_keypair, + ) + .unwrap() + }) + .collect(); + decryption_shares.shuffle(rng); + + // In order to test the recovery, we need to create a new decryption share from the recovered + // private key share. 
To do that, we need a new validator + + // Let's create and onboard a new validator + // TODO: Add test scenarios for onboarding and offboarding validators + let new_validator_keypair = Keypair::random(); + // Normally, we would get these from the Coordinator: + let new_validator_share_index = removed_validator.share_index; + let new_validator = Validator { + address: gen_address(new_validator_share_index as usize), + public_key: new_validator_keypair.public_key(), + share_index: new_validator_share_index, + }; + validators.push(new_validator.clone()); + let new_validator_dkg = Dkg::new( + TAU, + shares_num, + security_threshold, + &validators, + &new_validator, + ) + .unwrap(); + + let new_decryption_share = recovered_key_share + .create_decryption_share_simple( + &new_validator_dkg, + &ciphertext_header, + &new_validator_keypair, + AAD, + ) + .unwrap(); + decryption_shares.push(new_decryption_share); + domain_points.insert(new_validator_share_index, x_r); + + let domain_points = domain_points + .values() + .take(security_threshold as usize) + .cloned() + .collect::>(); + let decryption_shares = + &decryption_shares[..security_threshold as usize]; + assert_eq!(domain_points.len(), security_threshold as usize); + assert_eq!(decryption_shares.len(), security_threshold as usize); + + let new_shared_secret = combine_shares_simple(decryption_shares); + assert_eq!( + old_shared_secret, new_shared_secret, + "Shared secret reconstruction failed" + ); + } + + #[test_case(4, 4; "number of shares (validators) is a power of 2")] + #[test_case(7, 7; "number of shares (validators) is not a power of 2")] + #[test_case(4, 6; "number of validators greater than the number of shares")] + fn test_dkg_simple_tdec_share_refresh( + shares_num: u32, + validators_num: u32, + ) { + let rng = &mut StdRng::seed_from_u64(0); + let security_threshold = shares_num / 2 + 1; + let ( + messages, + _validators, + validator_keypairs, + dkgs, + ciphertext_header, + old_shared_secret, + ) = make_share_update_test_inputs( + shares_num, + validators_num, + rng, + security_threshold, + ); + + // Each participant prepares an update for each other participant + let share_updates = dkgs + .iter() + .map(|validator_dkg| { + let share_update = + ShareRefreshUpdate::create_share_updates(validator_dkg) + .unwrap(); + (validator_dkg.me().address.clone(), share_update) + }) + .collect::>(); + + // Participants share updates and update their shares + + // Now, every participant separately: + let updated_shares: Vec<_> = dkgs + .iter() + .map(|validator_dkg| { + // Current participant receives updates from other participants + let updates_for_participant: Vec<_> = share_updates + .values() + .map(|updates| { + updates.get(&validator_dkg.me().share_index).unwrap() + }) + .cloned() + .collect(); + + // Each validator uses their decryption key to update their share + let validator_keypair = validator_keypairs + .get(validator_dkg.me().share_index as usize) + .unwrap(); + + // And creates updated private key shares + // We need an aggregate for that + let aggregate = validator_dkg + .clone() + .aggregate_transcripts(&messages) + .unwrap(); + assert!(aggregate.verify(validators_num, &messages).unwrap()); + + aggregate + .get_private_key_share( + validator_keypair, + validator_dkg.me().share_index, + ) + .unwrap() + .create_updated_private_key_share_for_refresh( + &updates_for_participant, + ) + .unwrap() + }) + .collect(); + + // Participants create decryption shares + let mut decryption_shares: Vec = + validator_keypairs + .iter() + 
.zip_eq(dkgs.iter()) + .map(|(validator_keypair, validator_dkg)| { + let pks = updated_shares + .get(validator_dkg.me().share_index as usize) + .unwrap() + .clone() + .into_private_key_share(); + pks.create_decryption_share_simple( + validator_dkg, + &ciphertext_header, + validator_keypair, + AAD, + ) + .unwrap() + }) + // We only need `security_threshold` shares to be able to decrypt + .take(security_threshold as usize) + .collect(); + decryption_shares.shuffle(rng); + + let decryption_shares = + &decryption_shares[..security_threshold as usize]; + assert_eq!(decryption_shares.len(), security_threshold as usize); + + let new_shared_secret = combine_shares_simple(decryption_shares); + assert_eq!( + old_shared_secret, new_shared_secret, + "Shared secret reconstruction failed" + ); + } } diff --git a/ferveo/src/bindings_python.rs b/ferveo/src/bindings_python.rs index fe534af8..bd189e7e 100644 --- a/ferveo/src/bindings_python.rs +++ b/ferveo/src/bindings_python.rs @@ -37,18 +37,6 @@ impl From for PyErr { Error::ThresholdEncryptionError(err) => { ThresholdEncryptionError::new_err(err.to_string()) } - Error::InvalidDkgStateToDeal => { - InvalidDkgStateToDeal::new_err("") - } - Error::InvalidDkgStateToAggregate => { - InvalidDkgStateToAggregate::new_err("") - } - Error::InvalidDkgStateToVerify => { - InvalidDkgStateToVerify::new_err("") - } - Error::InvalidDkgStateToIngest => { - InvalidDkgStateToIngest::new_err("") - } Error::DealerNotInValidatorSet(dealer) => { DealerNotInValidatorSet::new_err(dealer.to_string()) } @@ -58,16 +46,9 @@ impl From for PyErr { Error::DuplicateDealer(dealer) => { DuplicateDealer::new_err(dealer.to_string()) } - Error::InvalidPvssTranscript => { - InvalidPvssTranscript::new_err("") + Error::InvalidPvssTranscript(validator_addr) => { + InvalidPvssTranscript::new_err(validator_addr.to_string()) } - Error::InsufficientTranscriptsForAggregate( - expected, - actual, - ) => InsufficientTranscriptsForAggregate::new_err(format!( - "expected: {expected}, actual: {actual}" - )), - Error::InvalidDkgPublicKey => InvalidDkgPublicKey::new_err(""), Error::InsufficientValidators(expected, actual) => { InsufficientValidators::new_err(format!( "expected: {expected}, actual: {actual}" @@ -120,7 +101,17 @@ impl From for PyErr { InvalidAggregateVerificationParameters::new_err(format!( "validators_num: {validators_num}, messages_num: {messages_num}" )) + }, + Error::TooManyTranscripts(expected, received) => { + TooManyTranscripts::new_err(format!( + "expected: {expected}, received: {received}" + )) + } + Error::DuplicateTranscript(validator) => { + DuplicateTranscript::new_err(validator.to_string()) } + // Remember to create Python exceptions using `create_exception!` macro, and to register them in the + // `make_ferveo_py_module` function. You will have to update the `ferveo/__init__.{py, pyi}` files too. 
}, _ => default(), } @@ -168,6 +159,9 @@ create_exception!( InvalidAggregateVerificationParameters, PyValueError ); +create_exception!(exceptions, UnknownValidator, PyValueError); +create_exception!(exceptions, TooManyTranscripts, PyValueError); +create_exception!(exceptions, DuplicateTranscript, PyValueError); fn from_py_bytes(bytes: &[u8]) -> PyResult { T::from_bytes(bytes) @@ -510,11 +504,6 @@ impl Dkg { Ok(Self(dkg)) } - #[getter] - pub fn public_key(&self) -> DkgPublicKey { - DkgPublicKey(self.0.public_key()) - } - pub fn generate_transcript(&mut self) -> PyResult { let rng = &mut thread_rng(); let transcript = self @@ -632,7 +621,10 @@ impl AggregatedTranscript { ciphertext_header: &CiphertextHeader, aad: &[u8], validator_keypair: &Keypair, + selected_validators: Vec, ) -> PyResult { + let selected_validators: Vec<_> = + selected_validators.into_iter().map(|v| v.0).collect(); let decryption_share = self .0 .create_decryption_share_precomputed( @@ -640,6 +632,7 @@ impl AggregatedTranscript { &ciphertext_header.0, aad, &validator_keypair.0, + &selected_validators, ) .map_err(FerveoPythonError::FerveoError)?; Ok(DecryptionSharePrecomputed(decryption_share)) @@ -663,6 +656,11 @@ impl AggregatedTranscript { .map_err(FerveoPythonError::FerveoError)?; Ok(DecryptionShareSimple(decryption_share)) } + + #[getter] + pub fn public_key(&self) -> DkgPublicKey { + DkgPublicKey(self.0.public_key()) + } } // Since adding functions in pyo3 requires a two-step process @@ -782,6 +780,9 @@ pub fn make_ferveo_py_module(py: Python<'_>, m: &PyModule) -> PyResult<()> { "InvalidAggregateVerificationParameters", py.get_type::(), )?; + m.add("UnknownValidator", py.get_type::())?; + m.add("TooManyTranscripts", py.get_type::())?; + m.add("DuplicateTranscript", py.get_type::())?; Ok(()) } @@ -819,7 +820,7 @@ mod test_ferveo_python { .collect(); // Each validator holds their own DKG instance and generates a transcript every - // every validator, including themselves + // validator, including themselves let messages: Vec<_> = validators .iter() .cloned() @@ -844,9 +845,7 @@ mod test_ferveo_python { #[test_case(4, 4; "number of validators equal to the number of shares")] #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_server_api_tdec_precomputed(shares_num: u32, validators_num: u32) { - // In precomputed variant, the security threshold is equal to the number of shares - let security_threshold = shares_num; - + let security_threshold = shares_num * 2 / 3; let (messages, validators, validator_keypairs) = make_test_inputs( TAU, security_threshold, @@ -867,20 +866,23 @@ mod test_ferveo_python { ) .unwrap(); - // Lets say that we've only received `security_threshold` transcripts + // Let's say that we've only received `security_threshold` transcripts let messages = messages[..security_threshold as usize].to_vec(); - let pvss_aggregated = + let local_aggregate = dkg.aggregate_transcripts(messages.clone()).unwrap(); - assert!(pvss_aggregated + assert!(local_aggregate .verify(validators_num, messages.clone()) .unwrap()); // At this point, any given validator should be able to provide a DKG public key - let dkg_public_key = dkg.public_key(); + let dkg_public_key = local_aggregate.public_key(); // In the meantime, the client creates a ciphertext and decryption request let ciphertext = encrypt(MSG.to_vec(), AAD, &dkg_public_key).unwrap(); + // TODO: Adjust the subset of validators to be used in the decryption for precomputed + // variant + // Having aggregated the transcripts, the validators 
can now create decryption shares let decryption_shares: Vec<_> = izip!(validators.clone(), &validator_keypairs) @@ -894,18 +896,19 @@ mod test_ferveo_python { &validator, ) .unwrap(); - let aggregate = validator_dkg + let server_aggregate = validator_dkg .aggregate_transcripts(messages.clone()) .unwrap(); - assert!(pvss_aggregated + assert!(server_aggregate .verify(validators_num, messages.clone()) .is_ok()); - aggregate + server_aggregate .create_decryption_share_precomputed( &validator_dkg, &ciphertext.header().unwrap(), AAD, validator_keypair, + validators.clone(), ) .unwrap() }) @@ -915,7 +918,6 @@ mod test_ferveo_python { // This part is part of the client API let shared_secret = combine_decryption_shares_precomputed(decryption_shares); - let plaintext = decrypt_with_shared_secret(&ciphertext, AAD, &shared_secret) .unwrap(); @@ -945,7 +947,7 @@ mod test_ferveo_python { ) .unwrap(); - // Lets say that we've only receives `security_threshold` transcripts + // Let's say that we've only receives `security_threshold` transcripts let messages = messages[..security_threshold as usize].to_vec(); let pvss_aggregated = dkg.aggregate_transcripts(messages.clone()).unwrap(); @@ -954,7 +956,7 @@ mod test_ferveo_python { .unwrap()); // At this point, any given validator should be able to provide a DKG public key - let dkg_public_key = dkg.public_key(); + let dkg_public_key = pvss_aggregated.public_key(); // In the meantime, the client creates a ciphertext and decryption request let ciphertext = encrypt(MSG.to_vec(), AAD, &dkg_public_key).unwrap(); @@ -992,9 +994,7 @@ mod test_ferveo_python { // Now, the decryption share can be used to decrypt the ciphertext // This part is part of the client API - let shared_secret = combine_decryption_shares_simple(decryption_shares); - let plaintext = decrypt_with_shared_secret(&ciphertext, AAD, &shared_secret) .unwrap(); diff --git a/ferveo/src/bindings_wasm.rs b/ferveo/src/bindings_wasm.rs index 07e22e3f..0b369874 100644 --- a/ferveo/src/bindings_wasm.rs +++ b/ferveo/src/bindings_wasm.rs @@ -360,11 +360,6 @@ impl Dkg { Ok(Self(dkg)) } - #[wasm_bindgen(js_name = "publicKey")] - pub fn public_key(&self) -> DkgPublicKey { - DkgPublicKey(self.0.public_key()) - } - #[wasm_bindgen(js_name = "generateTranscript")] pub fn generate_transcript(&mut self) -> JsResult { let rng = &mut thread_rng(); @@ -460,7 +455,6 @@ impl Validator { } } -// TODO: Consider removing and replacing with tuple #[derive(TryFromJsValue)] #[wasm_bindgen] #[derive(Clone, Debug, derive_more::AsRef, derive_more::From)] @@ -497,6 +491,14 @@ impl ValidatorMessage { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct AggregatedTranscript(api::AggregatedTranscript); +#[wasm_bindgen] +impl AggregatedTranscript { + #[wasm_bindgen(getter, js_name = "publicKey")] + pub fn public_key(&self) -> DkgPublicKey { + DkgPublicKey(self.0.public_key()) + } +} + generate_common_methods!(AggregatedTranscript); #[wasm_bindgen] @@ -534,8 +536,15 @@ impl AggregatedTranscript { ciphertext_header: &CiphertextHeader, aad: &[u8], validator_keypair: &Keypair, + selected_validators_js: &ValidatorArray, ) -> JsResult { set_panic_hook(); + let selected_validators = + try_from_js_array::(selected_validators_js)?; + let selected_validators = selected_validators + .into_iter() + .map(|v| v.to_inner()) + .collect::>>()?; let decryption_share = self .0 .create_decryption_share_precomputed( @@ -543,6 +552,7 @@ impl AggregatedTranscript { &ciphertext_header.0, aad, &validator_keypair.0, + &selected_validators, ) 
.map_err(map_js_err)?; Ok(DecryptionSharePrecomputed(decryption_share)) diff --git a/ferveo/src/dkg.rs b/ferveo/src/dkg.rs index e8afbe30..e24c1fbd 100644 --- a/ferveo/src/dkg.rs +++ b/ferveo/src/dkg.rs @@ -1,20 +1,21 @@ -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap, HashSet}; -use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; +use ark_ec::pairing::Pairing; use ark_poly::EvaluationDomain; use ark_std::UniformRand; use ferveo_common::PublicKey; use measure_time::print_time; use rand::RngCore; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use serde_with::serde_as; +use serde::{Deserialize, Serialize}; use crate::{ - aggregate, assert_no_share_duplicates, AggregatedPvss, Error, - EthereumAddress, PubliclyVerifiableParams, PubliclyVerifiableSS, Result, - Validator, + assert_no_share_duplicates, AggregatedTranscript, Error, EthereumAddress, + PubliclyVerifiableParams, PubliclyVerifiableSS, Result, Validator, }; +pub type DomainPoint = ::ScalarField; +pub type ValidatorMessage = (Validator, PubliclyVerifiableSS); + #[derive(Copy, Clone, Debug, Serialize, Deserialize)] pub struct DkgParams { tau: u32, @@ -66,25 +67,7 @@ impl DkgParams { pub type ValidatorsMap = BTreeMap>; pub type PVSSMap = BTreeMap>; -#[derive(Debug, Clone)] -pub enum DkgState { - // TODO: Do we need to keep track of the block number? - Sharing { accumulated_shares: u32, block: u32 }, - Dealt, - Success { public_key: E::G1Affine }, - Invalid, -} - -impl DkgState { - fn new() -> Self { - DkgState::Sharing { - accumulated_shares: 0, - block: 0, - } - } -} - -/// The DKG context that holds all of the local state for participating in the DKG +/// The DKG context that holds all the local state for participating in the DKG // TODO: Consider removing Clone to avoid accidentally NOT-mutating state. // Currently, we're assuming that the DKG is only mutated by the owner of the instance. // Consider removing Clone after finalizing ferveo::api @@ -93,10 +76,8 @@ pub struct PubliclyVerifiableDkg { pub dkg_params: DkgParams, pub pvss_params: PubliclyVerifiableParams, pub validators: ValidatorsMap, - pub vss: PVSSMap, pub domain: ark_poly::GeneralEvaluationDomain, pub me: Validator, - state: DkgState, } impl PubliclyVerifiableDkg { @@ -111,13 +92,12 @@ impl PubliclyVerifiableDkg { dkg_params: &DkgParams, me: &Validator, ) -> Result { + assert_no_share_duplicates(validators)?; + let domain = ark_poly::GeneralEvaluationDomain::::new( validators.len(), ) .expect("unable to construct domain"); - - assert_no_share_duplicates(validators)?; - let validators: ValidatorsMap = validators .iter() .map(|validator| (validator.address.clone(), validator.clone())) @@ -135,14 +115,13 @@ impl PubliclyVerifiableDkg { Ok(Self { dkg_params: *dkg_params, pvss_params: PubliclyVerifiableParams::::default(), - vss: PVSSMap::::new(), domain, me: me.clone(), validators, - state: DkgState::new(), }) } + /// Get the validator with for the given public key pub fn get_validator( &self, public_key: &PublicKey, @@ -153,199 +132,85 @@ impl PubliclyVerifiableDkg { } /// Create a new PVSS instance within this DKG session, contributing to the final key - /// `rng` is a cryptographic random number generator - /// Returns a PVSS dealing message to post on-chain - pub fn share(&mut self, rng: &mut R) -> Result> { + pub fn generate_transcript( + &self, + rng: &mut R, + ) -> Result> { print_time!("PVSS Sharing"); - match self.state { - DkgState::Sharing { .. 
} | DkgState::Dealt => { - let vss = PubliclyVerifiableSS::::new( - &E::ScalarField::rand(rng), - self, - rng, - )?; - Ok(Message::Deal(vss)) - } - _ => Err(Error::InvalidDkgStateToDeal), - } + PubliclyVerifiableSS::::new(&DomainPoint::::rand(rng), self, rng) } /// Aggregate all received PVSS messages into a single message, prepared to post on-chain - pub fn aggregate(&self) -> Result> { - match self.state { - DkgState::Dealt => { - let public_key = self.public_key(); - let pvss_list = self.vss.values().cloned().collect::>(); - Ok(Message::Aggregate(Aggregation { - vss: aggregate(&pvss_list)?, - public_key, - })) - } - _ => Err(Error::InvalidDkgStateToAggregate), - } - } - - /// Returns the public key generated by the DKG - pub fn public_key(&self) -> E::G1Affine { - self.vss - .values() - .map(|vss| vss.coeffs[0].into_group()) - .sum::() - .into_affine() + pub fn aggregate_transcripts( + &self, + messages: &[ValidatorMessage], + ) -> Result> { + self.verify_transcripts(messages)?; + let transcripts: Vec> = messages + .iter() + .map(|(_sender, transcript)| transcript.clone()) + .collect(); + AggregatedTranscript::::from_transcripts(&transcripts) } /// Return a domain point for the share_index - pub fn get_domain_point(&self, share_index: u32) -> Result { - let domain_points = self.domain_points(); - domain_points - .get(share_index as usize) + pub fn get_domain_point(&self, share_index: u32) -> Result> { + self.domain_point_map() + .get(&share_index) .ok_or_else(|| Error::InvalidShareIndex(share_index)) .copied() } /// Return an appropriate amount of domain points for the DKG - pub fn domain_points(&self) -> Vec { + /// The number of domain points should be equal to the number of validators + pub fn domain_points(&self) -> Vec> { self.domain.elements().take(self.validators.len()).collect() } - /// `payload` is the content of the message - pub fn verify_message( + /// Return a map of domain points for the DKG + pub fn domain_point_map(&self) -> HashMap> { + self.domain + .elements() + .enumerate() + .map(|(i, point)| (i as u32, point)) + .collect::>() + } + + /// Verify PVSS transcripts against the set of validators in the DKG + fn verify_transcripts( &self, - sender: &Validator, - payload: &Message, + messages: &[ValidatorMessage], ) -> Result<()> { - match payload { - Message::Deal(pvss) - if matches!( - self.state, - DkgState::Sharing { .. 
} | DkgState::Dealt - ) => - { - if !self.validators.contains_key(&sender.address) { - Err(Error::UnknownDealer(sender.clone().address)) - } else if self.vss.contains_key(&sender.address) { - Err(Error::DuplicateDealer(sender.clone().address)) - } else if !pvss.verify_optimistic() { - Err(Error::InvalidPvssTranscript) - } else { - Ok(()) - } + let mut validator_set = HashSet::::new(); + let mut transcript_set = HashSet::>::new(); + for (sender, transcript) in messages.iter() { + let sender = &sender.address; + if !self.validators.contains_key(sender) { + return Err(Error::UnknownDealer(sender.clone())); + } else if validator_set.contains(sender) { + return Err(Error::DuplicateDealer(sender.clone())); + } else if transcript_set.contains(transcript) { + return Err(Error::DuplicateTranscript(sender.clone())); + } else if !transcript.verify_optimistic() { + return Err(Error::InvalidPvssTranscript(sender.clone())); } - Message::Aggregate(Aggregation { vss, public_key }) - if matches!(self.state, DkgState::Dealt) => - { - let minimum_shares = self.dkg_params.shares_num - - self.dkg_params.security_threshold; - let actual_shares = vss.shares.len() as u32; - // We reject aggregations that fail to meet the security threshold - if actual_shares < minimum_shares { - Err(Error::InsufficientTranscriptsForAggregate( - minimum_shares, - actual_shares, - )) - } else if vss.verify_aggregation(self).is_err() { - Err(Error::InvalidTranscriptAggregate) - } else if &self.public_key() == public_key { - Ok(()) - } else { - Err(Error::InvalidDkgPublicKey) - } - } - _ => Err(Error::InvalidDkgStateToVerify), + validator_set.insert(sender.clone()); + transcript_set.insert(transcript.clone()); } - } - /// After consensus has agreed to include a verified - /// message on the blockchain, we apply the chains - /// to the state machine - pub fn apply_message( - &mut self, - sender: &Validator, - payload: &Message, - ) -> Result<()> { - match payload { - Message::Deal(pvss) - if matches!( - self.state, - DkgState::Sharing { .. } | DkgState::Dealt - ) => - { - if !self.validators.contains_key(&sender.address) { - return Err(Error::UnknownDealer(sender.clone().address)); - } - - // TODO: Throw error instead of silently accepting excess shares? - // if self.vss.len() < self.dkg_params.shares_num as usize { - // self.vss.insert(sender.address.clone(), pvss.clone()); - // } - self.vss.insert(sender.address.clone(), pvss.clone()); - - // we keep track of the amount of shares seen until the security - // threshold is met. Then we may change the state of the DKG - if let DkgState::Sharing { - ref mut accumulated_shares, - .. 
- } = &mut self.state - { - *accumulated_shares += 1; - if *accumulated_shares >= self.dkg_params.security_threshold - { - self.state = DkgState::Dealt; - } - } - Ok(()) - } - Message::Aggregate(_) if matches!(self.state, DkgState::Dealt) => { - // change state and cache the final key - self.state = DkgState::Success { - public_key: self.public_key(), - }; - Ok(()) - } - _ => Err(Error::InvalidDkgStateToIngest), + if validator_set.len() > self.validators.len() + || transcript_set.len() > self.validators.len() + { + return Err(Error::TooManyTranscripts( + self.validators.len() as u32, + validator_set.len() as u32, + )); } - } - pub fn deal( - &mut self, - sender: &Validator, - pvss: &PubliclyVerifiableSS, - ) -> Result<()> { - // Add the ephemeral public key and pvss transcript - let (sender_address, _) = self - .validators - .iter() - .find(|(probe_address, _)| sender.address == **probe_address) - .ok_or_else(|| Error::UnknownDealer(sender.address.clone()))?; - self.vss.insert(sender_address.clone(), pvss.clone()); Ok(()) } } -#[serde_as] -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(bound( - serialize = "AggregatedPvss: Serialize", - deserialize = "AggregatedPvss: DeserializeOwned" -))] -pub struct Aggregation { - vss: AggregatedPvss, - #[serde_as(as = "ferveo_common::serialization::SerdeAs")] - public_key: E::G1Affine, -} - -// TODO: Remove these? -// TODO: These messages are not actually used anywhere, we use our own ValidatorMessage for Deal, and Aggregate for Message.Aggregate -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(bound( - serialize = "AggregatedPvss: Serialize, PubliclyVerifiableSS: Serialize", - deserialize = "AggregatedPvss: DeserializeOwned, PubliclyVerifiableSS: DeserializeOwned" -))] -pub enum Message { - Deal(PubliclyVerifiableSS), - Aggregate(Aggregation), -} - /// Test initializing DKG #[cfg(test)] mod test_dkg_init { @@ -373,7 +238,6 @@ mod test_dkg_init { &unknown_validator, ) .unwrap_err(); - assert_eq!(err.to_string(), "Expected validator to be a part of the DKG validator set: 0x0000000000000000000000000000000000000005") } } @@ -381,11 +245,8 @@ mod test_dkg_init { /// Test the dealing phase of the DKG #[cfg(test)] mod test_dealing { - use ark_ec::AffineRepr; - use crate::{ - test_common::*, DkgParams, DkgState, DkgState::Dealt, Error, - PubliclyVerifiableDkg, Validator, + test_common::*, DkgParams, Error, PubliclyVerifiableDkg, Validator, }; /// Check that the canonical share indices of validators are expected and enforced @@ -415,67 +276,24 @@ mod test_dealing { ); } - /// Test that dealing correct PVSS transcripts - /// pass verification an application and that - /// state is updated correctly + /// Test that dealing correct PVSS transcripts passes validation #[test] fn test_pvss_dealing() { let rng = &mut ark_std::test_rng(); - - // Create a test DKG instance - let (mut dkg, _) = setup_dkg(0); - - // Gather everyone's transcripts - let mut messages = vec![]; - for i in 0..dkg.dkg_params.shares_num() { - let (mut dkg, _) = setup_dkg(i as usize); - let message = dkg.share(rng).unwrap(); - let sender = dkg.me.clone(); - messages.push((sender, message)); - } - - let mut expected = 0u32; - for (sender, pvss) in messages.iter() { - // Check the verification passes - assert!(dkg.verify_message(sender, pvss).is_ok()); - - // Check that application passes - assert!(dkg.apply_message(sender, pvss).is_ok()); - - expected += 1; - if expected < dkg.dkg_params.security_threshold { - // check that shares accumulates correctly - match dkg.state { - 
DkgState::Sharing { - accumulated_shares, .. - } => { - assert_eq!(accumulated_shares, expected) - } - _ => panic!("Test failed"), - } - } else { - // Check that when enough shares is accumulated, we transition state - assert!(matches!(dkg.state, DkgState::Dealt)); - } - } + let (dkg, _) = setup_dkg(0); + let messages = make_messages(rng, &dkg); + assert!(dkg.verify_transcripts(&messages).is_ok()); } - /// Test the verification and application of - /// pvss transcripts from unknown validators - /// are rejected + /// Test the verification and application of pvss transcripts from + /// unknown validators are rejected #[test] fn test_pvss_from_unknown_dealer_rejected() { let rng = &mut ark_std::test_rng(); - let (mut dkg, _) = setup_dkg(0); - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 0, - block: 0 - } - )); - let pvss = dkg.share(rng).unwrap(); - // Need to make sure this falls outside of the validator set: + let (dkg, _) = setup_dkg(0); + let mut messages = make_messages(rng, &dkg); + + // Need to make sure this falls outside the validator set: let unknown_validator_index = dkg.dkg_params.shares_num + VALIDATORS_NUM + 1; let sender = Validator:: { @@ -483,246 +301,142 @@ mod test_dealing { public_key: ferveo_common::Keypair::::new(rng).public_key(), share_index: unknown_validator_index, }; - // check that verification fails - assert!(dkg.verify_message(&sender, &pvss).is_err()); - // check that application fails - assert!(dkg.apply_message(&sender, &pvss).is_err()); - // check that state has not changed - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 0, - block: 0, - } - )); + let transcript = dkg.generate_transcript(rng).unwrap(); + messages.push((sender, transcript)); + + assert!(dkg.verify_transcripts(&messages).is_err()); } - /// Test that if a validator sends two pvss transcripts, - /// the second fails to verify + /// Test that if a validator sends two pvss transcripts, the second fails to verify #[test] fn test_pvss_sent_twice_rejected() { let rng = &mut ark_std::test_rng(); - let (mut dkg, _) = setup_dkg(0); - // We start with an empty state - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 0, - block: 0, - } - )); + let (dkg, _) = setup_dkg(0); + let mut messages = make_messages(rng, &dkg); - let pvss = dkg.share(rng).unwrap(); + messages.push(messages[0].clone()); - // This validator has already sent a PVSS - let sender = dkg.me.clone(); - - // First PVSS is accepted - assert!(dkg.verify_message(&sender, &pvss).is_ok()); - assert!(dkg.apply_message(&sender, &pvss).is_ok()); - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 1, - block: 0, - } - )); - - // Second PVSS is rejected - assert!(dkg.verify_message(&sender, &pvss).is_err()); + assert!(dkg.verify_transcripts(&messages).is_err()); } - /// Test that if a validators tries to verify it's own - /// share message, it passes + /// Test that if a validators tries to verify its own share message, it passes #[test] fn test_own_pvss() { let rng = &mut ark_std::test_rng(); - let (mut dkg, _) = setup_dkg(0); - // We start with an empty state - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 0, - block: 0, - } - )); - - // Sender creates a PVSS transcript - let pvss = dkg.share(rng).unwrap(); - // Note that state of DKG has not changed - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 0, - block: 0, - } - )); - - let sender = dkg.me.clone(); - - // Sender verifies it's 
own PVSS transcript - assert!(dkg.verify_message(&sender, &pvss).is_ok()); - assert!(dkg.apply_message(&sender, &pvss).is_ok()); - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 1, - block: 0, - } - )); - } - - /// Test that the [`PubliclyVerifiableDkg::share`] method - /// errors if its state is not [`DkgState::Shared{..} | Dkg::Dealt`] - #[test] - fn test_pvss_cannot_share_from_wrong_state() { - let rng = &mut ark_std::test_rng(); - let (mut dkg, _) = setup_dkg(0); - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 0, - block: 0, - } - )); - - dkg.state = DkgState::Success { - public_key: G1::zero(), - }; - assert!(dkg.share(rng).is_err()); - - // check that even if security threshold is met, we can still share - dkg.state = Dealt; - assert!(dkg.share(rng).is_ok()); - } - - /// Check that share messages can only be - /// verified or applied if the dkg is in - /// state [`DkgState::Share{..} | DkgState::Dealt`] - #[test] - fn test_share_message_state_guards() { - let rng = &mut ark_std::test_rng(); - let (mut dkg, _) = setup_dkg(0); - let pvss = dkg.share(rng).unwrap(); - assert!(matches!( - dkg.state, - DkgState::Sharing { - accumulated_shares: 0, - block: 0, - } - )); + let (dkg, _) = setup_dkg(0); + let messages = make_messages(rng, &dkg) + .iter() + .take(1) + .cloned() + .collect::>(); - let sender = dkg.me.clone(); - dkg.state = DkgState::Success { - public_key: G1::zero(), - }; - assert!(dkg.verify_message(&sender, &pvss).is_err()); - assert!(dkg.apply_message(&sender, &pvss).is_err()); - - // check that we can still accept pvss transcripts after meeting threshold - dkg.state = Dealt; - assert!(dkg.verify_message(&sender, &pvss).is_ok()); - assert!(dkg.apply_message(&sender, &pvss).is_ok()); - assert!(matches!(dkg.state, DkgState::Dealt)) + assert!(dkg.verify_transcripts(&messages).is_ok()); } } /// Test aggregating transcripts into final key #[cfg(test)] mod test_aggregation { - use ark_ec::AffineRepr; - use test_case::test_case; + use ark_poly::EvaluationDomain; - use crate::{dkg::*, test_common::*, DkgState, Message}; + use crate::test_common::*; /// Test that if the security threshold is met, we can create a final key - #[test_case(4,4; "number of validators equal to the number of shares")] - #[test_case(4,6; "number of validators greater than the number of shares")] - fn test_aggregate(shares_num: u32, validators_num: u32) { - let security_threshold = shares_num - 1; - let (mut dkg, _) = setup_dealt_dkg_with_n_validators( - security_threshold, - shares_num, - validators_num, - ); - let aggregate_msg = dkg.aggregate().unwrap(); - if let Message::Aggregate(Aggregation { public_key, .. }) = - &aggregate_msg - { - assert_eq!(public_key, &dkg.public_key()); - } else { - panic!("Expected aggregate message") - } - let sender = dkg.me.clone(); - assert!(dkg.verify_message(&sender, &aggregate_msg).is_ok()); - assert!(dkg.apply_message(&sender, &aggregate_msg).is_ok()); - assert!(matches!(dkg.state, DkgState::Success { .. 
})); - } - - /// Test that aggregate only succeeds if we are in the state [`DkgState::Dealt] #[test] - fn test_aggregate_state_guards() { - let (mut dkg, _) = setup_dealt_dkg(); - dkg.state = DkgState::Sharing { - accumulated_shares: 0, - block: 0, - }; - assert!(dkg.aggregate().is_err()); - dkg.state = DkgState::Success { - public_key: G1::zero(), - }; - assert!(dkg.aggregate().is_err()); + fn test_aggregate() { + let rng = &mut ark_std::test_rng(); + let (dkg, _) = setup_dkg(0); + let all_messages = make_messages(rng, &dkg); + + let not_enough_messages = all_messages + .iter() + .take((dkg.dkg_params.security_threshold - 1) as usize) + .cloned() + .collect::>(); + let bad_aggregate = + dkg.aggregate_transcripts(¬_enough_messages).unwrap(); + + let enough_messages = all_messages + .iter() + .take(dkg.dkg_params.security_threshold as usize) + .cloned() + .collect::>(); + let good_aggregate_1 = + dkg.aggregate_transcripts(&enough_messages).unwrap(); + assert_ne!(bad_aggregate, good_aggregate_1); + + let good_aggregate_2 = + dkg.aggregate_transcripts(&all_messages).unwrap(); + assert_ne!(good_aggregate_1, good_aggregate_2); } - /// Test that aggregate message fail to be verified or applied unless - /// dkg.state is [`DkgState::Dealt`] + /// Size of the domain should be equal a power of 2 #[test] - fn test_aggregate_message_state_guards() { - let (mut dkg, _) = setup_dealt_dkg(); - let aggregate = dkg.aggregate().unwrap(); - let sender = dkg.me.clone(); - - dkg.state = DkgState::Sharing { - accumulated_shares: 0, - block: 0, - }; - assert!(dkg.verify_message(&sender, &aggregate).is_err()); - assert!(dkg.apply_message(&sender, &aggregate).is_err()); - - dkg.state = DkgState::Success { - public_key: G1::zero(), - }; - assert!(dkg.verify_message(&sender, &aggregate).is_err()); - assert!(dkg.apply_message(&sender, &aggregate).is_err()) + fn test_domain_points_size_is_power_of_2() { + // Using a validators number which is not a power of 2 + let validators_num = 6; + let (dkg, _, _) = setup_dealt_dkg_with_n_validators( + validators_num, + validators_num, + validators_num, + ); + // This should cause the domain to be of size that is a power of 2 + assert_eq!(dkg.domain.elements().count(), 8); } - /// Test that an aggregate message will fail to verify if the - /// security threshold is not met + /// For the same number of validators, we should get the same domain points + /// in two different DKG instances #[test] - fn test_aggregate_wont_verify_if_under_threshold() { - let (mut dkg, _) = setup_dealt_dkg(); - dkg.dkg_params.shares_num = 10; - let aggregate = dkg.aggregate().unwrap(); - let sender = dkg.me.clone(); - assert!(dkg.verify_message(&sender, &aggregate).is_err()); + fn test_domain_point_determinism_for_share_number() { + let validators_num = 6; + let (dkg1, _, _) = setup_dealt_dkg_with_n_validators( + validators_num, + validators_num, + validators_num, + ); + let (dkg2, _, _) = setup_dealt_dkg_with_n_validators( + validators_num, + validators_num, + validators_num, + ); + assert_eq!(dkg1.domain_points(), dkg2.domain_points()); } - /// If the aggregated pvss passes, check that the announced - /// key is correct. 
Verification should fail if it is not + /// For a different number of validators, two DKG instances should have different domain points + /// This is because the number of share determines the generator of the domain #[test] - fn test_aggregate_wont_verify_if_wrong_key() { - let (dkg, _) = setup_dealt_dkg(); - let mut aggregate = dkg.aggregate().unwrap(); - while dkg.public_key() == G1::zero() { - let (_dkg, _) = setup_dealt_dkg(); - } - if let Message::Aggregate(Aggregation { public_key, .. }) = - &mut aggregate - { - *public_key = G1::zero(); - } - let sender = dkg.me.clone(); - assert!(dkg.verify_message(&sender, &aggregate).is_err()); + fn test_domain_points_different_for_different_domain_size() { + // In the first case, both DKG should have the same domain points despite different + // number of validators. This is because the domain size is the nearest power of 2 + // and both 6 and 7 are rounded to 8 + let validators_num = 6; + let (dkg1, _, _) = setup_dealt_dkg_with_n_validators( + validators_num, + validators_num, + validators_num, + ); + let (dkg2, _, _) = setup_dealt_dkg_with_n_validators( + validators_num + 1, + validators_num + 1, + validators_num + 1, + ); + assert_eq!(dkg1.domain.elements().count(), 8); + assert_eq!(dkg2.domain.elements().count(), 8); + assert_eq!( + dkg1.domain_points()[..validators_num as usize], + dkg2.domain_points()[..validators_num as usize] + ); + + // In the second case, the domain size is different and so the domain points + // should be different + let validators_num_different = 15; + let (dkg3, _, _) = setup_dealt_dkg_with_n_validators( + validators_num_different, + validators_num_different, + validators_num_different, + ); + assert_eq!(dkg3.domain.elements().count(), 16); + assert_ne!(dkg1.domain_points(), dkg3.domain_points()); } } diff --git a/ferveo/src/lib.rs b/ferveo/src/lib.rs index f9d6c1a5..8ee00941 100644 --- a/ferveo/src/lib.rs +++ b/ferveo/src/lib.rs @@ -30,22 +30,6 @@ pub enum Error { #[error(transparent)] ThresholdEncryptionError(#[from] ferveo_tdec::Error), - /// DKG is not in a valid state to deal PVSS shares - #[error("Invalid DKG state to deal PVSS shares")] - InvalidDkgStateToDeal, - - /// DKG is not in a valid state to aggregate PVSS transcripts - #[error("Invalid DKG state to aggregate PVSS transcripts")] - InvalidDkgStateToAggregate, - - /// DKG is not in a valid state to verify PVSS transcripts - #[error("Invalid DKG state to verify PVSS transcripts")] - InvalidDkgStateToVerify, - - /// DKG is not in a valid state to ingest PVSS transcripts - #[error("Invalid DKG state to ingest PVSS transcripts")] - InvalidDkgStateToIngest, - /// DKG validator set must contain the validator with the given address #[error("Expected validator to be a part of the DKG validator set: {0}")] DealerNotInValidatorSet(EthereumAddress), @@ -59,18 +43,8 @@ pub enum Error { DuplicateDealer(EthereumAddress), /// DKG received an invalid transcript for which optimistic verification failed - #[error("DKG received an invalid transcript")] - InvalidPvssTranscript, - - /// Aggregation failed because the DKG did not receive enough PVSS transcripts - #[error( - "Insufficient transcripts for aggregation (expected {0}, got {1})" - )] - InsufficientTranscriptsForAggregate(u32, u32), - - /// Failed to derive a valid final key for the DKG - #[error("Failed to derive a valid final key for the DKG")] - InvalidDkgPublicKey, + #[error("DKG received an invalid transcript from validator: {0}")] + InvalidPvssTranscript(EthereumAddress), /// Not enough validators to perform 
the DKG for a given number of shares #[error("Not enough validators (expected {0}, got {1})")] @@ -98,7 +72,7 @@ pub enum Error { #[error("Invalid variant: {0}")] InvalidVariant(String), - /// DKG parameters validaiton failed + /// DKG parameters validation failed #[error("Invalid DKG parameters: number of shares {0}, threshold {1}")] InvalidDkgParameters(u32, u32), @@ -121,6 +95,14 @@ pub enum Error { /// The number of messages may not be greater than the number of validators #[error("Invalid aggregate verification parameters: number of validators {0}, number of messages: {1}")] InvalidAggregateVerificationParameters(u32, u32), + + /// Too many transcripts received by the DKG + #[error("Too many transcripts. Expected: {0}, got: {1}")] + TooManyTranscripts(u32, u32), + + /// Received a duplicated transcript from a validator + #[error("Received a duplicated transcript from validator: {0}")] + DuplicateTranscript(EthereumAddress), } pub type Result = std::result::Result; @@ -132,33 +114,36 @@ mod test_dkg_full { use ark_bls12_381::{Bls12_381 as E, Fr, G1Affine}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::{UniformRand, Zero}; - use ark_poly::EvaluationDomain; use ark_std::test_rng; use ferveo_common::Keypair; use ferveo_tdec::{ self, DecryptionSharePrecomputed, DecryptionShareSimple, SecretBox, SharedSecret, }; - use itertools::izip; - use rand::seq::SliceRandom; + use itertools::{izip, Itertools}; + use rand::{seq::SliceRandom, Rng}; use test_case::test_case; use super::*; use crate::test_common::*; - fn make_shared_secret_simple_tdec( + pub fn create_shared_secret_simple_tdec( dkg: &PubliclyVerifiableDkg, aad: &[u8], ciphertext_header: &ferveo_tdec::CiphertextHeader, validator_keypairs: &[Keypair], + transcripts: &[PubliclyVerifiableSS], ) -> ( - PubliclyVerifiableSS, + AggregatedTranscript, Vec>, SharedSecret, ) { - let pvss_list = dkg.vss.values().cloned().collect::>(); - let pvss_aggregated = aggregate(&pvss_list).unwrap(); - assert!(pvss_aggregated.verify_aggregation(dkg).is_ok()); + let server_aggregate = + AggregatedTranscript::from_transcripts(transcripts).unwrap(); + assert!(server_aggregate + .aggregate + .verify_aggregation(dkg, transcripts) + .unwrap()); let decryption_shares: Vec> = validator_keypairs @@ -167,36 +152,30 @@ mod test_dkg_full { let validator = dkg .get_validator(&validator_keypair.public_key()) .unwrap(); - pvss_aggregated - .make_decryption_share_simple( + server_aggregate + .aggregate + .create_decryption_share_simple( ciphertext_header, aad, - &validator_keypair.decryption_key, - validator.share_index as usize, - &dkg.pvss_params.g_inv(), + validator_keypair, + validator.share_index, ) .unwrap() }) + // We take only the first `security_threshold` decryption shares + .take(dkg.dkg_params.security_threshold() as usize) .collect(); - let domain_points = &dkg - .domain - .elements() - .take(decryption_shares.len()) - .collect::>(); + let domain_points = &dkg.domain_points()[..decryption_shares.len()]; assert_eq!(domain_points.len(), decryption_shares.len()); - // TODO: Consider refactor this part into ferveo_tdec::combine_simple and expose it - // as a public API in ferveo_tdec::api - let lagrange_coeffs = ferveo_tdec::prepare_combine_simple::(domain_points); let shared_secret = ferveo_tdec::share_combine_simple::( &decryption_shares, &lagrange_coeffs, ); - - (pvss_aggregated, decryption_shares, shared_secret) + (server_aggregate, decryption_shares, shared_secret) } #[test_case(4, 4; "number of shares (validators) is a power of 2")] @@ -204,28 +183,37 @@ mod 
test_dkg_full { #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_dkg_simple_tdec(shares_num: u32, validators_num: u32) { let rng = &mut test_rng(); - - let security_threshold = shares_num / 2 + 1; - let (dkg, validator_keypairs) = setup_dealt_dkg_with_n_validators( - security_threshold, - shares_num, - validators_num, - ); - - let public_key = dkg.public_key(); + let security_threshold = shares_num * 2 / 3; + let (dkg, validator_keypairs, messages) = + setup_dealt_dkg_with_n_validators( + security_threshold, + shares_num, + validators_num, + ); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); + let local_aggregate = + AggregatedTranscript::from_transcripts(&transcripts).unwrap(); + assert!(local_aggregate + .aggregate + .verify_aggregation(&dkg, &transcripts) + .unwrap()); let ciphertext = ferveo_tdec::encrypt::( SecretBox::new(MSG.to_vec()), AAD, - &public_key, + &local_aggregate.public_key, rng, ) .unwrap(); - - let (_, _, shared_secret) = make_shared_secret_simple_tdec( + let (_, _, shared_secret) = create_shared_secret_simple_tdec( &dkg, AAD, &ciphertext.header().unwrap(), validator_keypairs.as_slice(), + &transcripts, ); let plaintext = ferveo_tdec::decrypt_with_shared_secret( @@ -243,58 +231,79 @@ mod test_dkg_full { #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_dkg_simple_tdec_precomputed(shares_num: u32, validators_num: u32) { let rng = &mut test_rng(); - - // In precomputed variant, threshold must be equal to shares_num - let security_threshold = shares_num; - let (dkg, validator_keypairs) = setup_dealt_dkg_with_n_validators( - security_threshold, - shares_num, - validators_num, - ); - let public_key = dkg.public_key(); + let security_threshold = shares_num * 2 / 3; + let (dkg, validator_keypairs, messages) = + setup_dealt_dkg_with_n_transcript_dealt( + security_threshold, + shares_num, + validators_num, + shares_num, + ); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); + let local_aggregate = + AggregatedTranscript::from_transcripts(&transcripts).unwrap(); + assert!(local_aggregate + .aggregate + .verify_aggregation(&dkg, &transcripts) + .unwrap()); let ciphertext = ferveo_tdec::encrypt::( SecretBox::new(MSG.to_vec()), AAD, - &public_key, + &local_aggregate.public_key, rng, ) .unwrap(); - let pvss_list = dkg.vss.values().cloned().collect::>(); - let pvss_aggregated = aggregate(&pvss_list).unwrap(); - pvss_aggregated.verify_aggregation(&dkg).unwrap(); - let domain_points = dkg - .domain - .elements() - .take(validator_keypairs.len()) + // In precomputed variant, client selects a specific subset of validators to create + // decryption shares + let selected_keypairs = validator_keypairs + .choose_multiple(rng, security_threshold as usize) + .collect::>(); + let selected_validators = selected_keypairs + .iter() + .map(|keypair| { + dkg.get_validator(&keypair.public_key()) + .expect("Validator not found") + }) .collect::>(); + let selected_domain_points = selected_validators + .iter() + .filter_map(|v| { + dkg.get_domain_point(v.share_index) + .ok() + .map(|domain_point| (v.share_index, domain_point)) + }) + .collect::>>(); let mut decryption_shares: Vec> = - validator_keypairs + selected_keypairs .iter() .map(|validator_keypair| { let validator = dkg .get_validator(&validator_keypair.public_key()) .unwrap(); - pvss_aggregated - .make_decryption_share_simple_precomputed( + local_aggregate + 
.aggregate + .create_decryption_share_precomputed( &ciphertext.header().unwrap(), AAD, - &validator_keypair.decryption_key, - validator.share_index as usize, - &domain_points, - &dkg.pvss_params.g_inv(), + validator_keypair, + validator.share_index, + &selected_domain_points, ) .unwrap() }) .collect(); + // Order of decryption shares is not important decryption_shares.shuffle(rng); - assert_eq!(domain_points.len(), decryption_shares.len()); + // Decrypt with precomputed variant let shared_secret = ferveo_tdec::share_combine_precomputed::(&decryption_shares); - - // Combination works, let's decrypt let plaintext = ferveo_tdec::decrypt_with_shared_secret( &ciphertext, AAD, @@ -305,7 +314,8 @@ mod test_dkg_full { assert_eq!(plaintext, MSG); } - #[test_case(4, 4; "number of validators equal to the number of shares")] + #[test_case(4, 4; "number of shares (validators) is a power of 2")] + #[test_case(7, 7; "number of shares (validators) is not a power of 2")] #[test_case(4, 6; "number of validators greater than the number of shares")] fn test_dkg_simple_tdec_share_verification( shares_num: u32, @@ -313,31 +323,42 @@ mod test_dkg_full { ) { let rng = &mut test_rng(); let security_threshold = shares_num / 2 + 1; - - let (dkg, validator_keypairs) = setup_dealt_dkg_with_n_validators( - security_threshold, - shares_num, - validators_num, - ); - let public_key = dkg.public_key(); + let (dkg, validator_keypairs, messages) = + setup_dealt_dkg_with_n_validators( + security_threshold, + shares_num, + validators_num, + ); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); + let local_aggregate = + AggregatedTranscript::from_transcripts(&transcripts).unwrap(); + assert!(local_aggregate + .aggregate + .verify_aggregation(&dkg, &transcripts) + .unwrap()); let ciphertext = ferveo_tdec::encrypt::( SecretBox::new(MSG.to_vec()), AAD, - &public_key, + &local_aggregate.public_key, rng, ) .unwrap(); - let (pvss_aggregated, decryption_shares, _) = - make_shared_secret_simple_tdec( + let (local_aggregate, decryption_shares, _) = + create_shared_secret_simple_tdec( &dkg, AAD, &ciphertext.header().unwrap(), validator_keypairs.as_slice(), + &transcripts, ); izip!( - &pvss_aggregated.shares, + &local_aggregate.aggregate.shares, &validator_keypairs, &decryption_shares, ) @@ -359,7 +380,7 @@ mod test_dkg_full { let mut with_bad_decryption_share = decryption_share.clone(); with_bad_decryption_share.decryption_share = TargetField::zero(); assert!(!with_bad_decryption_share.verify( - &pvss_aggregated.shares[0], + &local_aggregate.aggregate.shares[0], &validator_keypairs[0].public_key().encryption_key, &dkg.pvss_params.h, &ciphertext, @@ -369,48 +390,77 @@ mod test_dkg_full { let mut with_bad_checksum = decryption_share; with_bad_checksum.validator_checksum.checksum = G1Affine::zero(); assert!(!with_bad_checksum.verify( - &pvss_aggregated.shares[0], + &local_aggregate.aggregate.shares[0], &validator_keypairs[0].public_key().encryption_key, &dkg.pvss_params.h, &ciphertext, )); } - #[test] - fn test_dkg_simple_tdec_share_recovery() { + #[test_case(4, 4; "number of shares (validators) is a power of 2")] + #[test_case(7, 7; "number of shares (validators) is not a power of 2")] + #[test_case(4, 6; "number of validators greater than the number of shares")] + fn test_dkg_simple_tdec_share_recovery( + shares_num: u32, + validators_num: u32, + ) { let rng = &mut test_rng(); - - let (dkg, validator_keypairs) = - setup_dealt_dkg_with(SECURITY_THRESHOLD, SHARES_NUM); - let 
public_key = &dkg.public_key(); + let security_threshold = shares_num / 2 + 1; + let (dkg, validator_keypairs, messages) = + setup_dealt_dkg_with_n_validators( + security_threshold, + shares_num, + validators_num, + ); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); + let local_aggregate = + AggregatedTranscript::from_transcripts(&transcripts).unwrap(); + assert!(local_aggregate + .aggregate + .verify_aggregation(&dkg, &transcripts) + .unwrap()); let ciphertext = ferveo_tdec::encrypt::( SecretBox::new(MSG.to_vec()), AAD, - public_key, + &local_aggregate.public_key, rng, ) .unwrap(); // Create an initial shared secret - let (_, _, old_shared_secret) = make_shared_secret_simple_tdec( + let (_, _, old_shared_secret) = create_shared_secret_simple_tdec( &dkg, AAD, &ciphertext.header().unwrap(), validator_keypairs.as_slice(), + &transcripts, ); + // TODO: Rewrite this test so that the offboarding of validator + // is done by recreating a DKG instance with a new set of + // validators from the Coordinator, rather than modifying the + // existing DKG instance. + // Remove one participant from the contexts and all nested structure - let removed_validator_addr = - dkg.validators.keys().last().unwrap().clone(); + let removed_validator_index = rng.gen_range(0..validators_num); + let removed_validator_addr = dkg + .validators + .iter() + .find(|(_, v)| v.share_index == removed_validator_index) + .unwrap() + .1 + .address + .clone(); let mut remaining_validators = dkg.validators.clone(); - remaining_validators - .remove(&removed_validator_addr) - .unwrap(); - // dkg.vss.remove(&removed_validator_addr); // TODO: Test whether it makes any difference + remaining_validators.remove(&removed_validator_addr); // Remember to remove one domain point too - let mut domain_points = dkg.domain_points(); - domain_points.pop().unwrap(); + let mut domain_points = dkg.domain_point_map(); + domain_points.remove(&removed_validator_index); // Now, we're going to recover a new share at a random point, // and check that the shared secret is still the same. 
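// Editor's note (not part of the patch): the recovery flow in the next hunk is
// plain Lagrange interpolation -- given `security_threshold` evaluations
// (x_i, phi(x_i)) of the secret polynomial, the share at a fresh point x_r is
//   phi(x_r) = sum_i phi(x_i) * prod_{j != i} (x_r - x_j) / (x_i - x_j).
// The crate performs this over the BLS12-381 scalar field with arkworks types
// (see `recover_share_from_updated_private_shares` below); the hypothetical
// helper here is only a minimal, self-contained sketch of the formula over f64.
fn lagrange_interpolate_at(points: &[(f64, f64)], x_r: f64) -> f64 {
    points
        .iter()
        .enumerate()
        .map(|(i, (x_i, y_i))| {
            // Lagrange basis polynomial l_i evaluated at x_r
            let basis: f64 = points
                .iter()
                .enumerate()
                .filter(|(j, _)| *j != i)
                .map(|(_, (x_j, _))| (x_r - x_j) / (x_i - x_j))
                .product();
            y_i * basis
        })
        .sum()
}
// Example: phi(x) = 3 + 2x sampled at x = 1 and x = 2 recovers phi(5) = 13:
//   assert_eq!(lagrange_interpolate_at(&[(1.0, 5.0), (2.0, 7.0)], 5.0), 13.0);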
@@ -418,15 +468,15 @@ mod test_dkg_full { // Our random point: let x_r = Fr::rand(rng); - // Each participant prepares an update for each other participant + // Each participant prepares an update for every other participant let share_updates = remaining_validators .keys() .map(|v_addr| { - let deltas_i = prepare_share_updates_for_recovery::( + let deltas_i = ShareRecoveryUpdate::create_share_updates( &domain_points, &dkg.pvss_params.h.into_affine(), &x_r, - dkg.dkg_params.security_threshold() as usize, + dkg.dkg_params.security_threshold(), rng, ); (v_addr.clone(), deltas_i) @@ -436,129 +486,159 @@ mod test_dkg_full { // Participants share updates and update their shares // Now, every participant separately: - // TODO: Move this logic outside tests (see #162, #163) - let updated_shares: Vec<_> = remaining_validators + let updated_shares: HashMap = remaining_validators .values() .map(|validator| { // Current participant receives updates from other participants - let updates_for_participant: Vec<_> = share_updates + let updates_for_validator: Vec<_> = share_updates .values() - .map(|updates| { - *updates.get(validator.share_index as usize).unwrap() - }) + .map(|updates| updates.get(&validator.share_index).unwrap()) + .cloned() .collect(); // Each validator uses their decryption key to update their share - let decryption_key = validator_keypairs + let validator_keypair = validator_keypairs .get(validator.share_index as usize) - .unwrap() - .decryption_key; + .unwrap(); // Creates updated private key shares - // TODO: Why not using dkg.aggregate()? - let pvss_list = dkg.vss.values().cloned().collect::>(); - let pvss_aggregated = aggregate(&pvss_list).unwrap(); - pvss_aggregated - .update_private_key_share_for_recovery( - &decryption_key, - validator.share_index as usize, - updates_for_participant.as_slice(), - ) - .unwrap() + let updated_key_share = + AggregatedTranscript::from_transcripts(&transcripts) + .unwrap() + .aggregate + .create_updated_private_key_share( + validator_keypair, + validator.share_index, + updates_for_validator.as_slice(), + ) + .unwrap(); + (validator.share_index, updated_key_share) }) .collect(); - // TODO: Rename updated_private_shares to something that doesn't imply mutation (see #162, #163) - // Now, we have to combine new share fragments into a new share - let new_private_key_share = recover_share_from_updated_private_shares( - &x_r, - &domain_points, - &updated_shares, - ); + let recovered_key_share = + PrivateKeyShare::recover_share_from_updated_private_shares( + &x_r, + &domain_points, + &updated_shares, + ) + .unwrap(); // Get decryption shares from remaining participants - let mut remaining_validator_keypairs = validator_keypairs; - remaining_validator_keypairs - .pop() - .expect("Should have a keypair"); - let mut decryption_shares: Vec> = - remaining_validator_keypairs - .iter() - .enumerate() - .map(|(share_index, validator_keypair)| { - // TODO: Why not using dkg.aggregate()? 
- let pvss_list = - dkg.vss.values().cloned().collect::>(); - let pvss_aggregated = aggregate(&pvss_list).unwrap(); - pvss_aggregated - .make_decryption_share_simple( + let mut decryption_shares = remaining_validators + .values() + .map(|validator| { + let validator_keypair = validator_keypairs + .get(validator.share_index as usize) + .unwrap(); + let decryption_share = + AggregatedTranscript::from_transcripts(&transcripts) + .unwrap() + .aggregate + .create_decryption_share_simple( &ciphertext.header().unwrap(), AAD, - &validator_keypair.decryption_key, - share_index, - &dkg.pvss_params.g_inv(), + validator_keypair, + validator.share_index, ) - .unwrap() - }) - .collect(); + .unwrap(); + (validator.share_index, decryption_share) + }) + // We take only the first `security_threshold - 1` decryption shares + .take((dkg.dkg_params.security_threshold() - 1) as usize) + .collect::>(); // Create a decryption share from a recovered private key share let new_validator_decryption_key = Fr::rand(rng); - decryption_shares.push( - DecryptionShareSimple::create( - &new_validator_decryption_key, - &new_private_key_share, - &ciphertext.header().unwrap(), - AAD, - &dkg.pvss_params.g_inv(), - ) - .unwrap(), - ); - - domain_points.push(x_r); - assert_eq!(domain_points.len(), SHARES_NUM as usize); - assert_eq!(decryption_shares.len(), SHARES_NUM as usize); - - // Maybe parametrize this test with [1..] and [..threshold] - let domain_points = &domain_points[1..]; - let decryption_shares = &decryption_shares[1..]; - assert_eq!(domain_points.len(), SECURITY_THRESHOLD as usize); - assert_eq!(decryption_shares.len(), SECURITY_THRESHOLD as usize); + let new_decryption_share = DecryptionShareSimple::create( + &new_validator_decryption_key, + &recovered_key_share.0, + &ciphertext.header().unwrap(), + AAD, + &dkg.pvss_params.g_inv(), + ) + .unwrap(); + decryption_shares.insert(removed_validator_index, new_decryption_share); + domain_points.insert(removed_validator_index, x_r); + + // We need to make sure that the domain points and decryption shares are ordered + // by the share index, so that the lagrange basis is calculated correctly + + let mut domain_points_ = vec![]; + let mut decryption_shares_ = vec![]; + for share_index in decryption_shares.keys().sorted() { + domain_points_.push( + *domain_points + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index)) + .unwrap(), + ); + decryption_shares_.push( + decryption_shares + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index)) + .unwrap() + .clone(), + ); + } + assert_eq!(domain_points_.len(), security_threshold as usize); + assert_eq!(decryption_shares_.len(), security_threshold as usize); - let lagrange = ferveo_tdec::prepare_combine_simple::(domain_points); + let lagrange = + ferveo_tdec::prepare_combine_simple::(&domain_points_); let new_shared_secret = ferveo_tdec::share_combine_simple::( - decryption_shares, + &decryption_shares_, &lagrange, ); - assert_eq!( old_shared_secret, new_shared_secret, "Shared secret reconstruction failed" ); } - #[test] - fn test_dkg_simple_tdec_share_refreshing() { + #[test_case(4, 4; "number of shares (validators) is a power of 2")] + #[test_case(7, 7; "number of shares (validators) is not a power of 2")] + #[test_case(4, 6; "number of validators greater than the number of shares")] + fn test_dkg_simple_tdec_share_refreshing( + shares_num: u32, + validators_num: u32, + ) { let rng = &mut test_rng(); + let security_threshold = shares_num / 2 + 1; - let (dkg, validator_keypairs) = - 
setup_dealt_dkg_with(SECURITY_THRESHOLD, SHARES_NUM); - let public_key = &dkg.public_key(); + let (dkg, validator_keypairs, messages) = + setup_dealt_dkg_with_n_validators( + security_threshold, + shares_num, + validators_num, + ); + let transcripts = messages + .iter() + .take(shares_num as usize) + .map(|m| m.1.clone()) + .collect::>(); + let local_aggregate = + AggregatedTranscript::from_transcripts(&transcripts).unwrap(); + assert!(local_aggregate + .aggregate + .verify_aggregation(&dkg, &transcripts) + .unwrap()); let ciphertext = ferveo_tdec::encrypt::( SecretBox::new(MSG.to_vec()), AAD, - public_key, + &local_aggregate.public_key, rng, ) .unwrap(); // Create an initial shared secret - let (_, _, old_shared_secret) = make_shared_secret_simple_tdec( + let (_, _, old_shared_secret) = create_shared_secret_simple_tdec( &dkg, AAD, &ciphertext.header().unwrap(), validator_keypairs.as_slice(), + &transcripts, ); // Each participant prepares an update for each other participant @@ -566,10 +646,10 @@ mod test_dkg_full { .validators .keys() .map(|v_addr| { - let deltas_i = prepare_share_updates_for_refresh::( - &dkg.domain_points(), + let deltas_i = ShareRefreshUpdate::create_share_updates( + &dkg.domain_point_map(), &dkg.pvss_params.h.into_affine(), - dkg.dkg_params.security_threshold() as usize, + dkg.dkg_params.security_threshold(), rng, ); (v_addr.clone(), deltas_i) @@ -579,8 +659,7 @@ mod test_dkg_full { // Participants share updates and update their shares // Now, every participant separately: - // TODO: Move this logic outside tests (see #162, #163) - let updated_shares: Vec<_> = dkg + let updated_private_key_shares: Vec<_> = dkg .validators .values() .map(|validator| { @@ -588,24 +667,22 @@ mod test_dkg_full { let updates_for_participant: Vec<_> = share_updates .values() .map(|updates| { - *updates.get(validator.share_index as usize).unwrap() + updates.get(&validator.share_index).cloned().unwrap() }) .collect(); // Each validator uses their decryption key to update their share - let decryption_key = validator_keypairs + let validator_keypair = validator_keypairs .get(validator.share_index as usize) - .unwrap() - .decryption_key; + .unwrap(); // Creates updated private key shares - // TODO: Why not using dkg.aggregate()? 
- let pvss_list = dkg.vss.values().cloned().collect::>(); - let pvss_aggregated = aggregate(&pvss_list).unwrap(); - pvss_aggregated - .update_private_key_share_for_recovery( - &decryption_key, - validator.share_index as usize, + AggregatedTranscript::from_transcripts(&transcripts) + .unwrap() + .aggregate + .create_updated_private_key_share( + validator_keypair, + validator.share_index, updates_for_participant.as_slice(), ) .unwrap() @@ -618,25 +695,37 @@ mod test_dkg_full { .iter() .enumerate() .map(|(share_index, validator_keypair)| { + // In order to proceed with the decryption, we need to convert the updated private key shares + let private_key_share = &updated_private_key_shares + .get(share_index) + .unwrap() + .inner() + .0; DecryptionShareSimple::create( &validator_keypair.decryption_key, - updated_shares.get(share_index).unwrap(), + private_key_share, &ciphertext.header().unwrap(), AAD, &dkg.pvss_params.g_inv(), ) .unwrap() }) + // We take only the first `security_threshold` decryption shares + .take(dkg.dkg_params.security_threshold() as usize) .collect(); + // Order of decryption shares is not important, but since we are using low-level + // API here to performa a refresh for testing purpose, we will not shuffle + // the shares this time + // decryption_shares.shuffle(rng); + let lagrange = ferveo_tdec::prepare_combine_simple::( - &dkg.domain_points()[..SECURITY_THRESHOLD as usize], + &dkg.domain_points()[..security_threshold as usize], ); let new_shared_secret = ferveo_tdec::share_combine_simple::( - &decryption_shares[..SECURITY_THRESHOLD as usize], + &decryption_shares[..security_threshold as usize], &lagrange, ); - assert_eq!(old_shared_secret, new_shared_secret); } } diff --git a/ferveo/src/pvss.rs b/ferveo/src/pvss.rs index 5108f6b9..8d1affeb 100644 --- a/ferveo/src/pvss.rs +++ b/ferveo/src/pvss.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, ops::Mul}; +use std::{collections::HashMap, hash::Hash, marker::PhantomData, ops::Mul}; use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup, Group}; use ark_ff::{Field, Zero}; @@ -6,24 +6,25 @@ use ark_poly::{ polynomial::univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Polynomial, }; +use ferveo_common::{serialization, Keypair}; use ferveo_tdec::{ - prepare_combine_simple, CiphertextHeader, DecryptionSharePrecomputed, - DecryptionShareSimple, PrivateKeyShare, + CiphertextHeader, DecryptionSharePrecomputed, DecryptionShareSimple, }; use itertools::Itertools; use rand::RngCore; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_with::serde_as; use subproductdomain::fast_multiexp; use zeroize::{self, Zeroize, ZeroizeOnDrop}; use crate::{ - apply_updates_to_private_share, assert_no_share_duplicates, - batch_to_projective_g1, batch_to_projective_g2, Error, PVSSMap, - PubliclyVerifiableDkg, Result, Validator, + assert_no_share_duplicates, batch_to_projective_g1, batch_to_projective_g2, + DomainPoint, Error, PrivateKeyShare, PrivateKeyShareUpdate, + PubliclyVerifiableDkg, Result, UpdatedPrivateKeyShare, Validator, }; /// These are the blinded evaluations of shares of a single random polynomial +// TODO: Are these really blinded like in tdec or encrypted? 
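// Editor's note, derived from the checks in `do_verify_full` and
// `decrypt_private_key_share` in this file: each entry is Y_i = ek_i^{phi(omega_i)},
// i.e. the i-th evaluation of the dealer's secret polynomial combined with
// validator i's session encryption key, which that validator later unblinds by
// multiplying with dk_i^{-1} to obtain its private key share g_2^{phi(omega_i)}.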
pub type ShareEncryptions = ::G2Affine; /// Marker struct for unaggregated PVSS transcripts @@ -67,16 +68,16 @@ impl Default for PubliclyVerifiableParams { /// Secret polynomial used in the PVSS protocol /// We wrap this in a struct so that we can zeroize it after use -pub struct SecretPolynomial(pub DensePolynomial); +pub struct SecretPolynomial(pub DensePolynomial>); impl SecretPolynomial { pub fn new( - s: &E::ScalarField, + s: &DomainPoint, degree: usize, rng: &mut impl RngCore, ) -> Self { // Our random polynomial, \phi(x) = s + \sum_{i=1}^{t-1} a_i x^i - let mut phi = DensePolynomial::::rand(degree, rng); + let mut phi = DensePolynomial::>::rand(degree, rng); phi.coeffs[0] = *s; // setting the first coefficient to secret value Self(phi) } @@ -99,23 +100,23 @@ impl Drop for SecretPolynomial { impl ZeroizeOnDrop for SecretPolynomial {} -/// Each validator posts a transcript to the chain. Once enough -/// validators have done this (their total voting power exceeds -/// 2/3 the total), this will be aggregated into a final key +/// Each validator posts a transcript to the chain. Once enough (threshold) validators have done, +/// these will be aggregated into a final key #[serde_as] #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct PubliclyVerifiableSS { /// Used in Feldman commitment to the VSS polynomial, F = g^{\phi} - #[serde_as(as = "ferveo_common::serialization::SerdeAs")] + #[serde_as(as = "serialization::SerdeAs")] pub coeffs: Vec, /// The shares to be dealt to each validator - #[serde_as(as = "ferveo_common::serialization::SerdeAs")] - // pub shares: Vec>, // TODO: Using a custom type instead of referring to E:G2Affine breaks the serialization + #[serde_as(as = "serialization::SerdeAs")] + // TODO: Using a custom type instead of referring to E:G2Affine breaks the serialization + // pub shares: Vec>, pub shares: Vec, /// Proof of Knowledge - #[serde_as(as = "ferveo_common::serialization::SerdeAs")] + #[serde_as(as = "serialization::SerdeAs")] pub sigma: E::G2Affine, /// Marker struct to distinguish between aggregated and @@ -123,6 +124,15 @@ pub struct PubliclyVerifiableSS { phantom: PhantomData, } +// Manually implementing Hash trait because of the PhantomData +impl Hash for PubliclyVerifiableSS { + fn hash(&self, state: &mut H) { + self.coeffs.hash(state); + self.shares.hash(state); + self.sigma.hash(state); + } +} + impl PubliclyVerifiableSS { /// Create a new PVSS instance /// `s`: the secret constant coefficient to share @@ -171,7 +181,7 @@ impl PubliclyVerifiableSS { // TODO: Cross check proof of knowledge check with the whitepaper; this check proves that there is a relationship between the secret and the pvss transcript // Sigma is a proof of knowledge of the secret, sigma = h^s - let sigma = E::G2Affine::generator().mul(*s).into(); //todo hash to curve + let sigma = E::G2Affine::generator().mul(*s).into(); // TODO: Use hash-to-curve here let vss = Self { coeffs, shares, @@ -204,7 +214,7 @@ impl PubliclyVerifiableSS { /// If aggregation fails, a validator needs to know that their pvss /// transcript was at fault so that the can issue a new one. This /// function may also be used for that purpose. 
- pub fn verify_full(&self, dkg: &PubliclyVerifiableDkg) -> bool { + pub fn verify_full(&self, dkg: &PubliclyVerifiableDkg) -> Result { let validators = dkg.validators.values().cloned().collect::>(); do_verify_full( &self.coeffs, @@ -223,29 +233,35 @@ pub fn do_verify_full( pvss_params: &PubliclyVerifiableParams, validators: &[Validator], domain: &ark_poly::GeneralEvaluationDomain, -) -> bool { +) -> Result { + assert_no_share_duplicates(validators)?; + let mut commitment = batch_to_projective_g1::(pvss_coefficients); domain.fft_in_place(&mut commitment); - assert_no_share_duplicates(validators).expect("Validators must be unique"); - // Each validator checks that their share is correct - validators - .iter() - .zip(pvss_encrypted_shares.iter()) - .enumerate() - .all(|(share_index, (validator, y_i))| { - // TODO: Check #3 is missing - // See #3 in 4.2.3 section of https://eprint.iacr.org/2022/898.pdf - - // Validator checks aggregated shares against commitment - let ek_i = validator.public_key.encryption_key.into_group(); - let a_i = &commitment[share_index]; - // We verify that e(G, Y_i) = e(A_i, ek_i) for validator i - // See #4 in 4.2.3 section of https://eprint.iacr.org/2022/898.pdf - // e(G,Y) = e(A, ek) - E::pairing(pvss_params.g, *y_i) == E::pairing(a_i, ek_i) - }) + for validator in validators { + // TODO: Check #3 is missing + // See #3 in 4.2.3 section of https://eprint.iacr.org/2022/898.pdf + + let y_i = pvss_encrypted_shares + .get(validator.share_index as usize) + .ok_or(Error::InvalidShareIndex(validator.share_index))?; + // Validator checks aggregated shares against commitment + let ek_i = validator.public_key.encryption_key.into_group(); + let a_i = commitment + .get(validator.share_index as usize) + .ok_or(Error::InvalidShareIndex(validator.share_index))?; + // We verify that e(G, Y_i) = e(A_i, ek_i) for validator i + // See #4 in 4.2.3 section of https://eprint.iacr.org/2022/898.pdf + // e(G,Y) = e(A, ek) + let is_valid = E::pairing(pvss_params.g, *y_i) == E::pairing(a_i, ek_i); + if !is_valid { + return Ok(false); + } + } + + Ok(true) } pub fn do_verify_aggregation( @@ -254,7 +270,7 @@ pub fn do_verify_aggregation( pvss_params: &PubliclyVerifiableParams, validators: &[Validator], domain: &ark_poly::GeneralEvaluationDomain, - vss: &PVSSMap, + pvss: &[PubliclyVerifiableSS], ) -> Result { let is_valid = do_verify_full( pvss_agg_coefficients, @@ -262,16 +278,15 @@ pub fn do_verify_aggregation( pvss_params, validators, domain, - ); + )?; if !is_valid { return Err(Error::InvalidTranscriptAggregate); } // Now, we verify that the aggregated PVSS transcript is a valid aggregation - let mut y = E::G1::zero(); - for pvss in vss.values() { - y += pvss.coeffs[0].into_group(); - } + let y = pvss + .iter() + .fold(E::G1::zero(), |acc, pvss| acc + pvss.coeffs[0].into_group()); if y.into_affine() == pvss_agg_coefficients[0] { Ok(true) } else { @@ -288,6 +303,7 @@ impl PubliclyVerifiableSS { pub fn verify_aggregation( &self, dkg: &PubliclyVerifiableDkg, + pvss: &[PubliclyVerifiableSS], ) -> Result { let validators = dkg.validators.values().cloned().collect::>(); do_verify_aggregation( @@ -296,102 +312,123 @@ impl PubliclyVerifiableSS { &dkg.pvss_params, &validators, &dkg.domain, - &dkg.vss, + pvss, ) } pub fn decrypt_private_key_share( &self, - validator_decryption_key: &E::ScalarField, - share_index: usize, + validator_keypair: &Keypair, + share_index: u32, ) -> Result> { - // Decrypt private key shares 
https://nikkolasg.github.io/ferveo/pvss.html#validator-decryption-of-private-key-shares - let private_key_share = self - .shares - .get(share_index) - .ok_or(Error::InvalidShareIndex(share_index as u32))? - .mul( - validator_decryption_key - .inverse() - .expect("Validator decryption key must have an inverse"), - ) - .into_affine(); - Ok(PrivateKeyShare { private_key_share }) + // Decrypt private key share https://nikkolasg.github.io/ferveo/pvss.html#validator-decryption-of-private-key-shares + let private_key_share = + self.shares + .get(share_index as usize) + .ok_or(Error::InvalidShareIndex(share_index))? + .mul( + validator_keypair.decryption_key.inverse().expect( + "Validator decryption key must have an inverse", + ), + ) + .into_affine(); + Ok(PrivateKeyShare(ferveo_tdec::PrivateKeyShare( + private_key_share, + ))) } - pub fn make_decryption_share_simple( + /// Make a decryption share (simple variant) for a given ciphertext + /// With this method, we wrap the PrivateKeyShare method to avoid exposing the private key share + // TODO: Consider deprecating to use PrivateKeyShare method directly + pub fn create_decryption_share_simple( &self, - ciphertext: &CiphertextHeader, + ciphertext_header: &CiphertextHeader, aad: &[u8], - validator_decryption_key: &E::ScalarField, - share_index: usize, - g_inv: &E::G1Prepared, + validator_keypair: &Keypair, + share_index: u32, ) -> Result> { - let private_key_share = self - .decrypt_private_key_share(validator_decryption_key, share_index)?; - DecryptionShareSimple::create( - validator_decryption_key, - &private_key_share, - ciphertext, - aad, - g_inv, - ) - .map_err(|e| e.into()) + self.decrypt_private_key_share(validator_keypair, share_index)? + .create_decryption_share_simple( + ciphertext_header, + aad, + validator_keypair, + ) } - pub fn make_decryption_share_simple_precomputed( + /// Make a decryption share (precomputed variant) for a given ciphertext + /// With this method, we wrap the PrivateKeyShare method to avoid exposing the private key share + // TODO: Consider deprecating to use PrivateKeyShare method directly + pub fn create_decryption_share_precomputed( &self, ciphertext_header: &CiphertextHeader, aad: &[u8], - validator_decryption_key: &E::ScalarField, - share_index: usize, - domain_points: &[E::ScalarField], - g_inv: &E::G1Prepared, + validator_keypair: &Keypair, + share_index: u32, + domain_points: &HashMap>, ) -> Result> { - let private_key_share = self - .decrypt_private_key_share(validator_decryption_key, share_index)?; - - // We use the `prepare_combine_simple` function to precompute the lagrange coefficients - let lagrange_coeffs = prepare_combine_simple::(domain_points); - - DecryptionSharePrecomputed::new( - share_index, - validator_decryption_key, - &private_key_share, - ciphertext_header, - aad, - &lagrange_coeffs[share_index], - g_inv, - ) - .map_err(|e| e.into()) + self.decrypt_private_key_share(validator_keypair, share_index)? + .create_decryption_share_precomputed( + ciphertext_header, + aad, + validator_keypair, + share_index, + domain_points, + ) } - // TODO: Consider relocate to different place, maybe PrivateKeyShare? 
(see #162, #163) - pub fn update_private_key_share_for_recovery( + // TODO: Consider deprecating to use PrivateKeyShare method directly + pub fn create_updated_private_key_share( &self, - validator_decryption_key: &E::ScalarField, - share_index: usize, - share_updates: &[E::G2], - ) -> Result> { - // Retrieves their private key share - let private_key_share = self - .decrypt_private_key_share(validator_decryption_key, share_index)?; - - // And updates their share - Ok(apply_updates_to_private_share::( - &private_key_share, - share_updates, - )) + validator_keypair: &Keypair, + share_index: u32, + share_updates: &[impl PrivateKeyShareUpdate], + ) -> Result> { + // Retrieve the private key share and apply the updates + Ok(self + .decrypt_private_key_share(validator_keypair, share_index)? + .create_updated_key_share(share_updates)) + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +pub struct AggregatedTranscript { + #[serde(bound( + serialize = "PubliclyVerifiableSS: Serialize", + deserialize = "PubliclyVerifiableSS: DeserializeOwned" + ))] + pub aggregate: PubliclyVerifiableSS, + #[serde(bound( + serialize = "ferveo_tdec::PublicKey: Serialize", + deserialize = "ferveo_tdec::PublicKey: DeserializeOwned" + ))] + pub public_key: ferveo_tdec::PublicKey, +} + +impl AggregatedTranscript { + pub fn from_transcripts( + transcripts: &[PubliclyVerifiableSS], + ) -> Result { + let aggregate = aggregate(transcripts)?; + let public_key = transcripts + .iter() + .map(|pvss| pvss.coeffs[0].into_group()) + .sum::() + .into_affine(); + let public_key = ferveo_tdec::PublicKey::(public_key); + Ok(AggregatedTranscript { + aggregate, + public_key, + }) } } /// Aggregate the PVSS instances in `pvss` from DKG session `dkg` /// into a new PVSS instance /// See: https://nikkolasg.github.io/ferveo/pvss.html?highlight=aggregate#aggregation -pub(crate) fn aggregate( - pvss_list: &[PubliclyVerifiableSS], +fn aggregate( + transcripts: &[PubliclyVerifiableSS], ) -> Result> { - let mut pvss_iter = pvss_list.iter(); + let mut pvss_iter = transcripts.iter(); let first_pvss = pvss_iter .next() .ok_or_else(|| Error::NoTranscriptsToAggregate)?; @@ -434,15 +471,31 @@ mod test_pvss { use super::*; use crate::test_common::*; + /// Test that an aggregate message will fail to verify if the + /// security threshold is not met + #[test] + fn test_aggregate_wont_verify_if_under_threshold() { + let (dkg, _, messages) = setup_dealt_dkg_with_n_transcript_dealt( + SECURITY_THRESHOLD, + SHARES_NUM, + VALIDATORS_NUM, + SECURITY_THRESHOLD - 1, + ); + let pvss_list = + messages.iter().map(|(_, pvss)| pvss).cloned().collect_vec(); + let aggregate = aggregate(&pvss_list).unwrap(); + assert!(aggregate.verify_aggregation(&dkg, &pvss_list).unwrap()); + } + /// Test the happy flow such that the PVSS with the correct form is created /// and that appropriate validations pass - #[test_case(4,4; "number of validators is equal to the number of shares")] - #[test_case(4,6; "number of validators is greater than the number of shares")] + #[test_case(4, 4; "number of validators is equal to the number of shares")] + #[test_case(4, 6; "number of validators is greater than the number of shares")] fn test_new_pvss(shares_num: u32, validators_num: u32) { let rng = &mut ark_std::test_rng(); let security_threshold = shares_num - 1; - let (dkg, _) = setup_dealt_dkg_with_n_validators( + let (dkg, _, _) = setup_dealt_dkg_with_n_validators( security_threshold, shares_num, validators_num, @@ -459,12 +512,12 @@ mod test_pvss { ); // Check that the 
correct number of shares were created assert_eq!(pvss.shares.len(), dkg.validators.len()); - // Check that the prove of knowledge is correct + // Check that the proof of knowledge is correct assert_eq!(pvss.sigma, G2::generator().mul(s)); // Check that the optimistic verify returns true assert!(pvss.verify_optimistic()); // Check that the full verify returns true - assert!(pvss.verify_full(&dkg)); + assert!(pvss.verify_full(&dkg).unwrap()); } /// Check that if the proof of knowledge is wrong, @@ -497,7 +550,7 @@ mod test_pvss { // So far, everything works assert!(pvss.verify_optimistic()); - assert!(pvss.verify_full(&dkg)); + assert!(pvss.verify_full(&dkg).unwrap()); // Now, we're going to tamper with the PVSS shares let mut bad_pvss = pvss; @@ -506,21 +559,22 @@ mod test_pvss { // Optimistic verification should not catch this issue assert!(bad_pvss.verify_optimistic()); // Full verification should catch this issue - assert!(!bad_pvss.verify_full(&dkg)); + assert!(!bad_pvss.verify_full(&dkg).unwrap()); } /// Check that happy flow of aggregating PVSS transcripts /// has the correct form and it's validations passes - #[test_case(4,4; "number of validators is equal to the number of shares")] - #[test_case(4,6; "number of validators is greater than the number of shares")] + #[test_case(4, 4; "number of validators is equal to the number of shares")] + #[test_case(4, 6; "number of validators is greater than the number of shares")] fn test_aggregate_pvss(shares_num: u32, validators_num: u32) { let security_threshold = shares_num - 1; - let (dkg, _) = setup_dealt_dkg_with_n_validators( + let (dkg, _, messages) = setup_dealt_dkg_with_n_validators( security_threshold, shares_num, validators_num, ); - let pvss_list = dkg.vss.values().cloned().collect::>(); + let pvss_list = + messages.iter().map(|(_, pvss)| pvss).cloned().collect_vec(); let aggregate = aggregate(&pvss_list).unwrap(); // Check that a polynomial of the correct degree was created assert_eq!( @@ -532,27 +586,29 @@ mod test_pvss { // Check that the optimistic verify returns true assert!(aggregate.verify_optimistic()); // Check that the full verify returns true - assert!(aggregate.verify_full(&dkg)); + assert!(aggregate.verify_full(&dkg).unwrap()); // Check that the verification of aggregation passes - assert!(aggregate.verify_aggregation(&dkg).expect("Test failed"),); + assert!(aggregate + .verify_aggregation(&dkg, &pvss_list) + .expect("Test failed")); } /// Check that if the aggregated PVSS transcript has an /// incorrect constant term, the verification fails #[test] fn test_verify_aggregation_fails_if_constant_term_wrong() { - let (dkg, _) = setup_dealt_dkg(); - let pvss_list = dkg.vss.values().cloned().collect::>(); + let (dkg, _, messages) = setup_dealt_dkg(); + let pvss_list = + messages.iter().map(|(_, pvss)| pvss).cloned().collect_vec(); let mut aggregated = aggregate(&pvss_list).unwrap(); while aggregated.coeffs[0] == G1::zero() { - let (dkg, _) = setup_dkg(0); - let pvss_list = dkg.vss.values().cloned().collect::>(); + let (_dkg, _) = setup_dkg(0); aggregated = aggregate(&pvss_list).unwrap(); } aggregated.coeffs[0] = G1::zero(); assert_eq!( aggregated - .verify_aggregation(&dkg) + .verify_aggregation(&dkg, &pvss_list) .expect_err("Test failed") .to_string(), "Transcript aggregate doesn't match the received PVSS instances" diff --git a/ferveo/src/refresh.rs b/ferveo/src/refresh.rs index b02eba3b..d7700cfa 100644 --- a/ferveo/src/refresh.rs +++ b/ferveo/src/refresh.rs @@ -1,117 +1,318 @@ -use std::{ops::Mul, usize}; +use 
std::{collections::HashMap, ops::Mul, usize}; -use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; +use ark_ec::{pairing::Pairing, CurveGroup}; use ark_ff::Zero; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; -use ferveo_tdec::{lagrange_basis_at, PrivateKeyShare}; -use itertools::zip_eq; +use ferveo_common::Keypair; +use ferveo_tdec::{ + lagrange_basis_at, prepare_combine_simple, CiphertextHeader, + DecryptionSharePrecomputed, DecryptionShareSimple, +}; +use itertools::{zip_eq, Itertools}; use rand_core::RngCore; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use zeroize::ZeroizeOnDrop; + +use crate::{DomainPoint, Error, PubliclyVerifiableParams, Result}; + +// TODO: Rename refresh.rs to key_share.rs? + +type InnerPrivateKeyShare = ferveo_tdec::PrivateKeyShare; + +/// Private key share held by a participant in the DKG protocol. +#[derive( + Debug, Clone, PartialEq, Eq, ZeroizeOnDrop, Serialize, Deserialize, +)] +pub struct PrivateKeyShare( + #[serde(bound( + serialize = "ferveo_tdec::PrivateKeyShare: Serialize", + deserialize = "ferveo_tdec::PrivateKeyShare: DeserializeOwned" + ))] + pub InnerPrivateKeyShare, +); + +impl PrivateKeyShare { + pub fn new(private_key_share: InnerPrivateKeyShare) -> Self { + Self(private_key_share) + } +} -// SHARE UPDATE FUNCTIONS: +impl PrivateKeyShare { + /// From PSS paper, section 4.2.3, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) + pub fn create_updated_key_share( + &self, + share_updates: &[impl PrivateKeyShareUpdate], + ) -> UpdatedPrivateKeyShare { + let updated_key_share = share_updates + .iter() + .fold(self.0 .0, |acc, delta| (acc + delta.inner().0).into()); + let updated_key_share = ferveo_tdec::PrivateKeyShare(updated_key_share); + UpdatedPrivateKeyShare(updated_key_share) + } -/// From PSS paper, section 4.2.1, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) -pub fn prepare_share_updates_for_recovery( - domain_points: &[E::ScalarField], - h: &E::G2Affine, - x_r: &E::ScalarField, - threshold: usize, - rng: &mut impl RngCore, -) -> Vec { - // Update polynomial has root at x_r - prepare_share_updates_with_root::(domain_points, h, x_r, threshold, rng) -} + /// From the PSS paper, section 4.2.4, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) + /// `x_r` is the point at which the share is to be recovered + pub fn recover_share_from_updated_private_shares( + x_r: &DomainPoint, + domain_points: &HashMap>, + updated_shares: &HashMap>, + ) -> Result> { + // Pick the domain points and updated shares according to share index + let mut domain_points_ = vec![]; + let mut updated_shares_ = vec![]; + for share_index in updated_shares.keys().sorted() { + domain_points_.push( + *domain_points + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index))?, + ); + updated_shares_.push( + updated_shares + .get(share_index) + .ok_or(Error::InvalidShareIndex(*share_index))? 
+ .0 + .clone(), + ); + } -// TODO: Consider relocating to PrivateKeyShare (see #162, #163) -/// From PSS paper, section 4.2.3, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) -pub fn apply_updates_to_private_share( - private_key_share: &PrivateKeyShare, - share_updates: &[E::G2], -) -> PrivateKeyShare { - let private_key_share = share_updates - .iter() - .fold( - private_key_share.private_key_share.into_group(), - |acc, delta| acc + delta, + // Interpolate new shares to recover y_r + let lagrange = lagrange_basis_at::(&domain_points_, x_r); + let prods = + zip_eq(updated_shares_, lagrange).map(|(y_j, l)| y_j.0.mul(l)); + let y_r = prods.fold(E::G2::zero(), |acc, y_j| acc + y_j); + Ok(PrivateKeyShare(ferveo_tdec::PrivateKeyShare( + y_r.into_affine(), + ))) + } + + pub fn create_decryption_share_simple( + &self, + ciphertext_header: &CiphertextHeader, + aad: &[u8], + validator_keypair: &Keypair, + ) -> Result> { + let g_inv = PubliclyVerifiableParams::::default().g_inv(); + DecryptionShareSimple::create( + &validator_keypair.decryption_key, + &self.0, + ciphertext_header, + aad, + &g_inv, ) - .into_affine(); - PrivateKeyShare { private_key_share } + .map_err(|e| e.into()) + } + + /// In precomputed variant, we offload some of the decryption related computation to the server-side: + /// We use the `prepare_combine_simple` function to precompute the lagrange coefficients + pub fn create_decryption_share_precomputed( + &self, + ciphertext_header: &CiphertextHeader, + aad: &[u8], + validator_keypair: &Keypair, + share_index: u32, + domain_points_map: &HashMap>, + ) -> Result> { + // We need to turn the domain points into a vector, and sort it by share index + let mut domain_points = domain_points_map + .iter() + .map(|(share_index, domain_point)| (*share_index, *domain_point)) + .collect::>(); + domain_points.sort_by_key(|(share_index, _)| *share_index); + + // Now, we have to pass the domain points to the `prepare_combine_simple` function + // and use the resulting lagrange coefficients to create the decryption share + + let only_domain_points = domain_points + .iter() + .map(|(_, domain_point)| *domain_point) + .collect::>(); + let lagrange_coeffs = prepare_combine_simple::(&only_domain_points); + + // Before we pick the lagrange coefficient for the current share index, we need + // to map the share index to the index in the domain points vector + // Given that we sorted the domain points by share index, the first element in the vector + // will correspond to the smallest share index, second to the second smallest, and so on + + let sorted_share_indices = domain_points + .iter() + .enumerate() + .map(|(adjusted_share_index, (share_index, _))| { + (*share_index, adjusted_share_index) + }) + .collect::>(); + let adjusted_share_index = *sorted_share_indices + .get(&share_index) + .ok_or(Error::InvalidShareIndex(share_index))?; + + // Finally, pick the lagrange coefficient for the current share index + let lagrange_coeff = &lagrange_coeffs[adjusted_share_index]; + let g_inv = PubliclyVerifiableParams::::default().g_inv(); + DecryptionSharePrecomputed::create( + share_index as usize, + &validator_keypair.decryption_key, + &self.0, + ciphertext_header, + aad, + lagrange_coeff, + &g_inv, + ) + .map_err(|e| e.into()) + } +} + +/// An updated private key share, resulting from an intermediate step in a share recovery or refresh operation. 
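Producing an updated share like this is plain point addition in G2: each received update is a blinded evaluation h^{d_j(omega_i)}, and the updated share is Z_i' = Z_i + sum_j U_{j,i}. A compact sketch, assuming BLS12-381 (`apply_updates` is an illustrative stand-in for `create_updated_key_share`, not the crate's API):

use ark_bls12_381::G2Projective;
use ark_std::{test_rng, UniformRand};

// The refreshed/recovered share is Z_i' = Z_i + sum_j U_{j,i},
// where each U_{j,i} is an update received from participant j.
fn apply_updates(share: G2Projective, updates: &[G2Projective]) -> G2Projective {
    updates.iter().fold(share, |acc, delta| acc + delta)
}

fn main() {
    let rng = &mut test_rng();
    let share = G2Projective::rand(rng);
    let updates: Vec<G2Projective> =
        (0..3).map(|_| G2Projective::rand(rng)).collect();

    // The order in which updates arrive does not matter: group addition is commutative
    let mut reversed = updates.clone();
    reversed.reverse();
    assert_eq!(apply_updates(share, &updates), apply_updates(share, &reversed));
}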
+#[derive( + Debug, Clone, PartialEq, Eq, ZeroizeOnDrop, Serialize, Deserialize, +)] +pub struct UpdatedPrivateKeyShare( + #[serde(bound( + serialize = "ferveo_tdec::PrivateKeyShare: Serialize", + deserialize = "ferveo_tdec::PrivateKeyShare: DeserializeOwned" + ))] + pub(crate) InnerPrivateKeyShare, +); + +impl UpdatedPrivateKeyShare { + /// One-way conversion from `UpdatedPrivateKeyShare` to `PrivateKeyShare`. + /// Use this method to eject from the `UpdatedPrivateKeyShare` type and use the resulting `PrivateKeyShare` in further operations. + pub fn inner(&self) -> PrivateKeyShare { + PrivateKeyShare(self.0.clone()) + } } -/// From the PSS paper, section 4.2.4, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) -pub fn recover_share_from_updated_private_shares( - x_r: &E::ScalarField, - domain_points: &[E::ScalarField], - updated_private_shares: &[PrivateKeyShare], -) -> PrivateKeyShare { - // Interpolate new shares to recover y_r - let lagrange = lagrange_basis_at::(domain_points, x_r); - let prods = zip_eq(updated_private_shares, lagrange) - .map(|(y_j, l)| y_j.private_key_share.mul(l)); - let y_r = prods.fold(E::G2::zero(), |acc, y_j| acc + y_j); - - PrivateKeyShare { - private_key_share: y_r.into_affine(), +impl UpdatedPrivateKeyShare { + pub fn new(private_key_share: InnerPrivateKeyShare) -> Self { + Self(private_key_share) } } -// SHARE REFRESH FUNCTIONS: +/// Trait for types that can be used to update a private key share. +pub trait PrivateKeyShareUpdate { + fn inner(&self) -> &InnerPrivateKeyShare; +} -pub fn prepare_share_updates_for_refresh( - domain_points: &[E::ScalarField], - h: &E::G2Affine, - threshold: usize, - rng: &mut impl RngCore, -) -> Vec { - // Update polynomial has root at 0 - prepare_share_updates_with_root::( - domain_points, - h, - &E::ScalarField::zero(), - threshold, - rng, - ) +/// An update to a private key share generated by a participant in a share recovery operation. +#[derive(Debug, Clone, PartialEq, Eq, ZeroizeOnDrop)] +pub struct ShareRecoveryUpdate(pub(crate) InnerPrivateKeyShare); + +impl PrivateKeyShareUpdate for ShareRecoveryUpdate { + fn inner(&self) -> &InnerPrivateKeyShare { + &self.0 + } } -// UTILS: +impl ShareRecoveryUpdate { + /// From PSS paper, section 4.2.1, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) + pub fn create_share_updates( + domain_points: &HashMap>, + h: &E::G2Affine, + x_r: &DomainPoint, + threshold: u32, + rng: &mut impl RngCore, + ) -> HashMap> { + // Update polynomial has root at x_r + prepare_share_updates_with_root::( + domain_points, + h, + x_r, + threshold, + rng, + ) + .into_iter() + .map(|(share_index, share_update)| (share_index, Self(share_update))) + .collect() + } +} + +/// An update to a private key share generated by a participant in a share refresh operation. 
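Generating these refresh updates mirrors the recovery case: each participant samples a random polynomial d(x) with d(0) = 0 and hands every other participant the evaluation at that participant's domain point, lifted into G2. A small sketch of that step, assuming BLS12-381 and illustrative domain points (the names `d`, `threshold` and the points 1..=4 are not the crate's API):

use ark_bls12_381::{Fr, G2Projective};
use ark_ec::Group;
use ark_ff::Zero;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
use ark_std::test_rng;

fn main() {
    let rng = &mut test_rng();
    let threshold = 3usize;
    let h = G2Projective::generator();

    // Random update polynomial of degree threshold - 1 with its free term zeroed, so d(0) = 0
    let mut d = DensePolynomial::<Fr>::rand(threshold - 1, rng);
    d.coeffs[0] = Fr::zero();
    assert!(d.evaluate(&Fr::zero()).is_zero());

    // One update per participant: U_i = h^{d(x_i)} at that participant's domain point x_i
    let domain_points: Vec<Fr> = (1..=4u64).map(Fr::from).collect();
    let updates: Vec<G2Projective> =
        domain_points.iter().map(|x_i| h * d.evaluate(x_i)).collect();
    assert_eq!(updates.len(), domain_points.len());
}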
+#[derive( + Serialize, Deserialize, Debug, Clone, PartialEq, Eq, ZeroizeOnDrop, +)] +pub struct ShareRefreshUpdate( + #[serde(bound( + serialize = "ferveo_tdec::PrivateKeyShare: Serialize", + deserialize = "ferveo_tdec::PrivateKeyShare: DeserializeOwned" + ))] + pub(crate) ferveo_tdec::PrivateKeyShare, +); + +impl PrivateKeyShareUpdate for ShareRefreshUpdate { + fn inner(&self) -> &InnerPrivateKeyShare { + &self.0 + } +} +impl ShareRefreshUpdate { + /// From PSS paper, section 4.2.1, (https://link.springer.com/content/pdf/10.1007/3-540-44750-4_27.pdf) + pub fn create_share_updates( + domain_points: &HashMap>, + h: &E::G2Affine, + threshold: u32, + rng: &mut impl RngCore, + ) -> HashMap> { + // Update polynomial has root at 0 + prepare_share_updates_with_root::( + domain_points, + h, + &DomainPoint::::zero(), + threshold, + rng, + ) + .into_iter() + .map(|(share_index, share_update)| { + (share_index, ShareRefreshUpdate(share_update)) + }) + .collect() + } +} + +/// Prepare share updates with a given root +/// This is a helper function for `ShareRecoveryUpdate::create_share_updates_for_recovery` and `ShareRefreshUpdate::create_share_updates_for_refresh` +/// It generates a new random polynomial with a defined root and evaluates it at each of the participants' indices. +/// The result is a list of share updates. +/// We represent the share updates as `InnerPrivateKeyShare` to avoid dependency on the concrete implementation of `PrivateKeyShareUpdate`. fn prepare_share_updates_with_root( - domain_points: &[E::ScalarField], + domain_points: &HashMap>, h: &E::G2Affine, - root: &E::ScalarField, - threshold: usize, + root: &DomainPoint, + threshold: u32, rng: &mut impl RngCore, -) -> Vec { - // Generate a new random polynomial with defined root +) -> HashMap> { + // Generate a new random polynomial with a defined root let d_i = make_random_polynomial_with_root::(threshold - 1, root, rng); // Now, we need to evaluate the polynomial at each of participants' indices domain_points .iter() - .map(|x_i| { + .map(|(share_index, x_i)| { let eval = d_i.evaluate(x_i); - h.mul(eval) + let share_update = + ferveo_tdec::PrivateKeyShare(h.mul(eval).into_affine()); + (*share_index, share_update) }) - .collect() + .collect::>() } -pub fn make_random_polynomial_with_root( - degree: usize, - root: &E::ScalarField, +/// Generate a random polynomial with a given root +fn make_random_polynomial_with_root( + degree: u32, + root: &DomainPoint, rng: &mut impl RngCore, -) -> DensePolynomial { +) -> DensePolynomial> { // [c_0, c_1, ..., c_{degree}] (Random polynomial) - let mut poly = DensePolynomial::::rand(degree, rng); + let mut poly = + DensePolynomial::>::rand(degree as usize, rng); // [0, c_1, ... 
, c_{degree}] (We zeroize the free term) - poly[0] = E::ScalarField::zero(); + poly[0] = DomainPoint::::zero(); // Now, we calculate a new free term so that `poly(root) = 0` - let new_c_0 = E::ScalarField::zero() - poly.evaluate(root); + let new_c_0 = DomainPoint::::zero() - poly.evaluate(root); poly[0] = new_c_0; // Evaluating the polynomial at the root should result in 0 - debug_assert!(poly.evaluate(root) == E::ScalarField::zero()); - debug_assert!(poly.coeffs.len() == degree + 1); + debug_assert!(poly.evaluate(root) == DomainPoint::::zero()); + debug_assert!(poly.coeffs.len() == (degree + 1) as usize); poly } @@ -124,75 +325,83 @@ mod tests_refresh { use ark_std::{test_rng, UniformRand, Zero}; use ferveo_tdec::{ test_common::setup_simple, PrivateDecryptionContextSimple, - PrivateKeyShare, }; use rand_core::RngCore; - use test_case::test_matrix; + use test_case::{test_case, test_matrix}; use crate::{ - apply_updates_to_private_share, prepare_share_updates_for_recovery, - prepare_share_updates_for_refresh, - recover_share_from_updated_private_shares, test_common::*, + test_common::*, PrivateKeyShare, ShareRecoveryUpdate, + ShareRefreshUpdate, UpdatedPrivateKeyShare, }; - fn make_new_share_fragments_for_recovery( + /// Using tdec test utilities here instead of PVSS to test the internals of the shared key recovery + fn create_updated_private_key_shares( rng: &mut R, - threshold: usize, + threshold: u32, x_r: &Fr, remaining_participants: &[PrivateDecryptionContextSimple], - ) -> Vec> { + ) -> HashMap> { // Each participant prepares an update for each other participant - // TODO: Extract as parameter - let domain_points = remaining_participants[0] - .public_decryption_contexts + let domain_points = remaining_participants .iter() - .map(|c| c.domain) - .collect::>(); + .map(|c| { + (c.index as u32, c.public_decryption_contexts[c.index].domain) + }) + .collect::>(); let h = remaining_participants[0].public_decryption_contexts[0].h; let share_updates = remaining_participants .iter() .map(|p| { - let deltas_i = prepare_share_updates_for_recovery::( + let share_updates = ShareRecoveryUpdate::create_share_updates( &domain_points, &h, x_r, threshold, rng, ); - (p.index, deltas_i) + (p.index as u32, share_updates) }) - .collect::>(); + .collect::>(); // Participants share updates and update their shares - let new_share_fragments: Vec<_> = remaining_participants + let updated_private_key_shares = remaining_participants .iter() .map(|p| { // Current participant receives updates from other participants let updates_for_participant: Vec<_> = share_updates .values() - .map(|updates| *updates.get(p.index).unwrap()) + .map(|updates| { + updates.get(&(p.index as u32)).cloned().unwrap() + }) .collect(); // And updates their share - apply_updates_to_private_share::( - &p.private_key_share, - &updates_for_participant, - ) + let updated_share = + PrivateKeyShare(p.private_key_share.clone()) + .create_updated_key_share(&updates_for_participant); + (p.index as u32, updated_share) }) - .collect(); + .collect::>(); - new_share_fragments + updated_private_key_shares } /// Ñ parties (where t <= Ñ <= N) jointly execute a "share recovery" algorithm, and the output is 1 new share. /// The new share is intended to restore a previously existing share, e.g., due to loss or corruption. 
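The recovery itself is Lagrange interpolation in the exponent: given key shares Z_i = h^{phi(x_i)} held at distinct domain points, the share at a new point x_r is Z_r = sum_i l_i(x_r) * Z_i, which is what the test below exercises through `recover_share_from_updated_private_shares`. A self-contained sketch under the same BLS12-381 assumption (the helper `lagrange_coeffs` and the sample points are illustrative, not the crate's API):

use ark_bls12_381::{Fr, G2Projective};
use ark_ec::Group;
use ark_ff::{Field, UniformRand, Zero};
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
use ark_std::test_rng;

// l_i(x) = prod_{j != i} (x - x_j) / (x_i - x_j)
fn lagrange_coeffs(domain: &[Fr], x: &Fr) -> Vec<Fr> {
    domain
        .iter()
        .enumerate()
        .map(|(i, x_i)| {
            domain
                .iter()
                .enumerate()
                .filter(|(j, _)| *j != i)
                .map(|(_, x_j)| (*x - x_j) * (*x_i - x_j).inverse().unwrap())
                .product()
        })
        .collect()
}

fn main() {
    let rng = &mut test_rng();
    let h = G2Projective::generator();

    // A degree-2 secret polynomial and three key shares in the exponent
    let phi = DensePolynomial::<Fr>::rand(2, rng);
    let domain: Vec<Fr> = (1..=3u64).map(Fr::from).collect();
    let shares: Vec<G2Projective> =
        domain.iter().map(|x_i| h * phi.evaluate(x_i)).collect();

    // Recover the share at a fresh point x_r without ever learning phi itself
    let x_r = Fr::rand(rng);
    let lagrange = lagrange_coeffs(&domain, &x_r);
    let recovered = shares
        .iter()
        .zip(lagrange.iter())
        .fold(G2Projective::zero(), |acc, (z_i, l_i)| acc + *z_i * *l_i);
    assert_eq!(recovered, h * phi.evaluate(&x_r));
}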
- #[test_matrix([4, 7, 11, 16])] - fn tdec_simple_variant_share_recovery_at_selected_point(shares_num: usize) { + #[test_case(4, 4; "number of shares (validators) is a power of 2")] + #[test_case(7, 7; "number of shares (validators) is not a power of 2")] + fn tdec_simple_variant_share_recovery_at_selected_point( + shares_num: u32, + _validators_num: u32, + ) { let rng = &mut test_rng(); let security_threshold = shares_num * 2 / 3; - let (_, _, mut contexts) = - setup_simple::(security_threshold, shares_num, rng); + let (_, _, mut contexts) = setup_simple::( + shares_num as usize, + security_threshold as usize, + rng, + ); // Prepare participants @@ -203,62 +412,80 @@ mod tests_refresh { .last() .unwrap() .domain; - let original_private_key_share = selected_participant.private_key_share; + let original_private_key_share = + PrivateKeyShare(selected_participant.private_key_share); - // Remove one participant from the contexts and all nested structures + // Remove the selected participant from the contexts and all nested structures let mut remaining_participants = contexts; for p in &mut remaining_participants { p.public_decryption_contexts.pop().unwrap(); } // Each participant prepares an update for each other participant, and uses it to create a new share fragment - let new_share_fragments = make_new_share_fragments_for_recovery( + let updated_private_key_shares = create_updated_private_key_shares( rng, security_threshold, &x_r, &remaining_participants, ); + // We only need `security_threshold` updates to recover the original share + let updated_private_key_shares = updated_private_key_shares + .into_iter() + .take(security_threshold as usize) + .collect::>(); // Now, we have to combine new share fragments into a new share - let domain_points = &remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); - let new_private_key_share = recover_share_from_updated_private_shares( - &x_r, - &domain_points[..security_threshold], - &new_share_fragments[..security_threshold], - ); - + let domain_points = remaining_participants + .into_iter() + .map(|ctxt| { + ( + ctxt.index as u32, + ctxt.public_decryption_contexts[ctxt.index].domain, + ) + }) + .collect::>(); + let new_private_key_share = + PrivateKeyShare::recover_share_from_updated_private_shares( + &x_r, + &domain_points, + &updated_private_key_shares, + ) + .unwrap(); assert_eq!(new_private_key_share, original_private_key_share); // If we don't have enough private share updates, the resulting private share will be incorrect - assert_eq!(domain_points.len(), new_share_fragments.len()); + let not_enough_shares = updated_private_key_shares + .into_iter() + .take(security_threshold as usize - 1) + .collect::>(); let incorrect_private_key_share = - recover_share_from_updated_private_shares( + PrivateKeyShare::recover_share_from_updated_private_shares( &x_r, - &domain_points[..(security_threshold - 1)], - &new_share_fragments[..(security_threshold - 1)], - ); - + &domain_points, + ¬_enough_shares, + ) + .unwrap(); assert_ne!(incorrect_private_key_share, original_private_key_share); } /// Ñ parties (where t <= Ñ <= N) jointly execute a "share recovery" algorithm, and the output is 1 new share. - /// The new share is independent from the previously existing shares. We can use this to on-board a new participant into an existing cohort. 
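Written out in the scalar field, the same interpolation explains why a share recovered at a random x_r can later stand in for a missing one when the secret is reconstructed at zero, which is the flow the test below follows. A rough sketch (plain Shamir arithmetic over Fr; `interpolate` and the sample points are illustrative, not the crate's API):

use ark_bls12_381::Fr;
use ark_ff::{Field, UniformRand, Zero};
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial};
use ark_std::test_rng;

// Interpolate f(x) from points (x_i, y_i) using the Lagrange formula
fn interpolate(points: &[(Fr, Fr)], x: &Fr) -> Fr {
    points
        .iter()
        .enumerate()
        .map(|(i, (x_i, y_i))| {
            let l_i: Fr = points
                .iter()
                .enumerate()
                .filter(|(j, _)| *j != i)
                .map(|(_, (x_j, _))| (*x - x_j) * (*x_i - *x_j).inverse().unwrap())
                .product();
            l_i * y_i
        })
        .sum()
}

fn main() {
    let rng = &mut test_rng();
    let threshold = 3usize;
    let phi = DensePolynomial::<Fr>::rand(threshold - 1, rng);

    // Original cohort: shares of phi at x = 1..=4
    let mut points: Vec<(Fr, Fr)> = (1..=4u64)
        .map(|i| (Fr::from(i), phi.evaluate(&Fr::from(i))))
        .collect();

    // On-board a new participant at a random point x_r
    let x_r = Fr::rand(rng);
    let y_r = interpolate(&points[..threshold], &x_r);
    assert_eq!(y_r, phi.evaluate(&x_r));

    // Any threshold-sized subset that includes the new share still recovers the secret phi(0)
    points.push((x_r, y_r));
    let subset = &points[points.len() - threshold..];
    assert_eq!(interpolate(subset, &Fr::zero()), phi.coeffs[0]);
}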
- #[test_matrix([4, 7, 11, 16])] - fn tdec_simple_variant_share_recovery_at_random_point(shares_num: usize) { + /// The new share is independent of the previously existing shares. We can use this to on-board a new participant into an existing cohort. + #[test_case(4; "number of shares (validators) is a power of 2")] + #[test_case(7; "number of shares (validators) is not a power of 2")] + fn tdec_simple_variant_share_recovery_at_random_point(shares_num: u32) { let rng = &mut test_rng(); - let threshold = shares_num * 2 / 3; + let security_threshold = shares_num * 2 / 3; - let (_, shared_private_key, mut contexts) = - setup_simple::(threshold, shares_num, rng); + let (_, shared_private_key, mut contexts) = setup_simple::( + shares_num as usize, + security_threshold as usize, + rng, + ); // Prepare participants // Remove one participant from the contexts and all nested structures - contexts.pop().unwrap(); + let removed_participant = contexts.pop().unwrap(); let mut remaining_participants = contexts.clone(); for p in &mut remaining_participants { p.public_decryption_contexts.pop().unwrap(); @@ -269,46 +496,66 @@ mod tests_refresh { // Our random point let x_r = ScalarField::rand(rng); - // Each participant prepares an update for each other participant, and uses it to create a new share fragment - let new_share_fragments = make_new_share_fragments_for_recovery( + // Each remaining participant prepares an update for every other participant, and uses it to create a new share fragment + let share_recovery_updates = create_updated_private_key_shares( rng, - threshold, + security_threshold, &x_r, &remaining_participants, ); + // We only need `threshold` updates to recover the original share + let share_recovery_updates = share_recovery_updates + .into_iter() + .take(security_threshold as usize) + .collect::>(); + let domain_points = &mut remaining_participants + .into_iter() + .map(|ctxt| { + ( + ctxt.index as u32, + ctxt.public_decryption_contexts[ctxt.index].domain, + ) + }) + .collect::>(); // Now, we have to combine new share fragments into a new share - let domain_points = &mut remaining_participants[0] - .public_decryption_contexts - .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); - let new_private_key_share = recover_share_from_updated_private_shares( - &x_r, - &domain_points[..threshold], - &new_share_fragments[..threshold], - ); - - let mut private_shares = contexts - .iter() - .cloned() - .map(|ctxt| ctxt.private_key_share) - .collect::>(); + let recovered_private_key_share = + PrivateKeyShare::recover_share_from_updated_private_shares( + &x_r, + domain_points, + &share_recovery_updates, + ) + .unwrap(); // Finally, let's recreate the shared private key from some original shares and the recovered one - domain_points.push(x_r); - private_shares.push(new_private_key_share); - let start_from = shares_num - threshold; - let new_shared_private_key = recover_share_from_updated_private_shares( - &ScalarField::zero(), - &domain_points[start_from..], - &private_shares[start_from..], + let mut private_shares = contexts + .into_iter() + .map(|ctxt| (ctxt.index as u32, ctxt.private_key_share)) + .collect::>(); + + // Need to update these to account for recovered private key share + domain_points.insert(removed_participant.index as u32, x_r); + private_shares.insert( + removed_participant.index as u32, + recovered_private_key_share.0.clone(), ); - assert_eq!( - shared_private_key, - new_shared_private_key.private_key_share - ); + // This is a workaround for a type mismatch - We need to convert the 
private shares to updated private shares + // This is just to test that we are able to recover the shared private key from the updated private shares + let updated_private_key_shares = private_shares + .into_iter() + .map(|(share_index, share)| { + (share_index, UpdatedPrivateKeyShare(share)) + }) + .collect::>(); + let new_shared_private_key = + PrivateKeyShare::recover_share_from_updated_private_shares( + &ScalarField::zero(), + domain_points, + &updated_private_key_shares, + ) + .unwrap(); + assert_eq!(shared_private_key, new_shared_private_key.0); } /// Ñ parties (where t <= Ñ <= N) jointly execute a "share refresh" algorithm. @@ -317,60 +564,66 @@ mod tests_refresh { #[test_matrix([4, 7, 11, 16])] fn tdec_simple_variant_share_refreshing(shares_num: usize) { let rng = &mut test_rng(); - let threshold = shares_num * 2 / 3; - - let (_, shared_private_key, contexts) = - setup_simple::(threshold, shares_num, rng); + let security_threshold = shares_num * 2 / 3; - let domain_points = &contexts[0] - .public_decryption_contexts + let (_, private_key_share, contexts) = + setup_simple::(shares_num, security_threshold, rng); + let domain_points = &contexts .iter() - .map(|ctxt| ctxt.domain) - .collect::>(); + .map(|ctxt| { + ( + ctxt.index as u32, + ctxt.public_decryption_contexts[ctxt.index].domain, + ) + }) + .collect::>(); let h = contexts[0].public_decryption_contexts[0].h; // Each participant prepares an update for each other participant: let share_updates = contexts .iter() .map(|p| { - let deltas_i = prepare_share_updates_for_refresh::( - domain_points, - &h, - threshold, - rng, - ); - (p.index, deltas_i) + let share_updates = + ShareRefreshUpdate::::create_share_updates( + domain_points, + &h, + security_threshold as u32, + rng, + ); + (p.index as u32, share_updates) }) - .collect::>(); + .collect::>(); - // Participants "refresh" their shares with the updates from each other: - let refreshed_shares: Vec<_> = contexts + // Participants refresh their shares with the updates from each other: + let refreshed_shares = contexts .iter() .map(|p| { // Current participant receives updates from other participants let updates_for_participant: Vec<_> = share_updates .values() - .map(|updates| *updates.get(p.index).unwrap()) + .map(|updates| { + updates.get(&(p.index as u32)).cloned().unwrap() + }) .collect(); - // And updates their share - apply_updates_to_private_share::( - &p.private_key_share, - &updates_for_participant, - ) + // And creates a new, refreshed share + let updated_share = + PrivateKeyShare(p.private_key_share.clone()) + .create_updated_key_share(&updates_for_participant); + (p.index as u32, updated_share) }) - .collect(); + // We only need `threshold` refreshed shares to recover the original share + .take(security_threshold) + .collect::>>(); // Finally, let's recreate the shared private key from the refreshed shares - let new_shared_private_key = recover_share_from_updated_private_shares( - &ScalarField::zero(), - &domain_points[..threshold], - &refreshed_shares[..threshold], - ); - - assert_eq!( - shared_private_key, - new_shared_private_key.private_key_share - ); + let new_shared_private_key = + PrivateKeyShare::recover_share_from_updated_private_shares( + &ScalarField::zero(), + domain_points, + &refreshed_shares, + ) + .unwrap(); + assert_eq!(private_key_share, new_shared_private_key.0); } } diff --git a/ferveo/src/test_common.rs b/ferveo/src/test_common.rs index dce10e5d..a1a3d130 100644 --- a/ferveo/src/test_common.rs +++ b/ferveo/src/test_common.rs @@ -1,12 +1,16 @@ /// 
Factory functions and variables for testing use std::str::FromStr; +use ark_bls12_381::Bls12_381; pub use ark_bls12_381::Bls12_381 as E; use ark_ec::pairing::Pairing; use ferveo_common::Keypair; -use rand::seq::SliceRandom; +use rand::{seq::SliceRandom, Rng}; -use crate::{DkgParams, EthereumAddress, PubliclyVerifiableDkg, Validator}; +use crate::{ + DkgParams, EthereumAddress, PubliclyVerifiableDkg, PubliclyVerifiableSS, + Validator, ValidatorMessage, +}; pub type ScalarField = ::ScalarField; pub type G1 = ::G1Affine; @@ -73,17 +77,23 @@ pub fn setup_dkg(my_validator_index: usize) -> TestSetup { ) } +pub type DealtTestSetup = ( + PubliclyVerifiableDkg, + Vec>, + Vec>, +); + /// Set up a dkg with enough pvss transcripts to meet the threshold /// /// The correctness of this function is tested in the module [`crate::dkg::test_dealing`] -pub fn setup_dealt_dkg() -> TestSetup { +pub fn setup_dealt_dkg() -> DealtTestSetup { setup_dealt_dkg_with(SECURITY_THRESHOLD, SHARES_NUM) } pub fn setup_dealt_dkg_with( security_threshold: u32, shares_num: u32, -) -> TestSetup { +) -> DealtTestSetup { setup_dealt_dkg_with_n_validators( security_threshold, shares_num, @@ -95,36 +105,62 @@ pub fn setup_dealt_dkg_with_n_validators( security_threshold: u32, shares_num: u32, validators_num: u32, -) -> TestSetup { +) -> DealtTestSetup { + setup_dealt_dkg_with_n_transcript_dealt( + security_threshold, + shares_num, + validators_num, + security_threshold, + ) +} + +pub fn make_messages( + rng: &mut (impl Rng + Sized), + dkg: &PubliclyVerifiableDkg, +) -> Vec<(Validator, PubliclyVerifiableSS)> { + let mut messages = vec![]; + for i in 0..dkg.dkg_params.shares_num() { + let (dkg, _) = setup_dkg(i as usize); + let transcript = dkg.generate_transcript(rng).unwrap(); + let sender = dkg.me.clone(); + messages.push((sender, transcript)); + } + messages.shuffle(rng); + messages +} + +pub fn setup_dealt_dkg_with_n_transcript_dealt( + security_threshold: u32, + shares_num: u32, + validators_num: u32, + transcripts_to_use: u32, +) -> DealtTestSetup { let rng = &mut ark_std::test_rng(); // Gather everyone's transcripts - let mut messages: Vec<_> = (0..validators_num) + // Use only need the first `transcripts_to_use` transcripts + let mut transcripts: Vec<_> = (0..transcripts_to_use) .map(|my_index| { - let (mut dkg, _) = setup_dkg_for_n_validators( + let (dkg, _) = setup_dkg_for_n_validators( security_threshold, shares_num, my_index as usize, validators_num, ); let me = dkg.me.clone(); - let message = dkg.share(rng).unwrap(); - (me, message) + let transcript = dkg.generate_transcript(rng).unwrap(); + (me, transcript) }) .collect(); // Create a test DKG instance - let (mut dkg, keypairs) = setup_dkg_for_n_validators( + let (dkg, keypairs) = setup_dkg_for_n_validators( security_threshold, shares_num, 0, validators_num, ); - // The ordering of messages should not matter - messages.shuffle(rng); - messages.iter().for_each(|(sender, message)| { - dkg.apply_message(sender, message).expect("Setup failed"); - }); - (dkg, keypairs) + transcripts.shuffle(rng); + (dkg, keypairs, transcripts) } diff --git a/ferveo/src/validator.rs b/ferveo/src/validator.rs index a1c73e11..f0d88e49 100644 --- a/ferveo/src/validator.rs +++ b/ferveo/src/validator.rs @@ -7,6 +7,8 @@ use thiserror::Error; use crate::Error; +const ETHEREUM_ADDRESS_LEN: usize = 42; + #[derive( Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Hash, )] @@ -25,10 +27,11 @@ impl FromStr for EthereumAddress { type Err = EthereumAddressParseError; fn from_str(s: 
&str) -> Result { - if s.len() != 42 { + if s.len() != ETHEREUM_ADDRESS_LEN { return Err(EthereumAddressParseError::InvalidLength); } - hex::decode(&s[2..]) + let prefix_len = "0x".len(); + hex::decode(&s[prefix_len..]) .map_err(|_| EthereumAddressParseError::InvalidHex)?; Ok(EthereumAddress(s.to_string())) } @@ -69,7 +72,6 @@ pub fn assert_no_share_duplicates( validators: &[Validator], ) -> Result<(), Error> { let mut set = HashSet::new(); - for validator in validators { if set.contains(&validator.share_index) { return Err(Error::DuplicatedShareIndex(validator.share_index)); @@ -77,6 +79,5 @@ pub fn assert_no_share_duplicates( set.insert(validator.share_index); } } - Ok(()) } diff --git a/subproductdomain/Cargo.toml b/subproductdomain/Cargo.toml index 481b708a..d5b57716 100644 --- a/subproductdomain/Cargo.toml +++ b/subproductdomain/Cargo.toml @@ -7,11 +7,11 @@ authors = ["Heliax AG ", "Piotr Roslaniec