Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions src/inc_encoding.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,4 +52,6 @@ pub trait IncomparableEncoding {
fn internal_consistency_check();
}

#[cfg(test)]
pub mod basic_winternitz;
pub mod target_sum;
82 changes: 82 additions & 0 deletions src/inc_encoding/basic_winternitz.rs
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we removed this in ea9fc1d, so I don't think it is a good idea to put it back.

Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
use crate::{
MESSAGE_LENGTH,
symmetric::message_hash::{MessageHash, bytes_to_chunks},
};

use super::IncomparableEncoding;

/// Incomparable Encoding Scheme based on the basic Winternitz scheme, implemented from a given message hash.
/// CHUNK_SIZE must be 1, 2, 4, or 8 and MH::BASE must be 2^CHUNK_SIZE.
/// NUM_CHUNKS_CHECKSUM is the precomputed number of checksum chunks (see original Winternitz description).
///
/// The struct carries no runtime state: all configuration lives in the const
/// generics, and the message hash is tied in only at the type level.
pub struct WinternitzEncoding<
    MH: MessageHash,
    const CHUNK_SIZE: usize,
    const NUM_CHUNKS_CHECKSUM: usize,
> {
    // Zero-sized marker binding the generic message hash `MH` to this type
    // without storing a value of it.
    _marker_mh: std::marker::PhantomData<MH>,
}

impl<MH: MessageHash, const CHUNK_SIZE: usize, const NUM_CHUNKS_CHECKSUM: usize>
    IncomparableEncoding for WinternitzEncoding<MH, CHUNK_SIZE, NUM_CHUNKS_CHECKSUM>
{
    type Parameter = MH::Parameter;

    type Randomness = MH::Randomness;

    type Error = ();

    /// One chain per message-hash chunk, plus the checksum chunks.
    const DIMENSION: usize = MH::DIMENSION + NUM_CHUNKS_CHECKSUM;

    /// Encoding is deterministic given the randomness, so it never needs a retry.
    const MAX_TRIES: usize = 1;

    const BASE: usize = MH::BASE;

    fn rand<R: rand::Rng>(rng: &mut R) -> Self::Randomness {
        MH::rand(rng)
    }

    /// Encodes a message into `DIMENSION` chunks: the message-hash chunks
    /// followed by the little-endian chunks of the Winternitz checksum.
    ///
    /// Never fails; the result is always `Ok`.
    fn encode(
        parameter: &Self::Parameter,
        message: &[u8; MESSAGE_LENGTH],
        randomness: &Self::Randomness,
        epoch: u32,
    ) -> Result<Vec<u8>, Self::Error> {
        // apply the message hash to get chunks
        let mut chunks_message = MH::apply(parameter, epoch, randomness, message);

        // compute checksum and split into chunks in little endian.
        // Each term is in [0, BASE - 1], so the sum is bounded by
        // MH::DIMENSION * (BASE - 1) and fits in a u64.
        let checksum: u64 = chunks_message
            .iter()
            .map(|&x| Self::BASE as u64 - 1 - x as u64)
            .sum();
        let checksum_bytes = checksum.to_le_bytes();
        let chunks_checksum = bytes_to_chunks(&checksum_bytes, CHUNK_SIZE);

        // append checksum chunks (truncate to the expected number);
        // internal_consistency_check verifies that the truncated chunks
        // are always zero and that the slice below cannot go out of bounds.
        chunks_message.extend_from_slice(&chunks_checksum[..NUM_CHUNKS_CHECKSUM]);

        Ok(chunks_message)
    }

    #[cfg(test)]
    fn internal_consistency_check() {
        assert!(
            [1, 2, 4, 8].contains(&CHUNK_SIZE),
            "Winternitz Encoding: Chunk Size must be 1, 2, 4, or 8"
        );
        assert!(
            Self::DIMENSION <= 1 << 8,
            "Winternitz Encoding: Dimension must be at most 2^8"
        );
        assert!(
            MH::BASE == Self::BASE && MH::BASE == 1 << CHUNK_SIZE,
            "Winternitz Encoding: Base and chunk size not consistent with message hash"
        );
        // encode takes the checksum chunks from the 8 bytes of a u64, so only
        // 64 / CHUNK_SIZE chunks exist; asking for more would make the slice
        // `&chunks_checksum[..NUM_CHUNKS_CHECKSUM]` in encode panic.
        assert!(
            NUM_CHUNKS_CHECKSUM <= 64 / CHUNK_SIZE,
            "Winternitz Encoding: Too many checksum chunks for a u64 checksum"
        );
        // The chunks that encode drops beyond NUM_CHUNKS_CHECKSUM must always be
        // zero, i.e., NUM_CHUNKS_CHECKSUM chunks of CHUNK_SIZE bits must be able
        // to represent the maximum checksum MH::DIMENSION * (BASE - 1).
        // The shift is safe: the assert above bounds NUM_CHUNKS_CHECKSUM * CHUNK_SIZE by 64.
        let max_checksum = (MH::DIMENSION as u128) * (Self::BASE as u128 - 1);
        let representable = (1u128 << (NUM_CHUNKS_CHECKSUM * CHUNK_SIZE)) - 1;
        assert!(
            max_checksum <= representable,
            "Winternitz Encoding: Not enough checksum chunks to represent the maximum checksum"
        );

        MH::internal_consistency_check();
    }
}
63 changes: 44 additions & 19 deletions src/signature/generalized_xmss.rs
Original file line number Diff line number Diff line change
Expand Up @@ -584,10 +584,13 @@ pub mod instantiations_poseidon_top_level;
mod tests {
use crate::{
array::FieldArray,
inc_encoding::target_sum::TargetSumEncoding,
inc_encoding::{basic_winternitz::WinternitzEncoding, target_sum::TargetSumEncoding},
signature::test_templates::test_signature_scheme_correctness,
symmetric::{
message_hash::{MessageHash, poseidon::PoseidonMessageHashW1},
message_hash::{
MessageHash,
poseidon::{PoseidonMessageHash, PoseidonMessageHashW1},
},
prf::shake_to_field::ShakePRFtoF,
tweak_hash::poseidon::PoseidonTweakW1L5,
},
Expand All @@ -602,6 +605,32 @@ mod tests {

type TestTH = PoseidonTweakHash<5, 7, 2, 9, 155>;

#[test]
pub fn test_winternitz_poseidon() {
    // Note: do not use these parameters, they are just for testing
    type PRF = ShakePRFtoF<7, 5>;
    type MH = PoseidonMessageHashW1;
    // Chunk size 1 means base-2 chains, matching the W1 message hash.
    const CHUNK_SIZE: usize = 1;
    const NUM_CHUNKS_CHECKSUM: usize = 8;
    // Total chains = message-hash chunks + checksum chunks; this must equal
    // the encoding's DIMENSION so the tweak hash is sized consistently.
    const NUM_CHAINS: usize = MH::DIMENSION + NUM_CHUNKS_CHECKSUM;
    type IE = WinternitzEncoding<MH, CHUNK_SIZE, NUM_CHUNKS_CHECKSUM>;
    type TH = PoseidonTweakHash<5, 7, 2, 9, NUM_CHAINS>;
    const LOG_LIFETIME: usize = 6;
    type Sig = GeneralizedXMSSSignatureScheme<PRF, IE, TH, LOG_LIFETIME>;

    // Verify the chosen parameters are mutually consistent before exercising
    // the scheme.
    Sig::internal_consistency_check();

    // Runs covering the full lifetime. NOTE(review): first argument is
    // presumably an RNG seed for test_signature_scheme_correctness — confirm
    // against signature::test_templates.
    test_signature_scheme_correctness::<Sig>(2, 0, Sig::LIFETIME as usize);
    test_signature_scheme_correctness::<Sig>(19, 0, Sig::LIFETIME as usize);
    test_signature_scheme_correctness::<Sig>(0, 0, Sig::LIFETIME as usize);
    test_signature_scheme_correctness::<Sig>(11, 0, Sig::LIFETIME as usize);

    // Runs covering partial activation windows within the lifetime.
    test_signature_scheme_correctness::<Sig>(12, 10, (1 << 5) - 10);
    test_signature_scheme_correctness::<Sig>(19, 4, 20);
    test_signature_scheme_correctness::<Sig>(16, 16, 4);
    test_signature_scheme_correctness::<Sig>(11, 1, 29);
}

#[test]
pub fn test_target_sum_poseidon() {
// Note: do not use these parameters, they are just for testing
Expand Down Expand Up @@ -666,17 +695,15 @@ mod tests {
assert_eq!(rho1, rho2);
}

/*#[test]
pub fn test_large_base_sha() {
#[test]
pub fn test_large_base_poseidon() {
// Note: do not use these parameters, they are just for testing
type PRF = ShaPRF<24, 8>;
type TH = ShaTweak192192;

// use chunk size 8
type MH = ShaMessageHash<24, 8, 32, 8>;
const TARGET_SUM: usize = 1 << 12;
type PRF = ShakePRFtoF<4, 4>;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why can't we use this? We want to be as close as possible to the old test configuration I think

Suggested change
type PRF = ShakePRFtoF<4, 4>;
type PRF = ShakePRFtoF<24, 8>;

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Understood, I will revert them.

type TH = PoseidonTweakHash<4, 4, 2, 8, 8>;
type MH = PoseidonMessageHash<4, 4, 2, 8, 256, 2, 9>;
const TARGET_SUM: usize = 8 * (256 - 1) / 2;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why we can't have

Suggested change
const TARGET_SUM: usize = 8 * (256 - 1) / 2;
const TARGET_SUM: usize = 1 << 12;

type IE = TargetSumEncoding<MH, TARGET_SUM>;
const LOG_LIFETIME: usize = 10;
const LOG_LIFETIME: usize = 6;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why can't we use this?

Suggested change
const LOG_LIFETIME: usize = 6;
const LOG_LIFETIME: usize = 10;

type Sig = GeneralizedXMSSSignatureScheme<PRF, IE, TH, LOG_LIFETIME>;

Sig::internal_consistency_check();
Expand All @@ -686,23 +713,21 @@ mod tests {
}

#[test]
pub fn test_large_dimension_sha() {
pub fn test_large_dimension_poseidon() {
// Note: do not use these parameters, they are just for testing
type PRF = ShaPRF<24, 8>;
type TH = ShaTweak192192;

// use 256 chunks
type MH = ShaMessageHash<24, 8, 256, 1>;
type PRF = ShakePRFtoF<8, 4>;
type TH = PoseidonTweakHash<4, 8, 2, 8, 256>;
type MH = PoseidonMessageHash<4, 4, 8, 256, 2, 2, 9>;
const TARGET_SUM: usize = 128;
type IE = TargetSumEncoding<MH, TARGET_SUM>;
const LOG_LIFETIME: usize = 10;
const LOG_LIFETIME: usize = 6;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why can't we use this as before?

Suggested change
const LOG_LIFETIME: usize = 6;
const LOG_LIFETIME: usize = 10;

type Sig = GeneralizedXMSSSignatureScheme<PRF, IE, TH, LOG_LIFETIME>;

Sig::internal_consistency_check();

test_signature_scheme_correctness::<Sig>(2, 0, Sig::LIFETIME as usize);
test_signature_scheme_correctness::<Sig>(19, 0, Sig::LIFETIME as usize);
}*/
}

#[test]
pub fn test_expand_activation_time() {
Expand Down
108 changes: 108 additions & 0 deletions src/symmetric/message_hash.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,3 +43,111 @@ pub trait MessageHash {

pub mod poseidon;
pub mod top_level_poseidon;

/// Splits a list of bytes into smaller fixed-size bit chunks.
///
/// Each byte in the input slice is divided into `chunk_size`-bit chunks,
/// starting from the least significant bits. The `chunk_size` must divide 8
/// exactly (i.e., valid values are 1, 2, 4, or 8), since each byte contains
/// 8 bits.
///
/// # Panics
///
/// Panics if `chunk_size` is not 1, 2, 4, or 8.
#[cfg(test)]
#[must_use]
#[inline]
pub fn bytes_to_chunks(bytes: &[u8], chunk_size: usize) -> Vec<u8> {
    // Only the chunk sizes 1, 2, 4, or 8 are valid.
    assert!(
        matches!(chunk_size, 1 | 2 | 4 | 8),
        "chunk_size must be 1, 2, 4, or 8"
    );

    // Every byte yields the same number of chunks, so the exact output
    // length is known up front.
    let chunks_per_byte = 8 / chunk_size;

    // Mask with the low `chunk_size` bits set. `8 - chunk_size` lies in
    // [0, 7], so the shift is well-defined even when chunk_size == 8.
    let mask = u8::MAX >> (8 - chunk_size);

    // Walk each byte from its least significant bits upward, extracting
    // one chunk per step.
    let mut chunks = Vec::with_capacity(bytes.len() * chunks_per_byte);
    for &byte in bytes {
        for position in 0..chunks_per_byte {
            chunks.push((byte >> (position * chunk_size)) & mask);
        }
    }

    chunks
}

#[cfg(test)]
mod tests {
    use super::bytes_to_chunks;
    use proptest::prelude::*;

    /// Spot-checks `bytes_to_chunks` against hand-computed expectations
    /// for chunk sizes 2 and 8 on two fixed bytes.
    #[test]
    fn test_bytes_to_chunks() {
        let byte_a: u8 = 0b0110_1100;
        let byte_b: u8 = 0b1010_0110;

        let bytes = [byte_a, byte_b];
        // LSB-first 2-bit chunks of byte_a, then of byte_b.
        let expected_chunks = [0b00, 0b11, 0b10, 0b01, 0b10, 0b01, 0b10, 0b10];

        let chunks = bytes_to_chunks(&bytes, 2);

        // Two bytes at 4 chunks per byte.
        assert_eq!(chunks.len(), 8);

        for i in 0..chunks.len() {
            assert_eq!(chunks[i], expected_chunks[i]);
        }

        // now test chunk size 8: each byte maps to itself unchanged.
        let chunks = bytes_to_chunks(&bytes, 8);

        assert_eq!(chunks.len(), 2);
        assert_eq!(chunks[0], byte_a);
        assert_eq!(chunks[1], byte_b);
    }

    proptest! {
        /// Property test: for arbitrary inputs and every valid chunk size,
        /// the output matches a straightforward shift-and-mask reference
        /// implementation (LSB-first within each byte).
        #[test]
        fn prop_bytes_to_chunks_matches_manual_bit_extraction(
            bytes in proptest::collection::vec(any::<u8>(), 0..32),
            chunk_size in prop_oneof![Just(1usize), Just(2), Just(4), Just(8)],
        ) {
            let chunks = bytes_to_chunks(&bytes, chunk_size);

            let chunks_per_byte = 8 / chunk_size;
            let mut expected = Vec::with_capacity(bytes.len() * chunks_per_byte);

            for &b in &bytes {
                for i in 0..chunks_per_byte {
                    let shifted = b >> (i * chunk_size);
                    // 1u8 << 8 would overflow, so chunk_size == 8 uses a full mask.
                    let mask = if chunk_size == 8 { 0xFF } else { (1u8 << chunk_size) - 1 };
                    expected.push(shifted & mask);
                }
            }

            prop_assert_eq!(chunks.as_slice(), expected.as_slice());
        }
    }
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think that we need all this neither no?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I overthought it; I have removed them.