From 81d1d70a615c11b83a643f90cd6791adb0e1ed5a Mon Sep 17 00:00:00 2001
From: Mark Rousskov
Date: Mon, 3 Feb 2025 23:09:10 +0000
Subject: [PATCH] Replace shared map with larger bitset

This simplifies the receiver dedup implementation by replacing the
shared map with an inline bitset, which we shift over on new arrivals.
The end result is an effective tracker for ~896 packets that fits in
about the same amount of space as the previous combination of shared
map and entry data (+6 bytes if we assume 500k entries).

The new implementation is also easier to tweak, since we directly
expose the window size.

This likely slows down inserts a little (we need to shift over 112
bytes), but shifting a 112-byte array should be fast enough that it is
not worth worrying about.
---
 dc/s2n-quic-dc/Cargo.toml                     |   1 +
 dc/s2n-quic-dc/src/path/secret/map.rs         |   8 +-
 dc/s2n-quic-dc/src/path/secret/map/entry.rs   |   2 +-
 .../src/path/secret/map/entry/tests.rs        |   2 +-
 .../src/path/secret/map/handshake.rs          |   4 +-
 dc/s2n-quic-dc/src/path/secret/map/state.rs   |   7 -
 .../src/path/secret/map/state/tests.rs        |   2 +-
 dc/s2n-quic-dc/src/path/secret/map/store.rs   |   2 -
 dc/s2n-quic-dc/src/path/secret/receiver.rs    | 360 +++----------------
 .../src/path/secret/receiver/tests.rs         | 204 ++++-------
 10 files changed, 148 insertions(+), 444 deletions(-)

diff --git a/dc/s2n-quic-dc/Cargo.toml b/dc/s2n-quic-dc/Cargo.toml
index 7bc87e3eac..43eb47ff20 100644
--- a/dc/s2n-quic-dc/Cargo.toml
+++ b/dc/s2n-quic-dc/Cargo.toml
@@ -44,6 +44,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = tr
 zerocopy = { version = "0.7", features = ["derive"] }
 zeroize = "1"
 parking_lot = "0.12"
+bitvec = { version = "1.0.1", default-features = false }
 
 [dev-dependencies]
 bolero = "0.12"
diff --git a/dc/s2n-quic-dc/src/path/secret/map.rs b/dc/s2n-quic-dc/src/path/secret/map.rs
index c62e426c3c..4d60e5fbf6 100644
--- a/dc/s2n-quic-dc/src/path/secret/map.rs
+++ b/dc/s2n-quic-dc/src/path/secret/map.rs
@@ -188,8 +188,6 @@ impl Map {
         let mut stateless_reset = [0; control::TAG_LEN];
         aws_lc_rs::rand::fill(&mut stateless_reset).unwrap();
 
-        let receiver_shared = receiver::Shared::new();
-
         let mut ids = Vec::with_capacity(peers.len());
         for (idx, (ciphersuite, version, peer)) in peers.into_iter().enumerate() {
             secret[..8].copy_from_slice(&(idx as u64).to_be_bytes()[..]);
@@ -206,7 +204,7 @@ impl Map {
                 peer,
                 secret,
                 sender,
-                receiver_shared.clone().new_receiver(),
+                receiver::State::new(),
                 dc::testing::TEST_APPLICATION_PARAMS,
                 dc::testing::TEST_REHANDSHAKE_PERIOD,
             );
@@ -226,7 +224,7 @@ impl Map {
     #[doc(hidden)]
     #[cfg(any(test, feature = "testing"))]
     pub fn test_insert(&self, peer: SocketAddr) {
-        let receiver = self.store.receiver().clone().new_receiver();
+        let receiver = super::receiver::State::new();
         let entry = Entry::fake(peer, Some(receiver));
         self.store.test_insert(entry);
     }
@@ -259,7 +257,7 @@ impl Map {
             peer_addr,
             secret,
             sender,
-            map.store.receiver().clone().new_receiver(),
+            super::receiver::State::new(),
             dc::testing::TEST_APPLICATION_PARAMS,
             dc::testing::TEST_REHANDSHAKE_PERIOD,
         );
diff --git a/dc/s2n-quic-dc/src/path/secret/map/entry.rs b/dc/s2n-quic-dc/src/path/secret/map/entry.rs
index d2685a71cf..a016b0130e 100644
--- a/dc/s2n-quic-dc/src/path/secret/map/entry.rs
+++ b/dc/s2n-quic-dc/src/path/secret/map/entry.rs
@@ -106,7 +106,7 @@ impl Entry {
 
     #[cfg(any(test, feature = "testing"))]
     pub fn fake(peer: SocketAddr, receiver: Option<receiver::State>) -> Arc<Self> {
-        let receiver =
receiver.unwrap_or_else(receiver::State::without_shared); + let receiver = receiver.unwrap_or_else(receiver::State::new); let mut secret = [0; 32]; aws_lc_rs::rand::fill(&mut secret).unwrap(); diff --git a/dc/s2n-quic-dc/src/path/secret/map/entry/tests.rs b/dc/s2n-quic-dc/src/path/secret/map/entry/tests.rs index d88c02e144..564538a272 100644 --- a/dc/s2n-quic-dc/src/path/secret/map/entry/tests.rs +++ b/dc/s2n-quic-dc/src/path/secret/map/entry/tests.rs @@ -15,7 +15,7 @@ fn entry_size() { if should_check { assert_eq!( Entry::fake((std::net::Ipv4Addr::LOCALHOST, 0).into(), None).size(), - 239 + 295 ); } } diff --git a/dc/s2n-quic-dc/src/path/secret/map/handshake.rs b/dc/s2n-quic-dc/src/path/secret/map/handshake.rs index 5d574c9a8c..f0abf285f1 100644 --- a/dc/s2n-quic-dc/src/path/secret/map/handshake.rs +++ b/dc/s2n-quic-dc/src/path/secret/map/handshake.rs @@ -4,7 +4,7 @@ use super::{Entry, Map}; use crate::{ packet::secret_control as control, - path::secret::{schedule, sender}, + path::secret::{receiver, schedule, sender}, }; use s2n_quic_core::{ dc::{self, ApplicationParams, DatagramInfo}, @@ -123,7 +123,7 @@ impl dc::Path for HandshakingPath { .into_inner(), ); - let receiver = self.map.store.receiver().clone().new_receiver(); + let receiver = receiver::State::new(); let entry = Entry::new( self.peer, diff --git a/dc/s2n-quic-dc/src/path/secret/map/state.rs b/dc/s2n-quic-dc/src/path/secret/map/state.rs index f4e09fd7e8..b9d17c3a10 100644 --- a/dc/s2n-quic-dc/src/path/secret/map/state.rs +++ b/dc/s2n-quic-dc/src/path/secret/map/state.rs @@ -78,8 +78,6 @@ where // FIXME: This will get replaced with sending on a handshake socket associated with the map. pub(super) control_socket: Arc, - pub(super) receiver_shared: Arc, - cleaner: Cleaner, init_time: Timestamp, @@ -136,7 +134,6 @@ where requested_handshakes: Default::default(), cleaner: Cleaner::new(), signer, - receiver_shared: receiver::Shared::new(), control_socket, init_time, clock, @@ -556,10 +553,6 @@ where &self.signer } - fn receiver(&self) -> &Arc { - &self.receiver_shared - } - fn send_control_packet(&self, dst: &SocketAddr, buffer: &mut [u8]) { match self.control_socket.send_to(buffer, dst) { Ok(_) => { diff --git a/dc/s2n-quic-dc/src/path/secret/map/state/tests.rs b/dc/s2n-quic-dc/src/path/secret/map/state/tests.rs index adf36059cd..0d67dca78f 100644 --- a/dc/s2n-quic-dc/src/path/secret/map/state/tests.rs +++ b/dc/s2n-quic-dc/src/path/secret/map/state/tests.rs @@ -136,7 +136,7 @@ impl Model { ip, secret, sender::State::new(stateless_reset), - state.receiver().clone().new_receiver(), + receiver::State::new(), dc::testing::TEST_APPLICATION_PARAMS, dc::testing::TEST_REHANDSHAKE_PERIOD, ))); diff --git a/dc/s2n-quic-dc/src/path/secret/map/store.rs b/dc/s2n-quic-dc/src/path/secret/map/store.rs index 2e4e2fb11c..8a98c265c1 100644 --- a/dc/s2n-quic-dc/src/path/secret/map/store.rs +++ b/dc/s2n-quic-dc/src/path/secret/map/store.rs @@ -43,8 +43,6 @@ pub trait Store: 'static + Send + Sync { fn signer(&self) -> &stateless_reset::Signer; - fn receiver(&self) -> &Arc; - fn send_control_packet(&self, dst: &SocketAddr, buffer: &mut [u8]); fn rehandshake_period(&self) -> Duration; diff --git a/dc/s2n-quic-dc/src/path/secret/receiver.rs b/dc/s2n-quic-dc/src/path/secret/receiver.rs index 41e413dac2..35a252dac9 100644 --- a/dc/s2n-quic-dc/src/path/secret/receiver.rs +++ b/dc/s2n-quic-dc/src/path/secret/receiver.rs @@ -1,263 +1,28 @@ // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 -use crate::credentials::{Credentials, Id, KeyId}; -use s2n_quic_core::packet::number::{ - PacketNumber, PacketNumberSpace, SlidingWindow, SlidingWindowError, +use crate::credentials::{Credentials, KeyId}; +use bitvec::BitArr; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Mutex, }; -use std::{ - cell::UnsafeCell, - ptr::NonNull, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, Mutex, - }, -}; - -const SHARED_ENTRIES: usize = 1 << 20; -// Maximum page size on current machines (macOS aarch64 has 16kb pages) -// -// mmap is documented as failing if we don't request a page boundary. Currently our sizes work out -// such that rounding is useless, but this is good future proofing. -const MAX_PAGE: usize = 16_384; -const SHARED_ALLOCATION: usize = { - let element = std::mem::size_of::(); - let size = element * SHARED_ENTRIES; - // TODO use `next_multiple_of` once MSRV is >=1.73 - (size + MAX_PAGE - 1) / MAX_PAGE * MAX_PAGE -}; - -#[derive(Debug)] -pub struct Shared { - secret: u64, - backing: NonNull, -} - -unsafe impl Send for Shared {} -unsafe impl Sync for Shared {} - -impl Drop for Shared { - fn drop(&mut self) { - unsafe { - if libc::munmap(self.backing.as_ptr().cast(), SHARED_ALLOCATION) != 0 { - // Avoid panicking in a destructor, just let the memory leak while logging. We - // expect this to be essentially a global singleton in most production cases so - // likely we're exiting the process anyway. - eprintln!( - "Failed to unmap memory: {:?}", - std::io::Error::last_os_error() - ); - } - } - } -} - -const fn assert_copy() {} - -struct SharedSlot { - id: UnsafeCell, - key_id: AtomicU64, -} - -impl SharedSlot { - fn try_lock(&self) -> Option> { - let current = self.key_id.load(Ordering::Relaxed); - if current & LOCK != 0 { - // If we are already locked, then give up. - // A concurrent thread updated this slot, any write we do would squash that thread's - // write. Doing so if that thread remove()d may make sense in the future but not right - // now. - return None; - } - let Ok(_) = self.key_id.compare_exchange( - current, - current | LOCK, - Ordering::Acquire, - Ordering::Relaxed, - ) else { - return None; - }; - - Some(SharedSlotGuard { - slot: self, - key_id: current, - }) - } -} - -struct SharedSlotGuard<'a> { - slot: &'a SharedSlot, - key_id: u64, -} - -impl SharedSlotGuard<'_> { - fn write_id(&mut self, id: Id) { - // Store the new ID. - // SAFETY: We hold the lock since we are in the guard. - unsafe { - // Note: no destructor is run for the previously stored element, but Id is Copy. - // If we did want to run a destructor we'd have to ensure that we replaced a PRESENT - // entry. - assert_copy::(); - std::ptr::write(self.slot.id.get(), id); - } - } - - fn id(&self) -> Id { - // SAFETY: We hold the lock, so copying out the Id is safe. - unsafe { *self.slot.id.get() } - } -} - -impl Drop for SharedSlotGuard<'_> { - fn drop(&mut self) { - self.slot.key_id.store(self.key_id, Ordering::Release); - } -} - -const LOCK: u64 = 1 << 62; -const PRESENT: u64 = 1 << 63; - -impl Shared { - pub fn new() -> Arc { - let mut secret = [0; 8]; - aws_lc_rs::rand::fill(&mut secret).expect("random is available"); - let shared = Shared { - secret: u64::from_ne_bytes(secret), - backing: unsafe { - // Note: We rely on the zero-initialization provided by the kernel. That ensures - // that an entry in the map is not LOCK'd to begin with and is not PRESENT as well. 
- let ptr = libc::mmap( - std::ptr::null_mut(), - SHARED_ALLOCATION, - libc::PROT_READ | libc::PROT_WRITE, - libc::MAP_ANONYMOUS | libc::MAP_PRIVATE, - 0, - 0, - ); - // -1 - if ptr as usize == usize::MAX { - panic!( - "Failed to allocate backing allocation for shared: {:?}", - std::io::Error::last_os_error() - ); - } - NonNull::new(ptr).unwrap().cast() - }, - }; - - // We need to modify the slot to which an all-zero path secert ID and key ID map. Otherwise - // we'd return Err(AlreadyExists) for that entry which isn't correct - it has not been - // inserted or removed, so it should be Err(Unknown). - // - // This is the only slot that needs modification. All other slots are never used for lookup - // of this set of credentials and so containing this set of credentials is fine. - let slot = shared.slot(&Credentials { - id: Id::from([0; 16]), - key_id: KeyId::new(0).unwrap(), - }); - // The max key ID is never used by senders (checked on the sending side), while avoiding - // taking a full bit out of the range of key IDs. We also statically return Unknown for it - // on removal to avoid a non-local invariant. - slot.key_id.store(KeyId::MAX.as_u64(), Ordering::Relaxed); - - Arc::new(shared) - } - - pub fn new_receiver(self: Arc) -> State { - State::with_shared(self) - } - - fn insert(&self, identity: &Credentials) { - let slot = self.slot(identity); - let Some(mut guard) = slot.try_lock() else { - return; - }; - guard.write_id(identity.id); - guard.key_id = *identity.key_id | PRESENT; - } - - fn remove(&self, identity: &Credentials) -> Result<(), Error> { - // See `new` for details. - if identity.key_id == KeyId::MAX.as_u64() { - return Err(Error::Unknown); - } - - let slot = self.slot(identity); - let previous = slot.key_id.load(Ordering::Relaxed); - if previous & LOCK != 0 { - // If we are already locked, then give up. - // A concurrent thread updated this slot, any write we do would squash that thread's - // write. No concurrent thread could have inserted what we're looking for since - // both insert and remove for a single path secret ID run under a Mutex. - return Err(Error::Unknown); - } - if previous & (!PRESENT) != *identity.key_id { - // If the currently stored entry does not match our desired KeyId, - // then we don't know whether this key has been replayed or not. - return Err(Error::Unknown); - } - - let Some(mut guard) = slot.try_lock() else { - // Don't try to win the race by spinning, let the other thread proceed. - return Err(Error::Unknown); - }; - - // Check if the path secret ID matches. - if guard.id() != identity.id { - return Err(Error::Unknown); - } - - // Ok, at this point we know that the key ID and the path secret ID both match. - - let ret = if guard.key_id & PRESENT != 0 { - Ok(()) - } else { - Err(Error::AlreadyExists) - }; - - // Release the lock, removing the PRESENT bit (which may already be missing). - guard.key_id = *identity.key_id; - ret - } - - fn index(&self, identity: &Credentials) -> usize { - let hash = u64::from_ne_bytes(identity.id[..8].try_into().unwrap()) - ^ *identity.key_id - ^ self.secret; - let index = hash & (SHARED_ENTRIES as u64 - 1); - index as usize - } +const WINDOW: usize = 896; - fn slot(&self, identity: &Credentials) -> &SharedSlot { - let index = self.index(identity); - // SAFETY: in-bounds -- the & above truncates such that we're always in the appropriate - // range that we allocated with mmap above. 
- // - // Casting to a reference is safe -- the Slot type has an UnsafeCell around all of the data - // (either inside the atomic or directly). - unsafe { self.backing.as_ptr().add(index).as_ref().unwrap_unchecked() } - } -} +type Seen = BitArr!(for WINDOW); #[derive(Debug)] pub struct State { - // Minimum that we're potentially willing to accept. - // This is lazily updated and so may be out of date. - min_key_id: AtomicU64, - // This is the maximum ID we've seen so far. This is sent to peers for when we cannot determine // if the packet sent is replayed as it falls outside our replay window. Peers use this // information to resynchronize on the latest state. max_seen_key_id: AtomicU64, - seen: Mutex, - - shared: Option>, + seen: Mutex, } -impl super::map::SizeOf for Mutex { +impl super::map::SizeOf for Mutex { fn size(&self) -> usize { // If we don't need drop, it's very likely that this type is fully contained in size_of // Self. This simplifies implementing this trait for e.g. std types. @@ -278,14 +43,10 @@ impl super::map::SizeOf for Mutex { impl super::map::SizeOf for State { fn size(&self) -> usize { let State { - min_key_id, max_seen_key_id, seen, - shared, } = self; - // shared is shared across all State's (effectively) so we don't currently account for that - // allocation. - min_key_id.size() + max_seen_key_id.size() + seen.size() + std::mem::size_of_val(shared) + max_seen_key_id.size() + seen.size() } } @@ -301,70 +62,73 @@ pub enum Error { } impl State { - pub fn without_shared() -> State { + pub fn new() -> State { State { - min_key_id: Default::default(), - max_seen_key_id: Default::default(), + max_seen_key_id: AtomicU64::new(u64::MAX), seen: Default::default(), - shared: None, } } - pub fn with_shared(shared: Arc) -> State { - State { - min_key_id: Default::default(), - max_seen_key_id: Default::default(), - seen: Default::default(), - shared: Some(shared), - } - } - - pub fn pre_authentication(&self, identity: &Credentials) -> Result<(), Error> { - if self.min_key_id.load(Ordering::Relaxed) > *identity.key_id { - return Err(Error::Unknown); - } - + pub fn pre_authentication(&self, _identity: &Credentials) -> Result<(), Error> { + // TODO: Provide more useful pre-auth checks. For now just don't bother checking this, we + // can always rely on the post-auth check in practice, this is just a slight optimization. Ok(()) } pub fn minimum_unseen_key_id(&self) -> KeyId { - KeyId::try_from(self.max_seen_key_id.load(Ordering::Relaxed) + 1).unwrap() + KeyId::try_from( + self.max_seen_key_id + .load(Ordering::Relaxed) + // Initial u64::MAX wraps to zero, which is the correct answer for the initial + // state. After that just +1 consistently. + .wrapping_add(1), + ) + .unwrap() } /// Called after decryption has been performed pub fn post_authentication(&self, identity: &Credentials) -> Result<(), Error> { - let key_id = identity.key_id; - self.max_seen_key_id.fetch_max(*key_id, Ordering::Relaxed); - let pn = PacketNumberSpace::Initial.new_packet_number(key_id); - - // Note: intentionally retaining this lock across potential insertion into the shared map. - // This avoids the case where we have evicted an entry but cannot see it in the shared map - // yet from a concurrent thread. This should not be required for correctness but helps - // reasoning about the state of the world. 
let mut seen = self.seen.lock().unwrap(); - match seen.insert_with_evicted(pn) { - Ok(evicted) => { - if let Some(shared) = &self.shared { - // FIXME: Consider bounding the number of evicted entries to insert or - // otherwise optimizing? This can run for at most 128 entries today... - for evicted in evicted { - shared.insert(&Credentials { - id: identity.id, - key_id: PacketNumber::as_varint(evicted), - }); - } - } - Ok(()) - } - Err(SlidingWindowError::TooOld) => { - if let Some(shared) = &self.shared { - shared.remove(identity) - } else { - Err(Error::Unknown) - } - } - Err(SlidingWindowError::Duplicate) => Err(Error::AlreadyExists), + + let key_id = *identity.key_id; + let mut previous_max = self.max_seen_key_id.load(Ordering::Relaxed); + let new_max = if previous_max == u64::MAX { + previous_max = 0; + key_id + } else { + previous_max.max(key_id) + }; + self.max_seen_key_id.store(new_max, Ordering::Relaxed); + + let delta = new_max - previous_max; + if delta > seen.len() as u64 { + // not yet seen since we shifted forward by more than the bitset's size. + seen.fill(false); + } else { + // Even on a 32-bit platform we'd hit the check above (since seen is way smaller than + // 2^32). + seen.shift_right(delta as usize); } + + let Ok(idx) = usize::try_from(new_max - key_id) else { + // We'd never store more than usize bits, so treat this as too old as well. + return Err(Error::Unknown); + }; + + let ret = if let Some(mut entry) = seen.get_mut(idx) { + if *entry { + return Err(Error::AlreadyExists); + } + + entry.set(true); + + Ok(()) + } else { + // Too old -- no longer in memory. + return Err(Error::Unknown); + }; + + ret } } diff --git a/dc/s2n-quic-dc/src/path/secret/receiver/tests.rs b/dc/s2n-quic-dc/src/path/secret/receiver/tests.rs index 8e49634a2b..ede7dd36e7 100644 --- a/dc/s2n-quic-dc/src/path/secret/receiver/tests.rs +++ b/dc/s2n-quic-dc/src/path/secret/receiver/tests.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use super::*; +use crate::credentials::Id; use bolero::check; use rand::{seq::SliceRandom, Rng, SeedableRng}; use std::collections::{binary_heap::PeekMut, BinaryHeap, HashSet}; @@ -10,7 +11,7 @@ use std::collections::{binary_heap::PeekMut, BinaryHeap, HashSet}; fn check() { check!().with_type::>().for_each(|ops| { let mut oracle = std::collections::HashSet::new(); - let subject = State::with_shared(Shared::new()); + let subject = State::new(); let id = Id::from([0; 16]); for op in ops { let expected = oracle.insert(*op); @@ -27,13 +28,28 @@ fn check() { }); } +#[test] +fn check_huge_gap() { + let subject = State::new(); + let id = Id::from([0; 16]); + for op in [0u64, u32::MAX as u64 + 10] { + let actual = subject + .post_authentication(&Credentials { + id, + key_id: KeyId::new(op).unwrap(), + }) + .is_ok(); + assert!(actual); + } +} + #[test] fn check_ordered() { check!().with_type::>().for_each(|ops| { let mut ops = ops.clone(); ops.sort(); let mut oracle = std::collections::HashSet::new(); - let subject = State::with_shared(Shared::new()); + let subject = State::new(); let id = Id::from([0; 16]); for op in ops { let expected = oracle.insert(op); @@ -49,7 +65,7 @@ fn check_ordered() { fn check_u16() { check!().with_type::>().for_each(|ops| { let mut oracle = std::collections::HashSet::new(); - let subject = State::with_shared(Shared::new()); + let subject = State::new(); for op in ops { let op = KeyId::new(*op as u64).unwrap(); let expected = oracle.insert(op); @@ -60,10 +76,6 @@ fn check_u16() { // If we did expect this to be a new value, it may have 
already been marked as // "seen" by the set. However, we should never return a false OK (i.e., claim that // the value was not seen when it actually was). - // - // Note that despite the u16::MAX < SHARED_ENTRIES, this is still not able to be - // 100% reliable because not all evicted entries from the local set are put into - // the backing allocation. if !expected { assert!(!actual); } @@ -77,7 +89,7 @@ fn check_ordered_u16() { let mut ops = ops.clone(); ops.sort(); let mut oracle = std::collections::HashSet::new(); - let subject = State::with_shared(Shared::new()); + let subject = State::new(); let id = Id::from([0; 16]); for op in ops { let op = KeyId::new(op as u64).unwrap(); @@ -90,61 +102,6 @@ fn check_ordered_u16() { }); } -#[test] -fn shared() { - let subject = Shared::new(); - let id1 = Id::from([0; 16]); - let mut id2 = Id::from([0; 16]); - // This is a part of the key ID not used for hashing. - id2[10] = 1; - let key1 = KeyId::new(0).unwrap(); - let key2 = KeyId::new(1).unwrap(); - subject.insert(&Credentials { - id: id1, - key_id: key1, - }); - assert_eq!( - subject.remove(&Credentials { - id: id1, - key_id: key1, - }), - Ok(()) - ); - assert_eq!( - subject.remove(&Credentials { - id: id1, - key_id: key1, - }), - Err(Error::AlreadyExists) - ); - subject.insert(&Credentials { - id: id2, - key_id: key1, - }); - assert_eq!( - subject.remove(&Credentials { - id: id1, - key_id: key1, - }), - Err(Error::Unknown) - ); - assert_eq!( - subject.remove(&Credentials { - id: id1, - key_id: key2, - }), - Err(Error::Unknown) - ); - // Removal never taints an entry, so this is still fine. - assert_eq!( - subject.remove(&Credentials { - id: id2, - key_id: key1, - }), - Ok(()) - ); -} - // This test is not particularly interesting, it's mostly just the same as the random tests above // which insert ordered and unordered values. Mostly it tests that we continue to allow 129 IDs of // arbitrary reordering. @@ -187,7 +144,7 @@ fn check_shuffled_chunks_inner(seed: u64, chunk_size: u8) { // This represents the commonly seen behavior in production where a small percentage of inserted // keys are potentially significantly delayed. Currently our percentage is fixed, but the delay is -// not; it's minimum is set by our test here and the maximum is always at most SHARED_ENTRIES. +// not; it's minimum is set by our test here and the maximum is always at most WINDOW. // // This ensures that in the common case we see in production our receiver map, presuming no // contention in the shared map, is reliably able to return accurate results. @@ -196,6 +153,9 @@ fn check_delayed() { check!() .with_type::<(u64, u16)>() .for_each(|&(seed, delay)| { + if delay as usize >= WINDOW { + return; + } check_delayed_inner(seed, delay); }); } @@ -207,11 +167,9 @@ fn check_delayed_specific() { check_delayed_inner(0xf323243, 129); } -// delay represents the *minimum* delay a delayed entry sees. The maximum is up to SHARED_ENTRIES. +// delay represents the *minimum* delay a delayed entry sees. The maximum is up to WINDOW. fn check_delayed_inner(seed: u64, delay: u16) { - // We expect that the shared map is always big enough to absorb our delay. - // (This is statically true; u16::MAX < SHARED_ENTRIES). 
- assert!((delay as usize) < SHARED_ENTRIES); + assert!((delay as usize) < super::WINDOW); let delay = delay as u64; eprintln!("======== starting test run ({seed} {delay}) =========="); let mut model = Model::default(); @@ -221,7 +179,7 @@ fn check_delayed_inner(seed: u64, delay: u16) { // there are multiple elements to insert, inserting most recent first and only afterwards older // entries. let mut buffered: BinaryHeap<(std::cmp::Reverse, u64)> = BinaryHeap::new(); - for id in 0..(SHARED_ENTRIES as u64 * 3) { + for id in 0..(100000 as u64 * 3) { while let Some(peeked) = buffered.peek_mut() { // min-heap means that if the first entry isn't the one we want, then there's no entry // that we want. @@ -238,8 +196,8 @@ fn check_delayed_inner(seed: u64, delay: u16) { // to each other. (That's an approximation, it's not obvious how to really derive a simple // explanation for what guarantees we're actually trying to provide here). if id % 128 != 0 { - // ...until some random interval no more than SHARED_ENTRIES away. - let insert_before = rng.gen_range(id + 1 + delay..id + SHARED_ENTRIES as u64); + // ...until some random interval no more than WINDOW away. + let insert_before = rng.gen_range(id + 1 + delay..id + WINDOW as u64); buffered.push((std::cmp::Reverse(insert_before), id)); } else { model.insert(id); @@ -258,7 +216,7 @@ impl Default for Model { Self { oracle: Default::default(), insert_order: Vec::new(), - subject: State::with_shared(Shared::new()), + subject: State::new(), } } } @@ -287,66 +245,58 @@ impl Model { } #[test] -fn shared_no_collisions() { - let mut seen = HashSet::new(); - let shared = Shared::new(); - for key_id in 0..SHARED_ENTRIES as u64 { - let index = shared.index(&Credentials { - id: Id::from([0; 16]), - key_id: KeyId::new(key_id).unwrap(), - }); - assert!(seen.insert(index)); +fn check_sequential() { + let subject = State::new(); + let id = Id::from([0; 16]); + for op in 0u64..(100 * u16::MAX as u64) { + let actual = subject + .post_authentication(&Credentials { + id, + key_id: KeyId::new(op).unwrap(), + }) + .is_ok(); + assert!(actual); } - // The next entry should collide, since we will wrap around. - let index = shared.index(&Credentials { - id: Id::from([0; 16]), - key_id: KeyId::new(SHARED_ENTRIES as u64 + 1).unwrap(), - }); - assert!(!seen.insert(index)); + // check all of those are considered gone. + for op in 0u64..(100 * u16::MAX as u64) { + subject + .post_authentication(&Credentials { + id, + key_id: KeyId::new(op).unwrap(), + }) + .unwrap_err(); + } } #[test] -fn shared_id_pair_no_collisions() { - let shared = Shared::new(); - - // Two random IDs. Exact constants shouldn't matter much, we're mainly aiming to test overall - // quality of our mapping from Id + KeyId. - let id1 = Id::from(u128::to_ne_bytes(0x25add729cce683cd0cda41d35436bdc6)); - let id2 = Id::from(u128::to_ne_bytes(0x2862115d0691fe180f2aeb26af3c2e5e)); - - for key_id in 0..SHARED_ENTRIES as u64 { - let index1 = shared.index(&Credentials { - id: id1, - key_id: KeyId::new(key_id).unwrap(), - }); - let index2 = shared.index(&Credentials { - id: id2, - key_id: KeyId::new(key_id).unwrap(), - }); +fn unseen() { + let subject = State::new(); + assert_eq!(*subject.minimum_unseen_key_id(), 0); + let id = Id::from([0; 16]); + subject + .post_authentication(&Credentials { + id, + key_id: KeyId::new(0).unwrap(), + }) + .unwrap(); + assert_eq!(*subject.minimum_unseen_key_id(), 1); - // Our path secret IDs are sufficiently different that we expect that for any given index - // we map to a different slot. 
This test is not *really* saying much since it's highly - // dependent on the exact values of the path secret IDs, but it prevents simple bugs like - // ignoring the IDs entirely. - assert_ne!(index1, index2); - } -} + let id = Id::from([0; 16]); + subject + .post_authentication(&Credentials { + id, + key_id: KeyId::new(3).unwrap(), + }) + .unwrap(); + assert_eq!(*subject.minimum_unseen_key_id(), 4); -// Confirms that we start out without any entries present in the map. -#[test] -fn shared_no_entries() { - let shared = Shared::new(); - // We have to check all slots to be sure. The index used for lookup is going to be shuffled due - // to the hashing in of the secret. We need to use an all-zero path secret ID since the entries - // in the map start out zero-initialized today. - for key_id in 0..SHARED_ENTRIES as u64 { - assert_eq!( - shared.remove(&Credentials { - id: Id::from([0; 16]), - key_id: KeyId::new(key_id).unwrap(), - }), - Err(Error::Unknown) - ); - } + let id = Id::from([0; 16]); + subject + .post_authentication(&Credentials { + id, + key_id: KeyId::new(2).unwrap(), + }) + .unwrap(); + assert_eq!(*subject.minimum_unseen_key_id(), 4); }
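
The dedup window this patch introduces is a fixed-size sliding bitset anchored at the largest key ID accepted so far: accepting an ID beyond the current maximum shifts the window forward, a set bit inside the window marks a definite replay, and anything that has fallen off the back is reported as unknown. The sketch below is a minimal, self-contained illustration of that shape (hypothetical names, a plain bool array instead of bitvec, and none of the atomics, locking, or Credentials plumbing of the real State); it mirrors post_authentication above but is not the patch's code:

const WINDOW: usize = 896; // same window size as the patch

#[derive(Debug, PartialEq)]
enum Error {
    Unknown,       // outside the window; we can no longer tell
    AlreadyExists, // definitely a replay
}

struct Window {
    max_seen: Option<u64>, // None until the first ID is accepted
    seen: [bool; WINDOW],  // seen[d] == true => key (max_seen - d) was accepted
}

impl Window {
    fn new() -> Self {
        Self { max_seen: None, seen: [false; WINDOW] }
    }

    fn insert(&mut self, key_id: u64) -> Result<(), Error> {
        let new_max = self.max_seen.map_or(key_id, |max| max.max(key_id));
        let delta = new_max - self.max_seen.unwrap_or(new_max);
        if delta >= WINDOW as u64 {
            // The maximum jumped forward past everything we were tracking.
            self.seen.fill(false);
        } else if delta > 0 {
            // Slide existing bits toward the back of the window, zeroing the front.
            self.seen.copy_within(0..WINDOW - delta as usize, delta as usize);
            self.seen[..delta as usize].fill(false);
        }
        self.max_seen = Some(new_max);

        let Ok(idx) = usize::try_from(new_max - key_id) else {
            // Far older than the window could ever track.
            return Err(Error::Unknown);
        };
        match self.seen.get_mut(idx) {
            Some(bit) if *bit => Err(Error::AlreadyExists),
            Some(bit) => {
                *bit = true;
                Ok(())
            }
            // Older than the window can track.
            None => Err(Error::Unknown),
        }
    }
}

Reporting Unknown rather than AlreadyExists for IDs older than the window is what lets the peer resynchronize via minimum_unseen_key_id instead of treating every stale packet as a replay.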
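
Driving the sketch by hand shows the three outcomes the receiver distinguishes; again, this is illustrative code rather than part of the patch:

fn main() {
    let mut window = Window::new();
    assert_eq!(window.insert(0), Ok(()));
    assert_eq!(window.insert(0), Err(Error::AlreadyExists)); // replay inside the window
    assert_eq!(window.insert(5), Ok(()));                    // small reordering is accepted
    assert_eq!(window.insert(5 + WINDOW as u64), Ok(()));    // jump forward by a full window
    assert_eq!(window.insert(0), Err(Error::Unknown));       // too old to classify
}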