From 9b8fc2ae596c8ace5ba502ad4b7ddbdb532e3d3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 10:45:12 +0200 Subject: [PATCH 01/23] build(deps): bump docker/build-push-action from 6.9.0 to 6.10.0 (#2260) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6.9.0 to 6.10.0. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/4f58ea79222b3b9dc2c8bbdd6debcef730109a75...48aba3b46d1b1fec4febb7c5d0c644b249a11355) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/qns.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 9ff452daa3..895590f102 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -55,7 +55,7 @@ jobs: # set latest tag for default branch type=raw,value=latest,enable={{is_default_branch}} - - uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 + - uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355 # v6.10.0 if: github.event_name != 'pull_request' with: push: true @@ -66,7 +66,7 @@ jobs: cache-to: type=gha,mode=max platforms: 'linux/amd64, linux/arm64' - - uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 + - uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355 # v6.10.0 id: docker_build_and_push with: tags: ${{ steps.meta.outputs.tags }} From 1fdb5b99e3350c152f6225ee4ec924dd9961e4e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 10:57:33 +0200 Subject: [PATCH 02/23] build(deps): bump lukemathwalker/cargo-chef in /qns (#2261) Bumps lukemathwalker/cargo-chef from `75448ae` to `315e442`. --- updated-dependencies: - dependency-name: lukemathwalker/cargo-chef dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- qns/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qns/Dockerfile b/qns/Dockerfile index 3f11d198e4..0812e673da 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -1,4 +1,4 @@ -FROM lukemathwalker/cargo-chef@sha256:75448aed469ef5951a7211cd152ee39d1e31887defbab428ad4de922e5b26dcb AS chef +FROM lukemathwalker/cargo-chef@sha256:315e44286168bbabc24c27c3af74a84c8208d362b70721c152d3e99228b3c486 AS chef WORKDIR /app From d669f0ceb40b0958918fe8a8082d8df382d1cd0e Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 2 Dec 2024 19:58:36 +1100 Subject: [PATCH 03/23] Add a benchmark for the decoder (#2258) * Add a benchmark for the decoder * Improve benchmark performance * `bench` feature --------- Co-authored-by: Lars Eggert --- .gitignore | 1 + Cargo.lock | 2 ++ neqo-common/Cargo.toml | 10 ++++++- neqo-common/benches/decoder.rs | 52 ++++++++++++++++++++++++++++++++++ 4 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 neqo-common/benches/decoder.rs diff --git a/.gitignore b/.gitignore index 25a26d21d3..7ca7077bc4 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,5 @@ *~ /.vscode/ /lcov.info +/mutants.out*/ /target/ diff --git a/Cargo.lock b/Cargo.lock index e619126696..e2693fda85 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -821,10 +821,12 @@ dependencies = [ name = "neqo-common" version = "0.11.0" dependencies = [ + "criterion", "enum-map", "env_logger", "hex", "log", + "neqo-crypto", "qlog", "regex", "test-fixture", diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 8335a11c12..686681a264 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -28,13 +28,21 @@ qlog = { workspace = true } windows = { version = "0.58", default-features = false, features = ["Win32_Media"] } [dev-dependencies] +criterion = { version = "0.5", default-features = false } +neqo-crypto = { path = "../neqo-crypto" } test-fixture = { path = "../test-fixture" } regex = { workspace = true } [features] -ci = [] +bench = ["test-fixture/bench"] build-fuzzing-corpus = ["hex"] +ci = [] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false + +[[bench]] +name = "decoder" +harness = false +required-features = ["bench"] diff --git a/neqo-common/benches/decoder.rs b/neqo-common/benches/decoder.rs new file mode 100644 index 0000000000..e15b1bec64 --- /dev/null +++ b/neqo-common/benches/decoder.rs @@ -0,0 +1,52 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use neqo_common::Decoder; +use neqo_crypto::{init, randomize}; + +fn randomize_buffer(n: usize, mask: u8) -> Vec { + let mut buf = vec![0; n]; + // NSS doesn't like randomizing larger buffers, so chunk them up. + // https://searchfox.org/nss/rev/968939484921b0ceecca189cd1b66e97950c39da/lib/freebl/drbg.c#29 + for chunk in buf.chunks_mut(0x10000) { + randomize(chunk); + } + // Masking the top bits off causes the resulting values to be interpreted as + // smaller varints, which stresses the decoder differently. + // This is worth testing because most varints contain small values. + for x in &mut buf[..] 
{ + *x &= mask; + } + buf +} + +fn decoder(c: &mut Criterion, count: usize, mask: u8) { + c.bench_function(&format!("decode {count} bytes, mask {mask:x}"), |b| { + b.iter_batched_ref( + || randomize_buffer(count, mask), + |buf| { + let mut dec = Decoder::new(&buf[..]); + while black_box(dec.decode_varint()).is_some() { + // Do nothing; + } + }, + criterion::BatchSize::SmallInput, + ); + }); +} + +fn benchmark_decoder(c: &mut Criterion) { + init().unwrap(); + for mask in [0xff, 0x7f, 0x3f] { + for exponent in [12, 20] { + decoder(c, 1 << exponent, mask); + } + } +} + +criterion_group!(benches, benchmark_decoder); +criterion_main!(benches); From 9143d73f9fa9f54f7f69478dd3ce94d4c0a44d79 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 2 Dec 2024 14:56:52 +0200 Subject: [PATCH 04/23] ci: Delete `results-main` before `mv` (#2262) Because otherwise the `mv` command will fail. --- .github/workflows/qns.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 895590f102..af61cbdac0 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -259,6 +259,7 @@ jobs: - if: github.ref == 'refs/heads/main' run: | + rm -rf results-main || true mv results results-main echo "${{ github.sha }}" > results-main/baseline-sha.txt From a7581770ff138c16727e92ecb7b47d9ceb95425e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 3 Dec 2024 17:27:29 +0200 Subject: [PATCH 05/23] chore: Guard testing functions properly (#2264) * chore: Guard testing functions properly We have a bunch of functions that in their doc comments say that they are for testing purposes only. This commit adds guards to those functions to make sure they are not used in production code. * More --- neqo-common/Cargo.toml | 1 + neqo-common/src/codec.rs | 5 +++ neqo-qpack/src/encoder_instructions.rs | 42 ++++++++++++++++++++------ test-fixture/Cargo.toml | 2 +- 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 686681a264..4022befd9b 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -37,6 +37,7 @@ regex = { workspace = true } bench = ["test-fixture/bench"] build-fuzzing-corpus = ["hex"] ci = [] +test-fixture = [] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index afeb095781..d5e92a2e48 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -44,12 +44,15 @@ impl<'a> Decoder<'a> { } /// Skip helper that panics if `n` is `None` or not able to fit in `usize`. + /// Only use this for tests because we panic rather than reporting a result. + #[cfg(any(test, feature = "test-fixture"))] fn skip_inner(&mut self, n: Option) { self.skip(usize::try_from(n.expect("invalid length")).unwrap()); } /// Skip a vector. Panics if there isn't enough space. /// Only use this for tests because we panic rather than reporting a result. + #[cfg(any(test, feature = "test-fixture"))] pub fn skip_vec(&mut self, n: usize) { let len = self.decode_uint(n); self.skip_inner(len); @@ -57,6 +60,7 @@ impl<'a> Decoder<'a> { /// Skip a variable length vector. Panics if there isn't enough space. /// Only use this for tests because we panic rather than reporting a result. 
+ #[cfg(any(test, feature = "test-fixture"))] pub fn skip_vvec(&mut self) { let len = self.decode_varint(); self.skip_inner(len); @@ -272,6 +276,7 @@ impl Encoder { /// # Panics /// /// When `s` contains non-hex values or an odd number of values. + #[cfg(any(test, feature = "test-fixture"))] #[must_use] pub fn from_hex(s: impl AsRef) -> Self { let s = s.as_ref(); diff --git a/neqo-qpack/src/encoder_instructions.rs b/neqo-qpack/src/encoder_instructions.rs index 1a513bd461..3fba5c2d96 100644 --- a/neqo-qpack/src/encoder_instructions.rs +++ b/neqo-qpack/src/encoder_instructions.rs @@ -18,17 +18,33 @@ use crate::{ Res, }; -// The encoder only uses InsertWithNameLiteral, therefore clippy is complaining about dead_code. -// We may decide to use othe instruction in the future. -// All instructions are used for testing, therefore they are defined. -#[allow(dead_code)] +// The encoder only uses InsertWithNameLiteral. +// All instructions are used for testing, therefore they are guarded with `#[cfg(test)]`. #[derive(Debug, PartialEq, Eq)] pub enum EncoderInstruction<'a> { - Capacity { value: u64 }, - InsertWithNameRefStatic { index: u64, value: &'a [u8] }, - InsertWithNameRefDynamic { index: u64, value: &'a [u8] }, - InsertWithNameLiteral { name: &'a [u8], value: &'a [u8] }, - Duplicate { index: u64 }, + Capacity { + value: u64, + }, + #[cfg(test)] + InsertWithNameRefStatic { + index: u64, + value: &'a [u8], + }, + #[cfg(test)] + InsertWithNameRefDynamic { + index: u64, + value: &'a [u8], + }, + InsertWithNameLiteral { + name: &'a [u8], + value: &'a [u8], + }, + #[cfg(test)] + Duplicate { + index: u64, + }, + #[cfg(test)] + #[allow(dead_code)] NoInstruction, } @@ -38,10 +54,12 @@ impl EncoderInstruction<'_> { Self::Capacity { value } => { enc.encode_prefixed_encoded_int(ENCODER_CAPACITY, *value); } + #[cfg(test)] Self::InsertWithNameRefStatic { index, value } => { enc.encode_prefixed_encoded_int(ENCODER_INSERT_WITH_NAME_REF_STATIC, *index); enc.encode_literal(use_huffman, NO_PREFIX, value); } + #[cfg(test)] Self::InsertWithNameRefDynamic { index, value } => { enc.encode_prefixed_encoded_int(ENCODER_INSERT_WITH_NAME_REF_DYNAMIC, *index); enc.encode_literal(use_huffman, NO_PREFIX, value); @@ -50,9 +68,11 @@ impl EncoderInstruction<'_> { enc.encode_literal(use_huffman, ENCODER_INSERT_WITH_NAME_LITERAL, name); enc.encode_literal(use_huffman, NO_PREFIX, value); } + #[cfg(test)] Self::Duplicate { index } => { enc.encode_prefixed_encoded_int(ENCODER_DUPLICATE, *index); } + #[cfg(test)] Self::NoInstruction => {} } } @@ -81,12 +101,14 @@ impl<'a> From<&'a EncoderInstruction<'a>> for DecodedEncoderInstruction { fn from(inst: &'a EncoderInstruction) -> Self { match inst { EncoderInstruction::Capacity { value } => Self::Capacity { value: *value }, + #[cfg(test)] EncoderInstruction::InsertWithNameRefStatic { index, value } => { Self::InsertWithNameRefStatic { index: *index, value: value.to_vec(), } } + #[cfg(test)] EncoderInstruction::InsertWithNameRefDynamic { index, value } => { Self::InsertWithNameRefDynamic { index: *index, @@ -99,7 +121,9 @@ impl<'a> From<&'a EncoderInstruction<'a>> for DecodedEncoderInstruction { value: value.to_vec(), } } + #[cfg(test)] EncoderInstruction::Duplicate { index } => Self::Duplicate { index: *index }, + #[cfg(test)] EncoderInstruction::NoInstruction => Self::NoInstruction, } } diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 8ab2c87937..6bc8bbf1cd 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -18,7 +18,7 @@ workspace = true 
[dependencies] # Checked against https://searchfox.org/mozilla-central/source/Cargo.lock 2024-11-11 log = { workspace = true } -neqo-common = { path = "../neqo-common" } +neqo-common = { path = "../neqo-common", features = ["test-fixture"] } neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-transport = { path = "../neqo-transport" } From dd8e8018914268110ffcc40eb2b43e59e937a2a7 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 9 Dec 2024 09:10:01 +1100 Subject: [PATCH 06/23] Decoder improvements (#2259) * Add a benchmark for the decoder * Make the bench more rigorous * Dramatic improvements in varint decoding * Why was I able to compile without importing size_of? * I kept too many bits * Improve benchmark performance * Make it build * skip rather than decode Co-authored-by: Lars Eggert Signed-off-by: Martin Thomson * Comment about not using Decoder --------- Signed-off-by: Lars Eggert Signed-off-by: Martin Thomson Co-authored-by: Lars Eggert --- neqo-common/src/codec.rs | 86 +++++++++++++++------------ neqo-common/src/incrdecoder.rs | 4 +- neqo-http3/src/frames/hframe.rs | 4 +- neqo-http3/src/frames/wtframe.rs | 3 +- neqo-transport/src/addr_valid.rs | 4 +- neqo-transport/src/connection/mod.rs | 7 ++- neqo-transport/src/frame.rs | 2 +- neqo-transport/src/packet/mod.rs | 6 +- neqo-transport/src/shuffle.rs | 24 ++++---- neqo-transport/src/tparams.rs | 11 ++-- neqo-transport/src/tracking.rs | 2 +- neqo-transport/tests/server.rs | 7 ++- test-fixture/src/assertions.rs | 31 ++++++---- test-fixture/src/header_protection.rs | 9 ++- test-fixture/src/lib.rs | 4 +- test-fixture/src/sim/rng.rs | 8 +-- 16 files changed, 118 insertions(+), 94 deletions(-) diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index d5e92a2e48..6ddb717a64 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::fmt::Debug; +use std::{fmt::Debug, mem::size_of}; use crate::hex_with_len; @@ -54,7 +54,7 @@ impl<'a> Decoder<'a> { /// Only use this for tests because we panic rather than reporting a result. #[cfg(any(test, feature = "test-fixture"))] pub fn skip_vec(&mut self, n: usize) { - let len = self.decode_uint(n); + let len = self.decode_n(n); self.skip_inner(len); } @@ -66,16 +66,6 @@ impl<'a> Decoder<'a> { self.skip_inner(len); } - /// Decodes (reads) a single byte. - pub fn decode_byte(&mut self) -> Option { - if self.remaining() < 1 { - return None; - } - let b = self.buf[self.offset]; - self.offset += 1; - Some(b) - } - /// Provides the next byte without moving the read position. #[must_use] pub const fn peek_byte(&self) -> Option { @@ -96,33 +86,43 @@ impl<'a> Decoder<'a> { Some(res) } - /// Decodes an unsigned integer of length 1..=8. - /// - /// # Panics - /// - /// This panics if `n` is not in the range `1..=8`. 
- pub fn decode_uint(&mut self, n: usize) -> Option { - assert!(n > 0 && n <= 8); + #[inline] + pub(crate) fn decode_n(&mut self, n: usize) -> Option { + debug_assert!(n > 0 && n <= 8); if self.remaining() < n { return None; } - let mut v = 0_u64; - for i in 0..n { - let b = self.buf[self.offset + i]; - v = v << 8 | u64::from(b); - } - self.offset += n; - Some(v) + Some(if n == 1 { + let v = u64::from(self.buf[self.offset]); + self.offset += 1; + v + } else { + let mut buf = [0; 8]; + buf[8 - n..].copy_from_slice(&self.buf[self.offset..self.offset + n]); + self.offset += n; + u64::from_be_bytes(buf) + }) + } + + /// Decodes a big-endian, unsigned integer value into the target type. + /// This returns `None` if there is not enough data remaining + /// or if the conversion to the identified type fails. + /// Conversion is via `u64`, so failures are impossible for + /// unsigned integer types: `u8`, `u16`, `u32`, or `u64`. + /// Signed types will fail if the high bit is set. + pub fn decode_uint>(&mut self) -> Option { + let v = self.decode_n(size_of::()); + v.and_then(|v| T::try_from(v).ok()) } /// Decodes a QUIC varint. pub fn decode_varint(&mut self) -> Option { - let b1 = self.decode_byte()?; + let b1 = self.decode_n(1)?; match b1 >> 6 { - 0 => Some(u64::from(b1 & 0x3f)), - 1 => Some((u64::from(b1 & 0x3f) << 8) | self.decode_uint(1)?), - 2 => Some((u64::from(b1 & 0x3f) << 24) | self.decode_uint(3)?), - 3 => Some((u64::from(b1 & 0x3f) << 56) | self.decode_uint(7)?), + 0 => Some(b1), + 1 => Some((b1 & 0x3f) << 8 | self.decode_n(1)?), + 2 => Some((b1 & 0x3f) << 24 | self.decode_n(3)?), + 3 => Some((b1 & 0x3f) << 56 | self.decode_n(7)?), _ => unreachable!(), } } @@ -147,7 +147,7 @@ impl<'a> Decoder<'a> { /// Decodes a TLS-style length-prefixed buffer. 
pub fn decode_vec(&mut self, n: usize) -> Option<&'a [u8]> { - let len = self.decode_uint(n); + let len = self.decode_n(n); self.decode_checked(len) } @@ -486,16 +486,28 @@ mod tests { let enc = Encoder::from_hex("0123"); let mut dec = enc.as_decoder(); - assert_eq!(dec.decode_byte().unwrap(), 0x01); - assert_eq!(dec.decode_byte().unwrap(), 0x23); - assert!(dec.decode_byte().is_none()); + assert_eq!(dec.decode_uint::().unwrap(), 0x01); + assert_eq!(dec.decode_uint::().unwrap(), 0x23); + assert!(dec.decode_uint::().is_none()); + } + + #[test] + fn peek_byte() { + let enc = Encoder::from_hex("01"); + let mut dec = enc.as_decoder(); + + assert_eq!(dec.offset(), 0); + assert_eq!(dec.peek_byte().unwrap(), 0x01); + dec.skip(1); + assert_eq!(dec.offset(), 1); + assert!(dec.peek_byte().is_none()); } #[test] fn decode_byte_short() { let enc = Encoder::from_hex(""); let mut dec = enc.as_decoder(); - assert!(dec.decode_byte().is_none()); + assert!(dec.decode_uint::().is_none()); } #[test] @@ -506,7 +518,7 @@ mod tests { assert!(dec.decode(2).is_none()); let mut dec = Decoder::from(&[]); - assert_eq!(dec.decode_remainder().len(), 0); + assert!(dec.decode_remainder().is_empty()); } #[test] diff --git a/neqo-common/src/incrdecoder.rs b/neqo-common/src/incrdecoder.rs index 43a135f497..340e0ec5e4 100644 --- a/neqo-common/src/incrdecoder.rs +++ b/neqo-common/src/incrdecoder.rs @@ -31,7 +31,7 @@ impl IncrementalDecoderUint { if amount < 8 { self.v <<= amount * 8; } - self.v |= dv.decode_uint(amount).unwrap(); + self.v |= dv.decode_n(amount).unwrap(); *r -= amount; if *r == 0 { Some(self.v) @@ -39,7 +39,7 @@ impl IncrementalDecoderUint { None } } else { - let (v, remaining) = dv.decode_byte().map_or_else( + let (v, remaining) = dv.decode_uint::().map_or_else( || unreachable!(), |b| { ( diff --git a/neqo-http3/src/frames/hframe.rs b/neqo-http3/src/frames/hframe.rs index a60b6e9481..707242ae48 100644 --- a/neqo-http3/src/frames/hframe.rs +++ b/neqo-http3/src/frames/hframe.rs @@ -87,7 +87,9 @@ impl HFrame { Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST, Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH, Self::Grease => { - HFrameType(Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21) + let r = Decoder::from(&random::<8>()).decode_uint::().unwrap(); + // Zero out the top 7 bits: 2 for being a varint; 5 to account for the *0x1f. 
+ HFrameType((r >> 7) * 0x1f + 0x21) } } } diff --git a/neqo-http3/src/frames/wtframe.rs b/neqo-http3/src/frames/wtframe.rs index 519d318d5d..4a962b5ba5 100644 --- a/neqo-http3/src/frames/wtframe.rs +++ b/neqo-http3/src/frames/wtframe.rs @@ -37,8 +37,7 @@ impl FrameDecoder for WebTransportFrame { if frame_len > WT_FRAME_CLOSE_MAX_MESSAGE_SIZE + 4 { return Err(Error::HttpMessageError); } - let error = - u32::try_from(dec.decode_uint(4).ok_or(Error::HttpMessageError)?).unwrap(); + let error = dec.decode_uint().ok_or(Error::HttpMessageError)?; let Ok(message) = String::from_utf8(dec.decode_remainder().to_vec()) else { return Err(Error::HttpMessageError); }; diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index 2570134dc3..5cb9341e0b 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -166,9 +166,9 @@ impl AddressValidation { let peer_addr = Self::encode_aad(peer_address, retry); let data = self.self_encrypt.open(peer_addr.as_ref(), token).ok()?; let mut dec = Decoder::new(&data); - match dec.decode_uint(4) { + match dec.decode_uint::() { Some(d) => { - let end = self.start_time + Duration::from_millis(d); + let end = self.start_time + Duration::from_millis(u64::from(d)); if end < now { qtrace!("Expired token: {:?} vs. {:?}", end, now); return None; diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index bb8098abaa..1d19f1abe7 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -731,9 +731,10 @@ impl Connection { ); let mut dec = Decoder::from(token.as_ref()); - let version = Version::try_from(u32::try_from( - dec.decode_uint(4).ok_or(Error::InvalidResumptionToken)?, - )?)?; + let version = Version::try_from( + dec.decode_uint::() + .ok_or(Error::InvalidResumptionToken)?, + )?; qtrace!([self], " version {:?}", version); if !self.conn_params.get_versions().all().contains(&version) { return Err(Error::DisabledVersion); diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index af933a3455..d0d191f9e3 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -629,7 +629,7 @@ impl<'a> Frame<'a> { return Err(Error::FrameEncodingError); } let delay = dv(dec)?; - let ignore_order = match d(dec.decode_uint(1))? { + let ignore_order = match d(dec.decode_uint::())? { 0 => false, 1 => true, _ => return Err(Error::FrameEncodingError), diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 8f7ace63f1..7512a181e6 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -610,7 +610,7 @@ impl<'a> PublicPacket<'a> { #[allow(clippy::similar_names)] pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> { let mut decoder = Decoder::new(data); - let first = Self::opt(decoder.decode_byte())?; + let first = Self::opt(decoder.decode_uint::())?; if first & 0x80 == PACKET_BIT_SHORT { // Conveniently, this also guarantees that there is enough space @@ -638,7 +638,7 @@ impl<'a> PublicPacket<'a> { } // Generic long header. 
- let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?)?; + let version = Self::opt(decoder.decode_uint())?; let dcid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); let scid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); @@ -893,7 +893,7 @@ impl<'a> PublicPacket<'a> { let mut decoder = Decoder::new(&self.data[self.header_len..]); let mut res = Vec::new(); while decoder.remaining() > 0 { - let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?)?; + let version = Self::opt(decoder.decode_uint::())?; res.push(version); } Ok(res) diff --git a/neqo-transport/src/shuffle.rs b/neqo-transport/src/shuffle.rs index 0c2f0ae04e..fd81190c27 100644 --- a/neqo-transport/src/shuffle.rs +++ b/neqo-transport/src/shuffle.rs @@ -21,29 +21,33 @@ pub fn find_sni(buf: &[u8]) -> Option> { } #[must_use] - fn skip_vec(dec: &mut Decoder) -> Option<()> { - let len = dec.decode_uint(N)?.try_into().ok()?; - skip(dec, len) + fn skip_vec(dec: &mut Decoder) -> Option<()> + where + T: TryFrom, + usize: TryFrom, + { + let len = dec.decode_uint::()?; + skip(dec, usize::try_from(len).ok()?) } let mut dec = Decoder::from(buf); // Return if buf is empty or does not contain a ClientHello (first byte == 1) - if buf.is_empty() || dec.decode_byte()? != 1 { + if buf.is_empty() || dec.decode_uint::()? != 1 { return None; } skip(&mut dec, 3 + 2 + 32)?; // Skip length, version, random - skip_vec::<1>(&mut dec)?; // Skip session_id - skip_vec::<2>(&mut dec)?; // Skip cipher_suites - skip_vec::<1>(&mut dec)?; // Skip compression_methods + skip_vec::(&mut dec)?; // Skip session_id + skip_vec::(&mut dec)?; // Skip cipher_suites + skip_vec::(&mut dec)?; // Skip compression_methods skip(&mut dec, 2)?; while dec.remaining() >= 4 { - let ext_type: u16 = dec.decode_uint(2)?.try_into().ok()?; - let ext_len: u16 = dec.decode_uint(2)?.try_into().ok()?; + let ext_type: u16 = dec.decode_uint()?; + let ext_len: u16 = dec.decode_uint()?; if ext_type == 0 { // SNI! - let sni_len: u16 = dec.decode_uint(2)?.try_into().ok()?; + let sni_len: u16 = dec.decode_uint()?; skip(&mut dec, 3)?; // Skip name_type and host_name length let start = dec.offset(); let end = start + usize::from(sni_len) - 3; diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 7c8139f7b6..baf11ff7a4 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -185,7 +185,7 @@ impl TransportParameter { fn decode_preferred_address(d: &mut Decoder) -> Res { // IPv4 address (maybe) let v4ip = Ipv4Addr::from(<[u8; 4]>::try_from(d.decode(4).ok_or(Error::NoMoreData)?)?); - let v4port = u16::try_from(d.decode_uint(2).ok_or(Error::NoMoreData)?)?; + let v4port = d.decode_uint::().ok_or(Error::NoMoreData)?; // Can't have non-zero IP and zero port, or vice versa. 
if v4ip.is_unspecified() ^ (v4port == 0) { return Err(Error::TransportParameterError); @@ -200,7 +200,7 @@ impl TransportParameter { let v6ip = Ipv6Addr::from(<[u8; 16]>::try_from( d.decode(16).ok_or(Error::NoMoreData)?, )?); - let v6port = u16::try_from(d.decode_uint(2).ok_or(Error::NoMoreData)?)?; + let v6port = d.decode_uint().ok_or(Error::NoMoreData)?; if v6ip.is_unspecified() ^ (v6port == 0) { return Err(Error::TransportParameterError); } @@ -229,11 +229,11 @@ impl TransportParameter { fn decode_versions(dec: &mut Decoder) -> Res { fn dv(dec: &mut Decoder) -> Res { - let v = dec.decode_uint(4).ok_or(Error::NoMoreData)?; + let v = dec.decode_uint::().ok_or(Error::NoMoreData)?; if v == 0 { Err(Error::TransportParameterError) } else { - Ok(WireVersion::try_from(v)?) + Ok(v) } } @@ -457,8 +457,7 @@ impl TransportParameters { let rbuf = random::<4>(); let mut other = Vec::with_capacity(versions.all().len() + 1); let mut dec = Decoder::new(&rbuf); - let grease = - (u32::try_from(dec.decode_uint(4).unwrap()).unwrap()) & 0xf0f0_f0f0 | 0x0a0a_0a0a; + let grease = dec.decode_uint::().unwrap() & 0xf0f0_f0f0 | 0x0a0a_0a0a; other.push(grease); for &v in versions.all() { if role == Role::Client && !versions.initial().is_compatible(v) { diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 565b9bfd4d..20baea811d 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -1062,7 +1062,7 @@ mod tests { assert_eq!(stats.ack, 1); let mut dec = builder.as_decoder(); - _ = dec.decode_byte().unwrap(); // Skip the short header. + dec.skip(1); // Skip the short header. let frame = Frame::decode(&mut dec).unwrap(); if let Frame::Ack { ack_ranges, .. } = frame { assert_eq!(ack_ranges.len(), 0); diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index 12078a8960..8ff6511e0b 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -15,6 +15,7 @@ use neqo_crypto::{ }; use neqo_transport::{ server::{ConnectionRef, Server, ValidateAddress}, + version::WireVersion, CloseReason, Connection, ConnectionParameters, Error, Output, State, StreamType, Version, MIN_INITIAL_PACKET_SIZE, }; @@ -584,13 +585,13 @@ fn version_negotiation_ignored() { let vn = vn.expect("a vn packet"); let mut dec = Decoder::from(&vn[1..]); // Skip first byte. 
- assert_eq!(dec.decode_uint(4).expect("VN"), 0); + assert_eq!(dec.decode_uint::().expect("VN"), 0); assert_eq!(dec.decode_vec(1).expect("VN DCID"), &s_cid[..]); assert_eq!(dec.decode_vec(1).expect("VN SCID"), &d_cid[..]); let mut found = false; while dec.remaining() > 0 { - let v = dec.decode_uint(4).expect("supported version"); - found |= v == u64::from(Version::default().wire_version()); + let v = dec.decode_uint::().expect("supported version"); + found |= v == Version::default().wire_version(); } assert!(found, "valid version not found"); diff --git a/test-fixture/src/assertions.rs b/test-fixture/src/assertions.rs index 52d0194cbb..4cb3732258 100644 --- a/test-fixture/src/assertions.rs +++ b/test-fixture/src/assertions.rs @@ -14,8 +14,7 @@ use crate::{DEFAULT_ADDR, DEFAULT_ADDR_V4}; const PACKET_TYPE_MASK: u8 = 0b1011_0000; fn assert_default_version(dec: &mut Decoder) -> Version { - let version = - Version::try_from(WireVersion::try_from(dec.decode_uint(4).unwrap()).unwrap()).unwrap(); + let version = Version::try_from(dec.decode_uint::().unwrap()).unwrap(); assert!(version == Version::Version1 || version == Version::Version2); version } @@ -38,8 +37,12 @@ fn assert_long_packet_type(b: u8, v1_expected: u8, version: Version) { /// If this is not a long header packet with the given version. pub fn assert_version(payload: &[u8], v: u32) { let mut dec = Decoder::from(payload); - assert_eq!(dec.decode_byte().unwrap() & 0x80, 0x80, "is long header"); - assert_eq!(dec.decode_uint(4).unwrap(), u64::from(v)); + assert_eq!( + dec.decode_uint::().unwrap() & 0x80, + 0x80, + "is long header" + ); + assert_eq!(dec.decode_uint::().unwrap(), v); } /// Simple checks for a Version Negotiation packet. @@ -49,8 +52,12 @@ pub fn assert_version(payload: &[u8], v: u32) { /// If this is clearly not a Version Negotiation packet. pub fn assert_vn(payload: &[u8]) { let mut dec = Decoder::from(payload); - assert_eq!(dec.decode_byte().unwrap() & 0x80, 0x80, "is long header"); - assert_eq!(dec.decode_uint(4).unwrap(), 0); + assert_eq!( + dec.decode_uint::().unwrap() & 0x80, + 0x80, + "is long header" + ); + assert_eq!(dec.decode_uint::().unwrap(), 0); dec.skip_vec(1); // DCID dec.skip_vec(1); // SCID assert_eq!(dec.remaining() % 4, 0); @@ -64,7 +71,7 @@ pub fn assert_vn(payload: &[u8]) { pub fn assert_coalesced_0rtt(payload: &[u8]) { assert!(payload.len() >= MIN_INITIAL_PACKET_SIZE); let mut dec = Decoder::from(payload); - let initial_type = dec.decode_byte().unwrap(); // Initial + let initial_type = dec.decode_uint::().unwrap(); // Initial let version = assert_default_version(&mut dec); assert_long_packet_type(initial_type, 0b1000_0000, version); dec.skip_vec(1); // DCID @@ -72,7 +79,7 @@ pub fn assert_coalesced_0rtt(payload: &[u8]) { dec.skip_vvec(); let initial_len = dec.decode_varint().unwrap(); dec.skip(initial_len.try_into().unwrap()); - let zrtt_type = dec.decode_byte().unwrap(); + let zrtt_type = dec.decode_uint::().unwrap(); assert_long_packet_type(zrtt_type, 0b1001_0000, version); } @@ -81,7 +88,7 @@ pub fn assert_coalesced_0rtt(payload: &[u8]) { /// If the tests fail. pub fn assert_retry(payload: &[u8]) { let mut dec = Decoder::from(payload); - let t = dec.decode_byte().unwrap(); + let t = dec.decode_uint::().unwrap(); let version = assert_default_version(&mut dec); assert_long_packet_type(t, 0b1011_0000, version); } @@ -93,7 +100,7 @@ pub fn assert_retry(payload: &[u8]) { /// If the tests fail. 
pub fn assert_initial(payload: &[u8], expect_token: bool) { let mut dec = Decoder::from(payload); - let t = dec.decode_byte().unwrap(); + let t = dec.decode_uint::().unwrap(); let version = assert_default_version(&mut dec); assert_long_packet_type(t, 0b1000_0000, version); dec.skip_vec(1); // Destination Connection ID. @@ -109,7 +116,7 @@ pub fn assert_initial(payload: &[u8], expect_token: bool) { /// If the tests fail. pub fn assert_handshake(payload: &[u8]) { let mut dec = Decoder::from(payload); - let t = dec.decode_byte().unwrap(); + let t = dec.decode_uint::().unwrap(); let version = assert_default_version(&mut dec); assert_long_packet_type(t, 0b1010_0000, version); } @@ -119,7 +126,7 @@ pub fn assert_handshake(payload: &[u8]) { /// If the tests fail. pub fn assert_no_1rtt(payload: &[u8]) { let mut dec = Decoder::from(payload); - while let Some(b1) = dec.decode_byte() { + while let Some(b1) = dec.decode_uint::() { // If this is just padding, that's OK. Check. if payload.iter().skip(dec.offset()).all(|b| *b == 0) { return; diff --git a/test-fixture/src/header_protection.rs b/test-fixture/src/header_protection.rs index f4e0338421..3b7b8b0dc2 100644 --- a/test-fixture/src/header_protection.rs +++ b/test-fixture/src/header_protection.rs @@ -120,11 +120,10 @@ pub fn remove_header_protection(hp: &HpKey, header: &[u8], payload: &[u8]) -> (V // Trim down to size. fixed_header.truncate(pn_offset + pn_len); // The packet number should be 1. - let pn = Decoder::new(&fixed_header[pn_offset..]) - .decode_uint(pn_len) - .unwrap(); - - (fixed_header, pn) + // This doesn't use a `Decoder` because the public API can't handle a three byte packet number. + let mut pn = [0; 8]; + pn[8 - pn_len..].copy_from_slice(&fixed_header[pn_offset..pn_offset + pn_len]); + (fixed_header, u64::from_be_bytes(pn)) } #[allow(clippy::missing_panics_doc)] diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 97b68e50c6..f2a86f6793 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -329,8 +329,8 @@ fn split_packet(buf: &[u8]) -> (&[u8], Option<&[u8]>) { return (buf, None); } let mut dec = Decoder::from(buf); - let first = dec.decode_byte().unwrap(); - let v = Version::try_from(WireVersion::try_from(dec.decode_uint(4).unwrap()).unwrap()).unwrap(); // Version. + let first: u8 = dec.decode_uint().unwrap(); + let v = Version::try_from(dec.decode_uint::().unwrap()).unwrap(); // Version. let (initial_type, retry_type) = if v == Version::Version2 { (0b1001_0000, 0b1000_0000) } else { diff --git a/test-fixture/src/sim/rng.rs b/test-fixture/src/sim/rng.rs index dd94798252..4bbb6b5aca 100644 --- a/test-fixture/src/sim/rng.rs +++ b/test-fixture/src/sim/rng.rs @@ -21,10 +21,10 @@ impl Random { let mut dec = Decoder::from(&seed); Self { state: [ - dec.decode_uint(8).unwrap(), - dec.decode_uint(8).unwrap(), - dec.decode_uint(8).unwrap(), - dec.decode_uint(8).unwrap(), + dec.decode_uint().unwrap(), + dec.decode_uint().unwrap(), + dec.decode_uint().unwrap(), + dec.decode_uint().unwrap(), ], } } From a87b379941c0d5795198ed4b35c49aba1950895e Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 8 Dec 2024 23:21:00 +0100 Subject: [PATCH 07/23] fix(sim): correct `Waiting` state comparison in `NodeHolder::ready()` (#2263) A `Node` (e.g. a `Client`, `Server` or `TailDrop` router) can be in 3 states: ``` rust enum NodeState { /// The node just produced a datagram. It should be activated again as soon as possible. Active, /// The node is waiting. Waiting(Instant), /// The node became idle. 
Idle, } ``` `NodeHolder::ready()` determines whether a `Node` is ready to be processed again. When `NodeState::Waiting`, it should only be ready when `t <= now`, i.e. the waiting time has passed, not `t >= now`. ``` rust impl NodeHolder { fn ready(&self, now: Instant) -> bool { match self.state { Active => true, Waiting(t) => t <= now, // not >= Idle => false, } } } ``` The previous behavior led to wasteful non-ready `Node`s being processed and thus a large test runtime when e.g. simulating a gbit connection (https://github.com/mozilla/neqo/pull/2203). --- test-fixture/src/sim/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-fixture/src/sim/mod.rs b/test-fixture/src/sim/mod.rs index 14c21dd75c..5969d0b282 100644 --- a/test-fixture/src/sim/mod.rs +++ b/test-fixture/src/sim/mod.rs @@ -110,7 +110,7 @@ impl NodeHolder { fn ready(&self, now: Instant) -> bool { match self.state { Active => true, - Waiting(t) => t >= now, + Waiting(t) => t <= now, Idle => false, } } From 2f91b41ef9870e4a71988fdb35e4b8fe419f55ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 10:37:39 +0200 Subject: [PATCH 08/23] build(deps): bump codecov/codecov-action from 5.0.7 to 5.1.1 (#2272) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.0.7 to 5.1.1. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/015f24e6818733317a2da2edd6290ab26238649a...7f8b4b4bde536c465e797be725718b88c5d95e0e) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 076db64d82..36f21a97eb 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -111,7 +111,7 @@ jobs: RUST_LOG: warn BUILD_DIR: ${{ matrix.type == 'release' && 'release' || 'debug' }} - - uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # v5.0.7 + - uses: codecov/codecov-action@7f8b4b4bde536c465e797be725718b88c5d95e0e # v5.1.1 with: files: lcov.info fail_ci_if_error: false From f3ece05b1da8cf5da737096a0c09d94ef5650a43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 10:37:55 +0200 Subject: [PATCH 09/23] build(deps): bump actions/cache from 4.1.2 to 4.2.0 (#2271) * build(deps): bump actions/cache from 4.1.2 to 4.2.0 Bumps [actions/cache](https://github.com/actions/cache) from 4.1.2 to 4.2.0. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/6849a6489940f00c2f30c0fb92c6274307ccb58a...1bd1e32a3bdc45362d1e726936510720a7c30a57) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ...
Signed-off-by: dependabot[bot] * Add action --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Lars Eggert --- .github/actions/nss/action.yml | 2 +- .github/workflows/bench.yml | 4 ++-- .github/workflows/qns.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 69f1737d42..5e22f4555c 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -111,7 +111,7 @@ runs: - name: Cache NSS id: cache if: env.USE_SYSTEM_NSS == '0' && runner.environment == 'github-hosted' - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: dist key: nss-${{ runner.os }}-${{ inputs.type }}-${{ env.NSS_HEAD }}-${{ env.NSPR_HEAD }} diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index c763f576ff..4424e9fd2e 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -86,7 +86,7 @@ jobs: - name: Download cached main-branch results id: criterion-cache - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ./target/criterion key: criterion-${{ runner.name }}-${{ github.sha }} @@ -269,7 +269,7 @@ jobs: - name: Cache main-branch results if: github.ref == 'refs/heads/main' - uses: actions/cache/save@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ./target/criterion key: criterion-${{ runner.name }}-${{ github.sha }} diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index af61cbdac0..23705566ab 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -167,7 +167,7 @@ jobs: pattern: '*results' path: results - - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + - uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: results-main key: qns-${{ github.sha }} @@ -264,7 +264,7 @@ jobs: echo "${{ github.sha }}" > results-main/baseline-sha.txt - if: github.ref == 'refs/heads/main' - uses: actions/cache/save@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: results-main key: qns-${{ github.sha }} From 7c36a4dd38fa45fe23e9ef444694d29df80008c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 11:06:30 +0200 Subject: [PATCH 10/23] build(deps): bump alpine in /taskcluster/docker/linux (#2273) Bumps alpine from `1e42bbe` to `21dc606`. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- taskcluster/docker/linux/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/taskcluster/docker/linux/Dockerfile b/taskcluster/docker/linux/Dockerfile index eb9003d3d5..187b962e81 100644 --- a/taskcluster/docker/linux/Dockerfile +++ b/taskcluster/docker/linux/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:latest@sha256:1e42bbe2508154c9126d48c2b8a75420c3544343bf86fd041fb7527e017a4b4a +FROM alpine:latest@sha256:21dc6063fd678b478f57c0e13f47560d0ea4eeba26dfc947b2a4f81f686b9f45 LABEL maintainer="Mozilla Release Engineering " # Add worker user From 9a2571679473b1fff56283d95f1bdcf12fe2cfe1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 11:06:59 +0200 Subject: [PATCH 11/23] build(deps): bump lukemathwalker/cargo-chef in /qns (#2274) Bumps lukemathwalker/cargo-chef from `315e442` to `f1fdce8`. --- updated-dependencies: - dependency-name: lukemathwalker/cargo-chef dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- qns/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qns/Dockerfile b/qns/Dockerfile index 0812e673da..42b011721c 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -1,4 +1,4 @@ -FROM lukemathwalker/cargo-chef@sha256:315e44286168bbabc24c27c3af74a84c8208d362b70721c152d3e99228b3c486 AS chef +FROM lukemathwalker/cargo-chef@sha256:f1fdce8dbd1a91c2bc2aff69a7f48bd9e859d9781bb35c87d556b3851587364b AS chef WORKDIR /app From 40c295f76c9f294d43e41e2a52df3ededbdd0112 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 9 Dec 2024 13:10:56 +0200 Subject: [PATCH 12/23] `invalid_webtransport_connect_with_status` --- neqo-http3/src/headers_checks.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/neqo-http3/src/headers_checks.rs b/neqo-http3/src/headers_checks.rs index e5678dc28c..18ab63866b 100644 --- a/neqo-http3/src/headers_checks.rs +++ b/neqo-http3/src/headers_checks.rs @@ -224,4 +224,18 @@ mod tests { fn valid_webtransport_connect() { assert!(headers_valid(&create_connect_headers(), MessageType::Request).is_ok()); } + + #[test] + fn invalid_webtransport_connect_with_status() { + assert!(headers_valid( + [ + create_connect_headers(), + vec![Header::new(":status", "200")] + ] + .concat() + .as_slice(), + MessageType::Request + ) + .is_err()); + } } From cf6d5370e65abb5689249a784242c42688f67318 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 9 Dec 2024 14:54:10 +0100 Subject: [PATCH 13/23] feat(ecn): expose ECN path validation outcome in Stats (#2270) Expose the ECN path validation outcome via `neqo_transport::Stats`. 
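As a minimal usage sketch (added for illustration only, not part of this patch; it assumes an established `neqo_transport::Connection` named `conn` and a hypothetical helper name `log_ecn_outcomes`), the new per-outcome counters can be read roughly like this:

```rust
// Hedged sketch: read the per-outcome ECN path validation counters that this
// change exposes on `Stats` as the `ecn_path_validation` field.
fn log_ecn_outcomes(conn: &neqo_transport::Connection) {
    let stats = conn.stats();
    for (outcome, count) in stats.ecn_path_validation.iter() {
        // `outcome` is either Capable or NotCapable(reason); `count` is how
        // many path validations ended with that outcome.
        println!("ECN path validation {outcome:?}: {count}");
    }
}
```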
--- neqo-transport/src/connection/tests/ecn.rs | 10 ++-- neqo-transport/src/ecn.rs | 62 ++++++++++++++++------ neqo-transport/src/path.rs | 3 +- neqo-transport/src/stats.rs | 15 +++--- 4 files changed, 64 insertions(+), 26 deletions(-) diff --git a/neqo-transport/src/connection/tests/ecn.rs b/neqo-transport/src/connection/tests/ecn.rs index 3b5bd5bc19..0b947c4d38 100644 --- a/neqo-transport/src/connection/tests/ecn.rs +++ b/neqo-transport/src/connection/tests/ecn.rs @@ -19,7 +19,7 @@ use crate::{ new_server, send_and_receive, send_something, send_something_with_modifier, send_with_modifier_and_receive, DEFAULT_RTT, }, - ecn::ECN_TEST_COUNT, + ecn::{EcnValidationOutcome, ECN_TEST_COUNT}, path::MAX_PATH_PROBES, ConnectionId, ConnectionParameters, StreamType, }; @@ -154,8 +154,12 @@ fn stats() { } for stats in [client.stats(), server.stats()] { - assert_eq!(stats.ecn_paths_capable, 1); - assert_eq!(stats.ecn_paths_not_capable, 0); + for (outcome, count) in stats.ecn_path_validation.iter() { + match outcome { + EcnValidationOutcome::Capable => assert_eq!(*count, 1), + EcnValidationOutcome::NotCapable(_) => assert_eq!(*count, 0), + } + } for codepoint in [IpTosEcn::Ect1, IpTosEcn::Ce] { assert_eq!(stats.ecn_tx[codepoint], 0); diff --git a/neqo-transport/src/ecn.rs b/neqo-transport/src/ecn.rs index 97d68fb6c1..944c63ac8a 100644 --- a/neqo-transport/src/ecn.rs +++ b/neqo-transport/src/ecn.rs @@ -6,7 +6,7 @@ use std::ops::{AddAssign, Deref, DerefMut, Sub}; -use enum_map::EnumMap; +use enum_map::{Enum, EnumMap}; use neqo_common::{qdebug, qinfo, qwarn, IpTosEcn}; use crate::{ @@ -37,7 +37,7 @@ enum EcnValidationState { /// The validation test has concluded but the path's ECN capability is not yet known. Unknown, /// The path is known to **not** be ECN capable. - Failed, + Failed(EcnValidationError), /// The path is known to be ECN capable. Capable, } @@ -57,13 +57,15 @@ impl EcnValidationState { match old { Self::Testing { .. } | Self::Unknown => {} - Self::Failed => debug_assert!(false, "Failed is a terminal state"), - Self::Capable => stats.ecn_paths_capable -= 1, + Self::Failed(_) => debug_assert!(false, "Failed is a terminal state"), + Self::Capable => stats.ecn_path_validation[EcnValidationOutcome::Capable] -= 1, } match new { Self::Testing { .. } | Self::Unknown => {} - Self::Failed => stats.ecn_paths_not_capable += 1, - Self::Capable => stats.ecn_paths_capable += 1, + Self::Failed(error) => { + stats.ecn_path_validation[EcnValidationOutcome::NotCapable(error)] += 1; + } + Self::Capable => stats.ecn_path_validation[EcnValidationOutcome::Capable] += 1, } } } @@ -125,6 +127,36 @@ impl AddAssign for EcnCount { } } +#[derive(PartialEq, Eq, Debug, Clone, Copy, Default)] +pub struct EcnValidationCount(EnumMap); + +impl Deref for EcnValidationCount { + type Target = EnumMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for EcnValidationCount { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[derive(Debug, Clone, Copy, Enum, PartialEq, Eq)] +pub enum EcnValidationError { + BlackHole, + Bleaching, + ReceivedUnsentECT1, +} + +#[derive(Debug, Clone, Copy, Enum, PartialEq, Eq)] +pub enum EcnValidationOutcome { + Capable, + NotCapable(EcnValidationError), +} + #[derive(Debug, Default)] pub struct EcnInfo { /// The current state of ECN validation on this path. @@ -164,8 +196,8 @@ impl EcnInfo { } /// Disable ECN. 
- pub fn disable_ecn(&mut self, stats: &mut Stats) { - self.state.set(EcnValidationState::Failed, stats); + pub fn disable_ecn(&mut self, stats: &mut Stats, reason: EcnValidationError) { + self.state.set(EcnValidationState::Failed(reason), stats); } /// Process ECN counts from an ACK frame. @@ -202,7 +234,7 @@ impl EcnInfo { "ECN validation failed, all {} initial marked packets were lost", probes_lost ); - self.disable_ecn(stats); + self.disable_ecn(stats, EcnValidationError::BlackHole); } } } @@ -220,7 +252,7 @@ impl EcnInfo { // > (see Section 13.4.2.1) causes the ECN state for the path to become "capable", unless // > no marked packet has been acknowledged. match self.state { - EcnValidationState::Testing { .. } | EcnValidationState::Failed => return, + EcnValidationState::Testing { .. } | EcnValidationState::Failed(_) => return, EcnValidationState::Unknown | EcnValidationState::Capable => {} } @@ -245,7 +277,7 @@ impl EcnInfo { // > corresponding ECN counts are not present in the ACK frame. let Some(ack_ecn) = ack_ecn else { qwarn!("ECN validation failed, no ECN counts in ACK frame"); - self.disable_ecn(stats); + self.disable_ecn(stats, EcnValidationError::Bleaching); return; }; @@ -262,7 +294,7 @@ impl EcnInfo { .unwrap(); if newly_acked_sent_with_ect0 == 0 { qwarn!("ECN validation failed, no ECT(0) packets were newly acked"); - self.disable_ecn(stats); + self.disable_ecn(stats, EcnValidationError::Bleaching); return; } let ecn_diff = ack_ecn - self.baseline; @@ -273,10 +305,10 @@ impl EcnInfo { sum_inc, newly_acked_sent_with_ect0 ); - self.disable_ecn(stats); + self.disable_ecn(stats, EcnValidationError::Bleaching); } else if ecn_diff[IpTosEcn::Ect1] > 0 { qwarn!("ECN validation failed, ACK counted ECT(1) marks that were never sent"); - self.disable_ecn(stats); + self.disable_ecn(stats, EcnValidationError::ReceivedUnsentECT1); } else if self.state != EcnValidationState::Capable { qinfo!("ECN validation succeeded, path is capable"); self.state.set(EcnValidationState::Capable, stats); @@ -290,7 +322,7 @@ impl EcnInfo { pub const fn ecn_mark(&self) -> IpTosEcn { match self.state { EcnValidationState::Testing { .. } | EcnValidationState::Capable => IpTosEcn::Ect0, - EcnValidationState::Failed | EcnValidationState::Unknown => IpTosEcn::NotEct, + EcnValidationState::Failed(_) | EcnValidationState::Unknown => IpTosEcn::NotEct, } } } diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 7d0aaf62a6..5513ee651a 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -733,7 +733,8 @@ impl Path { [self], "Possible ECN blackhole, disabling ECN and re-probing path" ); - self.ecn_info.disable_ecn(stats); + self.ecn_info + .disable_ecn(stats, crate::ecn::EcnValidationError::BlackHole); ProbeState::ProbeNeeded { probe_count: 0 } } else { qinfo!([self], "Probing failed"); diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index a8c35c1c66..af8ae69a70 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -16,7 +16,10 @@ use std::{ use neqo_common::qwarn; -use crate::{ecn::EcnCount, packet::PacketNumber}; +use crate::{ + ecn::{EcnCount, EcnValidationCount}, + packet::PacketNumber, +}; pub const MAX_PTO_COUNTS: usize = 16; @@ -194,10 +197,8 @@ pub struct Stats { pub datagram_tx: DatagramStats, - /// Number of paths known to be ECN capable. - pub ecn_paths_capable: usize, - /// Number of paths known to be ECN incapable. - pub ecn_paths_not_capable: usize, + /// ECN path validation count, indexed by validation outcome. 
+ pub ecn_path_validation: EcnValidationCount, /// ECN counts for outgoing UDP datagrams, returned by remote through QUIC ACKs. /// /// Note: Given that QUIC ACKs only carry [`Ect0`], [`Ect1`] and [`Ce`], but @@ -271,8 +272,8 @@ impl Debug for Stats { self.frame_tx.fmt(f)?; writeln!( f, - " ecn: {:?} for tx {:?} for rx {} capable paths {} not capable paths", - self.ecn_tx, self.ecn_rx, self.ecn_paths_capable, self.ecn_paths_not_capable + " ecn: {:?} for tx {:?} for rx {:?} path validation outcomes", + self.ecn_tx, self.ecn_rx, self.ecn_path_validation, ) } } From 7d42973cdb9ec7b899c9cd95539c6f0b91554ceb Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 9 Dec 2024 20:59:53 +0200 Subject: [PATCH 14/23] chore: Simplify some webtransport checks (#2267) * chore: Simplify some webtransport checks So this just uses it. While I'm here, eliminate some code redundancies. Fixes #1255 * Add some helper functions for headers This should reduce the cost of searching headers a little. --------- Co-authored-by: Martin Thomson --- neqo-bin/src/server/http3.rs | 9 +++---- neqo-common/src/header.rs | 26 +++++++++++++++++++ .../tests/webtransport/mod.rs | 20 +++++++------- .../tests/webtransport/sessions.rs | 24 ++++------------- neqo-http3/src/headers_checks.rs | 7 +++-- neqo-http3/src/recv_message.rs | 10 +++---- neqo-http3/tests/webtransport.rs | 12 +++------ 7 files changed, 54 insertions(+), 54 deletions(-) diff --git a/neqo-bin/src/server/http3.rs b/neqo-bin/src/server/http3.rs index 1d1d7a1cf8..bf5a3592e3 100644 --- a/neqo-bin/src/server/http3.rs +++ b/neqo-bin/src/server/http3.rs @@ -12,7 +12,7 @@ use std::{ time::Instant, }; -use neqo_common::{hex, qdebug, qerror, qinfo, Datagram, Header}; +use neqo_common::{header::HeadersExt, hex, qdebug, qerror, qinfo, Datagram, Header}; use neqo_crypto::{generate_ech_keys, random, AntiReplay}; use neqo_http3::{ Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, @@ -94,15 +94,12 @@ impl super::HttpServer for HttpServer { } => { qdebug!("Headers (request={stream} fin={fin}): {headers:?}"); - if headers - .iter() - .any(|h| h.name() == ":method" && h.value() == "POST") - { + if headers.contains_header(":method", "POST") { self.posts.insert(stream, 0); continue; } - let Some(path) = headers.iter().find(|&h| h.name() == ":path") else { + let Some(path) = headers.find_header(":path") else { stream .cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code()) .unwrap(); diff --git a/neqo-common/src/header.rs b/neqo-common/src/header.rs index 73f2e85da1..a480eb67ca 100644 --- a/neqo-common/src/header.rs +++ b/neqo-common/src/header.rs @@ -46,3 +46,29 @@ impl Header { &self.value } } + +impl, U: AsRef> PartialEq<(T, U)> for Header { + fn eq(&self, other: &(T, U)) -> bool { + self.name == other.0.as_ref() && self.value == other.1.as_ref() + } +} + +pub trait HeadersExt<'h> { + fn contains_header, U: AsRef>(self, name: T, value: U) -> bool; + fn find_header + 'h>(self, name: T) -> Option<&'h Header>; +} + +impl<'h, H> HeadersExt<'h> for H +where + H: IntoIterator + 'h, +{ + fn contains_header, U: AsRef>(self, name: T, value: U) -> bool { + let (name, value) = (name.as_ref(), value.as_ref()); + self.into_iter().any(|h| h == &(name, value)) + } + + fn find_header + 'h>(self, name: T) -> Option<&'h Header> { + let name = name.as_ref(); + self.into_iter().find(|h| h.name == name) + } +} diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs index 
4b23277cbc..8c7aaa59eb 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs @@ -10,7 +10,7 @@ mod sessions; mod streams; use std::{cell::RefCell, rc::Rc, time::Duration}; -use neqo_common::event::Provider; +use neqo_common::{event::Provider, header::HeadersExt}; use neqo_crypto::AuthenticationStatus; use neqo_transport::{ConnectionParameters, Pmtud, StreamId, StreamType}; use test_fixture::{ @@ -60,6 +60,13 @@ pub fn default_http3_server(server_params: Http3Parameters) -> Http3Server { .expect("create a server") } +pub fn assert_wt(headers: &[Header]) { + assert!( + headers.contains_header(":method", "CONNECT") + && headers.contains_header(":protocol", "webtransport") + ); +} + fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { @@ -148,14 +155,7 @@ impl WtTest { session, headers, }) => { - assert!( - headers - .iter() - .any(|h| h.name() == ":method" && h.value() == "CONNECT") - && headers - .iter() - .any(|h| h.name() == ":protocol" && h.value() == "webtransport") - ); + assert_wt(&headers); session.response(accept).unwrap(); wt_server_session = Some(session); } @@ -183,7 +183,7 @@ impl WtTest { }) if ( stream_id == wt_session_id && status == 200 && - headers.contains(&Header::new(":status", "200")) + headers.contains_header(":status", "200") ) ) }; diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs index 821d8ac5e3..83650b4b9e 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs @@ -6,14 +6,14 @@ use std::mem; -use neqo_common::{event::Provider, Encoder}; +use neqo_common::{event::Provider, header::HeadersExt, Encoder}; use neqo_transport::StreamType; use test_fixture::now; use crate::{ features::extended_connect::{ tests::webtransport::{ - default_http3_client, default_http3_server, wt_default_parameters, WtTest, + assert_wt, default_http3_client, default_http3_server, wt_default_parameters, WtTest, }, SessionCloseReason, }, @@ -134,14 +134,7 @@ fn wt_session_response_with_1xx() { headers, }) = event { - assert!( - headers - .iter() - .any(|h| h.name() == ":method" && h.value() == "CONNECT") - && headers - .iter() - .any(|h| h.name() == ":protocol" && h.value() == "webtransport") - ); + assert_wt(&headers); wt_server_session = Some(session); } } @@ -168,7 +161,7 @@ fn wt_session_response_with_1xx() { }) if ( stream_id == wt_session_id && status == 200 && - headers.contains(&Header::new(":status", "200")) + headers.contains_header(":status", "200") ) ) }; @@ -209,14 +202,7 @@ fn wt_session_respone_200_with_fin() { headers, }) = event { - assert!( - headers - .iter() - .any(|h| h.name() == ":method" && h.value() == "CONNECT") - && headers - .iter() - .any(|h| h.name() == ":protocol" && h.value() == "webtransport") - ); + assert_wt(&headers); wt_server_session = Some(session); } } diff --git a/neqo-http3/src/headers_checks.rs b/neqo-http3/src/headers_checks.rs index 18ab63866b..7ea880e222 100644 --- a/neqo-http3/src/headers_checks.rs +++ b/neqo-http3/src/headers_checks.rs @@ -5,7 +5,7 @@ // except according to those terms. 
use enumset::{enum_set, EnumSet, EnumSetType}; -use neqo_common::Header; +use neqo_common::{header::HeadersExt, Header}; use crate::{Error, MessageType, Res}; @@ -49,10 +49,9 @@ impl TryFrom<(MessageType, &str)> for PseudoHeaderState { /// Returns an error if response headers do not contain /// a status header or if the value of the header is 101 or cannot be parsed. pub fn is_interim(headers: &[Header]) -> Res { - let status = headers.iter().take(1).find(|h| h.name() == ":status"); - if let Some(h) = status { + if let Some(h) = headers.iter().take(1).find_header(":status") { #[allow(clippy::map_err_ignore)] - let status_code = h.value().parse::().map_err(|_| Error::InvalidHeader)?; + let status_code = h.value().parse::().map_err(|_| Error::InvalidHeader)?; if status_code == 101 { // https://datatracker.ietf.org/doc/html/draft-ietf-quic-http#section-4.3 Err(Error::InvalidHeader) diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index cb85cdb6e6..de8dfcec66 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -6,7 +6,7 @@ use std::{cell::RefCell, cmp::min, collections::VecDeque, fmt::Debug, rc::Rc}; -use neqo_common::{qdebug, qinfo, qtrace, Header}; +use neqo_common::{header::HeadersExt, qdebug, qinfo, qtrace, Header}; use neqo_qpack::decoder::QPackDecoder; use neqo_transport::{Connection, StreamId}; @@ -167,12 +167,8 @@ impl RecvMessage { } let is_web_transport = self.message_type == MessageType::Request - && headers - .iter() - .any(|h| h.name() == ":method" && h.value() == "CONNECT") - && headers - .iter() - .any(|h| h.name() == ":protocol" && h.value() == "webtransport"); + && headers.contains_header(":method", "CONNECT") + && headers.contains_header(":protocol", "webtransport"); if is_web_transport { self.conn_events .extended_connect_new_session(self.stream_id, headers); diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index b2d2d571fb..2197be97e5 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -6,7 +6,7 @@ use std::{cell::RefCell, rc::Rc}; -use neqo_common::{event::Provider, Header}; +use neqo_common::{event::Provider, header::HeadersExt}; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ Http3Client, Http3ClientEvent, Http3OrWebTransportStream, Http3Parameters, Http3Server, @@ -96,12 +96,8 @@ fn create_wt_session(client: &mut Http3Client, server: &mut Http3Server) -> WebT headers, }) => { assert!( - headers - .iter() - .any(|h| h.name() == ":method" && h.value() == "CONNECT") - && headers - .iter() - .any(|h| h.name() == ":protocol" && h.value() == "webtransport") + headers.contains_header(":method", "CONNECT") + && headers.contains_header(":protocol", "webtransport") ); session .response(&WebTransportSessionAcceptAction::Accept) @@ -127,7 +123,7 @@ fn create_wt_session(client: &mut Http3Client, server: &mut Http3Server) -> WebT }) if ( stream_id == wt_session_id && status == 200 && - headers.contains(&Header::new(":status", "200")) + headers.contains_header(":status", "200") ) ) }; From 8578776f3b9ab0949f448e3079a7cbcc574b90d2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 10 Dec 2024 15:26:47 +0200 Subject: [PATCH 15/23] chore: Fix misspellings (#2277) In comments and strings, fix misspellings found by `codespell` and the vscode checker. 
--- neqo-common/src/tos.rs | 2 +- neqo-http3/src/buffered_send_stream.rs | 2 +- neqo-http3/src/connection.rs | 10 +-- neqo-http3/src/connection_client.rs | 90 +++++++++---------- .../tests/webtransport/mod.rs | 4 +- neqo-http3/src/frames/reader.rs | 4 +- neqo-http3/src/frames/tests/mod.rs | 2 +- neqo-http3/src/frames/tests/reader.rs | 8 +- neqo-http3/src/push_controller.rs | 4 +- neqo-http3/src/server.rs | 10 +-- neqo-http3/tests/httpconn.rs | 2 +- neqo-http3/tests/priority.rs | 4 +- neqo-http3/tests/webtransport.rs | 2 +- neqo-qpack/src/encoder.rs | 14 +-- neqo-qpack/src/header_block.rs | 2 +- neqo-qpack/src/prefix.rs | 6 +- neqo-qpack/src/reader.rs | 4 +- neqo-qpack/src/stats.rs | 2 +- neqo-qpack/src/table.rs | 2 +- neqo-transport/src/cc/classic_cc.rs | 2 +- neqo-transport/src/connection/mod.rs | 10 +-- neqo-transport/src/connection/params.rs | 2 +- neqo-transport/src/connection/tests/idle.rs | 6 +- .../src/connection/tests/priority.rs | 6 +- neqo-transport/src/connection/tests/stream.rs | 8 +- neqo-transport/src/lib.rs | 2 +- neqo-transport/src/path.rs | 2 +- neqo-transport/src/quic_datagrams.rs | 2 +- neqo-transport/src/recv_stream.rs | 4 +- neqo-transport/src/streams.rs | 4 +- neqo-transport/src/tparams.rs | 4 +- neqo-transport/src/tracking.rs | 4 +- 32 files changed, 115 insertions(+), 115 deletions(-) diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs index 9661d82ecf..ed6a5d2724 100644 --- a/neqo-common/src/tos.rs +++ b/neqo-common/src/tos.rs @@ -62,7 +62,7 @@ impl IpTosEcn { } } -/// Diffserv Codepoints, mapped to the upper six bits of the TOS field. +/// Diffserv codepoints, mapped to the upper six bits of the TOS field. /// #[derive(Copy, Clone, PartialEq, Eq, Enum, Default, Debug)] #[repr(u8)] diff --git a/neqo-http3/src/buffered_send_stream.rs b/neqo-http3/src/buffered_send_stream.rs index c4a0a3087b..4d1ef3511b 100644 --- a/neqo-http3/src/buffered_send_stream.rs +++ b/neqo-http3/src/buffered_send_stream.rs @@ -49,7 +49,7 @@ impl BufferedStream { /// # Panics /// - /// This functon cannot be called before the `BufferedStream` is initialized. + /// This function cannot be called before the `BufferedStream` is initialized. pub fn buffer(&mut self, to_buf: &[u8]) { if let Self::Initialized { buf, .. } = self { buf.extend_from_slice(to_buf); diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index 3a6698aa20..f3ec804475 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -159,7 +159,7 @@ The API consists of: Each `Http3Connection` holds a list of stream handlers. Each send and receive-handler is registered in `send_streams` and `recv_streams`. Unidirectional streams are registered only on one of the lists and bidirectional streams are registered in both lists and the 2 handlers are independent, e.g. one -can be closed and removed ane second may still be active. +can be closed and removed and second may still be active. The only streams that are not registered are the local control stream, local QPACK decoder stream, and local QPACK encoder stream. These streams are send-streams and sending data on this stream is @@ -195,7 +195,7 @@ are local or remote: type has been decoded. After this point the stream: - will be regegistered with the appropriate handler, - will be canceled if is an unknown stream type or - - the connection will fail if it is unallowed stream type (receiveing HTTP request on the + - the connection will fail if it is unallowed stream type (receiving HTTP request on the client-side). 
The output is handled in `handle_new_stream`, for control, qpack streams and partially @@ -277,7 +277,7 @@ For example for `Http` stream the listener will produce `HeaderReady` and `Da A `WebTransport` session is connected to a control stream that is in essence an HTTP transaction. Therefore, `WebTransportSession` will internally use a `SendMessage` and `RecvMessage` handler to -handle parsing and sending of HTTP part of the control stream. When HTTP headers are exchenged, +handle parsing and sending of HTTP part of the control stream. When HTTP headers are exchanged, `WebTransportSession` will take over handling of stream data. `WebTransportSession` sets `WebTransportSessionListener` as the `RecvMessage` event listener. @@ -501,7 +501,7 @@ impl Http3Connection { /// This function handles reading from all streams, i.e. control, qpack, request/response /// stream and unidi stream that are still do not have a type. /// The function cannot handle: - /// 1) a `Push(_)`, `Htttp` or `WebTransportStream(_)` stream + /// 1) a `Push(_)`, `Http` or `WebTransportStream(_)` stream /// 2) frames `MaxPushId`, `PriorityUpdateRequest`, `PriorityUpdateRequestPush` or `Goaway` must /// be handled by `Http3Client`/`Server`. /// @@ -1236,7 +1236,7 @@ impl Http3Connection { error: u32, message: &str, ) -> Res<()> { - qtrace!("Clos WebTransport session {:?}", session_id); + qtrace!("Close WebTransport session {:?}", session_id); let send_stream = self .send_streams .get_mut(&session_id) diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 253e303ad1..58c5e6f8b1 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -38,7 +38,7 @@ use crate::{ }; // This is used for filtering send_streams and recv_Streams with a stream_ids greater than or equal -// a given id. Only the same type (bidirectional or unidirectionsl) streams are filtered. +// a given id. Only the same type (bidirectional or unidirectional) streams are filtered. fn id_gte(base: StreamId) -> impl FnMut((&StreamId, &U)) -> Option + 'static where U: ?Sized, @@ -226,7 +226,7 @@ const fn alpn_from_quic_version(version: Version) -> &'static str { /// while let Some(event) = client.next_event() { /// match event { /// Http3ClientEvent::DataReadable{ stream_id } => { -/// println!("Data receivedd form the server on WebTransport stream ID {:?}", +/// println!("Data received form the server on WebTransport stream ID {:?}", /// stream_id, /// ); /// let mut buf = [0; 100]; @@ -262,13 +262,13 @@ const fn alpn_from_quic_version(version: Version) -> &'static str { /// session_id, /// }) => { /// println!("New stream received on session{:?}, stream id={:?} stream type={:?}", -/// sesson_id.stream_id(), +/// session_id.stream_id(), /// stream_id.stream_id(), /// stream_id.stream_type() /// ); /// } /// Http3ClientEvent::DataReadable{ stream_id } => { -/// println!("Data receivedd form the server on WebTransport stream ID {:?}", +/// println!("Data received form the server on WebTransport stream ID {:?}", /// stream_id, /// ); /// let mut buf = [0; 100]; @@ -419,7 +419,7 @@ impl Http3Client { /// The correct way to obtain a resumption token is to wait for the /// `Http3ClientEvent::ResumptionToken` event. To emit the event we are waiting for a - /// resumtion token and a `NEW_TOKEN` frame to arrive. Some servers don't send `NEW_TOKEN` + /// resumption token and a `NEW_TOKEN` frame to arrive. 
Some servers don't send `NEW_TOKEN` /// frames and in this case, we wait for 3xPTO before emitting an event. This is especially a /// problem for short-lived connections, where the connection is closed before any events are /// released. This function retrieves the token, without waiting for a `NEW_TOKEN` frame to @@ -573,7 +573,7 @@ impl Http3Client { } /// An application may cancel a stream(request). - /// Both sides, the receiviing and sending side, sending and receiving side, will be closed. + /// Both sides, the receiving and sending side, sending and receiving side, will be closed. /// /// # Errors /// @@ -927,7 +927,7 @@ impl Http3Client { /// - a [`Output::Datagram(Datagram)`][1]: data that should be sent as a UDP payload, /// - a [`Output::Callback(Duration)`][1]: the duration of a timer. `process_output` should be /// called at least after the time expires, - /// - [`Output::None`][1]: this is returned when `Nttp3Client` is done and can be destroyed. + /// - [`Output::None`][1]: this is returned when `Http3Client` is done and can be destroyed. /// /// The application should call this function repeatedly until a timer value or None is /// returned. After that, the application should call the function again if a new UDP packet is @@ -1168,7 +1168,7 @@ impl Http3Client { Rc::clone(&self.base_handler.qpack_decoder), Box::new(RecvPushEvents::new(push_id, Rc::clone(&self.push_handler))), None, - // TODO: think about the right prority for the push streams. + // TODO: think about the right priority for the push streams. PriorityHandler::new(true, Priority::default()), )), ); @@ -1655,7 +1655,7 @@ mod tests { out } - // Perform only Quic transport handshake. + // Perform only QUIC transport handshake. fn connect_only_transport_with(client: &mut Http3Client, server: &mut TestServer) { let out = handshake_only(client, server); @@ -1668,7 +1668,7 @@ mod tests { assert!(server.conn.state().connected()); } - // Perform only Quic transport handshake. + // Perform only QUIC transport handshake. fn connect_only_transport() -> (Http3Client, TestServer) { let mut client = default_http3_client(); let mut server = TestServer::new(); @@ -1683,7 +1683,7 @@ mod tests { server.check_client_control_qpack_streams_no_resumption(); } - // Perform Quic transport handshake and exchange Http3 settings. + // Perform QUIC transport handshake and exchange Http3 settings. fn connect_with(client: &mut Http3Client, server: &mut TestServer) { connect_only_transport_with(client, server); @@ -1696,11 +1696,11 @@ mod tests { let out = server.conn.process(None::, now()); client.process_input(out.dgram().unwrap(), now()); - // assert no error occured. + // assert no error occurred. assert_eq!(client.state(), Http3State::Connected); } - // Perform Quic transport handshake and exchange Http3 settings. + // Perform QUIC transport handshake and exchange Http3 settings. fn connect_with_connection_parameters( server_conn_params: ConnectionParameters, ) -> (Http3Client, TestServer) { @@ -1718,7 +1718,7 @@ mod tests { (client, server) } - // Perform Quic transport handshake and exchange Http3 settings. + // Perform QUIC transport handshake and exchange Http3 settings. 
fn connect() -> (Http3Client, TestServer) { let mut client = default_http3_client(); let mut server = TestServer::new(); @@ -2272,19 +2272,19 @@ mod tests { assert_closed(&client, &Error::HttpFrameUnexpected); } - // send DATA frame on a cortrol stream + // send DATA frame on a control stream #[test] fn data_frame_on_control_stream() { test_wrong_frame_on_control_stream(&[0x0, 0x2, 0x1, 0x2]); } - // send HEADERS frame on a cortrol stream + // send HEADERS frame on a control stream #[test] fn headers_frame_on_control_stream() { test_wrong_frame_on_control_stream(&[0x1, 0x2, 0x1, 0x2]); } - // send PUSH_PROMISE frame on a cortrol stream + // send PUSH_PROMISE frame on a control stream #[test] fn push_promise_frame_on_control_stream() { test_wrong_frame_on_control_stream(&[0x5, 0x2, 0x1, 0x2]); @@ -2571,7 +2571,7 @@ mod tests { } } - // after this stream will be removed from hcoon. We will check this by trying to read + // after this stream will be removed from conn. We will check this by trying to read // from the stream and that should fail. let mut buf = [0_u8; 100]; let res = client.read_data(now(), request_stream_id, &mut buf); @@ -2795,7 +2795,7 @@ mod tests { } // Send 2 data frames so that the second one cannot fit into the send_buf and it is only - // partialy sent. We check that the sent data is correct. + // partially sent. We check that the sent data is correct. fn fetch_with_two_data_frames( first_frame: &[u8], expected_first_data_frame_header: &[u8], @@ -3260,7 +3260,7 @@ mod tests { assert!(stop_sending); assert!(header_ready); - // after this, we can sill read data from a sttream. + // after this, we can sill read data from a stream. let mut buf = [0_u8; 100]; let (amount, fin) = client .read_data(now(), request_stream_id, &mut buf) @@ -3598,7 +3598,7 @@ mod tests { let out = server.conn.process_output(now()); client.process(out.dgram(), now()); - // Recv HeaderReady wo headers with fin. + // Recv HeaderReady without headers with fin. let e = client.events().next().unwrap(); assert_eq!( e, @@ -3617,7 +3617,7 @@ mod tests { ); } - // Close stream imemediately after headers. + // Close stream immediately after headers. #[test] fn stream_fin_after_headers() { let (mut client, mut server, request_stream_id) = connect_and_send_request(true); @@ -3660,7 +3660,7 @@ mod tests { #[test] fn stream_fin_after_headers_are_read_wo_data_frame() { let (mut client, mut server, request_stream_id) = connect_and_send_request(true); - // Send some good data wo fin + // Send some good data without fin server_send_response_and_exchange_packet( &mut client, &mut server, @@ -3669,7 +3669,7 @@ mod tests { false, ); - // Recv headers wo fin + // Recv headers without fin while let Some(e) = client.next_event() { match e { Http3ClientEvent::HeaderReady { @@ -3696,7 +3696,7 @@ mod tests { let out = server.conn.process_output(now()); client.process(out.dgram(), now()); - // Recv DataReadable wo data with fin + // Recv DataReadable without data with fin while let Some(e) = client.next_event() { match e { Http3ClientEvent::HeaderReady { .. } => { @@ -3775,11 +3775,11 @@ mod tests { } // Send headers and an empty data frame. Read headers and then close the stream. - // We should get a HeaderReady without fin and a DataReadable wo data and with fin. + // We should get a HeaderReady without fin and a DataReadable without data and with fin. 
#[test] fn stream_fin_after_headers_an_empty_data_frame_are_read() { let (mut client, mut server, request_stream_id) = connect_and_send_request(true); - // Send some good data wo fin + // Send some good data without fin // Send headers. _ = server .conn @@ -3794,7 +3794,7 @@ mod tests { let out = server.conn.process_output(now()); client.process(out.dgram(), now()); - // Recv headers wo fin + // Recv headers without fin while let Some(e) = client.next_event() { match e { Http3ClientEvent::HeaderReady { @@ -3850,7 +3850,7 @@ mod tests { #[test] fn stream_fin_after_a_data_frame() { let (mut client, mut server, request_stream_id) = connect_and_send_request(true); - // Send some good data wo fin + // Send some good data without fin server_send_response_and_exchange_packet( &mut client, &mut server, @@ -3859,7 +3859,7 @@ mod tests { false, ); - // Recv some good data wo fin + // Recv some good data without fin while let Some(e) = client.next_event() { match e { Http3ClientEvent::HeaderReady { @@ -3891,7 +3891,7 @@ mod tests { let out = server.conn.process_output(now()); client.process(out.dgram(), now()); - // fin wo data should generate DataReadable + // fin without data should generate DataReadable let e = client.events().next().unwrap(); if let Http3ClientEvent::DataReadable { stream_id } = e { assert_eq!(stream_id, request_stream_id); @@ -4320,7 +4320,7 @@ mod tests { assert_eq!(make_request(&mut client, false, &[]), 0); } - // Connect to a server, get token and reconnect using 0-rtt. Seerver sends new Settings. + // Connect to a server, get token and reconnect using 0-rtt. Server sends new Settings. fn zero_rtt_change_settings( original_settings: &[HSetting], resumption_settings: &[HSetting], @@ -4903,7 +4903,7 @@ mod tests { ); assert!(!client.events().any(data_readable_event)); - // Now read only until the end of the first frame. The firs framee has 3 bytes. + // Now read only until the end of the first frame. The firs frame has 3 bytes. let mut buf2 = [0_u8; 2]; assert_eq!( (2, false), @@ -5991,13 +5991,13 @@ mod tests { let encoder_inst_pkt = send_push_promise_using_encoder(&mut client, &mut server, request_stream_id, 0); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(!check_push_events(&mut client)); // Let client receive the encoder instructions. let _out = client.process(encoder_inst_pkt, now()); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(check_push_events(&mut client)); } @@ -6020,7 +6020,7 @@ mod tests { let encoder_inst_pkt = send_push_promise_using_encoder(&mut client, &mut server, request_stream_id, 0); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(!check_push_events(&mut client)); // Stream data can be still read @@ -6037,7 +6037,7 @@ mod tests { // Let client receive the encoder instructions. let _out = client.process(encoder_inst_pkt, now()); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(check_push_events(&mut client)); // Stream data can be still read @@ -6062,7 +6062,7 @@ mod tests { let encoder_inst_pkt = send_push_promise_using_encoder(&mut client, &mut server, request_stream_id, 0); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. 
assert!(!check_push_events(&mut client)); // Send response headers @@ -6079,7 +6079,7 @@ mod tests { // Let client receive the encoder instructions. let _out = client.process(encoder_inst_pkt, now()); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(check_push_events(&mut client)); } @@ -6090,7 +6090,7 @@ mod tests { setup_server_side_encoder(&mut client, &mut server); - // Insert an elemet into a dynamic table. + // Insert an element into a dynamic table. // insert "content-length: 1234 server .encoder @@ -6104,7 +6104,7 @@ mod tests { let encoder_inst_pkt2 = send_push_promise_using_encoder(&mut client, &mut server, request_stream_id, 0); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(!check_push_events(&mut client)); let response_headers = vec![ @@ -6165,7 +6165,7 @@ mod tests { Header::new("myn1", "myv1"), ); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(!check_push_events(&mut client)); let encoder_inst_pkt2 = send_push_promise_using_encoder_with_custom_headers( @@ -6176,7 +6176,7 @@ mod tests { Header::new("myn2", "myv2"), ); - // PushPromise is blocked wathing for encoder instructions. + // PushPromise is blocked watching for encoder instructions. assert!(!check_push_events(&mut client)); let response_headers = vec![ @@ -6292,7 +6292,7 @@ mod tests { false, ); - // Headers are blocked waiting fro the encoder instructions. + // Headers are blocked waiting for the encoder instructions. let header_ready_event = |e| matches!(e, Http3ClientEvent::HeaderReady { .. }); assert!(!client.events().any(header_ready_event)); @@ -6833,7 +6833,7 @@ mod tests { true, ); - // Stream has been reset because of thei malformed headers. + // Stream has been reset because of their malformed headers. let push_reset_event = |e| { matches!(e, Http3ClientEvent::PushReset { push_id, diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs index 8c7aaa59eb..718a2715c0 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs @@ -78,7 +78,7 @@ fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { } } -// Perform only Quic transport handshake. +// Perform only QUIC transport handshake. fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { assert_eq!(client.state(), Http3State::Initializing); let out = client.process_output(now()); @@ -99,7 +99,7 @@ fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { assert_eq!(client.state(), Http3State::Connected); - // Exchange H3 setttings + // Exchange H3 settings let out = server.process(out.dgram(), now()); let out = client.process(out.dgram(), now()); let out = server.process(out.dgram(), now()); diff --git a/neqo-http3/src/frames/reader.rs b/neqo-http3/src/frames/reader.rs index bd1f0811b6..6d2c0614b0 100644 --- a/neqo-http3/src/frames/reader.rs +++ b/neqo-http3/src/frames/reader.rs @@ -24,7 +24,7 @@ pub trait FrameDecoder { /// # Errors /// - /// Returns `HttpFrameUnexpected` if frames is not alowed, i.e. is a `H3_RESERVED_FRAME_TYPES`. + /// Returns `HttpFrameUnexpected` if frames is not allowed, i.e. is a `H3_RESERVED_FRAME_TYPES`. 
fn frame_type_allowed(_frame_type: HFrameType) -> Res<()> { Ok(()) } @@ -154,7 +154,7 @@ impl FrameReader { } } - /// returns true if quic stream was closed. + /// Returns true if QUIC stream was closed. /// /// # Errors /// diff --git a/neqo-http3/src/frames/tests/mod.rs b/neqo-http3/src/frames/tests/mod.rs index 5c863242ff..c358cbf936 100644 --- a/neqo-http3/src/frames/tests/mod.rs +++ b/neqo-http3/src/frames/tests/mod.rs @@ -35,7 +35,7 @@ pub fn enc_dec>(d: &Encoder, st: &str, remaining: usize) -> T let mut fr: FrameReader = FrameReader::new(); - // conver string into u8 vector + // convert string into u8 vector let buf = Encoder::from_hex(st); conn_s.stream_send(stream_id, buf.as_ref()).unwrap(); let out = conn_s.process_output(now()); diff --git a/neqo-http3/src/frames/tests/reader.rs b/neqo-http3/src/frames/tests/reader.rs index 46e236ce55..dce2e0cf39 100644 --- a/neqo-http3/src/frames/tests/reader.rs +++ b/neqo-http3/src/frames/tests/reader.rs @@ -128,7 +128,7 @@ fn frame_reading_with_stream_data() { let frame = fr.process(&[0x0, 0x3, 0x1, 0x2, 0x3]).unwrap(); assert!(matches!(frame, HFrame::Data { len } if len == 3)); - // payloead is still on the stream. + // payload is still on the stream. // assert that we have 3 bytes in the stream let mut buf = [0_u8; 100]; let (amount, _) = fr.conn_c.stream_recv(fr.stream_id, &mut buf).unwrap(); @@ -150,7 +150,7 @@ fn unknown_frame() { buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0); assert!(fr.process::(&buf).is_none()); - // now receive a CANCEL_PUSH fram to see that frame reader is ok. + // now receive a CANCEL_PUSH frame to see that frame reader is ok. let frame = fr.process(&[0x03, 0x01, 0x05]); assert!(frame.is_some()); if let HFrame::CancelPush { push_id } = frame.unwrap() { @@ -194,7 +194,7 @@ fn unknown_wt_frame() { buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0); assert!(fr.process::(&buf).is_none()); - // now receive a WT_FRAME_CLOSE_SESSION fram to see that frame reader is ok. + // now receive a WT_FRAME_CLOSE_SESSION frame to see that frame reader is ok. let frame = fr.process(&[ 0x68, 0x43, 0x09, 0x00, 0x00, 0x00, 0x05, 0x48, 0x65, 0x6c, 0x6c, 0x6f, ]); @@ -318,7 +318,7 @@ fn test_complete_and_incomplete_frame + PartialEq + Debug>( done_state: usize, ) { use std::cmp::Ordering; - // Let's consume partial frames. It is enough to test partal frames + // Let's consume partial frames. It is enough to test partial frames // up to 10 byte. 10 byte is greater than frame type and frame // length and bit of data. let len = std::cmp::min(buf.len() - 1, 10); diff --git a/neqo-http3/src/push_controller.rs b/neqo-http3/src/push_controller.rs index 6881ce7174..6efe010c5b 100644 --- a/neqo-http3/src/push_controller.rs +++ b/neqo-http3/src/push_controller.rs @@ -135,7 +135,7 @@ impl ActivePushStreams { /// /// The `PushController` handles: /// `PUSH_PROMISE` frame: frames may change the push state from Init to `PushPromise` and from -/// `OnlyPushStream` to `Active`. Frames for a closed steams are ignored. +/// `OnlyPushStream` to `Active`. Frames for a closed streams are ignored. /// `CANCEL_PUSH` frame: (`handle_cancel_push` will be called). If a push is in state `PushPromise` /// or `Active`, any posted events will be removed and a `PushCanceled` event /// will be posted. 
If a push is in state `OnlyPushStream` or `Active` the @@ -169,7 +169,7 @@ impl PushController { impl Display for PushController { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "Push controler") + write!(f, "Push controller") } } diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index 798fd7c70c..8a5dc8a2f3 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -502,7 +502,7 @@ mod tests { (server, client) } - // Test http3 connection inintialization. + // Test http3 connection initialization. // The server will open the control and qpack streams and send SETTINGS frame. #[test] fn server_connect() { @@ -561,7 +561,7 @@ mod tests { let out2 = server.process(out1.dgram(), now()); mem::drop(neqo_trans_conn.process(out2.dgram(), now())); - // assert no error occured. + // assert no error occurred. assert_not_closed(server); PeerConnection { @@ -685,13 +685,13 @@ mod tests { test_wrong_frame_on_control_stream(&[0x0, 0x2, 0x1, 0x2]); } - // send HEADERS frame on a cortrol stream + // send HEADERS frame on a control stream #[test] fn server_headers_frame_on_control_stream() { test_wrong_frame_on_control_stream(&[0x1, 0x2, 0x1, 0x2]); } - // send PUSH_PROMISE frame on a cortrol stream + // send PUSH_PROMISE frame on a control stream #[test] fn server_push_promise_frame_on_control_stream() { test_wrong_frame_on_control_stream(&[0x5, 0x2, 0x1, 0x2]); @@ -836,7 +836,7 @@ mod tests { fn test_incomplete_frame(res: &[u8]) { let (mut hconn, mut peer_conn) = connect_and_receive_settings(); - // send an incomplete reequest. + // send an incomplete request. let stream_id = peer_conn.stream_create(StreamType::BiDi).unwrap(); peer_conn.stream_send(stream_id, res).unwrap(); peer_conn.stream_close_send(stream_id).unwrap(); diff --git a/neqo-http3/tests/httpconn.rs b/neqo-http3/tests/httpconn.rs index 5e192e01cc..2e52739fcb 100644 --- a/neqo-http3/tests/httpconn.rs +++ b/neqo-http3/tests/httpconn.rs @@ -298,7 +298,7 @@ fn data_writable_events_low_watermark() -> Result<(), Box exchange_packets(&mut hconn_c, &mut hconn_s, None); assert!(hconn_s.events().any(data_writable)); - // Sending more fails, given that each data frame needs to be preceeded by a + // Sending more fails, given that each data frame needs to be preceded by a // header, i.e. needs more than 1 byte of send space to send 1 byte payload. assert_eq!(request.available()?, 1); assert_eq!(request.send_data(&buf)?, 0); diff --git a/neqo-http3/tests/priority.rs b/neqo-http3/tests/priority.rs index cafebe09b5..318a20577f 100644 --- a/neqo-http3/tests/priority.rs +++ b/neqo-http3/tests/priority.rs @@ -25,7 +25,7 @@ fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { } } -// Perform only Quic transport handshake. +// Perform only QUIC transport handshake. 
fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { assert_eq!(client.state(), Http3State::Initializing); let out = client.process_output(now()); @@ -45,7 +45,7 @@ fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { assert!(client.events().any(connected)); assert_eq!(client.state(), Http3State::Connected); - // Exchange H3 setttings + // Exchange H3 settings let out = server.process(out.dgram(), now()); let out = client.process(out.dgram(), now()); let out = server.process(out.dgram(), now()); diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index 2197be97e5..9417bb3a4f 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -59,7 +59,7 @@ fn connect() -> (Http3Client, Http3Server) { assert_eq!(client.state(), Http3State::Connected); - // Exchange H3 setttings + // Exchange H3 settings loop { out = server.process(out, now()).dgram(); let dgram_present = out.is_some(); diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index e82237eb30..5ca8e71c4e 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -203,7 +203,7 @@ impl QPackEncoder { } fn call_instruction(&mut self, instruction: DecoderInstruction, qlog: &NeqoQlog) -> Res<()> { - qdebug!([self], "call intruction {:?}", instruction); + qdebug!([self], "call instruction {:?}", instruction); match instruction { DecoderInstruction::InsertCountIncrement { increment } => { qlog::qpack_read_insert_count_increment_instruction( @@ -891,7 +891,7 @@ mod tests { assert!(res.is_ok()); encoder.send_instructions(HEADER_CONTENT_LENGTH_VALUE_1_NAME_LITERAL); - // insert "content-length: 12345 which will fail because the ntry in the table cannot be + // insert "content-length: 12345 which will fail because the entry in the table cannot be // evicted. let res = encoder @@ -912,7 +912,7 @@ mod tests { encoder.send_instructions(HEADER_CONTENT_LENGTH_VALUE_2_NAME_LITERAL); } - // Test inserts block on waiting for acks + // Test inserts block on waiting for ACKs // test the table insertion is blocked: // 0 - waiting for a header ack // 2 - waiting for a stream cancel. @@ -1220,7 +1220,7 @@ mod tests { // receive a header_ack for the first header block. recv_instruction(&mut encoder, HEADER_ACK_STREAM_ID_1); - // The stream is not blocking anymore because header ack also acks the instruction. + // The stream is not blocking anymore because header ack also ACKs the instruction. assert_eq!(encoder.encoder.blocked_stream_cnt(), 0); } @@ -1260,7 +1260,7 @@ mod tests { // receive a header_ack for the second header block. This will ack the first as well recv_instruction(&mut encoder, HEADER_ACK_STREAM_ID_2); - // The stream is not blocking anymore because header ack also acks the instruction. + // The stream is not blocking anymore because header ack also ACKs the instruction. assert_eq!(encoder.encoder.blocked_stream_cnt(), 0); } @@ -1302,7 +1302,7 @@ mod tests { // acked. and the second steam will still be blocking. recv_instruction(&mut encoder, STREAM_CANCELED_ID_1); - // The stream is not blocking anymore because header ack also acks the instruction. + // The stream is not blocking anymore because header ack also ACKs the instruction. assert_eq!(encoder.encoder.blocked_stream_cnt(), 1); } @@ -1636,7 +1636,7 @@ mod tests { 0x36, 0x04, 0x31, 0x32, 0x33, 0x34 ] ); - // Also check that ther is no new instruction send by the encoder. + // Also check that there is no new instruction send by the encoder. 
assert!(encoder.conn.process_output(now()).dgram().is_none()); } diff --git a/neqo-qpack/src/header_block.rs b/neqo-qpack/src/header_block.rs index 1c5072ee96..d2eab95e3e 100644 --- a/neqo-qpack/src/header_block.rs +++ b/neqo-qpack/src/header_block.rs @@ -386,7 +386,7 @@ impl<'a> HeaderDecoder<'a> { fn read_literal_with_name_ref_dynamic(&mut self, table: &HeaderTable) -> Res
{ qtrace!( [self], - "read literal with name reference ot the dynamic table." + "read literal with name reference of the dynamic table." ); let index = self diff --git a/neqo-qpack/src/prefix.rs b/neqo-qpack/src/prefix.rs index d83798a3dc..2ed04668f2 100644 --- a/neqo-qpack/src/prefix.rs +++ b/neqo-qpack/src/prefix.rs @@ -15,9 +15,9 @@ pub struct Prefix { impl Prefix { pub fn new(prefix: u8, len: u8) -> Self { // len should never be larger than 7. - // Most of Prefixes are instantiated as consts bellow. The only place where this - // construcrtor is used is in tests and when literals are encoded and the Huffman - // bit is added to one of the consts bellow. create_prefix guaranty that all const + // Most of Prefixes are instantiated as consts below. The only place where this + // constructor is used is in tests and when literals are encoded and the Huffman + // bit is added to one of the consts below. create_prefix guaranty that all const // have len < 7 so we can safely assert that len is <=7. assert!(len <= 7); assert!((len == 0) || (prefix & ((1 << (8 - len)) - 1) == 0)); diff --git a/neqo-qpack/src/reader.rs b/neqo-qpack/src/reader.rs index 1b28ab3f7c..d03b118cfa 100644 --- a/neqo-qpack/src/reader.rs +++ b/neqo-qpack/src/reader.rs @@ -156,7 +156,7 @@ pub struct IntReader { } impl IntReader { - /// `IntReader` is created by suppling the first byte anf prefix length. + /// `IntReader` is created by supplying the first byte and prefix length. /// A varint may take only one byte, In that case already the first by has set state to done. /// /// # Panics @@ -251,7 +251,7 @@ pub struct LiteralReader { impl LiteralReader { /// Creates `LiteralReader` with the first byte. This constructor is always used - /// when a litreral has a prefix. + /// when a literal has a prefix. /// For literals without a prefix please use the default constructor. /// /// # Panics diff --git a/neqo-qpack/src/stats.rs b/neqo-qpack/src/stats.rs index 7cb334b0c7..0dd2c9dd93 100644 --- a/neqo-qpack/src/stats.rs +++ b/neqo-qpack/src/stats.rs @@ -10,7 +10,7 @@ /// `QPack` statistics pub struct Stats { pub dynamic_table_inserts: usize, - // This is the munber of header blockes that reference the dynamic table. + // This is the number of header blocks that reference the dynamic table. pub dynamic_table_references: usize, pub stream_cancelled_recv: usize, pub header_acks_recv: usize, diff --git a/neqo-qpack/src/table.rs b/neqo-qpack/src/table.rs index ea6ae58d35..8c8b8d5eaf 100644 --- a/neqo-qpack/src/table.rs +++ b/neqo-qpack/src/table.rs @@ -365,7 +365,7 @@ impl HeaderTable { let entry = self.get_dynamic(index, self.base, false)?; name = entry.name().to_vec(); value = entry.value().to_vec(); - qtrace!([self], "dumplicate name={:?} value={:?}", name, value); + qtrace!([self], "duplicate name={:?} value={:?}", name, value); } self.insert(&name, &value) } diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 0c629afb59..5dc7356a19 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -88,7 +88,7 @@ pub trait WindowAdjustment: Display + Debug { max_datagram_size: usize, now: Instant, ) -> usize; - /// This function is called when a congestion event has beed detected and it + /// This function is called when a congestion event has been detected and it /// returns new (decreased) values of `curr_cwnd` and `acked_bytes`. 
/// This value can be very small; the calling code is responsible for ensuring that the /// congestion window doesn't drop below the minimum of `CWND_MIN`. diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 1d19f1abe7..6233746c22 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1699,7 +1699,7 @@ impl Connection { // Get the next packet number we'll send, for ACK verification. // TODO: Once PR #2118 lands, this can move to `input_frame`. For now, it needs to be here, - // because we can drop packet number spaces as we parse throught the packet, and if an ACK + // because we can drop packet number spaces as we parse through the packet, and if an ACK // frame follows a CRYPTO frame that makes us drop a space, we need to know this // packet number to verify the ACK against. let next_pn = self @@ -2291,7 +2291,7 @@ impl Connection { } if profile.ack_only(space) { - // If we are CC limited we can only send acks! + // If we are CC limited we can only send ACKs! return (tokens, false, false); } @@ -3278,8 +3278,8 @@ impl Connection { /// /// # Errors /// - /// `ConnectionState` if the connecton stat does not allow to create streams. - /// `StreamLimitError` if we are limiied by server's stream concurence. + /// `ConnectionState` if the connection stat does not allow to create streams. + /// `StreamLimitError` if we are limited by server's stream concurrence. pub fn stream_create(&mut self, st: StreamType) -> Res { // Can't make streams while closing, otherwise rely on the stream limits. match self.state { @@ -3548,7 +3548,7 @@ impl Connection { /// # Errors /// /// The function returns `TooMuchData` if the supply buffer is bigger than - /// the allowed remote datagram size. The funcion does not check if the + /// the allowed remote datagram size. The function does not check if the /// datagram can fit into a packet (i.e. MTU limit). This is checked during /// creation of an actual packet and the datagram will be dropped if it does /// not fit into the packet. The app is encourage to use `max_datagram_size` diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index fb176d904e..ae50b6e124 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -41,7 +41,7 @@ pub enum PreferredAddressConfig { Address(PreferredAddress), } -/// `ConnectionParameters` use for setting intitial value for QUIC parameters. +/// `ConnectionParameters` use for setting initial value for QUIC parameters. /// This collects configuration like initial limits, protocol version, and /// congestion control algorithm. #[allow(clippy::struct_excessive_bools)] // We need that many, sorry. diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index deb4312dca..024d0f9157 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -474,7 +474,7 @@ fn keep_alive_lost() { assert!(client.process(out, now).dgram().is_none()); // TODO: if we run server.process with current value of now, the server will - // return some small timeout for the recovry although it does not have + // return some small timeout for the recovery although it does not have // any outstanding data. Therefore we call it after AT_LEAST_PTO. 
now += AT_LEAST_PTO; assert_idle(&mut server, now, keep_alive_timeout() - AT_LEAST_PTO); @@ -662,7 +662,7 @@ fn keep_alive_uni() { server.stream_keep_alive(stream, true).unwrap(); } -/// Test a keep-alive ping is send if there are outstading ack-eliciting packets and that +/// Test a keep-alive ping is send if there are outstanding ack-eliciting packets and that /// the connection is closed after the idle timeout passes. #[test] fn keep_alive_with_ack_eliciting_packet_lost() { @@ -672,7 +672,7 @@ fn keep_alive_with_ack_eliciting_packet_lost() { // + 2pto) After handshake all packets will be lost. The following steps will happen after // the handshake: // - data will be sent on a stream that is marked for keep-alive, (at start time) - // - PTO timer will trigger first, and the data will be retransmited toghether with a PING, (at + // - PTO timer will trigger first, and the data will be retransmitted together with a PING, (at // the start time + pto) // - keep-alive timer will trigger and a keep-alive PING will be sent, (at the start time + // IDLE_TIMEOUT / 2) diff --git a/neqo-transport/src/connection/tests/priority.rs b/neqo-transport/src/connection/tests/priority.rs index 3a19a18830..7bf9beedd4 100644 --- a/neqo-transport/src/connection/tests/priority.rs +++ b/neqo-transport/src/connection/tests/priority.rs @@ -208,7 +208,7 @@ fn critical() { let now = now(); // Rather than connect, send stream data in 0.5-RTT. - // That allows this to test that critical streams pre-empt most frame types. + // That allows this to test that critical streams preempt most frame types. let dgram = client.process_output(now).dgram(); let dgram = server.process(dgram, now).dgram(); client.process_input(dgram.unwrap(), now); @@ -259,7 +259,7 @@ fn important() { let now = now(); // Rather than connect, send stream data in 0.5-RTT. - // That allows this to test that important streams pre-empt most frame types. + // That allows this to test that important streams preempt most frame types. let dgram = client.process_output(now).dgram(); let dgram = server.process(dgram, now).dgram(); client.process_input(dgram.unwrap(), now); @@ -312,7 +312,7 @@ fn high_normal() { let now = now(); // Rather than connect, send stream data in 0.5-RTT. - // That allows this to test that important streams pre-empt most frame types. + // That allows this to test that important streams preempt most frame types. let dgram = client.process_output(now).dgram(); let dgram = server.process(dgram, now).dgram(); client.process_input(dgram.unwrap(), now); diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index aa2b1003c9..cfe57f090a 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -116,7 +116,7 @@ fn transfer() { assert!(fin3); } -// tests stream sendorder priorization +// tests stream sendorder prioritization fn sendorder_test(order_of_sendorder: &[Option]) { let mut client = default_client(); let mut server = default_server(); @@ -217,7 +217,7 @@ fn sendorder_4() { ]); } -// Tests stream sendorder priorization +// Tests stream sendorder prioritization // Converts Vecs of u64's into StreamIds fn fairness_test(source: S, number_iterates: usize, truncate_to: usize, result_array: &R) where @@ -306,7 +306,7 @@ fn ordergroup_7() { } #[test] -// Send fin even if a peer closes a reomte bidi send stream before sending any data. +// Send fin even if a peer closes a remote bidi send stream before sending any data. 
fn report_fin_when_stream_closed_wo_data() { // Note that the two servers in this test will get different anti-replay filters. // That's OK because we aren't testing anti-replay. @@ -659,7 +659,7 @@ fn after_stream_stop_sending_is_called_conn_events_for_stream_should_be_removed( mem::drop(client.process(out, now())); - // send stop seending. + // send stop sending. client .stream_stop_sending(id, Error::NoError.code()) .unwrap(); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index 859f0471cb..869ef7a4d1 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -86,7 +86,7 @@ const ERROR_AEAD_LIMIT_REACHED: TransportError = 15; pub enum Error { NoError, // Each time this error is returned a different parameter is supplied. - // This will be used to distinguish each occurance of this error. + // This will be used to distinguish each occurrence of this error. InternalError, ConnectionRefused, FlowControlError, diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 5513ee651a..7ab36d8f3b 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -424,7 +424,7 @@ impl Paths { #[cfg(test)] pub fn rtt(&self) -> Duration { // Rather than have this fail when there is no active path, - // make a new RTT esimate and interrogate that. + // make a new RTT estimate and interrogate that. // That is more expensive, but it should be rare and breaking encapsulation // is worse, especially as this is only used in tests. self.primary().map_or_else( diff --git a/neqo-transport/src/quic_datagrams.rs b/neqo-transport/src/quic_datagrams.rs index 0d1a7dd843..2f4b9a5de3 100644 --- a/neqo-transport/src/quic_datagrams.rs +++ b/neqo-transport/src/quic_datagrams.rs @@ -144,7 +144,7 @@ impl QuicDatagrams { /// # Error /// /// The function returns `TooMuchData` if the supply buffer is bigger than - /// the allowed remote datagram size. The funcion does not check if the + /// the allowed remote datagram size. The function does not check if the /// datagram can fit into a packet (i.e. MTU limit). This is checked during /// creation of an actual packet and the datagram will be dropped if it does /// not fit into the packet. diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index 44e7f8b9a9..7d564abfff 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -570,7 +570,7 @@ impl RecvStream { match new_state { // Receiving all data, or receiving or requesting RESET_STREAM - // is cause to stop keep-alives. + // is cause to stop keepalives. RecvStreamState::DataRecvd { .. } | RecvStreamState::AbortReading { .. } | RecvStreamState::ResetRecvd { .. } => { @@ -1839,7 +1839,7 @@ mod tests { assert_eq!(s.read(&mut buf).unwrap(), (1, false)); check_fc(&fc.borrow(), SW / 4 + 1, SW / 4 + 1); check_fc(s.fc().unwrap(), SW / 4 + 1, SW / 4 + 1); - // Data are retired and the sttream fc will send an update. + // Data are retired and the stream fc will send an update. assert!(!fc.borrow().frame_needed()); assert!(s.fc().unwrap().frame_needed()); diff --git a/neqo-transport/src/streams.rs b/neqo-transport/src/streams.rs index eccd96a9e3..5e6f54aeab 100644 --- a/neqo-transport/src/streams.rs +++ b/neqo-transport/src/streams.rs @@ -202,7 +202,7 @@ impl Streams { } Frame::StreamsBlocked { .. } => { stats.streams_blocked += 1; - // We send an update evry time we retire a stream. There is no need to + // We send an update every time we retire a stream. There is no need to // trigger flow updates here. 
} _ => unreachable!("This is not a stream Frame"), @@ -337,7 +337,7 @@ impl Streams { let (removed_bidi, removed_uni) = self.recv.clear_terminal(&self.send, self.role); // Send max_streams updates if we removed remote-initiated recv streams. - // The updates will be send if any steams has been removed. + // The updates will be send if any streams has been removed. self.remote_stream_limits[StreamType::BiDi].add_retired(removed_bidi); self.remote_stream_limits[StreamType::UniDi].add_retired(removed_uni); } diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index baf11ff7a4..723492c448 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -1055,9 +1055,9 @@ mod tests { for i in INTEGER_KEYS { let mut tps_b = tps_a.clone(); tps_b.remove(*i); - // A value that is missing from what is rememebered is OK. + // A value that is missing from what is remembered is OK. assert!(tps_a.ok_for_0rtt(&tps_b)); - // A value that is rememebered, but not current is not OK. + // A value that is remembered, but not current is not OK. assert!(!tps_b.ok_for_0rtt(&tps_a)); } } diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 20baea811d..17ec6d6f9c 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Tracking of received packets and generating acks thereof. +// Tracking of received packets and generating ACKs thereof. use std::{ cmp::min, @@ -158,7 +158,7 @@ impl PacketRange { } } - /// Get the number of acknowleged packets in the range. + /// Get the number of acknowledged packets in the range. pub const fn len(&self) -> u64 { self.largest - self.smallest + 1 } From 3e6526177e69ae9ef00fd3af465563d4f8a40e02 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 10 Dec 2024 15:43:56 +0200 Subject: [PATCH 16/23] fix: `function pointer comparisons do not produce meaningful results` (#2275) New clippy warning with the original code: ``` warning: function pointer comparisons do not produce meaningful results since their addresses are not guaranteed to be unique --> neqo-transport/src/connection/tests/ecn.rs:359:21 | 359 | assert!(*new_path_mod == drop() || migrated); | ^^^^^^^^^^^^^^^^^^^^^^^ | = note: the address of the same function can vary between different codegen units = note: furthermore, different functions could have the same address after being merged together = note: for more information visit = note: `#[warn(unpredictable_function_pointer_comparisons)]` on by default help: refactor your code, or use `std::ptr::fn_addr_eq` to suppress the lint | 359 | assert!(std::ptr::fn_addr_eq(*new_path_mod, drop()) || migrated); | +++++++++++++++++++++ ~ + ``` --- neqo-transport/src/connection/tests/ecn.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/neqo-transport/src/connection/tests/ecn.rs b/neqo-transport/src/connection/tests/ecn.rs index 0b947c4d38..c4ba07547b 100644 --- a/neqo-transport/src/connection/tests/ecn.rs +++ b/neqo-transport/src/connection/tests/ecn.rs @@ -352,15 +352,21 @@ pub fn migration_with_modifiers( #[test] fn ecn_migration_zero_burst_all_cases() { - for orig_path_mod in &[noop(), bleach(), remark(), ce()] { - for new_path_mod in &[noop(), bleach(), remark(), ce(), drop()] { + for orig_path_mod in [noop(), bleach(), remark(), ce()] { + for (new_path_mod_name, new_path_mod) in [ + ("noop", noop()), + ("bleach", bleach()), + ("remark", 
remark()), + ("ce", ce()), + ("drop", drop()), + ] { let (before, after, migrated) = - migration_with_modifiers(*orig_path_mod, *new_path_mod, 0); + migration_with_modifiers(orig_path_mod, new_path_mod, 0); // Too few packets sent before and after migration to conclude ECN validation. assert_ecn_enabled(before); assert_ecn_enabled(after); // Migration succeeds except if the new path drops ECN. - assert!(*new_path_mod == drop() || migrated); + assert!(new_path_mod_name == "drop" || migrated); } } } From a1b9364b3f261cc211a91e83c8369ce130475704 Mon Sep 17 00:00:00 2001 From: Sunil Mayya Date: Wed, 11 Dec 2024 05:06:21 +0100 Subject: [PATCH 17/23] Mark WebTransport streams keep alive (#1389) * Mark WebTransport streams keep alive * Implement suggestions from @martinthomson * Update neqo-http3/tests/webtransport.rs Co-authored-by: Max Inden Signed-off-by: Lars Eggert * fmt --------- Signed-off-by: Lars Eggert Co-authored-by: Lars Eggert Co-authored-by: Max Inden --- neqo-http3/src/connection.rs | 4 ++-- neqo-http3/tests/webtransport.rs | 13 ++++++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index f3ec804475..a59b6908e7 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -282,7 +282,7 @@ handle parsing and sending of HTTP part of the control stream. When HTTP headers `WebTransportSessionListener` as the `RecvMessage` event listener. `WebTransportSendStream` and `WebTransportRecvStream` are associated with a `WebTransportSession` -and they will be canceled if the session is closed. To be avle to do this `WebTransportSession` +and they will be canceled if the session is closed. To be able to do this `WebTransportSession` holds a list of its active streams and clean up is done in `remove_extended_connect`. ### `WebTransportSendStream` and `WebTransportRecvStream` @@ -1177,6 +1177,7 @@ impl Http3Connection { } let send_stream = self.send_streams.get_mut(&stream_id); + conn.stream_keep_alive(stream_id, true)?; match (send_stream, recv_stream, accept_res) { (None, None, _) => Err(Error::InvalidStreamId), @@ -1336,7 +1337,6 @@ impl Http3Connection { recv_events: Box, local: bool, ) { - // TODO conn.stream_keep_alive(stream_id, true)?; webtransport_session.borrow_mut().add_stream(stream_id); if stream_id.stream_type() == StreamType::UniDi { if local { diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index 9417bb3a4f..1d29b702bc 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -13,7 +13,7 @@ use neqo_http3::{ Http3ServerEvent, Http3State, WebTransportEvent, WebTransportRequest, WebTransportServerEvent, WebTransportSessionAcceptAction, }; -use neqo_transport::{StreamId, StreamType}; +use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use test_fixture::{ anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, @@ -225,6 +225,17 @@ fn receive_data_server( wt_stream.unwrap() } +#[test] +fn wt_keepalive() { + let (mut client, mut server) = connect(); + let _wt_session = create_wt_session(&mut client, &mut server); + let idle_timeout = ConnectionParameters::default().get_idle_timeout(); + // Expect client and server to send PING after half of the idle timeout in order to keep + // connection alive. 
+ assert_eq!(client.process_output(now()).callback(), idle_timeout / 2); + assert_eq!(server.process_output(now()).callback(), idle_timeout / 2); +} + #[test] fn wt_client_stream_uni() { const BUF_CLIENT: &[u8] = &[0; 10]; From d93f40cd6ba20b4d91b870452d91fa9af43cfecf Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 11 Dec 2024 16:42:02 +0200 Subject: [PATCH 18/23] ci: Enable MC/DC coverage en.wikipedia.org/wiki/Code_coverage#Modified_condition/decision_coverage MC/DC is finding some partially covered lines, so coverage drops. This is expected. --- .github/workflows/check.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 36f21a97eb..41c74c581e 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -66,7 +66,7 @@ jobs: with: version: ${{ matrix.rust-toolchain }} components: ${{ matrix.rust-toolchain == 'stable' && 'llvm-tools-preview' || '' }} - tools: ${{ matrix.rust-toolchain == 'stable' && 'cargo-llvm-cov, ' || '' }} cargo-nextest + tools: ${{ matrix.rust-toolchain == 'nightly' && 'cargo-llvm-cov, ' || '' }} cargo-nextest token: ${{ secrets.GITHUB_TOKEN }} - id: nss-version @@ -88,8 +88,8 @@ jobs: DUMP_SIMULATION_SEEDS="$(pwd)/simulation-seeds" export DUMP_SIMULATION_SEEDS # shellcheck disable=SC2086 - if [ "${{ matrix.rust-toolchain }}" == "stable" ]; then - cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --features ci --profile ci --lcov --output-path lcov.info + if [ "${{ matrix.rust-toolchain }}" == "nightly" ]; then + cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --mcdc --features ci --profile ci --lcov --output-path lcov.info else cargo +${{ matrix.rust-toolchain }} nextest run $BUILD_TYPE --features ci --profile ci fi @@ -119,7 +119,7 @@ jobs: verbose: true env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - if: matrix.type == 'debug' && matrix.rust-toolchain == 'stable' + if: matrix.type == 'debug' && matrix.rust-toolchain == 'nightly' - name: Save simulation seeds artifact if: always() From cf3830d281a2bccd1dd6f7fcc72f3e50cb12c09e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 11 Dec 2024 16:50:19 +0200 Subject: [PATCH 19/23] One more nightly --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 41c74c581e..ae1c45242b 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -65,7 +65,7 @@ jobs: - uses: ./.github/actions/rust with: version: ${{ matrix.rust-toolchain }} - components: ${{ matrix.rust-toolchain == 'stable' && 'llvm-tools-preview' || '' }} + components: ${{ matrix.rust-toolchain == 'nightly' && 'llvm-tools' || '' }} tools: ${{ matrix.rust-toolchain == 'nightly' && 'cargo-llvm-cov, ' || '' }} cargo-nextest token: ${{ secrets.GITHUB_TOKEN }} From fd42bed9206a0cd980be8d30b7c1ac2797acffa0 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 12 Dec 2024 08:05:01 +0100 Subject: [PATCH 20/23] deps(Cargo.lock): update to quinn-udp v0.5.8 (#2278) Upstream release: https://github.com/quinn-rs/quinn/releases/tag/quinn-udp-0.5.8 Downstream patch updating mozilla-central: https://bugzilla.mozilla.org/show_bug.cgi?id=1935954 --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e2693fda85..89ea352a91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -983,9 +983,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.6" +version = 
"0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e346e016eacfff12233c243718197ca12f148c84e1e84268a896699b41c71780" +checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" dependencies = [ "cfg_aliases", "libc", From 6772ae5ee38199a9b7ee90cfe878df434a5f32c2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 12 Dec 2024 16:17:04 +0200 Subject: [PATCH 21/23] fix: Send Retry and VN packets with default TOS (#2266) * fix: Send Retry and VN packets with default TOS Rather than using the TOS value from the incoming packet, which might have gotten ECN marked on the way in. Also create a debug log entry for those packets that resembles that created by `dump_packet`. * Update neqo-transport/src/server.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-transport/src/server.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Fix logging. Add test for correct Retry/VN TOS. * Update test-fixture/src/assertions.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update test-fixture/src/assertions.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * fmt --------- Signed-off-by: Lars Eggert Co-authored-by: Martin Thomson --- neqo-transport/src/server.rs | 27 ++++++++++++++++++++++++--- neqo-transport/tests/retry.rs | 8 +++----- test-fixture/src/assertions.rs | 12 +++++++----- 3 files changed, 34 insertions(+), 13 deletions(-) diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 7a4472ae1a..286f74283c 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -17,7 +17,8 @@ use std::{ }; use neqo_common::{ - event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, Datagram, Role, + event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, Datagram, IpTos, + Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, @@ -235,10 +236,20 @@ impl Server { Output::None }, |p| { + qdebug!( + [self], + "type={:?} path:{} {}->{} {:?} len {}", + PacketType::Retry, + initial.dst_cid, + dgram.destination(), + dgram.source(), + IpTos::default(), + p.len(), + ); Output::Datagram(Datagram::new( dgram.destination(), dgram.source(), - dgram.tos(), + IpTos::default(), p, )) }, @@ -386,6 +397,16 @@ impl Server { packet.wire_version(), self.conn_params.get_versions().all(), ); + qdebug!( + [self], + "type={:?} path:{} {}->{} {:?} len {}", + PacketType::VersionNegotiation, + packet.dcid(), + dgram.destination(), + dgram.source(), + IpTos::default(), + vn.len(), + ); crate::qlog::server_version_information_failed( &self.create_qlog_trace(packet.dcid()), @@ -397,7 +418,7 @@ impl Server { return Output::Datagram(Datagram::new( dgram.destination(), dgram.source(), - dgram.tos(), + IpTos::default(), vn, )); } diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 36325be2e1..0dc083859b 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -37,12 +37,10 @@ fn retry_basic() { let dgram = client.process_output(now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry - assert!(dgram.is_some()); + let dgram = server.process(dgram, now()).dgram().unwrap(); // Retry + assertions::assert_retry(&dgram); - assertions::assert_retry(dgram.as_ref().unwrap()); - - let dgram = client.process(dgram, now()).dgram(); // Initial w/token + let dgram = client.process(Some(dgram), now()).dgram(); 
// Initial w/token
     assert!(dgram.is_some());
     let dgram = server.process(dgram, now()).dgram(); // Initial, HS
     assert!(dgram.is_some());
 
diff --git a/test-fixture/src/assertions.rs b/test-fixture/src/assertions.rs
index 4cb3732258..e70748fa36 100644
--- a/test-fixture/src/assertions.rs
+++ b/test-fixture/src/assertions.rs
@@ -6,7 +6,7 @@
 
 use std::net::SocketAddr;
 
-use neqo_common::{Datagram, Decoder};
+use neqo_common::{Datagram, Decoder, IpTos};
 use neqo_transport::{version::WireVersion, Pmtud, Version, MIN_INITIAL_PACKET_SIZE};
 
 use crate::{DEFAULT_ADDR, DEFAULT_ADDR_V4};
@@ -50,8 +50,9 @@ pub fn assert_version(payload: &[u8], v: u32) {
 /// # Panics
 ///
 /// If this is clearly not a Version Negotiation packet.
-pub fn assert_vn(payload: &[u8]) {
-    let mut dec = Decoder::from(payload);
+pub fn assert_vn(dgram: &Datagram) {
+    assert_eq!(dgram.tos(), IpTos::default()); // VN always uses default IP TOS.
+    let mut dec = Decoder::from(dgram.as_ref());
     assert_eq!(
         dec.decode_uint::<u8>().unwrap() & 0x80,
         0x80,
@@ -86,8 +87,9 @@ pub fn assert_coalesced_0rtt(payload: &[u8]) {
 /// # Panics
 ///
 /// If the tests fail.
-pub fn assert_retry(payload: &[u8]) {
-    let mut dec = Decoder::from(payload);
+pub fn assert_retry(dgram: &Datagram) {
+    assert_eq!(dgram.tos(), IpTos::default()); // Retry always uses default IP TOS.
+    let mut dec = Decoder::from(dgram.as_ref());
     let t = dec.decode_uint::<u8>().unwrap();
     let version = assert_default_version(&mut dec);
     assert_long_packet_type(t, 0b1011_0000, version);

From b4d0eb7147f7dced27f5188b6e97f693aea6dff9 Mon Sep 17 00:00:00 2001
From: Lars Eggert
Date: Thu, 12 Dec 2024 16:18:21 +0200
Subject: [PATCH 22/23] Try Codecov JSON format and include FFI (#2282)

---
 .github/workflows/check.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
index ae1c45242b..2e6cf3b458 100644
--- a/.github/workflows/check.yml
+++ b/.github/workflows/check.yml
@@ -89,7 +89,7 @@ jobs:
           export DUMP_SIMULATION_SEEDS
           # shellcheck disable=SC2086
           if [ "${{ matrix.rust-toolchain }}" == "nightly" ]; then
-            cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --mcdc --features ci --profile ci --lcov --output-path lcov.info
+            cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --mcdc --include-ffi --features ci --profile ci --codecov --output-path codecov.json
           else
             cargo +${{ matrix.rust-toolchain }} nextest run $BUILD_TYPE --features ci --profile ci
           fi
@@ -113,7 +113,7 @@ jobs:
 
       - uses: codecov/codecov-action@7f8b4b4bde536c465e797be725718b88c5d95e0e # v5.1.1
         with:
-          files: lcov.info
+          files: codecov.json
           fail_ci_if_error: false
           token: ${{ secrets.CODECOV_TOKEN }}
           verbose: true

From 3001a3a56f2274eaafaa956fb394f0817f526ae7 Mon Sep 17 00:00:00 2001
From: Oskar Mansfeld <113997378+mansf-osk@users.noreply.github.com>
Date: Thu, 12 Dec 2024 15:24:17 +0100
Subject: [PATCH 23/23] chore(qlog): Add meaningful qlog titles and descriptions. (#2281)

* chore(qlog): Add meaningful qlog titles and descriptions.
* chore(qlog): Add client prefix to client qlog filename.
* chore(qlog): Change EXPECTED_LOG_HEADER so tests pass.
---
 neqo-bin/src/client/mod.rs   | 6 +++---
 neqo-common/src/qlog.rs      | 2 +-
 neqo-transport/src/server.rs | 2 +-
 test-fixture/src/lib.rs      | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs
index 35463cad31..00509951af 100644
--- a/neqo-bin/src/client/mod.rs
+++ b/neqo-bin/src/client/mod.rs
@@ -508,9 +508,9 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res<NeqoQlog> {
     NeqoQlog::enabled_with_file(
         qlog_dir,
         Role::Client,
-        Some("Example qlog".to_string()),
-        Some("Example qlog description".to_string()),
-        format!("{hostname}-{cid}"),
+        Some("Neqo client qlog".to_string()),
+        Some("Neqo client qlog".to_string()),
+        format!("client-{hostname}-{cid}"),
     )
     .map_err(Error::QlogError)
 }
diff --git a/neqo-common/src/qlog.rs b/neqo-common/src/qlog.rs
index 34f1e1c2ca..5f7b5dca3d 100644
--- a/neqo-common/src/qlog.rs
+++ b/neqo-common/src/qlog.rs
@@ -186,7 +186,7 @@ pub fn new_trace(role: Role) -> qlog::TraceSeq {
             flow: None,
         },
         title: Some(format!("neqo-{role} trace")),
-        description: Some("Example qlog trace description".to_string()),
+        description: Some(format!("neqo-{role} trace")),
         configuration: Some(Configuration {
             time_offset: Some(0.0),
             original_uris: None,
diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs
index 286f74283c..154ad17ed3 100644
--- a/neqo-transport/src/server.rs
+++ b/neqo-transport/src/server.rs
@@ -271,7 +271,7 @@ impl Server {
                 Role::Server,
                 Some("Neqo server qlog".to_string()),
                 Some("Neqo server qlog".to_string()),
-                odcid,
+                format!("server-{odcid}"),
             )
             .unwrap_or_else(|e| {
                 qerror!("failed to create NeqoQlog: {}", e);
diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs
index f2a86f6793..e58e32f37a 100644
--- a/test-fixture/src/lib.rs
+++ b/test-fixture/src/lib.rs
@@ -418,6 +418,6 @@ pub fn new_neqo_qlog() -> (NeqoQlog, SharedVec) {
 
 pub const EXPECTED_LOG_HEADER: &str = concat!(
     "\u{1e}",
-    r#"{"qlog_version":"0.3","qlog_format":"JSON-SEQ","trace":{"vantage_point":{"name":"neqo-Client","type":"client"},"title":"neqo-Client trace","description":"Example qlog trace description","configuration":{"time_offset":0.0},"common_fields":{"reference_time":0.0,"time_format":"relative"}}}"#,
+    r#"{"qlog_version":"0.3","qlog_format":"JSON-SEQ","trace":{"vantage_point":{"name":"neqo-Client","type":"client"},"title":"neqo-Client trace","description":"neqo-Client trace","configuration":{"time_offset":0.0},"common_fields":{"reference_time":0.0,"time_format":"relative"}}}"#,
     "\n"
 );