From 57dd4ec34e817f3edeb243ee6ea4934982f4147a Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 26 Mar 2024 16:57:32 +0100 Subject: [PATCH 01/20] refactor(github/interop): use grep instead of awk (#1747) * refactor(github/interop): use grep instead of awk * Simplify regex --- .github/actions/quic-interop-runner/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index ef4865bde6..30c7f0d8d6 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -92,8 +92,8 @@ runs: run: | echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment echo '' >> comment - # Ignore all, but table, which starts with "|:--". - cat quic-interop-runner/summary | awk '/^\|:--/{flag=1} flag' >> comment + # Ignore all, but table, which starts with "|". + grep -E '^\|' quic-interop-runner/summary >> comment echo '' >> comment shell: bash From 1af91f506120a3d68d6c938ddc42ea3a73218507 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 02:37:19 +0200 Subject: [PATCH 02/20] test: Let packets be modified with a closure during tests (#1773) * test: Let packets be modified with a closure during tests Broken out of #1678 to reduce the size of that PR. * Support dropping packets via `DatagramModifier` --- neqo-transport/src/connection/tests/mod.rs | 83 +++++++++++++++++++--- 1 file changed, 73 insertions(+), 10 deletions(-) diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index b6ce08f8d1..6f598fb23e 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -170,12 +170,17 @@ impl crate::connection::test_internal::FrameWriter for PingWriter { } } +trait DatagramModifier: FnMut(Datagram) -> Option {} + +impl DatagramModifier for T where T: FnMut(Datagram) -> Option {} + /// Drive the handshake between the client and server. 
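Any closure with the shape `FnMut(Datagram) -> Option<Datagram>` satisfies the `DatagramModifier` bound introduced above: returning `Some(d)` forwards the (possibly altered) datagram, while returning `None` drops it, which is what "Support dropping packets" in the commit message refers to. A minimal sketch of such a modifier (illustrative only, not part of the patch; the helper name is made up):

```rust
use neqo_common::Datagram;

/// Hypothetical helper: a modifier that drops every second datagram,
/// e.g. to exercise loss recovery in a test.
fn drop_every_other() -> impl FnMut(Datagram) -> Option<Datagram> {
    let mut counter = 0;
    move |d| {
        counter += 1;
        if counter % 2 == 1 {
            Some(d) // forward odd-numbered datagrams unchanged
        } else {
            None // drop the rest
        }
    }
}
```

Such a closure could then be passed to the wrappers added below, for example `handshake_with_modifier(&mut client, &mut server, now, rtt, drop_every_other())`; note that the plain `Some` constructor already has the right signature and serves as the identity modifier.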
-fn handshake( +fn handshake_with_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + mut modifier: impl DatagramModifier, ) -> Instant { let mut a = client; let mut b = server; @@ -212,7 +217,11 @@ fn handshake( did_ping[a.role()] = true; } assert!(had_input || output.is_some()); - input = output; + if let Some(d) = output { + input = modifier(d); + } else { + input = output; + } qtrace!("handshake: t += {:?}", rtt / 2); now += rtt / 2; mem::swap(&mut a, &mut b); @@ -223,6 +232,15 @@ fn handshake( now } +fn handshake( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + handshake_with_modifier(client, server, now, rtt, Some) +} + fn connect_fail( client: &mut Connection, server: &mut Connection, @@ -234,11 +252,12 @@ fn connect_fail( assert_error(server, &ConnectionError::Transport(server_error)); } -fn connect_with_rtt( +fn connect_with_rtt_and_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + modifier: impl DatagramModifier, ) -> Instant { fn check_rtt(stats: &Stats, rtt: Duration) { assert_eq!(stats.rtt, rtt); @@ -246,7 +265,7 @@ fn connect_with_rtt( let n = stats.frame_rx.ack + usize::from(stats.rtt_init_guess); assert_eq!(stats.rttvar, rttvar_after_n_updates(n, rtt)); } - let now = handshake(client, server, now, rtt); + let now = handshake_with_modifier(client, server, now, rtt, modifier); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); @@ -255,6 +274,15 @@ fn connect_with_rtt( now } +fn connect_with_rtt( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + connect_with_rtt_and_modifier(client, server, now, rtt, Some) +} + fn connect(client: &mut Connection, server: &mut Connection) { connect_with_rtt(client, server, now(), Duration::new(0, 0)); } @@ -301,8 +329,13 @@ fn assert_idle(client: &mut Connection, server: &mut Connection, rtt: Duration, } /// Connect with an RTT and then force both peers to be idle. -fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { - let now = connect_with_rtt(client, server, now(), rtt); +fn connect_rtt_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + rtt: Duration, + modifier: impl DatagramModifier, +) -> Instant { + let now = connect_with_rtt_and_modifier(client, server, now(), rtt, modifier); assert_idle(client, server, rtt, now); // Drain events from both as well. _ = client.events().count(); @@ -311,8 +344,20 @@ fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Durat now } +fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { + connect_rtt_idle_with_modifier(client, server, rtt, Some) +} + +fn connect_force_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + modifier: impl DatagramModifier, +) { + connect_rtt_idle_with_modifier(client, server, Duration::new(0, 0), modifier); +} + fn connect_force_idle(client: &mut Connection, server: &mut Connection) { - connect_rtt_idle(client, server, Duration::new(0, 0)); + connect_force_idle_with_modifier(client, server, Some); } fn fill_stream(c: &mut Connection, stream: StreamId) { @@ -524,12 +569,14 @@ fn assert_full_cwnd(packets: &[Datagram], cwnd: usize) { } /// Send something on a stream from `sender` to `receiver`, maybe allowing for pacing. 
+/// Takes a modifier function that can be used to modify the datagram before it is sent. /// Return the resulting datagram and the new time. #[must_use] -fn send_something_paced( +fn send_something_paced_with_modifier( sender: &mut Connection, mut now: Instant, allow_pacing: bool, + mut modifier: impl DatagramModifier, ) -> (Datagram, Instant) { let stream_id = sender.stream_create(StreamType::UniDi).unwrap(); assert!(sender.stream_send(stream_id, DEFAULT_STREAM_DATA).is_ok()); @@ -544,16 +591,32 @@ fn send_something_paced( .dgram() .expect("send_something: should have something to send") } - Output::Datagram(d) => d, + Output::Datagram(d) => modifier(d).unwrap(), Output::None => panic!("send_something: got Output::None"), }; (dgram, now) } +fn send_something_paced( + sender: &mut Connection, + now: Instant, + allow_pacing: bool, +) -> (Datagram, Instant) { + send_something_paced_with_modifier(sender, now, allow_pacing, Some) +} + +fn send_something_with_modifier( + sender: &mut Connection, + now: Instant, + modifier: impl DatagramModifier, +) -> Datagram { + send_something_paced_with_modifier(sender, now, false, modifier).0 +} + /// Send something on a stream from `sender` to `receiver`. /// Return the resulting datagram. fn send_something(sender: &mut Connection, now: Instant) -> Datagram { - send_something_paced(sender, now, false).0 + send_something_with_modifier(sender, now, Some) } /// Send something on a stream from `sender` to `receiver`. From 37be89473d72a1504894be36d387cda339c342af Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 09:25:00 +0200 Subject: [PATCH 03/20] feat: TOS improvements (#1774) * feat: TOS improvements Broken out of #1678 to reduce the size of that PR. * Remove function --- neqo-common/src/datagram.rs | 7 ++-- neqo-common/src/tos.rs | 48 ++++++++++++++++++++------- neqo-transport/src/connection/dump.rs | 13 ++++++-- neqo-transport/src/connection/mod.rs | 4 ++- 4 files changed, 55 insertions(+), 17 deletions(-) diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index 9cebb64ea5..cc2cb7d113 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -53,6 +53,10 @@ impl Datagram { pub fn ttl(&self) -> Option { self.ttl } + + pub fn set_tos(&mut self, tos: IpTos) { + self.tos = tos; + } } impl Deref for Datagram { @@ -90,8 +94,7 @@ use test_fixture::datagram; fn fmt_datagram() { let d = datagram([0; 1].to_vec()); assert_eq!( - format!("{d:?}"), + &format!("{d:?}"), "Datagram IpTos(Cs0, NotEct) TTL Some(128) [fe80::1]:443->[fe80::1]:443: [1]: 00" - .to_string() ); } diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs index 3610f72750..533c5447e2 100644 --- a/neqo-common/src/tos.rs +++ b/neqo-common/src/tos.rs @@ -36,7 +36,7 @@ impl From for u8 { impl From for IpTosEcn { fn from(v: u8) -> Self { - match v & 0b11 { + match v & 0b0000_0011 { 0b00 => IpTosEcn::NotEct, 0b01 => IpTosEcn::Ect1, 0b10 => IpTosEcn::Ect0, @@ -47,8 +47,8 @@ impl From for IpTosEcn { } impl From for IpTosEcn { - fn from(value: IpTos) -> Self { - IpTosEcn::from(value.0 & 0x3) + fn from(v: IpTos) -> Self { + IpTosEcn::from(u8::from(v)) } } @@ -166,14 +166,13 @@ impl From for IpTosDscp { } impl From for IpTosDscp { - fn from(value: IpTos) -> Self { - IpTosDscp::from(value.0 & 0xfc) + fn from(v: IpTos) -> Self { + IpTosDscp::from(u8::from(v)) } } /// The type-of-service field in an IP packet. 
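A quick illustration of the layout these conversions rely on: DSCP occupies the upper six bits of the TOS byte and ECN the lower two, so the `From` impls above only ever look at their own bits. A small sketch (assuming the `IpTos*` re-exports from `neqo_common` used elsewhere in this series, and using the `set_ecn` setter added just below):

```rust
use neqo_common::{IpTos, IpTosDscp, IpTosEcn};

fn main() {
    // DSCP in the upper six bits, ECN in the lower two.
    let mut tos: IpTos = (IpTosDscp::Af41, IpTosEcn::NotEct).into();
    assert_eq!(u8::from(tos), 0b1000_1000); // AF41 is 34, i.e. 34 << 2; ECN is 00

    // The setter replaces one field while preserving the other.
    tos.set_ecn(IpTosEcn::Ce);
    assert_eq!(u8::from(tos), 0b1000_1011); // same DSCP, ECN now CE (0b11)
}
```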
-#[allow(clippy::module_name_repetitions)] -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, PartialEq, Eq, Default)] pub struct IpTos(u8); impl From for IpTos { @@ -215,15 +214,19 @@ impl From for IpTos { impl Debug for IpTos { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("IpTos") - .field(&IpTosDscp::from(self.0 & 0xfc)) - .field(&IpTosEcn::from(self.0 & 0x3)) + .field(&IpTosDscp::from(*self)) + .field(&IpTosEcn::from(*self)) .finish() } } -impl Default for IpTos { - fn default() -> Self { - (IpTosDscp::default(), IpTosEcn::default()).into() +impl IpTos { + pub fn set_ecn(&mut self, ecn: IpTosEcn) { + self.0 = u8::from(IpTosDscp::from(*self)) | u8::from(ecn); + } + + pub fn set_dscp(&mut self, dscp: IpTosDscp) { + self.0 = u8::from(IpTosEcn::from(*self)) | u8::from(dscp); } } @@ -322,4 +325,25 @@ mod tests { assert_eq!(tos, u8::from(iptos)); assert_eq!(IpTos::from(tos), iptos); } + + #[test] + fn iptos_to_iptosdscp() { + let tos = IpTos::from((IpTosDscp::Af41, IpTosEcn::NotEct)); + let dscp = IpTosDscp::from(tos); + assert_eq!(dscp, IpTosDscp::Af41); + } + + #[test] + fn tos_modify_ecn() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::NotEct).into(); + iptos.set_ecn(IpTosEcn::Ce); + assert_eq!(u8::from(iptos), 0b1000_1011); + } + + #[test] + fn tos_modify_dscp() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::Ect1).into(); + iptos.set_dscp(IpTosDscp::Le); + assert_eq!(u8::from(iptos), 0b0000_0101); + } } diff --git a/neqo-transport/src/connection/dump.rs b/neqo-transport/src/connection/dump.rs index 34ac58f55e..12d337c570 100644 --- a/neqo-transport/src/connection/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -9,7 +9,7 @@ use std::fmt::Write; -use neqo_common::{qdebug, Decoder}; +use neqo_common::{qdebug, Decoder, IpTos}; use crate::{ connection::Connection, @@ -26,6 +26,7 @@ pub fn dump_packet( pt: PacketType, pn: PacketNumber, payload: &[u8], + tos: IpTos, ) { if log::STATIC_MAX_LEVEL == log::LevelFilter::Off || !log::log_enabled!(log::Level::Debug) { return; @@ -43,5 +44,13 @@ pub fn dump_packet( write!(&mut s, "\n {} {}", dir, &x).unwrap(); } } - qdebug!([conn], "pn={} type={:?} {}{}", pn, pt, path.borrow(), s); + qdebug!( + [conn], + "pn={} type={:?} {} {:?}{}", + pn, + pt, + path.borrow(), + tos, + s + ); } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 75c3490cba..535d3f4084 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -19,7 +19,7 @@ use std::{ use neqo_common::{ event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, - qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, + qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, IpTos, Role, }; use neqo_crypto::{ agent::CertificateInfo, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, @@ -1492,6 +1492,7 @@ impl Connection { payload.packet_type(), payload.pn(), &payload[..], + d.tos(), ); qlog::packet_received(&mut self.qlog, &packet, &payload); @@ -2255,6 +2256,7 @@ impl Connection { pt, pn, &builder.as_ref()[payload_start..], + IpTos::default(), // TODO: set from path ); qlog::packet_sent( &mut self.qlog, From 6a51a35dd63aa9833f5a4bcf31900acb7bbf64d6 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 27 Mar 2024 08:39:30 +0100 Subject: [PATCH 04/20] perf(bin/server): increase msg size and don't allocate msg per resp (#1772) * perf(bin/server): increase msg size and don't allocate msg per resp 
Previously `neqo-server` would respond to a request by repeatedly sending a static 440 byte message (Major-General's Song). Instead of sending 440 bytes, increase the batch size to 4096 bytes. This also matches the `neqo-client` receive buffer size. https://github.com/mozilla/neqo/blob/76630a5ebb6c6b94de6a40cf3f439b9a846f6ab7/neqo-bin/src/bin/client/http3.rs#L165 Previously `ResponseData::repeat` would convert the provided `buf: &[u8]` to ` Vec`, i.e. re-allocate the buf. Instead keep a reference to the original buf, thus removing the allocation. * Remove unnecessary into() --- neqo-bin/src/bin/server/main.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/bin/server/main.rs index 753794d6f6..62eb19e78c 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/bin/server/main.rs @@ -5,6 +5,7 @@ // except according to those terms. use std::{ + borrow::Cow, cell::RefCell, cmp::min, collections::HashMap, @@ -196,7 +197,7 @@ trait HttpServer: Display { } struct ResponseData { - data: Vec, + data: Cow<'static, [u8]>, offset: usize, remaining: usize, } @@ -211,7 +212,7 @@ impl From> for ResponseData { fn from(data: Vec) -> Self { let remaining = data.len(); Self { - data, + data: Cow::Owned(data), offset: 0, remaining, } @@ -219,9 +220,9 @@ impl From> for ResponseData { } impl ResponseData { - fn repeat(buf: &[u8], total: usize) -> Self { + fn repeat(buf: &'static [u8], total: usize) -> Self { Self { - data: buf.to_owned(), + data: Cow::Borrowed(buf), offset: 0, remaining: total, } @@ -260,14 +261,7 @@ struct SimpleServer { } impl SimpleServer { - const MESSAGE: &'static [u8] = b"I am the very model of a modern Major-General,\n\ - I've information vegetable, animal, and mineral,\n\ - I know the kings of England, and I quote the fights historical\n\ - From Marathon to Waterloo, in order categorical;\n\ - I'm very well acquainted, too, with matters mathematical,\n\ - I understand equations, both the simple and quadratical,\n\ - About binomial theorem, I'm teeming with a lot o' news,\n\ - With many cheerful facts about the square of the hypotenuse.\n"; + const MESSAGE: &'static [u8] = &[0; 4096]; pub fn new( args: &Args, From 47dfb3baa50ef89751cfc6818d6e2ee79616a66f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 09:52:23 +0200 Subject: [PATCH 05/20] build: Use system NSS when possible (#1739) * build: Use system-installed NSS instead of building our own Fixes #1711 * Update CI * Fix docs * Fix Dockerfile * Fix * build-essential * Try and search for nss * Try to get newest versions * More fixes * Restore Windows link.exe fix * Install pkg-config * Remove MSYS2 linker * Retain ability to build NSS from source * Update Linux instructions * Try and find MSYS2 library path * Retry * Again * Again * Again * Again * Again * Again * Again * Again * Again * Again * Revert many things, keep building NSS from source unless system version is OK * Fixes * Fixes * debug * Debug * Fixes * Compare versions with the `semver` crate * Use NSS version from code in CI * File has other name * Update .github/actions/nss/action.yml Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review comments. Not ready yet. Need to determine what to do in `nss_dir()`. See comments. 
* Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review * Updates to README * Remove `nss_dir()` --------- Signed-off-by: Lars Eggert Co-authored-by: Martin Thomson --- .github/actions/nss/action.yml | 33 +++++++++ .github/workflows/check.yml | 22 ++---- README.md | 114 ++++++++++++++++++----------- neqo-crypto/Cargo.toml | 1 + neqo-crypto/bindings/bindings.toml | 5 -- neqo-crypto/bindings/mozpkix.hpp | 1 - neqo-crypto/build.rs | 105 +++++++++++++++----------- neqo-crypto/min_version.txt | 1 + neqo-crypto/src/err.rs | 30 +++++++- neqo-crypto/src/lib.rs | 3 +- neqo-crypto/src/min_version.rs | 9 +++ 11 files changed, 214 insertions(+), 110 deletions(-) delete mode 100644 neqo-crypto/bindings/mozpkix.hpp create mode 100644 neqo-crypto/min_version.txt create mode 100644 neqo-crypto/src/min_version.rs diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 23232ebc13..ec6f13eaf8 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -16,16 +16,46 @@ inputs: runs: using: composite steps: + - name: Check system NSS version + shell: bash + run: | + if ! command -v pkg-config &> /dev/null; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + if ! pkg-config --exists nss; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_VERSION="$(pkg-config --modversion nss)" + if [ "$?" -ne 0 ]; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_MAJOR=$(echo "$NSS_VERSION" | cut -d. -f1) + NSS_MINOR=$(echo "$NSS_VERSION" | cut -d. -f2) + REQ_NSS_MAJOR=$(cat neqo-crypto/min_version.txt | cut -d. -f1) + REQ_NSS_MINOR=$(cat neqo-crypto/min_version.txt | cut -d. -f2) + if [ "$NSS_MAJOR" -lt "REQ_NSS_MAJOR" ] || [ "$NSS_MAJOR" -eq "REQ_NSS_MAJOR" -a "$NSS_MINOR" -lt "REQ_NSS_MINOR"]; then + echo "System NSS is too old: $NSS_VERSION" + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + echo "System NSS is suitable: $NSS_VERSION" + echo "BUILD_NSS=0" >> "$GITHUB_ENV" + # Ideally, we'd use this. But things are sufficiently flaky that we're better off # trying both hg and git. Leaving this here in case we want to re-try in the future. 
# # - name: Checkout NSPR + # if: env.BUILD_NSS == '1' # uses: actions/checkout@v4 # with: # repository: "nss-dev/nspr" # path: ${{ github.workspace }}/nspr # - name: Checkout NSS + # if: env.BUILD_NSS == '1' # uses: actions/checkout@v4 # with: # repository: "nss-dev/nss" @@ -33,18 +63,21 @@ runs: - name: Checkout NSPR shell: bash + if: env.BUILD_NSS == '1' run: | hg clone https://hg.mozilla.org/projects/nspr "${{ github.workspace }}/nspr" || \ git clone --depth=1 https://github.com/nss-dev/nspr "${{ github.workspace }}/nspr" - name: Checkout NSS shell: bash + if: env.BUILD_NSS == '1' run: | hg clone https://hg.mozilla.org/projects/nss "${{ github.workspace }}/nss" || \ git clone --depth=1 https://github.com/nss-dev/nss "${{ github.workspace }}/nss" - name: Build shell: bash + if: env.BUILD_NSS == '1' run: | if [ "${{ inputs.type }}" != "Debug" ]; then # We want to do an optimized build for accurate CPU profiling, but diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 9dc8ff2b7f..4e47961d8e 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -49,33 +49,21 @@ jobs: sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - # In addition to installing dependencies, first make sure System Integrity Protection (SIP) - # is disabled on this MacOS runner. This is needed to allow the NSS libraries to be loaded - # from the build directory and avoid various other test failures. This seems to always be - # the case on any macos-13 runner, but not consistently on macos-latest (which is currently - # macos-12, FWIW). - name: Install dependencies (MacOS) if: runner.os == 'MacOS' run: | - csrutil status | grep disabled - brew install ninja mercurial llvm + brew update + brew install llvm nss echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" - ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python - # python3 -m pip install gyp-next - # Above does not work, since pypi only has gyp 0.15.0, which is too old - # for the homebrew python3. Install from source instead. - python3 -m pip install git+https://github.com/nodejs/gyp-next - python3 -m pip install packaging - echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - - name: Use MSYS2 environment and install more dependencies (Windows) + - name: Install dependencies (Windows) if: runner.os == 'Windows' run: | # shellcheck disable=SC2028 { - echo "C:\\msys64\\usr\\bin" - echo "C:\\msys64\\mingw64\\bin" + echo C:/msys64/usr/bin + echo C:/msys64/mingw64/bin } >> "$GITHUB_PATH" /c/msys64/usr/bin/pacman -S --noconfirm nsinstall python3 -m pip install git+https://github.com/nodejs/gyp-next diff --git a/README.md b/README.md index 31d6ab9e94..beadf22ecf 100644 --- a/README.md +++ b/README.md @@ -1,82 +1,102 @@ -# Neqo, an Implementation of QUIC written in Rust +# Neqo, an Implementation of QUIC in Rust ![neqo logo](https://github.com/mozilla/neqo/raw/main/neqo.png "neqo logo") -To run test HTTP/3 programs (neqo-client and neqo-server): +To build Neqo: -* `cargo build` -* `./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db` -* `./target/debug/neqo-client http://127.0.0.1:12345/` - -If a "Failure to load dynamic library" error happens at runtime, do ```shell -export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" +cargo build ``` -On a macOS, do +This will use a system-installed [NSS][NSS] library if it is new enough. 
(See "Build with Separate NSS/NSPR" below if NSS is not installed or it is deemed too old.) + +To run test HTTP/3 programs (`neqo-client` and `neqo-server`): + ```shell -export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +./target/debug/neqo-server '[::]:12345' +./target/debug/neqo-client 'https://[::]:12345/' ``` -## Faster Builds with Separate NSS/NSPR +## Build with separate NSS/NSPR -You can clone NSS (https://hg.mozilla.org/projects/nss) and NSPR -(https://hg.mozilla.org/projects/nspr) into the same directory and export an +You can clone [NSS][NSS] and [NSPR][NSPR] into the same directory and export an environment variable called `NSS_DIR` pointing to NSS. This causes the build to use the existing NSS checkout. However, in order to run anything that depends -on NSS, you need to set `$\[DY]LD\_LIBRARY\_PATH` to point to -`$NSS_DIR/../dist/Debug/lib`. +on NSS, you need to set an environment as follows: + +### Linux + +```shell +export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" +``` + +### macOS + +```shell +export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +``` -Note: If you did not compile NSS separately, you need to have mercurial (hg), installed. -NSS builds require gyp, and ninja (or ninja-build) to be present also. +Note: If you did not already compile NSS separately, you need to have +[Mercurial (hg)][HG], installed. NSS builds require [GYP][GYP] and +[Ninja][NINJA] to be installed. ## Debugging Neqo -### QUIC Logging +### QUIC logging -Enable [QLOG](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/) with: +Enable generation of [QLOG][QLOG] logs with: -``` -$ mkdir "$logdir" -$ ./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db --qlog-dir "$logdir" -$ ./target/debug/neqo-client 'https://[::]:12345/' --qlog-dir "$logdir" +```shell +target/debug/neqo-server '[::]:12345' --qlog-dir . +target/debug/neqo-client 'https://[::]:12345/' --qlog-dir . ``` -You may use https://qvis.quictools.info/ by uploading the QLOG files and visualize the flows. +You can of course specify a different directory for the QLOG files. +You can upload QLOG files to [qvis][QVIS] to visualize the flows. -### Using SSLKEYLOGFILE to decrypt Wireshark logs +### Using `SSLKEYLOGFILE` to decrypt Wireshark logs -[Info here](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format) - -TODO: What is the minimum Wireshark version needed? -TODO: Above link may be incorrect, protocol now called TLS instead of SSL? +You can export TLS keys by setting the `SSLKEYLOGFILE` environment variable +to a filename to instruct NSS to dump keys in the +[standard format](https://datatracker.ietf.org/doc/draft-ietf-tls-keylogfile/) +to enable decryption by [Wireshark](https://wiki.wireshark.org/TLS) and other tools. ### Using RUST_LOG effectively As documented in the [env_logger documentation](https://docs.rs/env_logger/), the `RUST_LOG` environment variable can be used to selectively enable log messages -from Rust code. This works for Neqo's cmdline tools, as well as for when Neqo is +from Rust code. This works for Neqo's command line tools, as well as for when Neqo is incorporated into Gecko, although [Gecko needs to be built in debug mode](https://developer.mozilla.org/en-US/docs/Mozilla/Developer_guide/Build_Instructions/Configuring_Build_Options). Some examples: -1. `RUST_LOG=neqo_transport::dump ./mach run` lists sent and received QUIC - packets and their frames' contents only. 
-1. `RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run` sets a - 'debug' log level for transport, 'trace' level for http3, and 'info' log + +1. ```shell + RUST_LOG=neqo_transport::dump ./mach run + ``` + + lists sent and received QUIC packets and their frames' contents only. + +1. ```shell + RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run + ``` + + sets a `debug` log level for `transport`, `trace` level for `http3`, and `info` log level for all other Rust crates, both Neqo and others used by Gecko. -1. `RUST_LOG=neqo=trace,error ./mach run` sets `trace` level for all modules - starting with "neqo", and sets `error` as minimum log level for other - unrelated Rust log messages. +1. ```shell + RUST_LOG=neqo=trace,error ./mach run + ``` + + sets `trace` level for all modules starting with `neqo`, and sets `error` as minimum log level for other unrelated Rust log messages. -### Trying In-development Neqo code in Gecko +### Trying in-development Neqo code in Gecko In a checked-out copy of Gecko source, set `[patches.*]` values for the four Neqo crates to local versions in the root `Cargo.toml`. For example, if Neqo was checked out to `/home/alice/git/neqo`, add the following lines to the root `Cargo.toml`. -``` +```toml [patch."https://github.com/mozilla/neqo"] neqo-http3 = { path = "/home/alice/git/neqo/neqo-http3" } neqo-transport = { path = "/home/alice/git/neqo/neqo-transport" } @@ -87,11 +107,23 @@ neqo-crypto = { path = "/home/alice/git/neqo/neqo-crypto" } Then run the following: -``` +```shell ./mach vendor rust ``` -Compile Gecko as usual with `./mach build`. +Compile Gecko as usual with + +```shell +./mach build +``` Note: Using newer Neqo code with Gecko may also require changes (likely to `neqo_glue`) if something has changed. 
+ +[NSS]: https://hg.mozilla.org/projects/nss +[NSPR]: https://hg.mozilla.org/projects/nspr +[GYP]: https://github.com/nodejs/gyp-next +[HG]: https://www.mercurial-scm.org/ +[NINJA]: https://ninja-build.org/ +[QLOG]: https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/ +[QVIS]: https://qvis.quictools.info/ diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 588d084741..d2f70a5714 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -21,6 +21,7 @@ neqo-common = { path = "../neqo-common" } # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 bindgen = { version = "0.69", default-features = false, features = ["runtime"] } mozbuild = { version = "0.1", default-features = false, optional = true } +semver = { version = "1.0", default-features = false } serde = { version = "1.0", default-features = false } serde_derive = { version = "1.0", default-features = false } toml = { version = "0.5", default-features = false } diff --git a/neqo-crypto/bindings/bindings.toml b/neqo-crypto/bindings/bindings.toml index 3e5c1fdf7d..72c6d524d5 100644 --- a/neqo-crypto/bindings/bindings.toml +++ b/neqo-crypto/bindings/bindings.toml @@ -265,8 +265,3 @@ enums = [ [nspr_time] types = ["PRTime"] functions = ["PR_Now"] - -[mozpkix] -cplusplus = true -types = ["mozilla::pkix::ErrorCode"] -enums = ["mozilla::pkix::ErrorCode"] diff --git a/neqo-crypto/bindings/mozpkix.hpp b/neqo-crypto/bindings/mozpkix.hpp deleted file mode 100644 index d0a6cb5861..0000000000 --- a/neqo-crypto/bindings/mozpkix.hpp +++ /dev/null @@ -1 +0,0 @@ -#include "mozpkix/pkixnss.h" \ No newline at end of file diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index c4c2a73e75..2dd4543797 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -12,8 +12,13 @@ use std::{ }; use bindgen::Builder; +use semver::{Version, VersionReq}; use serde_derive::Deserialize; +#[path = "src/min_version.rs"] +mod min_version; +use min_version::MINIMUM_NSS_VERSION; + const BINDINGS_DIR: &str = "bindings"; const BINDINGS_CONFIG: &str = "bindings.toml"; @@ -90,46 +95,6 @@ fn setup_clang() { } } -fn nss_dir() -> PathBuf { - let dir = if let Ok(dir) = env::var("NSS_DIR") { - let path = PathBuf::from(dir.trim()); - assert!( - !path.is_relative(), - "The NSS_DIR environment variable is expected to be an absolute path." - ); - path - } else { - let out_dir = env::var("OUT_DIR").unwrap(); - let dir = Path::new(&out_dir).join("nss"); - if !dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nss", - dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nss"); - } - let nspr_dir = Path::new(&out_dir).join("nspr"); - if !nspr_dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nspr", - nspr_dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nspr"); - } - dir - }; - assert!(dir.is_dir(), "NSS_DIR {dir:?} doesn't exist"); - // Note that this returns a relative path because UNC - // paths on windows cause certain tools to explode. - dir -} - fn get_bash() -> PathBuf { // If BASH is set, use that. 
if let Ok(bash) = env::var("BASH") { @@ -295,11 +260,63 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool .expect("couldn't write bindings"); } -fn setup_standalone() -> Vec { +fn pkg_config() -> Vec { + let modversion = Command::new("pkg-config") + .args(["--modversion", "nss"]) + .output() + .expect("pkg-config reports NSS as absent") + .stdout; + let modversion = String::from_utf8(modversion).expect("non-UTF8 from pkg-config"); + let modversion = modversion.trim(); + // The NSS version number does not follow semver numbering, because it omits the patch version + // when that's 0. Deal with that. + let modversion_for_cmp = if modversion.chars().filter(|c| *c == '.').count() == 1 { + modversion.to_owned() + ".0" + } else { + modversion.to_owned() + }; + let modversion_for_cmp = + Version::parse(&modversion_for_cmp).expect("NSS version not in semver format"); + let version_req = VersionReq::parse(&format!(">={}", MINIMUM_NSS_VERSION.trim())).unwrap(); + assert!( + version_req.matches(&modversion_for_cmp), + "neqo has NSS version requirement {version_req}, found {modversion}" + ); + + let cfg = Command::new("pkg-config") + .args(["--cflags", "--libs", "nss"]) + .output() + .expect("NSS flags not returned by pkg-config") + .stdout; + let cfg_str = String::from_utf8(cfg).expect("non-UTF8 from pkg-config"); + + let mut flags: Vec = Vec::new(); + for f in cfg_str.split(' ') { + if let Some(include) = f.strip_prefix("-I") { + flags.push(String::from(f)); + println!("cargo:include={include}"); + } else if let Some(path) = f.strip_prefix("-L") { + println!("cargo:rustc-link-search=native={path}"); + } else if let Some(lib) = f.strip_prefix("-l") { + println!("cargo:rustc-link-lib=dylib={lib}"); + } else { + println!("Warning: Unknown flag from pkg-config: {f}"); + } + } + + flags +} + +fn setup_standalone(nss: &str) -> Vec { setup_clang(); println!("cargo:rerun-if-env-changed=NSS_DIR"); - let nss = nss_dir(); + let nss = PathBuf::from(nss); + assert!( + !nss.is_relative(), + "The NSS_DIR environment variable is expected to be an absolute path." 
+ ); + build_nss(nss.clone()); // $NSS_DIR/../dist/ @@ -406,8 +423,10 @@ fn setup_for_gecko() -> Vec { fn main() { let flags = if cfg!(feature = "gecko") { setup_for_gecko() + } else if let Ok(nss_dir) = env::var("NSS_DIR") { + setup_standalone(nss_dir.trim()) } else { - setup_standalone() + pkg_config() }; let config_file = PathBuf::from(BINDINGS_DIR).join(BINDINGS_CONFIG); diff --git a/neqo-crypto/min_version.txt b/neqo-crypto/min_version.txt new file mode 100644 index 0000000000..422c9c7093 --- /dev/null +++ b/neqo-crypto/min_version.txt @@ -0,0 +1 @@ +3.98 diff --git a/neqo-crypto/src/err.rs b/neqo-crypto/src/err.rs index 187303d2a9..8d4f239a0b 100644 --- a/neqo-crypto/src/err.rs +++ b/neqo-crypto/src/err.rs @@ -16,13 +16,39 @@ mod codes { #![allow(non_snake_case)] include!(concat!(env!("OUT_DIR"), "/nss_secerr.rs")); include!(concat!(env!("OUT_DIR"), "/nss_sslerr.rs")); - include!(concat!(env!("OUT_DIR"), "/mozpkix.rs")); } -pub use codes::{mozilla_pkix_ErrorCode as mozpkix, SECErrorCodes as sec, SSLErrorCodes as ssl}; +pub use codes::{SECErrorCodes as sec, SSLErrorCodes as ssl}; pub mod nspr { include!(concat!(env!("OUT_DIR"), "/nspr_err.rs")); } +pub mod mozpkix { + // These are manually extracted from the many bindings generated + // by bindgen when provided with the simple header: + // #include "mozpkix/pkixnss.h" + + #[allow(non_camel_case_types)] + pub type mozilla_pkix_ErrorCode = ::std::os::raw::c_int; + pub const MOZILLA_PKIX_ERROR_KEY_PINNING_FAILURE: mozilla_pkix_ErrorCode = -16384; + pub const MOZILLA_PKIX_ERROR_CA_CERT_USED_AS_END_ENTITY: mozilla_pkix_ErrorCode = -16383; + pub const MOZILLA_PKIX_ERROR_INADEQUATE_KEY_SIZE: mozilla_pkix_ErrorCode = -16382; + pub const MOZILLA_PKIX_ERROR_V1_CERT_USED_AS_CA: mozilla_pkix_ErrorCode = -16381; + pub const MOZILLA_PKIX_ERROR_NO_RFC822NAME_MATCH: mozilla_pkix_ErrorCode = -16380; + pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_CERTIFICATE: mozilla_pkix_ErrorCode = -16379; + pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_ISSUER_CERTIFICATE: mozilla_pkix_ErrorCode = -16378; + pub const MOZILLA_PKIX_ERROR_SIGNATURE_ALGORITHM_MISMATCH: mozilla_pkix_ErrorCode = -16377; + pub const MOZILLA_PKIX_ERROR_OCSP_RESPONSE_FOR_CERT_MISSING: mozilla_pkix_ErrorCode = -16376; + pub const MOZILLA_PKIX_ERROR_VALIDITY_TOO_LONG: mozilla_pkix_ErrorCode = -16375; + pub const MOZILLA_PKIX_ERROR_REQUIRED_TLS_FEATURE_MISSING: mozilla_pkix_ErrorCode = -16374; + pub const MOZILLA_PKIX_ERROR_INVALID_INTEGER_ENCODING: mozilla_pkix_ErrorCode = -16373; + pub const MOZILLA_PKIX_ERROR_EMPTY_ISSUER_NAME: mozilla_pkix_ErrorCode = -16372; + pub const MOZILLA_PKIX_ERROR_ADDITIONAL_POLICY_CONSTRAINT_FAILED: mozilla_pkix_ErrorCode = + -16371; + pub const MOZILLA_PKIX_ERROR_SELF_SIGNED_CERT: mozilla_pkix_ErrorCode = -16370; + pub const MOZILLA_PKIX_ERROR_MITM_DETECTED: mozilla_pkix_ErrorCode = -16369; + pub const END_OF_LIST: mozilla_pkix_ErrorCode = -16368; +} + pub type Res = Result; #[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)] diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 45f61f6127..33fe623b17 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -59,7 +59,8 @@ pub use self::{ ssl::Opt, }; -const MINIMUM_NSS_VERSION: &str = "3.97"; +mod min_version; +use min_version::MINIMUM_NSS_VERSION; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] #[allow(clippy::upper_case_acronyms)] diff --git a/neqo-crypto/src/min_version.rs b/neqo-crypto/src/min_version.rs new file mode 100644 index 0000000000..4386371b1b --- /dev/null 
+++ b/neqo-crypto/src/min_version.rs @@ -0,0 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The minimum version of NSS that is required by this version of neqo. +/// Note that the string may contain whitespace at the beginning and/or end. +pub(crate) const MINIMUM_NSS_VERSION: &str = include_str!("../min_version.txt"); From f1560abfeea51a309bcff70da2325de6e07a8e89 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 10:39:57 +0200 Subject: [PATCH 06/20] chore: Rename feature `fuzzing` to `disable-encryption` (#1767) * chore: Rename feature `fuzzing` to `disable-encryption` Because `cargo fuzz` relies on being able to use `fuzzing` * WIP * More * Add `disable-encryption` feature to CI, to make sure it doesn't rot * shellcheck * Undo * Fix * Address code review and rename `fuzzing` -> `null` * Fix clippy * Address code review --------- Signed-off-by: Lars Eggert --- neqo-crypto/Cargo.toml | 2 +- neqo-crypto/src/aead.rs | 8 +- .../src/{aead_fuzzing.rs => aead_null.rs} | 64 ++++---------- neqo-crypto/src/lib.rs | 12 +-- neqo-crypto/src/selfencrypt.rs | 2 +- neqo-crypto/tests/aead.rs | 3 +- neqo-crypto/tests/selfencrypt.rs | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- neqo-transport/src/connection/mod.rs | 1 - neqo-transport/src/connection/params.rs | 13 --- .../src/connection/tests/handshake.rs | 6 +- neqo-transport/src/connection/tests/mod.rs | 2 +- .../connection/tests/{fuzzing.rs => null.rs} | 8 +- neqo-transport/src/crypto.rs | 88 +++---------------- neqo-transport/src/packet/mod.rs | 2 +- neqo-transport/src/packet/retry.rs | 1 - neqo-transport/tests/common/mod.rs | 9 +- neqo-transport/tests/conn_vectors.rs | 2 +- neqo-transport/tests/retry.rs | 2 +- 20 files changed, 53 insertions(+), 178 deletions(-) rename neqo-crypto/src/{aead_fuzzing.rs => aead_null.rs} (55%) rename neqo-transport/src/connection/tests/{fuzzing.rs => null.rs} (84%) diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index d2f70a5714..47337d99c0 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -31,7 +31,7 @@ test-fixture = { path = "../test-fixture" } [features] gecko = ["mozbuild"] -fuzzing = [] +disable-encryption = [] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index bf7d7fe9d7..21027d55b2 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -63,13 +63,7 @@ impl RealAead { /// # Errors /// /// Returns `Error` when the supporting NSS functions fail. - pub fn new( - _fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { + pub fn new(version: Version, cipher: Cipher, secret: &SymKey, prefix: &str) -> Res { let s: *mut PK11SymKey = **secret; unsafe { Self::from_raw(version, cipher, s, prefix) } } diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_null.rs similarity index 55% rename from neqo-crypto/src/aead_fuzzing.rs rename to neqo-crypto/src/aead_null.rs index 1f3bfb14bd..2d5656de73 100644 --- a/neqo-crypto/src/aead_fuzzing.rs +++ b/neqo-crypto/src/aead_null.rs @@ -4,87 +4,63 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
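The acceptance check in `decrypt` in the file below is easier to see with concrete buffers: input is accepted only when its last 16 bytes equal the fixed marker tag and any non-empty body is not entirely zero, so stray zero padding is not mistaken for a valid packet. A stand-alone sketch of that predicate (illustrative only; the function name is made up and nothing here touches NSS):

```rust
const AEAD_NULL_TAG: &[u8] = &[0x0a; 16]; // same fixed marker as in the file below

/// Hypothetical predicate mirroring the acceptance test in `AeadNull::decrypt`.
fn looks_null_sealed(input: &[u8]) -> bool {
    if input.len() < AEAD_NULL_TAG.len() {
        return false;
    }
    let body = input.len() - AEAD_NULL_TAG.len();
    &input[body..] == AEAD_NULL_TAG && (body == 0 || input[..body].iter().any(|x| *x != 0))
}

fn main() {
    assert!(looks_null_sealed(&[&b"hi"[..], AEAD_NULL_TAG].concat()));
    assert!(!looks_null_sealed(&[0u8; 32])); // all-zero padding is rejected
}
```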
+#![cfg(feature = "disable-encryption")] + use std::fmt; use crate::{ constants::{Cipher, Version}, err::{sec::SEC_ERROR_BAD_DATA, Error, Res}, p11::SymKey, - RealAead, }; -pub const FIXED_TAG_FUZZING: &[u8] = &[0x0a; 16]; +pub const AEAD_NULL_TAG: &[u8] = &[0x0a; 16]; -pub struct FuzzingAead { - real: Option, -} +pub struct AeadNull {} -impl FuzzingAead { +impl AeadNull { #[allow(clippy::missing_errors_doc)] - pub fn new( - fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { - let real = if fuzzing { - None - } else { - Some(RealAead::new(false, version, cipher, secret, prefix)?) - }; - Ok(Self { real }) + pub fn new(_version: Version, _cipher: Cipher, _secret: &SymKey, _prefix: &str) -> Res { + Ok(Self {}) } #[must_use] pub fn expansion(&self) -> usize { - if let Some(aead) = &self.real { - aead.expansion() - } else { - FIXED_TAG_FUZZING.len() - } + AEAD_NULL_TAG.len() } #[allow(clippy::missing_errors_doc)] pub fn encrypt<'a>( &self, - count: u64, - aad: &[u8], + _count: u64, + _aad: &[u8], input: &[u8], output: &'a mut [u8], ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.encrypt(count, aad, input, output); - } - let l = input.len(); output[..l].copy_from_slice(input); - output[l..l + 16].copy_from_slice(FIXED_TAG_FUZZING); + output[l..l + 16].copy_from_slice(AEAD_NULL_TAG); Ok(&output[..l + 16]) } #[allow(clippy::missing_errors_doc)] pub fn decrypt<'a>( &self, - count: u64, - aad: &[u8], + _count: u64, + _aad: &[u8], input: &[u8], output: &'a mut [u8], ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.decrypt(count, aad, input, output); - } - - if input.len() < FIXED_TAG_FUZZING.len() { + if input.len() < AEAD_NULL_TAG.len() { return Err(Error::from(SEC_ERROR_BAD_DATA)); } - let len_encrypted = input.len() - FIXED_TAG_FUZZING.len(); + let len_encrypted = input.len() - AEAD_NULL_TAG.len(); // Check that: // 1) expansion is all zeros and // 2) if the encrypted data is also supplied that at least some values are no zero // (otherwise padding will be interpreted as a valid packet) - if &input[len_encrypted..] == FIXED_TAG_FUZZING + if &input[len_encrypted..] == AEAD_NULL_TAG && (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0)) { output[..len_encrypted].copy_from_slice(&input[..len_encrypted]); @@ -95,12 +71,8 @@ impl FuzzingAead { } } -impl fmt::Debug for FuzzingAead { +impl fmt::Debug for AeadNull { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if let Some(a) = &self.real { - a.fmt(f) - } else { - write!(f, "[FUZZING AEAD]") - } + write!(f, "[NULL AEAD]") } } diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 33fe623b17..b82b225d40 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -8,8 +8,8 @@ #![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code. 
mod aead; -#[cfg(feature = "fuzzing")] -pub mod aead_fuzzing; +#[cfg(feature = "disable-encryption")] +pub mod aead_null; pub mod agent; mod agentio; mod auth; @@ -33,12 +33,12 @@ mod time; use std::{ffi::CString, path::PathBuf, ptr::null, sync::OnceLock}; -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] pub use self::aead::RealAead as Aead; -#[cfg(feature = "fuzzing")] +#[cfg(feature = "disable-encryption")] pub use self::aead::RealAead; -#[cfg(feature = "fuzzing")] -pub use self::aead_fuzzing::FuzzingAead as Aead; +#[cfg(feature = "disable-encryption")] +pub use self::aead_null::AeadNull as Aead; pub use self::{ agent::{ Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken, diff --git a/neqo-crypto/src/selfencrypt.rs b/neqo-crypto/src/selfencrypt.rs index 1130c35250..d0a85830b0 100644 --- a/neqo-crypto/src/selfencrypt.rs +++ b/neqo-crypto/src/selfencrypt.rs @@ -47,7 +47,7 @@ impl SelfEncrypt { debug_assert_eq!(salt.len(), Self::SALT_LENGTH); let salt = hkdf::import_key(self.version, salt)?; let secret = hkdf::extract(self.version, self.cipher, Some(&salt), k)?; - Aead::new(false, self.version, self.cipher, &secret, "neqo self") + Aead::new(self.version, self.cipher, &secret, "neqo self") } /// Rotate keys. This causes any previous key that is being held to be replaced by the current diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index 5cf0034aec..f8416ed9a7 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -5,7 +5,7 @@ // except according to those terms. #![warn(clippy::pedantic)] -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use neqo_crypto::{ constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, @@ -40,7 +40,6 @@ fn make_aead(cipher: Cipher) -> Aead { ) .expect("make a secret"); Aead::new( - false, TLS_VERSION_1_3, cipher, &secret, diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 4c574a3ae9..b20aa27ee6 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 32e3ae7e35..27f43fd93f 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -28,7 +28,7 @@ url = { version = "2.5", default-features = false } test-fixture = { path = "../test-fixture" } [features] -fuzzing = ["neqo-transport/fuzzing", "neqo-crypto/fuzzing"] +disable-encryption = ["neqo-transport/disable-encryption", "neqo-crypto/disable-encryption"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 3da60bdabb..125da11508 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -27,7 +27,7 @@ test-fixture = { path = "../test-fixture" } [features] bench = [] -fuzzing = ["neqo-crypto/fuzzing"] +disable-encryption = ["neqo-crypto/disable-encryption"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 535d3f4084..3bf6b91263 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -383,7 +383,6 @@ impl Connection { agent, protocols.iter().map(P::as_ref).map(String::from).collect(), Rc::clone(&tphandler), - conn_params.is_fuzzing(), )?; let stats = StatsCell::default(); diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index 72d1efa3ee..d8aa617024 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -77,7 +77,6 @@ pub struct ConnectionParameters { outgoing_datagram_queue: usize, incoming_datagram_queue: usize, fast_pto: u8, - fuzzing: bool, grease: bool, pacing: bool, } @@ -100,7 +99,6 @@ impl Default for ConnectionParameters { outgoing_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, incoming_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, fast_pto: FAST_PTO_SCALE, - fuzzing: false, grease: true, pacing: true, } @@ -324,17 +322,6 @@ impl ConnectionParameters { self } - #[must_use] - pub fn is_fuzzing(&self) -> bool { - self.fuzzing - } - - #[must_use] - pub fn fuzzing(mut self, enable: bool) -> Self { - self.fuzzing = enable; - self - } - #[must_use] pub fn is_greasing(&self) -> bool { self.grease diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 4b2a18642f..f2103523ec 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -16,7 +16,7 @@ use neqo_common::{event::Provider, qdebug, Datagram}; use neqo_crypto::{ constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, }; -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] use test_fixture::datagram; use test_fixture::{ assertions, assertions::assert_coalesced_0rtt, fixture_init, now, split_datagram, DEFAULT_ADDR, @@ -606,7 +606,7 @@ fn reorder_1rtt() { } } -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn corrupted_initial() { let mut client = default_client(); @@ -809,7 +809,7 @@ fn anti_amplification() { assert_eq!(*server.state(), State::Confirmed); } -#[cfg(not(feature = 
"fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn garbage_initial() { let mut client = default_client(); diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index 6f598fb23e..c8c87a0df0 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -37,11 +37,11 @@ mod ackrate; mod cc; mod close; mod datagram; -mod fuzzing; mod handshake; mod idle; mod keys; mod migration; +mod null; mod priority; mod recovery; mod resumption; diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/null.rs similarity index 84% rename from neqo-transport/src/connection/tests/fuzzing.rs rename to neqo-transport/src/connection/tests/null.rs index b12100f8ad..e4d60445c6 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/null.rs @@ -4,9 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg(feature = "fuzzing")] +#![cfg(feature = "disable-encryption")] -use neqo_crypto::aead_fuzzing::FIXED_TAG_FUZZING; +use neqo_crypto::aead_null::AEAD_NULL_TAG; use test_fixture::now; use super::{connect_force_idle, default_client, default_server}; @@ -24,7 +24,7 @@ fn no_encryption() { client.stream_send(stream_id, DATA_CLIENT).unwrap(); let client_pkt = client.process_output(now()).dgram().unwrap(); - assert!(client_pkt[..client_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_CLIENT)); + assert!(client_pkt[..client_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_CLIENT)); server.process_input(&client_pkt, now()); let mut buf = vec![0; 100]; @@ -33,7 +33,7 @@ fn no_encryption() { assert_eq!(&buf[..len], DATA_CLIENT); server.stream_send(stream_id, DATA_SERVER).unwrap(); let server_pkt = server.process_output(now()).dgram().unwrap(); - assert!(server_pkt[..server_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_SERVER)); + assert!(server_pkt[..server_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_SERVER)); client.process_input(&server_pkt, now()); let (len, _) = client.stream_recv(stream_id, &mut buf).unwrap(); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index acc02172d5..54bfe622cf 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -69,7 +69,6 @@ impl Crypto { mut agent: Agent, protocols: Vec, tphandler: TpHandler, - fuzzing: bool, ) -> Res { agent.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?; agent.set_ciphers(&[ @@ -102,7 +101,6 @@ impl Crypto { tls: agent, streams: CryptoStreams::default(), states: CryptoStates { - fuzzing, ..CryptoStates::default() }, }) @@ -420,7 +418,6 @@ pub struct CryptoDxState { /// The total number of operations that are remaining before the keys /// become exhausted and can't be used any more. 
invocations: PacketNumber, - fuzzing: bool, } impl CryptoDxState { @@ -431,7 +428,6 @@ impl CryptoDxState { epoch: Epoch, secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Self { qdebug!( "Making {:?} {} CryptoDxState, v={:?} cipher={}", @@ -445,19 +441,11 @@ impl CryptoDxState { version, direction, epoch: usize::from(epoch), - aead: Aead::new( - fuzzing, - TLS_VERSION_1_3, - cipher, - secret, - version.label_prefix(), - ) - .unwrap(), + aead: Aead::new(TLS_VERSION_1_3, cipher, secret, version.label_prefix()).unwrap(), hpkey: HpKey::extract(TLS_VERSION_1_3, cipher, secret, &hplabel).unwrap(), used_pn: 0..0, min_pn: 0, invocations: Self::limit(direction, cipher), - fuzzing, } } @@ -466,7 +454,6 @@ impl CryptoDxState { direction: CryptoDxDirection, label: &str, dcid: &[u8], - fuzzing: bool, ) -> Self { qtrace!("new_initial {:?} {}", version, ConnectionIdRef::from(dcid)); let salt = version.initial_salt(); @@ -482,14 +469,7 @@ impl CryptoDxState { let secret = hkdf::expand_label(TLS_VERSION_1_3, cipher, &initial_secret, &[], label).unwrap(); - Self::new( - version, - direction, - TLS_EPOCH_INITIAL, - &secret, - cipher, - fuzzing, - ) + Self::new(version, direction, TLS_EPOCH_INITIAL, &secret, cipher) } /// Determine the confidentiality and integrity limits for the cipher. @@ -549,7 +529,6 @@ impl CryptoDxState { direction: self.direction, epoch: self.epoch + 1, aead: Aead::new( - self.fuzzing, TLS_VERSION_1_3, cipher, next_secret, @@ -560,7 +539,6 @@ impl CryptoDxState { used_pn: pn..pn, min_pn: pn, invocations, - fuzzing: self.fuzzing, } } @@ -696,7 +674,7 @@ impl CryptoDxState { Ok(res.to_vec()) } - #[cfg(all(test, not(feature = "fuzzing")))] + #[cfg(all(test, not(feature = "disable-encryption")))] pub(crate) fn test_default() -> Self { // This matches the value in packet.rs const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; @@ -705,7 +683,6 @@ impl CryptoDxState { CryptoDxDirection::Write, "server in", CLIENT_CID, - false, ) } @@ -759,7 +736,6 @@ pub(crate) struct CryptoDxAppData { cipher: Cipher, // Not the secret used to create `self.dx`, but the one needed for the next iteration. next_secret: SymKey, - fuzzing: bool, } impl CryptoDxAppData { @@ -768,20 +744,11 @@ impl CryptoDxAppData { dir: CryptoDxDirection, secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Res { Ok(Self { - dx: CryptoDxState::new( - version, - dir, - TLS_EPOCH_APPLICATION_DATA, - secret, - cipher, - fuzzing, - ), + dx: CryptoDxState::new(version, dir, TLS_EPOCH_APPLICATION_DATA, secret, cipher), cipher, next_secret: Self::update_secret(cipher, secret)?, - fuzzing, }) } @@ -800,7 +767,6 @@ impl CryptoDxAppData { dx: self.dx.next(&self.next_secret, self.cipher), cipher: self.cipher, next_secret, - fuzzing: self.fuzzing, }) } @@ -834,7 +800,6 @@ pub struct CryptoStates { // If this is set, then we have noticed a genuine update. // Once this time passes, we should switch in new keys. 
read_update_time: Option, - fuzzing: bool, } impl CryptoStates { @@ -989,20 +954,8 @@ impl CryptoStates { ); let mut initial = CryptoState { - tx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Write, - write, - dcid, - self.fuzzing, - ), - rx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Read, - read, - dcid, - self.fuzzing, - ), + tx: CryptoDxState::new_initial(*v, CryptoDxDirection::Write, write, dcid), + rx: CryptoDxState::new_initial(*v, CryptoDxDirection::Read, read, dcid), }; if let Some(prev) = self.initials.get(v) { qinfo!( @@ -1056,7 +1009,6 @@ impl CryptoStates { TLS_EPOCH_ZERO_RTT, secret, cipher, - self.fuzzing, )); } @@ -1097,7 +1049,6 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, write_secret, cipher, - self.fuzzing, ), rx: CryptoDxState::new( version, @@ -1105,7 +1056,6 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, read_secret, cipher, - self.fuzzing, ), }); } @@ -1113,13 +1063,7 @@ impl CryptoStates { pub fn set_application_write_key(&mut self, version: Version, secret: &SymKey) -> Res<()> { debug_assert!(self.app_write.is_none()); debug_assert_ne!(self.cipher, 0); - let mut app = CryptoDxAppData::new( - version, - CryptoDxDirection::Write, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Write, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Write { app.dx.continuation(z)?; @@ -1138,13 +1082,7 @@ impl CryptoStates { ) -> Res<()> { debug_assert!(self.app_write.is_some(), "should have write keys installed"); debug_assert!(self.app_read.is_none()); - let mut app = CryptoDxAppData::new( - version, - CryptoDxDirection::Read, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Read, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Read { app.dx.continuation(z)?; @@ -1286,7 +1224,7 @@ impl CryptoStates { } /// Make some state for removing protection in tests. 
- #[cfg(not(feature = "fuzzing"))] + #[cfg(not(feature = "disable-encryption"))] #[cfg(test)] pub(crate) fn test_default() -> Self { let read = |epoch| { @@ -1299,7 +1237,6 @@ impl CryptoStates { dx: read(epoch), cipher: TLS_AES_128_GCM_SHA256, next_secret: hkdf::import_key(TLS_VERSION_1_3, &[0xaa; 32]).unwrap(), - fuzzing: false, }; let mut initials = HashMap::new(); initials.insert( @@ -1319,11 +1256,10 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } - #[cfg(all(not(feature = "fuzzing"), test))] + #[cfg(all(not(feature = "disable-encryption"), test))] pub(crate) fn test_chacha() -> Self { const SECRET: &[u8] = &[ 0x9a, 0xc3, 0x12, 0xa7, 0xf8, 0x77, 0x46, 0x8e, 0xbe, 0x69, 0x42, 0x27, 0x48, 0xad, @@ -1337,7 +1273,6 @@ impl CryptoStates { direction: CryptoDxDirection::Read, epoch, aead: Aead::new( - false, TLS_VERSION_1_3, TLS_CHACHA20_POLY1305_SHA256, &secret, @@ -1354,11 +1289,9 @@ impl CryptoStates { used_pn: 0..645_971_972, min_pn: 0, invocations: 10, - fuzzing: false, }, cipher: TLS_CHACHA20_POLY1305_SHA256, next_secret: secret.clone(), - fuzzing: false, }; Self { initials: HashMap::new(), @@ -1369,7 +1302,6 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } } diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index d11b3423a4..552e50a1f9 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -868,7 +868,7 @@ impl Deref for DecryptedPacket { } } -#[cfg(all(test, not(feature = "fuzzing")))] +#[cfg(all(test, not(feature = "disable-encryption")))] mod tests { use neqo_common::Encoder; use test_fixture::{fixture_init, now}; diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index 72036d3b49..71193b9100 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -18,7 +18,6 @@ fn make_aead(version: Version) -> Aead { let secret = hkdf::import_key(TLS_VERSION_1_3, version.retry_secret()).unwrap(); Aead::new( - false, TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index faff216eb9..e36e66f753 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -146,14 +146,7 @@ pub fn initial_aead_and_hp(dcid: &[u8], role: Role) -> (Aead, HpKey) { ) .unwrap(); ( - Aead::new( - false, - TLS_VERSION_1_3, - TLS_AES_128_GCM_SHA256, - &secret, - "quic ", - ) - .unwrap(), + Aead::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic ").unwrap(), HpKey::extract(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic hp").unwrap(), ) } diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index f478883075..86fe9d36fc 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -6,7 +6,7 @@ // Tests with the test vectors from the spec. -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use std::{cell::RefCell, rc::Rc}; diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index e583fcae0f..36eff71e7b 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] mod common; From 20ef1be046769efaeb151b21b6cde22af67ee712 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 12:10:59 +0200 Subject: [PATCH 07/20] feat: Turn on TLS greasing (#1760) * feat: Turn on TLS greasing Fixes #1391 Needs #1739 * Make clippy happy --- neqo-crypto/src/agent.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 90085cb759..3d5a8b9f35 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -16,7 +16,7 @@ use std::{ time::Instant, }; -use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; +use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qtrace, qwarn}; pub use crate::{ agentio::{as_c_void, Record, RecordList}, @@ -406,10 +406,7 @@ impl SecretAgent { self.set_option(ssl::Opt::Locking, false)?; self.set_option(ssl::Opt::Tickets, false)?; self.set_option(ssl::Opt::OcspStapling, true)?; - if let Err(e) = self.set_option(ssl::Opt::Grease, grease) { - // Until NSS supports greasing, it's OK to fail here. - qinfo!([self], "Failed to enable greasing {:?}", e); - } + self.set_option(ssl::Opt::Grease, grease)?; Ok(()) } From efc4813affbb8077e2f7f7fb2799be0452fc52ed Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 12:39:04 +0200 Subject: [PATCH 08/20] fix: Address more clippy warnings (#1777) * fix: Address more clippy warnings PR #1764 exports `frame` and `packet` if feature `fuzzing` is enabled. This apparently turns on a bunch of clippy checks that are not on by default? This PR fixes them. Made this separate from #1764 to reduce that PR's size. * Fix clippy * Remove comment --- neqo-transport/src/frame.rs | 26 ++++++++++++-- neqo-transport/src/packet/mod.rs | 61 +++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 8 deletions(-) diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 5a86a07108..d84eb61ce8 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -95,6 +95,12 @@ impl From for CloseError { } } +impl From for Error { + fn from(_err: std::array::TryFromSliceError) -> Self { + Self::FrameEncodingError + } +} + #[derive(PartialEq, Eq, Debug, Default, Clone)] pub struct AckRange { pub(crate) gap: u64, @@ -213,6 +219,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn get_type(&self) -> FrameType { match self { Self::Padding { .. } => FRAME_TYPE_PADDING, @@ -254,6 +261,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn is_stream(&self) -> bool { matches!( self, @@ -269,6 +277,7 @@ impl<'a> Frame<'a> { ) } + #[must_use] pub fn stream_type(fin: bool, nonzero_offset: bool, fill: bool) -> u64 { let mut t = FRAME_TYPE_STREAM; if fin { @@ -285,6 +294,7 @@ impl<'a> Frame<'a> { /// If the frame causes a recipient to generate an ACK within its /// advertised maximum acknowledgement delay. + #[must_use] pub fn ack_eliciting(&self) -> bool { !matches!( self, @@ -294,6 +304,7 @@ impl<'a> Frame<'a> { /// If the frame can be sent in a path probe /// without initiating migration to that path. + #[must_use] pub fn path_probing(&self) -> bool { matches!( self, @@ -307,6 +318,10 @@ impl<'a> Frame<'a> { /// Converts `AckRanges` as encoded in a ACK frame (see -transport /// 19.3.1) into ranges of acked packets (end, start), inclusive of /// start and end values. + /// + /// # Errors + /// + /// Returns an error if the ranges are invalid. 
pub fn decode_ack_frame( largest_acked: u64, first_ack_range: u64, @@ -347,6 +362,7 @@ impl<'a> Frame<'a> { Ok(acked_ranges) } + #[must_use] pub fn dump(&self) -> String { match self { Self::Crypto { offset, data } => { @@ -372,6 +388,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn is_allowed(&self, pt: PacketType) -> bool { match self { Self::Padding { .. } | Self::Ping => true, @@ -386,6 +403,9 @@ impl<'a> Frame<'a> { } } + /// # Errors + /// + /// Returns an error if the frame cannot be decoded. #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match statement. pub fn decode(dec: &mut Decoder<'a>) -> Res { /// Maximum ACK Range Count in ACK Frame @@ -470,7 +490,7 @@ impl<'a> Frame<'a> { FRAME_TYPE_CRYPTO => { let offset = dv(dec)?; let data = d(dec.decode_vvec())?; - if offset + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if offset + u64::try_from(data.len())? > ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Crypto { offset, data }) @@ -497,7 +517,7 @@ impl<'a> Frame<'a> { qtrace!("STREAM frame, with length"); d(dec.decode_vvec())? }; - if o + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if o + u64::try_from(data.len())? > ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Stream { @@ -546,7 +566,7 @@ impl<'a> Frame<'a> { return Err(Error::DecodingFrame); } let srt = d(dec.decode(16))?; - let stateless_reset_token = <&[_; 16]>::try_from(srt).unwrap(); + let stateless_reset_token = <&[_; 16]>::try_from(srt)?; Ok(Self::NewConnectionId { sequence_number, diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 552e50a1f9..0843d050ab 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -256,6 +256,10 @@ impl PacketBuilder { /// Maybe pad with "PADDING" frames. /// Only does so if padding was needed and this is a short packet. /// Returns true if padding was added. + /// + /// # Panics + /// + /// Cannot happen. pub fn pad(&mut self) -> bool { if self.padding && !self.is_long() { self.encoder @@ -290,6 +294,10 @@ impl PacketBuilder { /// The length is filled in after calling `build`. /// Does nothing if there isn't 4 bytes available other than render this builder /// unusable; if `remaining()` returns 0 at any point, call `abort()`. + /// + /// # Panics + /// + /// This will panic if the packet number length is too large. pub fn pn(&mut self, pn: PacketNumber, pn_len: usize) { if self.remaining() < 4 { self.limit = 0; @@ -354,6 +362,10 @@ impl PacketBuilder { } /// Build the packet and return the encoder. + /// + /// # Errors + /// + /// This will return an error if the packet is too large. pub fn build(mut self, crypto: &mut CryptoDxState) -> Res { if self.len() > self.limit { qwarn!("Packet contents are more than the limit"); @@ -378,7 +390,9 @@ impl PacketBuilder { // Calculate the mask. let offset = SAMPLE_OFFSET - self.offsets.pn.len(); - assert!(offset + SAMPLE_SIZE <= ciphertext.len()); + if offset + SAMPLE_SIZE > ciphertext.len() { + return Err(Error::InternalError); + } let sample = &ciphertext[offset..offset + SAMPLE_SIZE]; let mask = crypto.compute_mask(sample)?; @@ -412,6 +426,10 @@ impl PacketBuilder { /// As this is a simple packet, this is just an associated function. /// As Retry is odd (it has to be constructed with leading bytes), /// this returns a [`Vec`] rather than building on an encoder. + /// + /// # Errors + /// + /// This will return an error if AEAD encrypt fails. #[allow(clippy::similar_names)] // scid and dcid are fine here. 
pub fn retry( version: Version, @@ -445,6 +463,7 @@ impl PacketBuilder { /// Make a Version Negotiation packet. #[allow(clippy::similar_names)] // scid and dcid are fine here. + #[must_use] pub fn version_negotiation( dcid: &[u8], scid: &[u8], @@ -556,6 +575,10 @@ impl<'a> PublicPacket<'a> { /// Decode the common parts of a packet. This provides minimal parsing and validation. /// Returns a tuple of a `PublicPacket` and a slice with any remainder from the datagram. + /// + /// # Errors + /// + /// This will return an error if the packet could not be decoded. #[allow(clippy::similar_names)] // For dcid and scid, which are fine. pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> { let mut decoder = Decoder::new(data); @@ -587,7 +610,7 @@ impl<'a> PublicPacket<'a> { } // Generic long header. - let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?).unwrap(); + let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?)?; let dcid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); let scid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); @@ -647,11 +670,14 @@ impl<'a> PublicPacket<'a> { } /// Validate the given packet as though it were a retry. + #[must_use] pub fn is_valid_retry(&self, odcid: &ConnectionId) -> bool { if self.packet_type != PacketType::Retry { return false; } - let version = self.version().unwrap(); + let Some(version) = self.version() else { + return false; + }; let expansion = retry::expansion(version); if self.data.len() <= expansion { return false; @@ -667,6 +693,7 @@ impl<'a> PublicPacket<'a> { .unwrap_or(false) } + #[must_use] pub fn is_valid_initial(&self) -> bool { // Packet has to be an initial, with a DCID of 8 bytes, or a token. // Note: the Server class validates the token and checks the length. @@ -674,32 +701,42 @@ impl<'a> PublicPacket<'a> { && (self.dcid().len() >= 8 || !self.token.is_empty()) } + #[must_use] pub fn packet_type(&self) -> PacketType { self.packet_type } + #[must_use] pub fn dcid(&self) -> ConnectionIdRef<'a> { self.dcid } + /// # Panics + /// + /// This will panic if called for a short header packet. + #[must_use] pub fn scid(&self) -> ConnectionIdRef<'a> { self.scid .expect("should only be called for long header packets") } + #[must_use] pub fn token(&self) -> &'a [u8] { self.token } + #[must_use] pub fn version(&self) -> Option { self.version.and_then(|v| Version::try_from(v).ok()) } + #[must_use] pub fn wire_version(&self) -> WireVersion { debug_assert!(self.version.is_some()); self.version.unwrap_or(0) } + #[must_use] pub fn len(&self) -> usize { self.data.len() } @@ -778,6 +815,9 @@ impl<'a> PublicPacket<'a> { )) } + /// # Errors + /// + /// This will return an error if the packet cannot be decrypted. pub fn decrypt(&self, crypto: &mut CryptoStates, release_at: Instant) -> Res { let cspace: CryptoSpace = self.packet_type.into(); // When we don't have a version, the crypto code doesn't need a version @@ -792,7 +832,9 @@ impl<'a> PublicPacket<'a> { // too small (which is public information). let (key_phase, pn, header, body) = self.decrypt_header(rx)?; qtrace!([rx], "decoded header: {:?}", header); - let rx = crypto.rx(version, cspace, key_phase).unwrap(); + let Some(rx) = crypto.rx(version, cspace, key_phase) else { + return Err(Error::DecryptError); + }; let version = rx.version(); // Version fixup; see above. 
let d = rx.decrypt(pn, &header, body)?; // If this is the first packet ever successfully decrypted @@ -815,8 +857,14 @@ impl<'a> PublicPacket<'a> { } } + /// # Errors + /// + /// This will return an error if the packet is not a version negotiation packet + /// or if the versions cannot be decoded. pub fn supported_versions(&self) -> Res> { - assert_eq!(self.packet_type, PacketType::VersionNegotiation); + if self.packet_type != PacketType::VersionNegotiation { + return Err(Error::InvalidPacket); + } let mut decoder = Decoder::new(&self.data[self.header_len..]); let mut res = Vec::new(); while decoder.remaining() > 0 { @@ -847,14 +895,17 @@ pub struct DecryptedPacket { } impl DecryptedPacket { + #[must_use] pub fn version(&self) -> Version { self.version } + #[must_use] pub fn packet_type(&self) -> PacketType { self.pt } + #[must_use] pub fn pn(&self) -> PacketNumber { self.pn } From 36fae6282b2214e4fea425ee4a952c08acf1c445 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 27 Mar 2024 16:50:21 +0100 Subject: [PATCH 09/20] perf(bin): add criterion benchmarks (#1758) * perf(bin): add criterion benchmarks Wraps the `neqo-client` and `neqo-server` code, starts the server and runs various benchmarks through the client. Benchmarks: - single-request-1gb - single-request-1mb - requests-per-second - handshakes-per-second * Rename benchmark instances * Turn off logging * Remove 100mb sample size restriction * Use v6 * Remove 1gb It just takes too long on the bench machine * Use /dev/null * rework imports * Fix import * Test without CPU pinning * Revert "Test without CPU pinning" This reverts commit a0ef46a3d321aa00aae0d3835ef44533dded46bf. * Pin all but neqo-bin to CPU 0 * Quote package * Add rational for neqo-bin handling * Rework tuples * Pin first * Just duplicate the two calls * Add --workspace flag * Remove taskset from neqo-bin --------- Co-authored-by: Martin Thomson --- .github/workflows/bench.yml | 10 +- neqo-bin/Cargo.toml | 16 +++- neqo-bin/benches/main.rs | 92 +++++++++++++++++++ neqo-bin/src/bin/client.rs | 14 +++ neqo-bin/src/bin/server.rs | 14 +++ neqo-bin/src/{bin => }/client/http09.rs | 3 +- neqo-bin/src/{bin => }/client/http3.rs | 2 +- .../src/{bin/client/main.rs => client/mod.rs} | 65 +++++++++---- neqo-bin/src/lib.rs | 40 +++++++- .../src/{bin/server/main.rs => server/mod.rs} | 46 ++++++---- neqo-bin/src/{bin => }/server/old_https.rs | 0 neqo-bin/src/udp.rs | 2 + 12 files changed, 262 insertions(+), 42 deletions(-) create mode 100644 neqo-bin/benches/main.rs create mode 100644 neqo-bin/src/bin/client.rs create mode 100644 neqo-bin/src/bin/server.rs rename neqo-bin/src/{bin => }/client/http09.rs (99%) rename neqo-bin/src/{bin => }/client/http3.rs (99%) rename neqo-bin/src/{bin/client/main.rs => client/mod.rs} (91%) rename neqo-bin/src/{bin/server/main.rs => server/mod.rs} (94%) rename neqo-bin/src/{bin => }/server/old_https.rs (100%) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 80c51c236d..5df8bcfd91 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -70,11 +70,15 @@ jobs: - name: Prepare machine run: sudo /root/bin/prep.sh - # Pin the benchmark run to core 0 and run all benchmarks at elevated priority. - name: Run cargo bench run: | - taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" bench --features bench -- --noplot | tee results.txt + # Pin all but neqo-bin benchmarks to CPU 0. neqo-bin benchmarks run + # both a client and a server, thus benefiting from multiple CPU cores. 
+ # + # Run all benchmarks at elevated priority. + taskset -c 0 nice -n -20 cargo "+$TOOLCHAIN" bench --workspace --exclude neqo-bin --features bench -- --noplot | tee results.txt + nice -n -20 cargo "+$TOOLCHAIN" bench --package neqo-bin --features bench -- --noplot | tee -a results.txt + # Compare various configurations of neqo against msquic, and gather perf data # during the hyperfine runs. diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index 04210e00db..a165a4ac32 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -11,12 +11,12 @@ license.workspace = true [[bin]] name = "neqo-client" -path = "src/bin/client/main.rs" +path = "src/bin/client.rs" bench = false [[bin]] name = "neqo-server" -path = "src/bin/server/main.rs" +path = "src/bin/server.rs" bench = false [lints] @@ -40,6 +40,18 @@ regex = { version = "1.9", default-features = false, features = ["unicode-perl"] tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = { version = "2.5", default-features = false } +[dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports", "async_tokio"] } +tokio = { version = "1", default-features = false, features = ["sync"] } + +[features] +bench = [] + [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false + +[[bench]] +name = "main" +harness = false +required-features = ["bench"] diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs new file mode 100644 index 0000000000..fe3aba2714 --- /dev/null +++ b/neqo-bin/benches/main.rs @@ -0,0 +1,92 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{path::PathBuf, str::FromStr}; + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use neqo_bin::{client, server}; +use tokio::runtime::Runtime; + +struct Benchmark { + name: String, + requests: Vec, + /// Download resources in series using separate connections. + download_in_series: bool, + sample_size: Option, +} + +fn transfer(c: &mut Criterion) { + neqo_common::log::init(Some(log::LevelFilter::Off)); + neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()); + + let done_sender = spawn_server(); + + for Benchmark { + name, + requests, + download_in_series, + sample_size, + } in [ + Benchmark { + name: "1-conn/1-100mb-resp (aka. Download)".to_string(), + requests: vec![100 * 1024 * 1024], + download_in_series: false, + sample_size: Some(10), + }, + Benchmark { + name: "1-conn/10_000-1b-seq-resp (aka. RPS)".to_string(), + requests: vec![1; 10_000], + download_in_series: false, + sample_size: None, + }, + Benchmark { + name: "100-seq-conn/1-1b-resp (aka. 
HPS)".to_string(), + requests: vec![1; 100], + download_in_series: true, + sample_size: None, + }, + ] { + let mut group = c.benchmark_group(name); + group.throughput(if requests[0] > 1 { + assert_eq!(requests.len(), 1); + Throughput::Bytes(requests[0]) + } else { + Throughput::Elements(requests.len() as u64) + }); + if let Some(size) = sample_size { + group.sample_size(size); + } + group.bench_function("client", |b| { + b.to_async(Runtime::new().unwrap()).iter_batched( + || client::client(client::Args::new(&requests, download_in_series)), + |client| async move { + client.await.unwrap(); + }, + BatchSize::PerIteration, + ); + }); + group.finish(); + } + + done_sender.send(()).unwrap(); +} + +fn spawn_server() -> tokio::sync::oneshot::Sender<()> { + let (done_sender, mut done_receiver) = tokio::sync::oneshot::channel(); + std::thread::spawn(move || { + Runtime::new().unwrap().block_on(async { + let mut server = Box::pin(server::server(server::Args::default())); + tokio::select! { + _ = &mut done_receiver => {} + _ = &mut server => {} + } + }); + }); + done_sender +} + +criterion_group!(benches, transfer); +criterion_main!(benches); diff --git a/neqo-bin/src/bin/client.rs b/neqo-bin/src/bin/client.rs new file mode 100644 index 0000000000..25c0e8753f --- /dev/null +++ b/neqo-bin/src/bin/client.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), neqo_bin::client::Error> { + let args = neqo_bin::client::Args::parse(); + + neqo_bin::client::client(args).await +} diff --git a/neqo-bin/src/bin/server.rs b/neqo-bin/src/bin/server.rs new file mode 100644 index 0000000000..8d166c7487 --- /dev/null +++ b/neqo-bin/src/bin/server.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), std::io::Error> { + let args = neqo_bin::server::Args::parse(); + + neqo_bin::server::server(args).await +} diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/client/http09.rs similarity index 99% rename from neqo-bin/src/bin/client/http09.rs rename to neqo-bin/src/client/http09.rs index 372a112853..9bdb6dca85 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/client/http09.rs @@ -25,8 +25,7 @@ use neqo_transport::{ }; use url::Url; -use super::{get_output_file, Args, KeyUpdateState, Res}; -use crate::qlog_new; +use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; pub struct Handler<'a> { streams: HashMap>>, diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/client/http3.rs similarity index 99% rename from neqo-bin/src/bin/client/http3.rs rename to neqo-bin/src/client/http3.rs index e9f5e406a5..c88a8448f6 100644 --- a/neqo-bin/src/bin/client/http3.rs +++ b/neqo-bin/src/client/http3.rs @@ -26,7 +26,7 @@ use neqo_transport::{ }; use url::Url; -use crate::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; +use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; pub(crate) struct Handler<'a> { #[allow( diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/client/mod.rs similarity index 91% rename from neqo-bin/src/bin/client/main.rs rename to neqo-bin/src/client/mod.rs index 63aa12db13..e0169e3f24 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/client/mod.rs @@ -21,25 +21,26 @@ use futures::{ future::{select, Either}, FutureExt, TryFutureExt, }; -use neqo_bin::udp; use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, Cipher, ResumptionToken, }; -use neqo_http3::{Error, Output}; +use neqo_http3::Output; use neqo_transport::{AppError, ConnectionId, Error as TransportError, Version}; use qlog::{events::EventImportance, streamer::QlogStreamer}; use tokio::time::Sleep; use url::{Origin, Url}; +use crate::{udp, SharedArgs}; + mod http09; mod http3; const BUFWRITER_BUFFER_SIZE: usize = 64 * 1024; #[derive(Debug)] -pub enum ClientError { +pub enum Error { ArgumentError(&'static str), Http3Error(neqo_http3::Error), IoError(io::Error), @@ -47,40 +48,40 @@ pub enum ClientError { TransportError(neqo_transport::Error), } -impl From for ClientError { +impl From for Error { fn from(err: io::Error) -> Self { Self::IoError(err) } } -impl From for ClientError { +impl From for Error { fn from(err: neqo_http3::Error) -> Self { Self::Http3Error(err) } } -impl From for ClientError { +impl From for Error { fn from(_err: qlog::Error) -> Self { Self::QlogError } } -impl From for ClientError { +impl From for Error { fn from(err: neqo_transport::Error) -> Self { Self::TransportError(err) } } -impl Display for ClientError { +impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {self:?}")?; Ok(()) } } -impl std::error::Error for ClientError {} +impl std::error::Error for Error {} -type Res = Result; +type Res = Result; /// Track whether a key update is needed. 
#[derive(Debug, PartialEq, Eq)] @@ -90,14 +91,14 @@ impl KeyUpdateState { pub fn maybe_update(&mut self, update_fn: F) -> Res<()> where F: FnOnce() -> Result<(), E>, - E: Into, + E: Into, { if self.0 { if let Err(e) = update_fn() { let e = e.into(); match e { - ClientError::TransportError(TransportError::KeyUpdateBlocked) - | ClientError::Http3Error(Error::TransportError( + Error::TransportError(TransportError::KeyUpdateBlocked) + | Error::Http3Error(neqo_http3::Error::TransportError( TransportError::KeyUpdateBlocked, )) => (), _ => return Err(e), @@ -123,7 +124,7 @@ pub struct Args { verbose: clap_verbosity_flag::Verbosity, #[command(flatten)] - shared: neqo_bin::SharedArgs, + shared: SharedArgs, urls: Vec, @@ -189,6 +190,36 @@ pub struct Args { } impl Args { + #[must_use] + #[cfg(feature = "bench")] + #[allow(clippy::missing_panics_doc)] + pub fn new(requests: &[u64], download_in_series: bool) -> Self { + use std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + urls: requests + .iter() + .map(|r| Url::from_str(&format!("http://[::1]:12345/{r}")).unwrap()) + .collect(), + method: "GET".into(), + header: vec![], + max_concurrent_push_streams: 10, + download_in_series, + concurrency: 100, + output_read_data: false, + output_dir: Some("/dev/null".into()), + resume: false, + key_update: false, + ech: None, + ipv4_only: false, + ipv6_only: false, + test: None, + upload_size: 100, + stats: false, + } + } + fn get_ciphers(&self) -> Vec { self.shared .ciphers @@ -445,10 +476,10 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { } } -#[tokio::main] -async fn main() -> Res<()> { - let mut args = Args::parse(); +pub async fn client(mut args: Args) -> Res<()> { neqo_common::log::init(Some(args.verbose.log_level_filter())); + init(); + args.update_for_tests(); init(); diff --git a/neqo-bin/src/lib.rs b/neqo-bin/src/lib.rs index b7bc158245..380c56ddce 100644 --- a/neqo-bin/src/lib.rs +++ b/neqo-bin/src/lib.rs @@ -4,6 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#![allow(clippy::missing_panics_doc)] +#![allow(clippy::missing_errors_doc)] + use std::{ fmt::{self, Display}, net::{SocketAddr, ToSocketAddrs}, @@ -17,7 +20,9 @@ use neqo_transport::{ Version, }; -pub mod udp; +pub mod client; +pub mod server; +mod udp; #[derive(Debug, Parser)] pub struct SharedArgs { @@ -57,6 +62,23 @@ pub struct SharedArgs { pub quic_parameters: QuicParameters, } +#[cfg(feature = "bench")] +impl Default for SharedArgs { + fn default() -> Self { + Self { + alpn: "h3".into(), + qlog_dir: None, + max_table_size_encoder: 16384, + max_table_size_decoder: 16384, + max_blocked_streams: 10, + ciphers: vec![], + qns_test: None, + use_old_http: false, + quic_parameters: QuicParameters::default(), + } + } +} + #[derive(Debug, Parser)] pub struct QuicParameters { #[arg( @@ -102,6 +124,22 @@ pub struct QuicParameters { pub preferred_address_v6: Option, } +#[cfg(feature = "bench")] +impl Default for QuicParameters { + fn default() -> Self { + Self { + quic_version: vec![], + max_streams_bidi: 16, + max_streams_uni: 16, + idle_timeout: 30, + congestion_control: CongestionControlAlgorithm::NewReno, + no_pacing: false, + preferred_address_v4: None, + preferred_address_v6: None, + } + } +} + impl QuicParameters { fn get_sock_addr(opt: &Option, v: &str, f: F) -> Option where diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/server/mod.rs similarity index 94% rename from neqo-bin/src/bin/server/main.rs rename to neqo-bin/src/server/mod.rs index 62eb19e78c..f89d6620de 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/server/mod.rs @@ -25,28 +25,28 @@ use futures::{ future::{select, select_all, Either}, FutureExt, }; -use neqo_bin::udp; use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, }; use neqo_http3::{ - Error, Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, + Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, }; use neqo_transport::{ server::ValidateAddress, ConnectionIdGenerator, Output, RandomConnectionIdGenerator, Version, }; +use old_https::Http09Server; use tokio::time::Sleep; -use crate::old_https::Http09Server; +use crate::{udp, SharedArgs}; const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10); mod old_https; #[derive(Debug)] -pub enum ServerError { +pub enum Error { ArgumentError(&'static str), Http3Error(neqo_http3::Error), IoError(io::Error), @@ -54,47 +54,47 @@ pub enum ServerError { TransportError(neqo_transport::Error), } -impl From for ServerError { +impl From for Error { fn from(err: io::Error) -> Self { Self::IoError(err) } } -impl From for ServerError { +impl From for Error { fn from(err: neqo_http3::Error) -> Self { Self::Http3Error(err) } } -impl From for ServerError { +impl From for Error { fn from(_err: qlog::Error) -> Self { Self::QlogError } } -impl From for ServerError { +impl From for Error { fn from(err: neqo_transport::Error) -> Self { Self::TransportError(err) } } -impl Display for ServerError { +impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {self:?}")?; Ok(()) } } -impl std::error::Error for ServerError {} +impl std::error::Error for Error {} #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] -struct Args { +pub struct Args { #[command(flatten)] verbose: clap_verbosity_flag::Verbosity, 
#[command(flatten)] - shared: neqo_bin::SharedArgs, + shared: SharedArgs, /// List of IP:port to listen on #[arg(default_value = "[::]:4433")] @@ -119,6 +119,22 @@ struct Args { ech: bool, } +#[cfg(feature = "bench")] +impl Default for Args { + fn default() -> Self { + use std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + hosts: vec!["[::]:12345".to_string()], + db: PathBuf::from_str("../test-fixture/db").unwrap(), + key: "key".to_string(), + retry: false, + ech: false, + } + } +} + impl Args { fn get_ciphers(&self) -> Vec { self.shared @@ -339,7 +355,7 @@ impl HttpServer for SimpleServer { } } else { stream - .cancel_fetch(Error::HttpRequestIncomplete.code()) + .cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code()) .unwrap(); continue; }; @@ -565,11 +581,9 @@ enum Ready { Timeout, } -#[tokio::main] -async fn main() -> Result<(), io::Error> { +pub async fn server(mut args: Args) -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; - let mut args = Args::parse(); neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); diff --git a/neqo-bin/src/bin/server/old_https.rs b/neqo-bin/src/server/old_https.rs similarity index 100% rename from neqo-bin/src/bin/server/old_https.rs rename to neqo-bin/src/server/old_https.rs diff --git a/neqo-bin/src/udp.rs b/neqo-bin/src/udp.rs index f4ede0b5c2..7ccfa1f36f 100644 --- a/neqo-bin/src/udp.rs +++ b/neqo-bin/src/udp.rs @@ -23,6 +23,8 @@ use tokio::io::Interest; const RECV_BUF_SIZE: usize = u16::MAX as usize; pub struct Socket { + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] socket: tokio::net::UdpSocket, state: UdpSocketState, recv_buf: Vec, From f7492045743b5017e7d24051c15c21afe321c2a4 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:02:19 +0200 Subject: [PATCH 10/20] Enable the ossf/scorecard action (#1776) Let's see what this will report on our code... Signed-off-by: Lars Eggert --- .github/workflows/scorecard.yml | 72 +++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 .github/workflows/scorecard.yml diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 0000000000..338c9239c3 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,72 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '26 8 * * 6' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. 
+ # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: false + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + with: + sarif_file: results.sarif From 3e12eae25da4ef3ac45363e512e89b538216d005 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:15:54 +0200 Subject: [PATCH 11/20] ci: Fix shell script bugs in `nss/action.yml` (#1778) --- .github/actions/nss/action.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index ec6f13eaf8..051b54143b 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -34,9 +34,9 @@ runs: fi NSS_MAJOR=$(echo "$NSS_VERSION" | cut -d. -f1) NSS_MINOR=$(echo "$NSS_VERSION" | cut -d. -f2) - REQ_NSS_MAJOR=$(cat neqo-crypto/min_version.txt | cut -d. -f1) - REQ_NSS_MINOR=$(cat neqo-crypto/min_version.txt | cut -d. -f2) - if [ "$NSS_MAJOR" -lt "REQ_NSS_MAJOR" ] || [ "$NSS_MAJOR" -eq "REQ_NSS_MAJOR" -a "$NSS_MINOR" -lt "REQ_NSS_MINOR"]; then + REQ_NSS_MAJOR=$(cut -d. -f1 < neqo-crypto/min_version.txt) + REQ_NSS_MINOR=$(cut -d. -f2 < neqo-crypto/min_version.txt) + if [[ "$NSS_MAJOR" -lt "$REQ_NSS_MAJOR" || "$NSS_MAJOR" -eq "$REQ_NSS_MAJOR" && "$NSS_MINOR" -lt "$REQ_NSS_MINOR" ]]; then echo "System NSS is too old: $NSS_VERSION" echo "BUILD_NSS=1" >> "$GITHUB_ENV" exit 0 From 3151adc53e71273eed1319114380119c70e169a2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:44:37 +0200 Subject: [PATCH 12/20] fix: Don't panic in `neqo_crypto::init()` (#1775) * fix: Don't panic in `neqo_crypto::init()` Return `Res<()>` instead. 
Fixes #1675 * Update neqo-bin/src/bin/client/main.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/src/lib.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/tests/init.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review * Try and improve coverage * Fix the `nss_nodb` case * Fail tests if `init()` fails * Address code review * Update neqo-bin/src/bin/client/main.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * InternalError -> CryptoError * Fix merge * clippy --------- Signed-off-by: Lars Eggert Co-authored-by: Martin Thomson --- neqo-bin/benches/main.rs | 2 +- neqo-bin/src/bin/server.rs | 2 +- neqo-bin/src/client/mod.rs | 11 +++++-- neqo-bin/src/server/mod.rs | 15 +++++++-- neqo-crypto/src/lib.rs | 53 ++++++++++++++++---------------- neqo-crypto/tests/init.rs | 51 +++++++++++++++++++++++------- neqo-crypto/tests/selfencrypt.rs | 4 +-- test-fixture/src/lib.rs | 6 +++- 8 files changed, 96 insertions(+), 48 deletions(-) diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs index fe3aba2714..6bb8b3161d 100644 --- a/neqo-bin/benches/main.rs +++ b/neqo-bin/benches/main.rs @@ -20,7 +20,7 @@ struct Benchmark { fn transfer(c: &mut Criterion) { neqo_common::log::init(Some(log::LevelFilter::Off)); - neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()); + neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()).unwrap(); let done_sender = spawn_server(); diff --git a/neqo-bin/src/bin/server.rs b/neqo-bin/src/bin/server.rs index 8d166c7487..e9b30261e4 100644 --- a/neqo-bin/src/bin/server.rs +++ b/neqo-bin/src/bin/server.rs @@ -7,7 +7,7 @@ use clap::Parser; #[tokio::main] -async fn main() -> Result<(), std::io::Error> { +async fn main() -> Result<(), neqo_bin::server::Error> { let args = neqo_bin::server::Args::parse(); neqo_bin::server::server(args).await diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index e0169e3f24..81721802e1 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -46,6 +46,13 @@ pub enum Error { IoError(io::Error), QlogError, TransportError(neqo_transport::Error), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } } impl From for Error { @@ -478,11 +485,11 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { pub async fn client(mut args: Args) -> Res<()> { neqo_common::log::init(Some(args.verbose.log_level_filter())); - init(); + init()?; args.update_for_tests(); - init(); + init()?; let urls_by_origin = args .urls diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs index f89d6620de..38eb766f5f 100644 --- a/neqo-bin/src/server/mod.rs +++ b/neqo-bin/src/server/mod.rs @@ -52,6 +52,13 @@ pub enum Error { IoError(io::Error), QlogError, TransportError(neqo_transport::Error), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } } impl From for Error { @@ -87,6 +94,8 @@ impl Display for Error { impl std::error::Error for Error {} +type Res = Result; + #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] pub struct Args { @@ -551,7 +560,7 @@ impl ServersRunner { select(sockets_ready, timeout_ready).await.factor_first().0 } - async fn run(&mut self) -> Result<(), io::Error> { + async fn run(&mut self) -> Res<()> { loop { match self.ready().await? 
{ Ready::Socket(inx) => loop { @@ -581,13 +590,13 @@ enum Ready { Timeout, } -pub async fn server(mut args: Args) -> Result<(), io::Error> { +pub async fn server(mut args: Args) -> Res<()> { const HQ_INTEROP: &str = "hq-interop"; neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); - init_db(args.db.clone()); + init_db(args.db.clone())?; if let Some(testcase) = args.shared.qns_test.as_ref() { if args.shared.quic_parameters.quic_version.is_empty() { diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index b82b225d40..2db985e8ee 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -90,7 +90,7 @@ impl Drop for NssLoaded { } } -static INITIALIZED: OnceLock = OnceLock::new(); +static INITIALIZED: OnceLock> = OnceLock::new(); fn already_initialized() -> bool { unsafe { nss::NSS_IsInitialized() != 0 } @@ -108,24 +108,24 @@ fn version_check() { /// Initialize NSS. This only executes the initialization routines once, so if there is any chance /// that /// -/// # Panics +/// # Errors /// /// When NSS initialization fails. -pub fn init() { +pub fn init() -> Res<()> { // Set time zero. time::init(); - _ = INITIALIZED.get_or_init(|| { + let res = INITIALIZED.get_or_init(|| { version_check(); if already_initialized() { - return NssLoaded::External; + return Ok(NssLoaded::External); } - secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) }).expect("NSS_NoDB_Init failed"); - secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) - .expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?; + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; - NssLoaded::NoDb + Ok(NssLoaded::NoDb) }); + res.as_ref().map(|_| ()).map_err(Clone::clone) } /// This enables SSLTRACE by calling a simple, harmless function to trigger its @@ -133,31 +133,32 @@ pub fn init() { /// global options are accessed. Reading an option is the least impact approach. /// This allows us to use SSLTRACE in all of our unit tests and programs. #[cfg(debug_assertions)] -fn enable_ssl_trace() { +fn enable_ssl_trace() -> Res<()> { let opt = ssl::Opt::Locking.as_int(); let mut v: ::std::os::raw::c_int = 0; secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) }) - .expect("SSL_OptionGetDefault failed"); } /// Initialize with a database. /// -/// # Panics +/// # Errors /// /// If NSS cannot be initialized. 
-pub fn init_db>(dir: P) { +pub fn init_db>(dir: P) -> Res<()> { time::init(); - _ = INITIALIZED.get_or_init(|| { + let res = INITIALIZED.get_or_init(|| { version_check(); if already_initialized() { - return NssLoaded::External; + return Ok(NssLoaded::External); } let path = dir.into(); - assert!(path.is_dir()); - let pathstr = path.to_str().expect("path converts to string").to_string(); - let dircstr = CString::new(pathstr).unwrap(); - let empty = CString::new("").unwrap(); + if !path.is_dir() { + return Err(Error::InternalError); + } + let pathstr = path.to_str().ok_or(Error::InternalError)?; + let dircstr = CString::new(pathstr)?; + let empty = CString::new("")?; secstatus_to_res(unsafe { nss::NSS_Initialize( dircstr.as_ptr(), @@ -166,21 +167,19 @@ pub fn init_db>(dir: P) { nss::SECMOD_DB.as_ptr().cast(), nss::NSS_INIT_READONLY, ) - }) - .expect("NSS_Initialize failed"); + })?; - secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) - .expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; secstatus_to_res(unsafe { ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr()) - }) - .expect("SSL_ConfigServerSessionIDCache failed"); + })?; #[cfg(debug_assertions)] - enable_ssl_trace(); + enable_ssl_trace()?; - NssLoaded::Db + Ok(NssLoaded::Db) }); + res.as_ref().map(|_| ()).map_err(Clone::clone) } /// # Panics diff --git a/neqo-crypto/tests/init.rs b/neqo-crypto/tests/init.rs index 13218cc340..ee7d808e29 100644 --- a/neqo-crypto/tests/init.rs +++ b/neqo-crypto/tests/init.rs @@ -15,13 +15,7 @@ use neqo_crypto::{assert_initialized, init_db}; // Pull in the NSS internals so that we can ask NSS if it thinks that // it is properly initialized. -#[allow( - dead_code, - non_upper_case_globals, - clippy::redundant_static_lifetimes, - clippy::unseparated_literal_suffix, - clippy::upper_case_acronyms -)] +#[allow(dead_code, non_upper_case_globals)] mod nss { include!(concat!(env!("OUT_DIR"), "/nss_init.rs")); } @@ -29,19 +23,54 @@ mod nss { #[cfg(nss_nodb)] #[test] fn init_nodb() { - init(); + neqo_crypto::init().unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); } } +#[cfg(nss_nodb)] +#[test] +fn init_twice_nodb() { + unsafe { + nss::NSS_NoDB_Init(std::ptr::null()); + assert_ne!(nss::NSS_IsInitialized(), 0); + } + // Now do it again + init_nodb(); +} + #[cfg(not(nss_nodb))] #[test] fn init_withdb() { - init_db(::test_fixture::NSS_DB_PATH); + init_db(::test_fixture::NSS_DB_PATH).unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); + } +} + +#[cfg(not(nss_nodb))] +#[test] +fn init_twice_withdb() { + use std::{ffi::CString, path::PathBuf}; + + let empty = CString::new("").unwrap(); + let path: PathBuf = ::test_fixture::NSS_DB_PATH.into(); + assert!(path.is_dir()); + let pathstr = path.to_str().unwrap(); + let dircstr = CString::new(pathstr).unwrap(); + unsafe { + nss::NSS_Initialize( + dircstr.as_ptr(), + empty.as_ptr(), + empty.as_ptr(), + nss::SECMOD_DB.as_ptr().cast(), + nss::NSS_INIT_READONLY, + ); + assert_ne!(nss::NSS_IsInitialized(), 0); } + // Now do it again + init_withdb(); } diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index b20aa27ee6..9fc2162fe2 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -15,7 +15,7 @@ use neqo_crypto::{ #[test] fn se_create() { - init(); + init().unwrap(); SelfEncrypt::new(TLS_VERSION_1_3, 
TLS_AES_128_GCM_SHA256).expect("constructor works"); } @@ -23,7 +23,7 @@ const PLAINTEXT: &[u8] = b"PLAINTEXT"; const AAD: &[u8] = b"AAD"; fn sealed() -> (SelfEncrypt, Vec) { - init(); + init().unwrap(); let se = SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).unwrap(); let sealed = se.seal(AAD, PLAINTEXT).expect("sealing works"); (se, sealed) diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index a6043cd974..e34fb522ff 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -41,8 +41,12 @@ pub const NSS_DB_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/db"); /// Initialize the test fixture. Only call this if you aren't also calling a /// fixture function that depends on setup. Other functions in the fixture /// that depend on this setup call the function for you. +/// +/// # Panics +/// +/// When the NSS initialization fails. pub fn fixture_init() { - init_db(NSS_DB_PATH); + init_db(NSS_DB_PATH).unwrap(); } // This needs to be > 2ms to avoid it being rounded to zero. From 92ed07f1b7a537c1638a3a6fce8b0baa66e64ac4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 09:53:07 +0300 Subject: [PATCH 13/20] build(deps): bump github/codeql-action from 2.2.4 to 3.24.9 (#1782) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.2.4 to 3.24.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/17573ee1cc1b9d061760f3a006fc4aac4f944fd5...1b1aada464948af03b950897e5eb522f92603cc2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 338c9239c3..dfbda87b58 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 with: sarif_file: results.sarif From 94f8e687ebb0eff9656f556b38edb1a4b43fcb9d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 09:53:27 +0300 Subject: [PATCH 14/20] build(deps): bump ossf/scorecard-action from 2.1.2 to 2.3.1 (#1781) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.1.2 to 2.3.1. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/e38b1902ae4f44df626f11ba0734b14fb91f8f86...0864cf19026789058feabb7e87baa5f140aac736) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index dfbda87b58..01e5fe16a8 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -37,7 +37,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif From 27a7250dd321e60bd2f68564cf7c2fd58fac0e27 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 2 Apr 2024 10:43:29 +0300 Subject: [PATCH 15/20] fix: Exit with `ProtocolViolation` if a packet has no frames (#1779) * fix: Exit with `ProtocolViolation` if a packet has no frames Fixes #1476 * Fix comment * Cleanup * Simplify * Address code review * Clippy --- neqo-transport/src/connection/mod.rs | 4 ++ neqo-transport/src/packet/mod.rs | 6 +-- neqo-transport/tests/connection.rs | 70 ++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 5 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 3bf6b91263..06d6cab9e1 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1552,6 +1552,10 @@ impl Connection { packet: &DecryptedPacket, now: Instant, ) -> Res { + (!packet.is_empty()) + .then_some(()) + .ok_or(Error::ProtocolViolation)?; + // TODO(ekr@rtfm.com): Have the server blow away the initial // crypto state if this fails? Otherwise, we will get a panic // on the assert for doesn't exist. diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 0843d050ab..d435ac0dd8 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -764,14 +764,10 @@ impl<'a> PublicPacket<'a> { assert_ne!(self.packet_type, PacketType::Retry); assert_ne!(self.packet_type, PacketType::VersionNegotiation); - qtrace!( - "unmask hdr={}", - hex(&self.data[..self.header_len + SAMPLE_OFFSET]) - ); - let sample_offset = self.header_len + SAMPLE_OFFSET; let mask = if let Some(sample) = self.data.get(sample_offset..(sample_offset + SAMPLE_SIZE)) { + qtrace!("unmask hdr={}", hex(&self.data[..sample_offset])); crypto.compute_mask(sample) } else { Err(Error::NoMoreData) diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 0b91fcf306..b8877b946d 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -127,6 +127,76 @@ fn reorder_server_initial() { assert_eq!(*client.state(), State::Confirmed); } +fn set_payload(server_packet: &Option, client_dcid: &[u8], payload: &[u8]) -> Datagram { + let (server_initial, _server_hs) = split_datagram(server_packet.as_ref().unwrap()); + let (protected_header, _, _, orig_payload) = + decode_initial_header(&server_initial, Role::Server); + + // Now decrypt the packet. + let (aead, hp) = initial_aead_and_hp(client_dcid, Role::Server); + let (mut header, pn) = remove_header_protection(&hp, protected_header, orig_payload); + assert_eq!(pn, 0); + // Re-encode the packet number as four bytes, so we have enough material for the header + // protection sample if payload is empty. 
+ let pn_pos = header.len() - 2; + header[pn_pos] = u8::try_from(4 + aead.expansion()).unwrap(); + header.resize(header.len() + 3, 0); + header[0] |= 0b0000_0011; // Set the packet number length to 4. + + // And build a packet containing the given payload. + let mut packet = header.clone(); + packet.resize(header.len() + payload.len() + aead.expansion(), 0); + aead.encrypt(pn, &header, payload, &mut packet[header.len()..]) + .unwrap(); + apply_header_protection(&hp, &mut packet, protected_header.len()..header.len()); + Datagram::new( + server_initial.source(), + server_initial.destination(), + server_initial.tos(), + server_initial.ttl(), + packet, + ) +} + +/// Test that the stack treats a packet without any frames as a protocol violation. +#[test] +fn packet_without_frames() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[]); + client.process_input(&modified, now()); + assert_eq!( + client.state(), + &State::Closed(ConnectionError::Transport(Error::ProtocolViolation)) + ); +} + +/// Test that the stack permits a packet containing only padding. +#[test] +fn packet_with_only_padding() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[0]); + client.process_input(&modified, now()); + assert_eq!(client.state(), &State::WaitInitial); +} + /// Overflow the crypto buffer. #[allow(clippy::similar_names)] // For ..._scid and ..._dcid, which are fine. #[test] From 68606871ca9f1a8435f2472bcaf49e83a06a823b Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Apr 2024 14:23:59 +0200 Subject: [PATCH 16/20] refactor: print current `StateSignaling` variant in `debug_assert` (#1783) * refactor: print current `StateSignaling` variant in debug_assert CI paniced in `StateSignaling::handshake_done`. Though failure hasn't been reproducible locally. To ease debugging future CI failures, print the current state on panic. ``` thread 'idle_timeout_crazy_rtt' panicked at neqo-transport\src\connection\state.rs:212:13: StateSignaling must be in Idle state. 
stack backtrace: 0: std::panicking::begin_panic_handler at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\std\src\panicking.rs:646 1: core::panicking::panic_fmt at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\core\src\panicking.rs:72 2: enum2$::handshake_done at .\src\connection\state.rs:212 3: neqo_transport::connection::Connection::handle_lost_packets at .\src\connection\mod.rs:2847 4: neqo_transport::connection::Connection::process_timer at .\src\connection\mod.rs:966 5: neqo_transport::connection::Connection::process_output at .\src\connection\mod.rs:1085 6: neqo_transport::connection::Connection::process at .\src\connection\mod.rs:1108 7: test_fixture::sim::connection::impl$1::process at D:\a\neqo\neqo\test-fixture\src\sim\connection.rs:146 8: test_fixture::sim::Simulator::process_loop at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:193 9: test_fixture::sim::ReadySimulator::run at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:284 10: test_fixture::sim::Simulator::run at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:265 11: network::idle_timeout_crazy_rtt at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:69 12: network::idle_timeout_crazy_rtt::closure$0 at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:58 13: core::ops::function::FnOnce::call_once > at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373\library\core\src\ops\function.rs:250 14: core::ops::function::FnOnce::call_once at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\core\src\ops\function.rs:250 note: Some details are omitted, run with `RUST_BACKTRACE=full` for a verbose backtrace. ``` https://github.com/mozilla/neqo/actions/runs/8496770595/job/23274538553 * clippy --- neqo-transport/src/connection/state.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index 9789151d3f..cc2f6e30d2 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -209,7 +209,10 @@ pub enum StateSignaling { impl StateSignaling { pub fn handshake_done(&mut self) { if !matches!(self, Self::Idle) { - debug_assert!(false, "StateSignaling must be in Idle state."); + debug_assert!( + false, + "StateSignaling must be in Idle state but is in {self:?} state.", + ); return; } *self = Self::HandshakeDone; From 07514299a70a8aea8d28a25544e85cf96620d976 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Apr 2024 18:25:10 +0200 Subject: [PATCH 17/20] refactor(server): simplify :method POST check (#1787) --- neqo-bin/src/server/mod.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs index 38eb766f5f..e3067ecdf0 100644 --- a/neqo-bin/src/server/mod.rs +++ b/neqo-bin/src/server/mod.rs @@ -336,13 +336,10 @@ impl HttpServer for SimpleServer { } => { qdebug!("Headers (request={stream} fin={fin}): {headers:?}"); - let post = if let Some(method) = headers.iter().find(|&h| h.name() == ":method") + if headers + .iter() + .any(|h| h.name() == ":method" && h.value() == "POST") { - method.value() == "POST" - } else { - false - }; - if post { self.posts.insert(stream, 0); continue; } From 301ebe40da3794cb4e6410cd0cca87fe3e2c4f1c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 3 Apr 2024 18:53:13 +0200 Subject: [PATCH 18/20] ci(interop/action): always upload comment data (#1790) In https://github.com/mozilla/neqo/pull/1785 the QUIC Network Simulator workflow failed. 
https://github.com/mozilla/neqo/actions/runs/8523273988/job/23345192160?pr=1785

This triggers the QUIC Network Simulator Comment workflow.

https://github.com/mozilla/neqo/actions/runs/8524906268

Currently, however, the former does not upload the _comment_ artifact
on failure, even though the latter needs to consume it.

This commit makes the former always upload the comment data, such that
the latter can consume it on failure.
---
 .github/actions/quic-interop-runner/action.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml
index 30c7f0d8d6..4d88191646 100644
--- a/.github/actions/quic-interop-runner/action.yml
+++ b/.github/actions/quic-interop-runner/action.yml
@@ -89,6 +89,7 @@ runs:
         path: quic-interop-runner/logs

     - name: Format GitHub comment
+      if: always()
       run: |
         echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment
         echo '' >> comment
@@ -98,6 +99,7 @@ runs:
       shell: bash

     - name: Export PR comment data
+      if: always()
       uses: ./.github/actions/pr-comment-data-export
       with:
         name: qns

From 61fcd282c420da03a566cf3324b4dc86b04415e4 Mon Sep 17 00:00:00 2001
From: Max Inden
Date: Thu, 4 Apr 2024 07:11:27 +0200
Subject: [PATCH 19/20] perf(transport): remove Server::timers (#1784)

* perf(transport): remove Server::timers

The current `neqo_transport::server::Server::timers` has a large
performance overhead, especially when serving a small number of
connections. See https://github.com/mozilla/neqo/pull/1780 for details.

This commit optimizes for the small-number-of-connections case, keeping
only a single callback timestamp and iterating over each connection
when there is no other work to be done.

* Cleanups

* Rename to wake_at

* Introduce ServerConnectionState::{set_wake_at,needs_waking,woken}
---
 neqo-common/Cargo.toml       |   4 -
 neqo-common/benches/timer.rs |  39 ----
 neqo-common/src/lib.rs       |   1 -
 neqo-common/src/timer.rs     | 420 -----------------------------------
 neqo-transport/src/server.rs |  87 ++++----
 5 files changed, 42 insertions(+), 509 deletions(-)
 delete mode 100644 neqo-common/benches/timer.rs
 delete mode 100644 neqo-common/src/timer.rs

diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml
index 069d67b834..0cb4bcbf4f 100644
--- a/neqo-common/Cargo.toml
+++ b/neqo-common/Cargo.toml
@@ -34,7 +34,3 @@ features = ["timeapi"]
 [lib]
 # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options
 bench = false
-
-[[bench]]
-name = "timer"
-harness = false

diff --git a/neqo-common/benches/timer.rs b/neqo-common/benches/timer.rs
deleted file mode 100644
index 5ac8019db4..0000000000
--- a/neqo-common/benches/timer.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-// Licensed under the Apache License, Version 2.0 or the MIT license
-// , at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
- -use std::time::{Duration, Instant}; - -use criterion::{criterion_group, criterion_main, Criterion}; -use neqo_common::timer::Timer; -use test_fixture::now; - -fn benchmark_timer(c: &mut Criterion) { - c.bench_function("drain a timer quickly", |b| { - b.iter_batched_ref( - make_timer, - |(_now, timer)| { - while let Some(t) = timer.next_time() { - assert!(timer.take_next(t).is_some()); - } - }, - criterion::BatchSize::SmallInput, - ); - }); -} - -fn make_timer() -> (Instant, Timer<()>) { - const TIMES: &[u64] = &[1, 2, 3, 5, 8, 13, 21, 34]; - - let now = now(); - let mut timer = Timer::new(now, Duration::from_millis(777), 100); - for &t in TIMES { - timer.add(now + Duration::from_secs(t), ()); - } - (now, timer) -} - -criterion_group!(benches, benchmark_timer); -criterion_main!(benches); diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index e988c6071d..f3e8e63023 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -14,7 +14,6 @@ pub mod hrtime; mod incrdecoder; pub mod log; pub mod qlog; -pub mod timer; pub mod tos; use std::fmt::Write; diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs deleted file mode 100644 index 3feddb2226..0000000000 --- a/neqo-common/src/timer.rs +++ /dev/null @@ -1,420 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::{ - collections::VecDeque, - mem, - time::{Duration, Instant}, -}; - -/// Internal structure for a timer item. -struct TimerItem { - time: Instant, - item: T, -} - -impl TimerItem { - fn time(ti: &Self) -> Instant { - ti.time - } -} - -/// A timer queue. -/// This uses a classic timer wheel arrangement, with some characteristics that might be considered -/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut -/// points). Time is relative, the wheel has an origin time and it is unable to represent times that -/// are more than `granularity * capacity` past that time. -pub struct Timer { - items: Vec>>, - now: Instant, - granularity: Duration, - cursor: usize, -} - -impl Timer { - /// Construct a new wheel at the given granularity, starting at the given time. - /// - /// # Panics - /// - /// When `capacity` is too large to fit in `u32` or `granularity` is zero. - pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self { - assert!(u32::try_from(capacity).is_ok()); - assert!(granularity.as_nanos() > 0); - let mut items = Vec::with_capacity(capacity); - items.resize_with(capacity, Default::default); - Self { - items, - now, - granularity, - cursor: 0, - } - } - - /// Return a reference to the time of the next entry. - #[must_use] - pub fn next_time(&self) -> Option { - let idx = self.bucket(0); - for i in idx..self.items.len() { - if let Some(t) = self.items[i].front() { - return Some(t.time); - } - } - for i in 0..idx { - if let Some(t) = self.items[i].front() { - return Some(t.time); - } - } - None - } - - /// Get the full span of time that this can cover. - /// Two timers cannot be more than this far apart. - /// In practice, this value is less by one amount of the timer granularity. - #[inline] - #[allow(clippy::cast_possible_truncation)] // guarded by assertion - #[must_use] - pub fn span(&self) -> Duration { - self.granularity * (self.items.len() as u32) - } - - /// For the given `time`, get the number of whole buckets in the future that is. 
- #[inline] - #[allow(clippy::cast_possible_truncation)] // guarded by assertion - fn delta(&self, time: Instant) -> usize { - // This really should use Duration::div_duration_f??(), but it can't yet. - ((time - self.now).as_nanos() / self.granularity.as_nanos()) as usize - } - - #[inline] - fn time_bucket(&self, time: Instant) -> usize { - self.bucket(self.delta(time)) - } - - #[inline] - fn bucket(&self, delta: usize) -> usize { - debug_assert!(delta < self.items.len()); - (self.cursor + delta) % self.items.len() - } - - /// Slide forward in time by `n * self.granularity`. - #[allow(clippy::cast_possible_truncation, clippy::reversed_empty_ranges)] - // cast_possible_truncation is ok because we have an assertion guard. - // reversed_empty_ranges is to avoid different types on the if/else. - fn tick(&mut self, n: usize) { - let new = self.bucket(n); - let iter = if new < self.cursor { - (self.cursor..self.items.len()).chain(0..new) - } else { - (self.cursor..new).chain(0..0) - }; - for i in iter { - assert!(self.items[i].is_empty()); - } - self.now += self.granularity * (n as u32); - self.cursor = new; - } - - /// Asserts if the time given is in the past or too far in the future. - /// - /// # Panics - /// - /// When `time` is in the past relative to previous calls. - pub fn add(&mut self, time: Instant, item: T) { - assert!(time >= self.now); - // Skip forward quickly if there is too large a gap. - let short_span = self.span() - self.granularity; - if time >= (self.now + self.span() + short_span) { - // Assert that there aren't any items. - for i in &self.items { - debug_assert!(i.is_empty()); - } - self.now = time.checked_sub(short_span).unwrap(); - self.cursor = 0; - } - - // Adjust time forward the minimum amount necessary. - let mut d = self.delta(time); - if d >= self.items.len() { - self.tick(1 + d - self.items.len()); - d = self.items.len() - 1; - } - - let bucket = self.bucket(d); - let ins = match self.items[bucket].binary_search_by_key(&time, TimerItem::time) { - Ok(j) | Err(j) => j, - }; - self.items[bucket].insert(ins, TimerItem { time, item }); - } - - /// Given knowledge of the time an item was added, remove it. - /// This requires use of a predicate that identifies matching items. - /// - /// # Panics - /// Impossible, I think. - pub fn remove(&mut self, time: Instant, mut selector: F) -> Option - where - F: FnMut(&T) -> bool, - { - if time < self.now { - return None; - } - if time > self.now + self.span() { - return None; - } - let bucket = self.time_bucket(time); - let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time) - else { - return None; - }; - // start_index is just one of potentially many items with the same time. - // Search backwards for a match, ... - for i in (0..=start_index).rev() { - if self.items[bucket][i].time != time { - break; - } - if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).unwrap().item); - } - } - // ... then forwards. - for i in (start_index + 1)..self.items[bucket].len() { - if self.items[bucket][i].time != time { - break; - } - if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).unwrap().item); - } - } - None - } - - /// Take the next item, unless there are no items with - /// a timeout in the past relative to `until`. 
- pub fn take_next(&mut self, until: Instant) -> Option { - fn maybe_take(v: &mut VecDeque>, until: Instant) -> Option { - if !v.is_empty() && v[0].time <= until { - Some(v.pop_front().unwrap().item) - } else { - None - } - } - - let idx = self.bucket(0); - for i in idx..self.items.len() { - let res = maybe_take(&mut self.items[i], until); - if res.is_some() { - return res; - } - } - for i in 0..idx { - let res = maybe_take(&mut self.items[i], until); - if res.is_some() { - return res; - } - } - None - } - - /// Create an iterator that takes all items until the given time. - /// Note: Items might be removed even if the iterator is not fully exhausted. - pub fn take_until(&mut self, until: Instant) -> impl Iterator { - let get_item = move |x: TimerItem| x.item; - if until >= self.now + self.span() { - // Drain everything, so a clean sweep. - let mut empty_items = Vec::with_capacity(self.items.len()); - empty_items.resize_with(self.items.len(), VecDeque::default); - let mut items = mem::replace(&mut self.items, empty_items); - self.now = until; - self.cursor = 0; - - let tail = items.split_off(self.cursor); - return tail.into_iter().chain(items).flatten().map(get_item); - } - - // Only returning a partial span, so do it bucket at a time. - let delta = self.delta(until); - let mut buckets = Vec::with_capacity(delta + 1); - - // First, the whole buckets. - for i in 0..delta { - let idx = self.bucket(i); - buckets.push(mem::take(&mut self.items[idx])); - } - self.tick(delta); - - // Now we need to split the last bucket, because there might be - // some items with `item.time > until`. - let bucket = &mut self.items[self.cursor]; - let last_idx = match bucket.binary_search_by_key(&until, TimerItem::time) { - Ok(mut m) => { - // If there are multiple values, the search will hit any of them. - // Make sure to get them all. - while m < bucket.len() && bucket[m].time == until { - m += 1; - } - m - } - Err(ins) => ins, - }; - let tail = bucket.split_off(last_idx); - buckets.push(mem::replace(bucket, tail)); - // This tomfoolery with the empty vector ensures that - // the returned type here matches the one above precisely - // without having to invoke the `either` crate. 
- buckets.into_iter().chain(vec![]).flatten().map(get_item) - } -} - -#[cfg(test)] -mod test { - use std::sync::OnceLock; - - use super::{Duration, Instant, Timer}; - - fn now() -> Instant { - static NOW: OnceLock = OnceLock::new(); - *NOW.get_or_init(Instant::now) - } - - const GRANULARITY: Duration = Duration::from_millis(10); - const CAPACITY: usize = 10; - #[test] - fn create() { - let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY); - assert_eq!(t.span(), Duration::from_millis(100)); - assert_eq!(None, t.next_time()); - } - - #[test] - fn immediate_entry() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - t.add(now(), 12); - assert_eq!(now(), t.next_time().expect("should have an entry")); - let values: Vec<_> = t.take_until(now()).collect(); - assert_eq!(vec![12], values); - } - - #[test] - fn same_time() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let v1 = 12; - let v2 = 13; - t.add(now(), v1); - t.add(now(), v2); - assert_eq!(now(), t.next_time().expect("should have an entry")); - let values: Vec<_> = t.take_until(now()).collect(); - assert!(values.contains(&v1)); - assert!(values.contains(&v2)); - } - - #[test] - fn add() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let near_future = now() + Duration::from_millis(17); - let v = 9; - t.add(near_future, v); - assert_eq!(near_future, t.next_time().expect("should return a value")); - assert_eq!( - t.take_until(near_future.checked_sub(Duration::from_millis(1)).unwrap()) - .count(), - 0 - ); - assert!(t - .take_until(near_future + Duration::from_millis(1)) - .any(|x| x == v)); - } - - #[test] - fn add_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let future = now() + Duration::from_millis(117); - let v = 9; - t.add(future, v); - assert_eq!(future, t.next_time().expect("should return a value")); - assert!(t.take_until(future).any(|x| x == v)); - } - - #[test] - fn add_far_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let far_future = now() + Duration::from_millis(892); - let v = 9; - t.add(far_future, v); - assert_eq!(far_future, t.next_time().expect("should return a value")); - assert!(t.take_until(far_future).any(|x| x == v)); - } - - const TIMES: &[Duration] = &[ - Duration::from_millis(40), - Duration::from_millis(91), - Duration::from_millis(6), - Duration::from_millis(3), - Duration::from_millis(22), - Duration::from_millis(40), - ]; - - fn with_times() -> Timer { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - for (i, time) in TIMES.iter().enumerate() { - t.add(now() + *time, i); - } - assert_eq!( - now() + *TIMES.iter().min().unwrap(), - t.next_time().expect("should have a time") - ); - t - } - - #[test] - #[allow(clippy::needless_collect)] // false positive - fn multiple_values() { - let mut t = with_times(); - let values: Vec<_> = t.take_until(now() + *TIMES.iter().max().unwrap()).collect(); - for i in 0..TIMES.len() { - assert!(values.contains(&i)); - } - } - - #[test] - #[allow(clippy::needless_collect)] // false positive - fn take_far_future() { - let mut t = with_times(); - let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect(); - for i in 0..TIMES.len() { - assert!(values.contains(&i)); - } - } - - #[test] - fn remove_each() { - let mut t = with_times(); - for (i, time) in TIMES.iter().enumerate() { - assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i)); - } - assert_eq!(None, t.next_time()); - } - - #[test] - fn remove_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let 
future = now() + Duration::from_millis(117); - let v = 9; - t.add(future, v); - - assert_eq!(Some(v), t.remove(future, |candidate| *candidate == v)); - } - - #[test] - fn remove_too_far_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let future = now() + Duration::from_millis(117); - let too_far_future = now() + t.span() + Duration::from_millis(117); - let v = 9; - t.add(future, v); - - assert_eq!(None, t.remove(too_far_future, |candidate| *candidate == v)); - } -} diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 96a6244ef1..7d3d144a09 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -15,12 +15,12 @@ use std::{ ops::{Deref, DerefMut}, path::PathBuf, rc::{Rc, Weak}, - time::{Duration, Instant}, + time::Instant, }; use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, - timer::Timer, Datagram, Decoder, Role, + Datagram, Decoder, Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, @@ -46,13 +46,6 @@ pub enum InitialResult { /// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; -/// The size of timer buckets. This is higher than the actual timer granularity -/// as this depends on there being some distribution of events. -const TIMER_GRANULARITY: Duration = Duration::from_millis(4); -/// The number of buckets in the timer. As mentioned in the definition of `Timer`, -/// the granularity and capacity need to multiply to be larger than the largest -/// delay that might be used. That's the idle timeout (currently 30s). -const TIMER_CAPACITY: usize = 16384; type StateRef = Rc>; type ConnectionTableRef = Rc>>; @@ -61,7 +54,21 @@ type ConnectionTableRef = Rc>>; pub struct ServerConnectionState { c: Connection, active_attempt: Option, - last_timer: Instant, + wake_at: Option, +} + +impl ServerConnectionState { + fn set_wake_at(&mut self, at: Instant) { + self.wake_at = Some(at); + } + + fn needs_waking(&self, now: Instant) -> bool { + self.wake_at.map_or(false, |t| t <= now) + } + + fn woken(&mut self) { + self.wake_at = None; + } } impl Deref for ServerConnectionState { @@ -174,8 +181,8 @@ pub struct Server { active: HashSet, /// The set of connections that need immediate processing. waiting: VecDeque, - /// Outstanding timers for connections. - timers: Timer, + /// The latest [`Output::Callback`] returned from [`Server::process`]. + wake_at: Option, /// Address validation logic, which determines whether we send a Retry. 
address_validation: Rc>, /// Directory to create qlog traces in @@ -219,10 +226,10 @@ impl Server { connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), - timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, + wake_at: None, }) } @@ -260,11 +267,6 @@ impl Server { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } - fn remove_timer(&mut self, c: &StateRef) { - let last = c.borrow().last_timer; - self.timers.remove(last, |t| Rc::ptr_eq(t, c)); - } - fn process_connection( &mut self, c: &StateRef, @@ -280,16 +282,12 @@ impl Server { } Output::Callback(delay) => { let next = now + delay; - if next != c.borrow().last_timer { - qtrace!([self], "Change timer to {:?}", next); - self.remove_timer(c); - c.borrow_mut().last_timer = next; - self.timers.add(next, Rc::clone(c)); + c.borrow_mut().set_wake_at(next); + if self.wake_at.map_or(true, |c| c > next) { + self.wake_at = Some(next); } } - Output::None => { - self.remove_timer(c); - } + Output::None => {} } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); @@ -507,7 +505,7 @@ impl Server { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, - last_timer: now, + wake_at: None, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(&c); @@ -646,24 +644,28 @@ impl Server { return Some(d); } } - qtrace!([self], "No packet to send still, run timers"); - while let Some(c) = self.timers.take_next(now) { - if let Some(d) = self.process_connection(&c, None, now) { - return Some(d); + + qtrace!([self], "No packet to send still, check wake up times"); + loop { + let connection = self + .connections + .borrow() + .values() + .find(|c| c.borrow().needs_waking(now)) + .cloned()?; + let datagram = self.process_connection(&connection, None, now); + connection.borrow_mut().woken(); + if datagram.is_some() { + return datagram; } } - None } - fn next_time(&mut self, now: Instant) -> Option { - if self.waiting.is_empty() { - self.timers.next_time().map(|x| x - now) - } else { - Some(Duration::new(0, 0)) + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { + if self.wake_at.map_or(false, |c| c <= now) { + self.wake_at = None; } - } - pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { dgram .and_then(|d| self.process_input(d, now)) .or_else(|| self.process_next_output(now)) @@ -671,12 +673,7 @@ impl Server { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) }) - .or_else(|| { - self.next_time(now).map(|delay| { - qtrace!([self], "Wait: {:?}", delay); - Output::Callback(delay) - }) - }) + .or_else(|| self.wake_at.take().map(|c| Output::Callback(c - now))) .unwrap_or_else(|| { qtrace!([self], "Go dormant"); Output::None From a33fe606766b1f907c60075e7b88992112adc353 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 4 Apr 2024 15:44:04 +0300 Subject: [PATCH 20/20] Set threshold to 0.05% --- .codecov.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.codecov.yml b/.codecov.yml index 3ecf204940..7ff673d877 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -9,3 +9,9 @@ codecov: after_n_builds: 3 comment: after_n_builds: 3 + +coverage: + status: + project: + default: + threshold: 0.05%
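
To make the idea behind [PATCH 19/20] concrete: the server drops its timer wheel and instead tracks only the earliest wake-up instant, polling each connection whose own deadline has passed once there is no other work to do. Below is a minimal, self-contained sketch of that pattern; `Conn`, `Server`, and their `process` methods are hypothetical stand-ins for illustration, not neqo's actual API.

```rust
use std::time::{Duration, Instant};

// Hypothetical stand-in for a QUIC connection; only timer bookkeeping is shown.
struct Conn {
    wake_at: Option<Instant>,
}

impl Conn {
    fn process(&mut self, _now: Instant) {
        // Real per-connection work (loss recovery, idle timeout, ...) would go here.
    }
}

// A server keeping a single earliest wake-up time instead of a timer wheel.
struct Server {
    connections: Vec<Conn>,
    wake_at: Option<Instant>,
}

impl Server {
    // Returns the delay after which the event loop should call `process` again.
    fn process(&mut self, now: Instant) -> Option<Duration> {
        // If the single deadline has passed, clear it and poll only the
        // connections whose own wake-up time is due.
        if self.wake_at.map_or(false, |t| t <= now) {
            self.wake_at = None;
            for c in &mut self.connections {
                if c.wake_at.map_or(false, |t| t <= now) {
                    c.wake_at = None;
                    c.process(now);
                }
            }
        }
        // Recompute the earliest wake-up across all connections.
        self.wake_at = self.connections.iter().filter_map(|c| c.wake_at).min();
        self.wake_at.map(|t| t.saturating_duration_since(now))
    }
}

fn main() {
    let now = Instant::now();
    let mut server = Server {
        connections: vec![
            Conn { wake_at: Some(now + Duration::from_millis(30)) },
            Conn { wake_at: Some(now + Duration::from_millis(10)) },
        ],
        wake_at: None,
    };
    // Prints roughly `Some(10ms)`: the earliest per-connection wake-up time.
    println!("{:?}", server.process(now));
}
```

The trade-off is a linear scan of connections when the deadline fires; as the commit message argues, that is cheaper than maintaining a timer wheel while the number of connections stays small.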