From 514cae0901fe8305341a0c2025c6bf38a70a9001 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Wed, 19 Jul 2023 17:09:41 +0200 Subject: [PATCH 001/321] add test (#1450) * Move datagrams below regular streams This will starve datagrams, which comes with its own risks. Eventually, we'll need a better strategy. * OK, make it run * Or maybe run it this way * Organize imports better * add test * clippy * address comments * clippy * address comments * clippy * clippy * clippy --------- Co-authored-by: Martin Thomson --- neqo-http3/src/send_message.rs | 4 +- neqo-transport/src/connection/mod.rs | 102 +++++++++--------- .../src/connection/tests/datagram.rs | 92 +++++++++++++++- neqo-transport/src/tparams.rs | 4 +- 4 files changed, 144 insertions(+), 58 deletions(-) diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index deb0cf3c34..aaf2e224d2 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -18,7 +18,6 @@ use std::any::Any; use std::cell::RefCell; use std::cmp::min; use std::fmt::Debug; -use std::mem; use std::rc::Rc; const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2 @@ -303,7 +302,6 @@ impl SendStream for SendMessage { Some(self) } - #[allow(clippy::drop_copy)] fn send_data_atomic(&mut self, conn: &mut Connection, buf: &[u8]) -> Res<()> { let data_frame = HFrame::Data { len: buf.len() as u64, @@ -312,7 +310,7 @@ impl SendStream for SendMessage { data_frame.encode(&mut enc); self.stream.buffer(enc.as_ref()); self.stream.buffer(buf); - mem::drop(self.stream.send_buffer(conn)?); + let _ = self.stream.send_buffer(conn)?; Ok(()) } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 0a388ea70a..c04591932a 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -6,30 +6,6 @@ // The class implementing a QUIC connection. 
-use std::{ - cell::RefCell, - cmp::{max, min}, - convert::TryFrom, - fmt::{self, Debug}, - mem, - net::{IpAddr, SocketAddr}, - ops::RangeInclusive, - rc::{Rc, Weak}, - time::{Duration, Instant}, -}; - -use smallvec::SmallVec; - -use neqo_common::{ - event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, - qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, -}; -use neqo_crypto::{ - agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, - HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, - Server, ZeroRttChecker, -}; - use crate::{ addr_valid::{AddressValidation, NewTokenState}, cid::{ @@ -65,6 +41,27 @@ use crate::{ version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; +use neqo_common::{ + event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, + qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, +}; +use neqo_crypto::{ + agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, + HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, + Server, ZeroRttChecker, +}; +use smallvec::SmallVec; +use std::{ + cell::RefCell, + cmp::{max, min}, + convert::TryFrom, + fmt::{self, Debug}, + mem, + net::{IpAddr, SocketAddr}, + ops::RangeInclusive, + rc::{Rc, Weak}, + time::{Duration, Instant}, +}; mod idle; pub mod params; @@ -73,12 +70,13 @@ mod state; #[cfg(test)] pub mod test_internal; +pub use params::{ConnectionParameters, ACK_RATIO_SCALE}; +pub use state::{ClosingFrame, State}; + use idle::IdleTimeout; use params::PreferredAddressConfig; -pub use params::{ConnectionParameters, ACK_RATIO_SCALE}; use saved::SavedDatagrams; use state::StateSignaling; -pub use state::{ClosingFrame, State}; #[derive(Debug, Default)] struct Packet(Vec); @@ -1905,71 +1903,79 @@ impl Connection { builder: &mut PacketBuilder, tokens: 
&mut Vec, ) -> Res<()> { + let stats = &mut self.stats.borrow_mut(); + let frame_stats = &mut stats.frame_tx; if self.role == Role::Server { if let Some(t) = self.state_signaling.write_done(builder)? { tokens.push(t); - self.stats.borrow_mut().frame_tx.handshake_done += 1; + frame_stats.handshake_done += 1; } } - // datagrams are best-effort and unreliable. Let streams starve them for now - // Check if there is a Datagram to be written - self.quic_datagrams - .write_frames(builder, tokens, &mut self.stats.borrow_mut()); - if builder.is_full() { - return Ok(()); - } - - let stats = &mut self.stats.borrow_mut().frame_tx; - self.streams - .write_frames(TransmissionPriority::Critical, builder, tokens, stats); + .write_frames(TransmissionPriority::Critical, builder, tokens, frame_stats); if builder.is_full() { return Ok(()); } - self.streams - .write_frames(TransmissionPriority::Important, builder, tokens, stats); + self.streams.write_frames( + TransmissionPriority::Important, + builder, + tokens, + frame_stats, + ); if builder.is_full() { return Ok(()); } // NEW_CONNECTION_ID, RETIRE_CONNECTION_ID, and ACK_FREQUENCY. - self.cid_manager.write_frames(builder, tokens, stats)?; + self.cid_manager + .write_frames(builder, tokens, frame_stats)?; if builder.is_full() { return Ok(()); } - self.paths.write_frames(builder, tokens, stats)?; + self.paths.write_frames(builder, tokens, frame_stats)?; if builder.is_full() { return Ok(()); } self.streams - .write_frames(TransmissionPriority::High, builder, tokens, stats); + .write_frames(TransmissionPriority::High, builder, tokens, frame_stats); if builder.is_full() { return Ok(()); } self.streams - .write_frames(TransmissionPriority::Normal, builder, tokens, stats); + .write_frames(TransmissionPriority::Normal, builder, tokens, frame_stats); + if builder.is_full() { + return Ok(()); + } + + // Datagrams are best-effort and unreliable. Let streams starve them for now. 
+ self.quic_datagrams.write_frames(builder, tokens, stats); if builder.is_full() { return Ok(()); } + let frame_stats = &mut stats.frame_tx; // CRYPTO here only includes NewSessionTicket, plus NEW_TOKEN. // Both of these are only used for resumption and so can be relatively low priority. - self.crypto - .write_frame(PacketNumberSpace::ApplicationData, builder, tokens, stats)?; + self.crypto.write_frame( + PacketNumberSpace::ApplicationData, + builder, + tokens, + frame_stats, + )?; if builder.is_full() { return Ok(()); } - self.new_token.write_frames(builder, tokens, stats)?; + self.new_token.write_frames(builder, tokens, frame_stats)?; if builder.is_full() { return Ok(()); } self.streams - .write_frames(TransmissionPriority::Low, builder, tokens, stats); + .write_frames(TransmissionPriority::Low, builder, tokens, frame_stats); #[cfg(test)] { diff --git a/neqo-transport/src/connection/tests/datagram.rs b/neqo-transport/src/connection/tests/datagram.rs index f81f52ee98..4348f2dd3b 100644 --- a/neqo-transport/src/connection/tests/datagram.rs +++ b/neqo-transport/src/connection/tests/datagram.rs @@ -12,11 +12,12 @@ use crate::events::{ConnectionEvent, OutgoingDatagramOutcome}; use crate::frame::FRAME_TYPE_DATAGRAM; use crate::packet::PacketBuilder; use crate::quic_datagrams::MAX_QUIC_DATAGRAM; -use crate::{Connection, ConnectionError, ConnectionParameters, Error}; +use crate::{ + send_stream::{RetransmissionPriority, TransmissionPriority}, + Connection, ConnectionError, ConnectionParameters, Error, StreamType, +}; use neqo_common::event::Provider; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::rc::Rc; +use std::{cell::RefCell, convert::TryFrom, rc::Rc}; use test_fixture::now; const DATAGRAM_LEN_MTU: u64 = 1310; @@ -224,6 +225,89 @@ fn datagram_acked() { )); } +fn send_packet_and_get_server_event( + client: &mut Connection, + server: &mut Connection, +) -> ConnectionEvent { + let out = client.process_output(now()).dgram(); + 
server.process_input(out.unwrap(), now()); + let mut events: Vec<_> = server + .events() + .filter_map(|evt| match evt { + ConnectionEvent::RecvStreamReadable { .. } | ConnectionEvent::Datagram { .. } => { + Some(evt) + } + _ => None, + }) + .collect(); + // We should only get one event - either RecvStreamReadable or Datagram. + assert_eq!(events.len(), 1); + events.remove(0) +} + +/// Write a datagram that is big enough to fill a packet, but then see that +/// normal priority stream data is sent first. +#[test] +fn datagram_after_stream_data() { + let (mut client, mut server) = connect_datagram(); + + // Write a datagram first. + let dgram_sent = client.stats().frame_tx.datagram; + assert_eq!(client.send_datagram(DATA_MTU, Some(1)), Ok(())); + + // Create a stream with normal priority and send some data. + let stream_id = client.stream_create(StreamType::BiDi).unwrap(); + client.stream_send(stream_id, &[6; 1200]).unwrap(); + + assert!( + matches!(send_packet_and_get_server_event(&mut client, &mut server), ConnectionEvent::RecvStreamReadable { stream_id: s } if s == stream_id) + ); + assert_eq!(client.stats().frame_tx.datagram, dgram_sent); + + if let ConnectionEvent::Datagram(data) = + &send_packet_and_get_server_event(&mut client, &mut server) + { + assert_eq!(data, DATA_MTU); + } else { + panic!(); + } + assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); +} + +#[test] +fn datagram_before_stream_data() { + let (mut client, mut server) = connect_datagram(); + + // Create a stream with low priority and send some data before datagram. + let stream_id = client.stream_create(StreamType::BiDi).unwrap(); + client + .stream_priority( + stream_id, + TransmissionPriority::Low, + RetransmissionPriority::default(), + ) + .unwrap(); + client.stream_send(stream_id, &[6; 1200]).unwrap(); + + // Write a datagram. 
+ let dgram_sent = client.stats().frame_tx.datagram; + assert_eq!(client.send_datagram(DATA_MTU, Some(1)), Ok(())); + + if let ConnectionEvent::Datagram(data) = + &send_packet_and_get_server_event(&mut client, &mut server) + { + assert_eq!(data, DATA_MTU); + } else { + panic!(); + } + assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); + + assert!( + matches!(send_packet_and_get_server_event(&mut client, &mut server), ConnectionEvent::RecvStreamReadable { stream_id: s } if s == stream_id) + ); + assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); +} + #[test] fn datagram_lost() { let (mut client, _) = connect_datagram(); diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index e2150b0627..e9a25fd52f 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -750,7 +750,6 @@ where #[allow(unused_variables)] mod tests { use super::*; - use std::mem; #[test] fn basic_tps() { @@ -937,8 +936,7 @@ mod tests { #[test] #[should_panic] fn preferred_address_neither() { - #[allow(clippy::drop_copy)] - mem::drop(PreferredAddress::new(None, None)); + _ = PreferredAddress::new(None, None); } #[test] From 23ae8e8b75b412c1c10fa44fc3f68e5422342b86 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Wed, 19 Jul 2023 18:00:15 +0200 Subject: [PATCH 002/321] neqo v0.6.5 (#1452) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index bc679c3e10..de5d4f4e57 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.4" +version = "0.6.5" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git 
a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 04571ff8ea..fdca15b231 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.4" +version = "0.6.5" authors = ["Bobby Holley "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 5903a227d9..0f6861996b 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.4" +version = "0.6.5" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 78a77a29cd..ac33450c6d 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.4" +version = "0.6.5" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index d569992185..7a5d68ae28 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.4" +version = "0.6.5" authors = ["EKR "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 033b2d71a1..3d3e10c87d 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.4" +version = "0.6.5" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 74e597c95e..9e823787b4 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.4" +version = "0.6.5" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index aa6f1ad1b9..32953a3fe6 100644 --- a/neqo-transport/Cargo.toml +++ 
b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.4" +version = "0.6.5" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.65.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index f7c54fbf27..5f14bb1d66 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.4" +version = "0.6.5" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" From 520ce6ffbb1ff8e8c65c5d5222ffdea9fc489858 Mon Sep 17 00:00:00 2001 From: Lucas Pardue Date: Sun, 23 Jul 2023 21:20:28 +0100 Subject: [PATCH 003/321] Describe purpose of H3 qlog file for consistency (#1455) --- neqo-http3/src/qlog.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neqo-http3/src/qlog.rs b/neqo-http3/src/qlog.rs index 3d43a2c906..c77f951739 100644 --- a/neqo-http3/src/qlog.rs +++ b/neqo-http3/src/qlog.rs @@ -4,6 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// Functions that handle capturing QLOG traces. 
+ use std::convert::TryFrom; use qlog::{self, event::Event, H3DataRecipient}; From 88c7e4931ee6d196cbe5ff97cbe9cd9c7671a880 Mon Sep 17 00:00:00 2001 From: Andrea Frigido Date: Mon, 24 Jul 2023 11:57:08 +0100 Subject: [PATCH 004/321] Update license field following SPDX 2.1 license expression standard (#1453) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index de5d4f4e57..4f8c959439 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -6,7 +6,7 @@ authors = ["Martin Thomson ", "Andy Grover "] edition = "2018" rust-version = "1.65.0" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-crypto = { path = "./../neqo-crypto" } diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index fdca15b231..3ab37074a4 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.5" authors = ["Bobby Holley "] edition = "2018" rust-version = "1.65.0" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" build = "build.rs" [dependencies] diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 0f6861996b..abccfad3ef 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" build = "build.rs" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-common = { path = "../neqo-common" } diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index ac33450c6d..46b0835351 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.5" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" 
-license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-common = { path = "./../neqo-common" } diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 7a5d68ae28..2ab98089ec 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.5" authors = ["EKR "] edition = "2018" rust-version = "1.65.0" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-crypto = { path = "./../neqo-crypto" } diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 3d3e10c87d..e0ba853d3d 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.5" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-common = { path = "./../neqo-common" } diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 9e823787b4..cd79a436ed 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.5" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-crypto = { path = "./../neqo-crypto" } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 32953a3fe6..38a6404580 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.5" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.65.0" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-crypto = { path = "../neqo-crypto" } diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 5f14bb1d66..87b3b2a074 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.5" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" [dependencies] neqo-common = { 
path = "../neqo-common" } From 2f99de40254e6ba0bf3acb6cf6751653f08c6b72 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 30 Aug 2023 23:38:48 +1000 Subject: [PATCH 005/321] General maintenance (#1459) * Stop using internal log crate macros * Pin to the last version of time that works with 1.65 * One version only * Fix clippy errors * More lint refinement * Fix Header::new clippy complains about this not taking a reference. But we want it to take a value, because there are cases for taking a `str` reference and cases for taking a value. So, switch to `Into`. --- neqo-common/Cargo.toml | 2 +- neqo-common/src/codec.rs | 5 ++- neqo-common/src/header.rs | 11 +++-- neqo-common/src/incrdecoder.rs | 3 +- neqo-common/src/log.rs | 2 +- neqo-common/src/timer.rs | 7 ++- neqo-crypto/src/agent.rs | 2 + neqo-crypto/src/hp.rs | 4 +- neqo-crypto/src/lib.rs | 2 + neqo-crypto/src/p11.rs | 5 ++- neqo-crypto/tests/agent.rs | 2 +- neqo-http3/src/client_events.rs | 2 +- neqo-http3/src/connection_client.rs | 6 ++- neqo-http3/src/control_stream_local.rs | 4 +- neqo-http3/src/frames/wtframe.rs | 4 +- neqo-qpack/src/encoder.rs | 2 +- neqo-qpack/src/header_block.rs | 45 ++++++++++--------- neqo-qpack/src/lib.rs | 2 +- neqo-qpack/src/reader.rs | 25 ++++------- neqo-qpack/src/table.rs | 2 +- neqo-server/src/main.rs | 2 +- .../src/connection/tests/handshake.rs | 13 +++--- neqo-transport/src/recv_stream.rs | 3 +- neqo-transport/src/send_stream.rs | 4 +- test-fixture/src/lib.rs | 5 +++ 25 files changed, 95 insertions(+), 69 deletions(-) diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 3ab37074a4..c7d9c150ff 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -12,7 +12,7 @@ log = {version = "0.4.0", default-features = false} env_logger = {version = "0.10", default-features = false} lazy_static = "1.3.0" qlog = "0.4.0" -time = {version = "0.3", features = ["formatting"]} +time = {version = "=0.3.23", features = ["formatting"]} [features] default = 
["deny-warnings"] diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 6c8f3bd5a3..5eb23666fd 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -107,9 +107,10 @@ impl<'a> Decoder<'a> { } /// Decodes a QUIC varint. - #[allow(clippy::missing_panics_doc)] // See https://github.com/rust-lang/rust-clippy/issues/6699 pub fn decode_varint(&mut self) -> Option { - let Some(b1) = self.decode_byte() else { return None }; + let Some(b1) = self.decode_byte() else { + return None; + }; match b1 >> 6 { 0 => Some(u64::from(b1 & 0x3f)), 1 => Some((u64::from(b1 & 0x3f) << 8) | self.decode_uint(1)?), diff --git a/neqo-common/src/header.rs b/neqo-common/src/header.rs index 101c8ef91c..112fcf0057 100644 --- a/neqo-common/src/header.rs +++ b/neqo-common/src/header.rs @@ -11,11 +11,14 @@ pub struct Header { } impl Header { - #[allow(clippy::needless_pass_by_value)] - pub fn new(name: impl ToString, value: impl ToString) -> Self { + pub fn new(name: N, value: V) -> Self + where + N: Into + ?Sized, + V: Into + ?Sized, + { Self { - name: name.to_string(), - value: value.to_string(), + name: name.into(), + value: value.into(), } } diff --git a/neqo-common/src/incrdecoder.rs b/neqo-common/src/incrdecoder.rs index 351de240f0..e78a90f786 100644 --- a/neqo-common/src/incrdecoder.rs +++ b/neqo-common/src/incrdecoder.rs @@ -21,7 +21,8 @@ impl IncrementalDecoderUint { } /// Consume some data. - #[allow(clippy::missing_panics_doc)] // See https://github.com/rust-lang/rust-clippy/issues/6699 + /// # Panics + /// Never, but this is not something the compiler can tell. pub fn consume(&mut self, dv: &mut Decoder) -> Option { if let Some(r) = &mut self.remaining { let amount = min(*r, dv.remaining()); diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index 6cd22b4901..e376765523 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -29,7 +29,7 @@ macro_rules! 
do_log { ); } }); - ($lvl:expr, $($arg:tt)+) => ($crate::do_log!(target: ::log::__log_module_path!(), $lvl, $($arg)+)) + ($lvl:expr, $($arg:tt)+) => ($crate::do_log!(target: module_path!(), $lvl, $($arg)+)) } #[macro_export] diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index 83836ad773..24cb0abdbc 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -75,7 +75,7 @@ impl Timer { #[inline] #[allow(clippy::cast_possible_truncation)] // guarded by assertion fn delta(&self, time: Instant) -> usize { - // This really should use Instant::div_duration(), but it can't yet. + // This really should use Duration::div_duration_f??(), but it can't yet. ((time - self.now).as_nanos() / self.granularity.as_nanos()) as usize } @@ -151,7 +151,10 @@ impl Timer { return None; } let bucket = self.time_bucket(time); - let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time) else { return None }; + let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time) + else { + return None; + }; // start_index is just one of potentially many items with the same time. // Search backwards for a match, ... for i in (0..=start_index).rev() { diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index e02788fbdb..b3ddb0affc 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -716,6 +716,8 @@ impl SecretAgent { Ok(*Pin::into_inner(records)) } + /// # Panics + /// If setup fails. #[allow(unknown_lints, clippy::branches_sharing_code)] pub fn close(&mut self) { // It should be safe to close multiple times. 
diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index ee2f77cea0..fea67e9953 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -164,9 +164,9 @@ impl HpKey { Self::Chacha(key) => { let params: CK_CHACHA20_PARAMS = CK_CHACHA20_PARAMS { - pBlockCounter: sample.as_ptr() as *mut u8, + pBlockCounter: sample.as_ptr().cast_mut(), blockCounterBits: 32, - pNonce: sample[4..Self::SAMPLE_SIZE].as_ptr() as *mut _, + pNonce: sample[4..Self::SAMPLE_SIZE].as_ptr().cast_mut(), ulNonceBits: 96, }; let mut output_len: c_uint = 0; diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 4a5673107e..332e58a033 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -120,6 +120,8 @@ fn version_check() { } /// Initialize NSS. This only executes the initialization routines once, so if there is any chance that +/// # Panics +/// When NSS initialization fails. pub fn init() { // Set time zero. time::init(); diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index 3f60577369..c7e47cbf15 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -237,7 +237,7 @@ impl Item { pub fn wrap(buf: &[u8]) -> SECItem { SECItem { type_: SECItemType::siBuffer, - data: buf.as_ptr() as *mut u8, + data: buf.as_ptr().cast_mut(), len: c_uint::try_from(buf.len()).unwrap(), } } @@ -247,9 +247,10 @@ impl Item { /// Minimally, it can only be passed as a `const SECItem*` argument to functions, /// or those that treat their argument as `const`. 
pub fn wrap_struct(v: &T) -> SECItem { + let data: *const T = v; SECItem { type_: SECItemType::siBuffer, - data: (v as *const T as *mut T).cast(), + data: data.cast_mut().cast(), len: c_uint::try_from(mem::size_of::()).unwrap(), } } diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index d487062d51..82e105fd1a 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -439,7 +439,7 @@ fn ech_retry() { HandshakeState::EchFallbackAuthenticationPending(String::from(PUBLIC_NAME)) ); client.authenticated(AuthenticationStatus::Ok); - let Err(Error::EchRetry(updated_config)) = client.handshake_raw(now(), None) else { + let Err(Error::EchRetry(updated_config)) = client.handshake_raw(now(), None) else { panic!( "Handshake should fail with EchRetry, state is instead {:?}", client.state() diff --git a/neqo-http3/src/client_events.rs b/neqo-http3/src/client_events.rs index b4fdde8e13..f21ec5929e 100644 --- a/neqo-http3/src/client_events.rs +++ b/neqo-http3/src/client_events.rs @@ -338,7 +338,7 @@ impl Http3ClientEvents { } pub fn has_push(&self, push_id: u64) -> bool { - for iter in self.events.borrow().iter() { + for iter in &*self.events.borrow() { if matches!(iter, Http3ClientEvent::PushPromise{push_id:x, ..} if *x == push_id) { return true; } diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 8d0d78922a..17ab0a9857 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -399,6 +399,8 @@ impl Http3Client { /// Get the connection id, which is useful for disambiguating connections to /// the same origin. + /// # Panics + /// Never, because clients always have this field. 
#[must_use] pub fn connection_id(&self) -> &ConnectionId { self.conn.odcid().expect("Client always has odcid") @@ -445,7 +447,9 @@ impl Http3Client { return Err(Error::InvalidState); } let mut dec = Decoder::from(token.as_ref()); - let Some(settings_slice) = dec.decode_vvec() else { return Err(Error::InvalidResumptionToken) }; + let Some(settings_slice) = dec.decode_vvec() else { + return Err(Error::InvalidResumptionToken); + }; qtrace!([self], " settings {}", hex_with_len(settings_slice)); let mut dec_settings = Decoder::from(settings_slice); let mut settings = HSettings::default(); diff --git a/neqo-http3/src/control_stream_local.rs b/neqo-http3/src/control_stream_local.rs index a1842476e6..e6d63c3502 100644 --- a/neqo-http3/src/control_stream_local.rs +++ b/neqo-http3/src/control_stream_local.rs @@ -63,7 +63,9 @@ impl ControlStreamLocal { ) -> Res<()> { // send all necessary priority updates while let Some(update_id) = self.outstanding_priority_update.pop_front() { - let Some(update_stream) = recv_conn.get_mut(&update_id) else { continue }; + let Some(update_stream) = recv_conn.get_mut(&update_id) else { + continue; + }; // can assert and unwrap here, because priority updates can only be added to // HttpStreams in [Http3Connection::queue_update_priority} diff --git a/neqo-http3/src/frames/wtframe.rs b/neqo-http3/src/frames/wtframe.rs index 091c3fabe6..b5f76161c5 100644 --- a/neqo-http3/src/frames/wtframe.rs +++ b/neqo-http3/src/frames/wtframe.rs @@ -42,7 +42,9 @@ impl FrameDecoder for WebTransportFrame { } let error = u32::try_from(dec.decode_uint(4).ok_or(Error::HttpMessageError)?).unwrap(); - let Ok(message) = String::from_utf8(dec.decode_remainder().to_vec()) else { return Err(Error::HttpMessageError) }; + let Ok(message) = String::from_utf8(dec.decode_remainder().to_vec()) else { + return Err(Error::HttpMessageError); + }; Ok(Some(WebTransportFrame::CloseSession { error, message })) } else { Ok(None) diff --git a/neqo-qpack/src/encoder.rs 
b/neqo-qpack/src/encoder.rs index a5ebd01666..90d4b65709 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -376,7 +376,7 @@ impl QPackEncoder { let mut ref_entries = HashSet::new(); - for iter in h.iter() { + for iter in h { let name = iter.name().as_bytes().to_vec(); let value = iter.value().as_bytes().to_vec(); qtrace!("encoding {:x?} {:x?}.", name, value); diff --git a/neqo-qpack/src/header_block.rs b/neqo-qpack/src/header_block.rs index 38f8738df9..3b37db120e 100644 --- a/neqo-qpack/src/header_block.rs +++ b/neqo-qpack/src/header_block.rs @@ -4,19 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::prefix::{ - BASE_PREFIX_NEGATIVE, BASE_PREFIX_POSITIVE, HEADER_FIELD_INDEX_DYNAMIC, - HEADER_FIELD_INDEX_DYNAMIC_POST, HEADER_FIELD_INDEX_STATIC, HEADER_FIELD_LITERAL_NAME_LITERAL, - HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC, HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST, - HEADER_FIELD_LITERAL_NAME_REF_STATIC, NO_PREFIX, +use crate::{ + prefix::{ + BASE_PREFIX_NEGATIVE, BASE_PREFIX_POSITIVE, HEADER_FIELD_INDEX_DYNAMIC, + HEADER_FIELD_INDEX_DYNAMIC_POST, HEADER_FIELD_INDEX_STATIC, + HEADER_FIELD_LITERAL_NAME_LITERAL, HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC, + HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST, HEADER_FIELD_LITERAL_NAME_REF_STATIC, + NO_PREFIX, + }, + qpack_send_buf::QpackData, + reader::{parse_utf8, ReceiverBufferWrapper}, + table::HeaderTable, + Error, Res, }; -use crate::qpack_send_buf::QpackData; -use crate::reader::{to_string, ReceiverBufferWrapper}; -use crate::table::HeaderTable; -use crate::{Error, Res}; use neqo_common::{qtrace, Header}; -use std::mem; -use std::ops::{Deref, Div}; +use std::{ + mem, + ops::{Deref, Div}, +}; #[derive(Default, Debug, PartialEq)] pub struct HeaderEncoder { @@ -331,8 +336,8 @@ impl<'a> HeaderDecoder<'a> { qtrace!([self], "decoder static indexed {}.", index); let entry = HeaderTable::get_static(index)?; Ok(Header::new( - 
to_string(entry.name())?, - to_string(entry.value())?, + parse_utf8(entry.name())?, + parse_utf8(entry.value())?, )) } @@ -343,8 +348,8 @@ impl<'a> HeaderDecoder<'a> { qtrace!([self], "decoder dynamic indexed {}.", index); let entry = table.get_dynamic(index, self.base, false)?; Ok(Header::new( - to_string(entry.name())?, - to_string(entry.value())?, + parse_utf8(entry.name())?, + parse_utf8(entry.value())?, )) } @@ -355,8 +360,8 @@ impl<'a> HeaderDecoder<'a> { qtrace!([self], "decode post-based {}.", index); let entry = table.get_dynamic(index, self.base, true)?; Ok(Header::new( - to_string(entry.name())?, - to_string(entry.value())?, + parse_utf8(entry.name())?, + parse_utf8(entry.value())?, )) } @@ -371,7 +376,7 @@ impl<'a> HeaderDecoder<'a> { .read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_STATIC.len())?; Ok(Header::new( - to_string(HeaderTable::get_static(index)?.name())?, + parse_utf8(HeaderTable::get_static(index)?.name())?, self.buf.read_literal_from_buffer(0)?, )) } @@ -387,7 +392,7 @@ impl<'a> HeaderDecoder<'a> { .read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC.len())?; Ok(Header::new( - to_string(table.get_dynamic(index, self.base, false)?.name())?, + parse_utf8(table.get_dynamic(index, self.base, false)?.name())?, self.buf.read_literal_from_buffer(0)?, )) } @@ -400,7 +405,7 @@ impl<'a> HeaderDecoder<'a> { .read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST.len())?; Ok(Header::new( - to_string(table.get_dynamic(index, self.base, true)?.name())?, + parse_utf8(table.get_dynamic(index, self.base, true)?.name())?, self.buf.read_literal_from_buffer(0)?, )) } diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index c2d3bd8359..86ccb11ff8 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -50,7 +50,7 @@ pub enum Error { NeedMoreData, // Return when an input stream does not have more data that a decoder needs.(It does not mean that a stream is closed.) 
HeaderLookup, HuffmanDecompressionFailed, - ToStringFailed, + BadUtf8, ChangeCapacity, DynamicTableFull, IncrementAck, diff --git a/neqo-qpack/src/reader.rs b/neqo-qpack/src/reader.rs index 386a25ffc1..f47471005d 100644 --- a/neqo-qpack/src/reader.rs +++ b/neqo-qpack/src/reader.rs @@ -4,14 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::huffman::decode_huffman; -use crate::prefix::Prefix; -use crate::{Error, Res}; +use crate::{huffman::decode_huffman, prefix::Prefix, Error, Res}; use neqo_common::{qdebug, qerror}; use neqo_transport::{Connection, StreamId}; -use std::convert::TryInto; -use std::mem; -use std::str; +use std::{convert::TryInto, mem, str}; pub trait ReadByte { /// # Errors @@ -130,9 +126,9 @@ impl<'a> ReceiverBufferWrapper<'a> { .try_into() .or(Err(Error::DecompressionFailed))?; if use_huffman { - Ok(to_string(&decode_huffman(self.slice(length)?)?)?) + Ok(parse_utf8(&decode_huffman(self.slice(length)?)?)?.to_string()) } else { - Ok(to_string(self.slice(length)?)?) + Ok(parse_utf8(self.slice(length)?)?.to_string()) } } @@ -314,12 +310,9 @@ impl LiteralReader { /// This is a helper function used only by `ReceiverBufferWrapper`, therefore it returns /// `DecompressionFailed` if any error happens. /// # Errors -/// If an parsing error occurred, the function returns `ToStringFailed`. -pub fn to_string(v: &[u8]) -> Res { - match str::from_utf8(v) { - Ok(s) => Ok(s.to_string()), - Err(_) => Err(Error::ToStringFailed), - } +/// If an parsing error occurred, the function returns `BadUtf8`. 
+pub fn parse_utf8(v: &[u8]) -> Res<&str> { + str::from_utf8(v).map_err(|_| Error::BadUtf8) } #[cfg(test)] @@ -366,7 +359,7 @@ pub(crate) mod test_receiver { mod tests { use super::{ - str, test_receiver, to_string, Error, IntReader, LiteralReader, ReadByte, + parse_utf8, str, test_receiver, Error, IntReader, LiteralReader, ReadByte, ReceiverBufferWrapper, Res, }; use test_receiver::TestReceiver; @@ -531,7 +524,7 @@ mod tests { let mut test_receiver: TestReceiver = TestReceiver::default(); test_receiver.write(&buf[1..]); assert_eq!( - to_string(&reader.read(&mut test_receiver).unwrap()).unwrap(), + parse_utf8(&reader.read(&mut test_receiver).unwrap()).unwrap(), *value ); } diff --git a/neqo-qpack/src/table.rs b/neqo-qpack/src/table.rs index 8b2d70edce..cc9844ee27 100644 --- a/neqo-qpack/src/table.rs +++ b/neqo-qpack/src/table.rs @@ -197,7 +197,7 @@ impl HeaderTable { can_block ); let mut name_match = None; - for iter in HEADER_STATIC_TABLE.iter() { + for iter in HEADER_STATIC_TABLE { if iter.name() == name { if iter.value() == value { return Some(LookupResult { diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 79c54adaa2..6ab0263f64 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -492,7 +492,7 @@ impl HttpServer for SimpleServer { stream .send_headers(&[ Header::new(":status", "200"), - Header::new("content-length", response.remaining), + Header::new("content-length", response.remaining.to_string()), ]) .unwrap(); response.send(&mut stream); diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 76e0c0a898..5083ee7dcb 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -927,12 +927,13 @@ fn ech_retry() { Some(&ConnectionError::Transport(Error::PeerError(0x100 + 121))) ); - let Some(ConnectionError::Transport(Error::EchRetry(updated_config))) = client.state().error() else { - panic!( - "Client 
state should be failed with EchRetry, is {:?}", - client.state() - ); - }; + let Some(ConnectionError::Transport(Error::EchRetry(updated_config))) = client.state().error() + else { + panic!( + "Client state should be failed with EchRetry, is {:?}", + client.state() + ); + }; let mut server = default_server(); server diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index fbd2fad7bb..ff7b497a5a 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -991,6 +991,7 @@ mod tests { } #[test] + #[allow(unknown_lints, clippy::single_range_in_vec_init)] // Because that lint makes no sense here. fn recv_noncontiguous() { // Non-contiguous with the start, no data available. recv_ranges(&[10..20], 0); @@ -1109,7 +1110,7 @@ mod tests { s.inbound_frame(offset, &[0; EXTRA_SIZE]); // Read, providing only enough space for the first. - let mut buf = vec![0; 100]; + let mut buf = [0; 100]; let count = s.read(&mut buf[..CHUNK_SIZE]); assert_eq!(count, CHUNK_SIZE); let count = s.read(&mut buf[..]); diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 4a2bf08002..21877ab23d 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -1807,7 +1807,7 @@ mod tests { // Mark all as sent. Get nothing txb.mark_as_sent(0, SEND_BUFFER_SIZE); - assert!(matches!(txb.next_bytes(), None)); + assert!(txb.next_bytes().is_none()); // Mark as lost. Get it again txb.mark_as_lost(one_byte_from_end, 1); @@ -1908,7 +1908,7 @@ mod tests { // No more bytes. txb.mark_as_sent(range_a_end, 60); - assert!(matches!(txb.next_bytes(), None)); + assert!(txb.next_bytes().is_none()); } #[test] diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 8b1b5ef419..f0830415b4 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -124,6 +124,9 @@ impl ConnectionIdGenerator for CountingConnectionIdGenerator { } } +/// Create a new client. 
+/// # Panics +/// If this doesn't work. #[must_use] pub fn new_client(params: ConnectionParameters) -> Connection { fixture_init(); @@ -158,6 +161,8 @@ pub fn default_server_h3() -> Connection { } /// Create a transport server with a configuration. +/// # Panics +/// If this doesn't work. #[must_use] pub fn new_server(alpn: &[impl AsRef], params: ConnectionParameters) -> Connection { fixture_init(); From e5a413cc8101c7ee20f9656bb9787ab02e3b468f Mon Sep 17 00:00:00 2001 From: jesup Date: Mon, 18 Sep 2023 05:02:11 -0400 Subject: [PATCH 006/321] Make the external set_sendorder() take an Option (#1461) --- neqo-http3/src/connection_client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 17ab0a9857..51cd8e2935 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -767,9 +767,9 @@ impl Http3Client { pub fn webtransport_set_sendorder( &mut self, stream_id: StreamId, - sendorder: SendOrder, + sendorder: Option, ) -> Res<()> { - Http3Connection::stream_set_sendorder(&mut self.conn, stream_id, Some(sendorder)) + Http3Connection::stream_set_sendorder(&mut self.conn, stream_id, sendorder) } /// Sets the `Fairness` for a given stream From 4a6fcf0e2b58d9e9b2e4f0bc72daece5b1529431 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 22 Sep 2023 01:24:15 +0200 Subject: [PATCH 007/321] Add datagram frame stats to printout (#1462) --- neqo-transport/src/stats.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index dd8f8d4db5..9428b61949 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -9,11 +9,13 @@ use crate::packet::PacketNumber; use neqo_common::qinfo; -use std::cell::RefCell; -use std::fmt::{self, Debug}; -use std::ops::Deref; -use std::rc::Rc; -use std::time::Duration; +use std::{ + cell::RefCell, + fmt::{self, 
Debug}, + ops::Deref, + rc::Rc, + time::Duration, +}; pub(crate) const MAX_PTO_COUNTS: usize = 16; @@ -81,6 +83,7 @@ impl Debug for FrameStats { " blocked: stream {} data {} stream_data {}", self.streams_blocked, self.data_blocked, self.stream_data_blocked, )?; + writeln!(f, " datagram {}", self.datagram)?; writeln!( f, " ncid {} rcid {} pchallenge {} presponse {}", @@ -89,7 +92,7 @@ impl Debug for FrameStats { self.path_challenge, self.path_response, )?; - writeln!(f, " ack_frequency {} ", self.ack_frequency) + writeln!(f, " ack_frequency {}", self.ack_frequency) } } From fb3c3b571bc76887d1da4d1ca7c6713508cff0c1 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Fri, 29 Sep 2023 03:45:19 +0200 Subject: [PATCH 008/321] Fix terminology in cubic beta variable (#1464) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The variables were used correctly, so no actual bug was fixed. Dividend ÷ Divisor = Quotient 7 ÷ 10 = 0.7 https://en.wikipedia.org/w/index.php?title=Quotient&oldid=1175290601#Notation --- neqo-transport/src/cc/classic_cc.rs | 4 ++-- neqo-transport/src/cc/cubic.rs | 6 +++--- neqo-transport/src/cc/tests/cubic.rs | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index fbaa5e1227..bdfafbec96 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -494,7 +494,7 @@ mod tests { use super::{ ClassicCongestionControl, WindowAdjustment, CWND_INITIAL, CWND_MIN, PERSISTENT_CONG_THRESH, }; - use crate::cc::cubic::{Cubic, CUBIC_BETA_USIZE_DIVISOR, CUBIC_BETA_USIZE_QUOTIENT}; + use crate::cc::cubic::{Cubic, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR}; use crate::cc::new_reno::NewReno; use crate::cc::{ CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, @@ -580,7 +580,7 @@ mod tests { ); persistent_congestion_by_algorithm( CongestionControlAlgorithm::Cubic, 
- CWND_INITIAL * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR, + CWND_INITIAL * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR, lost_packets, persistent_expected, ); diff --git a/neqo-transport/src/cc/cubic.rs b/neqo-transport/src/cc/cubic.rs index ab3fedb74f..a7d7b845fd 100644 --- a/neqo-transport/src/cc/cubic.rs +++ b/neqo-transport/src/cc/cubic.rs @@ -21,7 +21,7 @@ pub const CUBIC_C: f64 = 0.4; pub const CUBIC_ALPHA: f64 = 3.0 * (1.0 - 0.7) / (1.0 + 0.7); // CUBIC_BETA = 0.7; -pub const CUBIC_BETA_USIZE_QUOTIENT: usize = 7; +pub const CUBIC_BETA_USIZE_DIVIDEND: usize = 7; pub const CUBIC_BETA_USIZE_DIVISOR: usize = 10; /// The fast convergence ratio further reduces the congestion window when a congestion event @@ -188,8 +188,8 @@ impl WindowAdjustment for Cubic { }; self.ca_epoch_start = None; ( - curr_cwnd * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR, - acked_bytes * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR, + curr_cwnd * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR, + acked_bytes * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR, ) } diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index a737f90f7c..d93643583d 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -11,7 +11,7 @@ use crate::{ cc::{ classic_cc::{ClassicCongestionControl, CWND_INITIAL}, cubic::{ - Cubic, CUBIC_ALPHA, CUBIC_BETA_USIZE_DIVISOR, CUBIC_BETA_USIZE_QUOTIENT, CUBIC_C, + Cubic, CUBIC_ALPHA, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR, CUBIC_C, CUBIC_FAST_CONVERGENCE, }, CongestionControl, MAX_DATAGRAM_SIZE, MAX_DATAGRAM_SIZE_F64, @@ -30,9 +30,9 @@ const RTT: Duration = Duration::from_millis(100); const CWND_INITIAL_F64: f64 = 10.0 * MAX_DATAGRAM_SIZE_F64; const CWND_INITIAL_10_F64: f64 = 10.0 * CWND_INITIAL_F64; const CWND_INITIAL_10: usize = 10 * CWND_INITIAL; -const CWND_AFTER_LOSS: usize = CWND_INITIAL * CUBIC_BETA_USIZE_QUOTIENT / 
CUBIC_BETA_USIZE_DIVISOR; +const CWND_AFTER_LOSS: usize = CWND_INITIAL * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR; const CWND_AFTER_LOSS_SLOW_START: usize = - (CWND_INITIAL + MAX_DATAGRAM_SIZE) * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR; + (CWND_INITIAL + MAX_DATAGRAM_SIZE) * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR; fn fill_cwnd(cc: &mut ClassicCongestionControl, mut next_pn: u64, now: Instant) -> u64 { while cc.bytes_in_flight() < cc.cwnd() { From 9e6065f944ff5ffb4499700e949a5aec9d3044ff Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Fri, 13 Oct 2023 00:54:39 +0200 Subject: [PATCH 009/321] Fix clippy lints for clippy 0.1.73 (cc66ad4 2023-10-03) (#1466) --- neqo-common/src/codec.rs | 33 +++++++++---------- neqo-crypto/src/aead.rs | 4 +-- neqo-crypto/src/agent.rs | 4 +-- neqo-http3/src/server.rs | 2 +- neqo-qpack/src/encoder.rs | 2 +- neqo-transport/src/connection/tests/stream.rs | 2 +- 6 files changed, 23 insertions(+), 24 deletions(-) diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 5eb23666fd..2df11b3c64 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -128,8 +128,7 @@ impl<'a> Decoder<'a> { } fn decode_checked(&mut self, n: Option) -> Option<&'a [u8]> { - let Some(len) = n else { return None }; - if let Ok(l) = usize::try_from(len) { + if let Ok(l) = usize::try_from(n?) 
{ self.decode(l) } else { // sizeof(usize) < sizeof(u64) and the value is greater than @@ -204,11 +203,11 @@ impl Encoder { #[must_use] pub const fn varint_len(v: u64) -> usize { match () { - _ if v < (1 << 6) => 1, - _ if v < (1 << 14) => 2, - _ if v < (1 << 30) => 4, - _ if v < (1 << 62) => 8, - _ => panic!("Varint value too large"), + () if v < (1 << 6) => 1, + () if v < (1 << 14) => 2, + () if v < (1 << 30) => 4, + () if v < (1 << 62) => 8, + () => panic!("Varint value too large"), } } @@ -310,11 +309,11 @@ impl Encoder { pub fn encode_varint>(&mut self, v: T) -> &mut Self { let v = v.into(); match () { - _ if v < (1 << 6) => self.encode_uint(1, v), - _ if v < (1 << 14) => self.encode_uint(2, v | (1 << 14)), - _ if v < (1 << 30) => self.encode_uint(4, v | (2 << 30)), - _ if v < (1 << 62) => self.encode_uint(8, v | (3 << 62)), - _ => panic!("Varint value too large"), + () if v < (1 << 6) => self.encode_uint(1, v), + () if v < (1 << 14) => self.encode_uint(2, v | (1 << 14)), + () if v < (1 << 30) => self.encode_uint(4, v | (2 << 30)), + () if v < (1 << 62) => self.encode_uint(8, v | (3 << 62)), + () => panic!("Varint value too large"), }; self } @@ -379,11 +378,11 @@ impl Encoder { self.buf[start] = (v & 0xff) as u8; let (count, bits) = match () { // Great. The byte we have is enough. - _ if v < (1 << 6) => return self, - _ if v < (1 << 14) => (1, 1 << 6), - _ if v < (1 << 30) => (3, 2 << 22), - _ if v < (1 << 62) => (7, 3 << 54), - _ => panic!("Varint value too large"), + () if v < (1 << 6) => return self, + () if v < (1 << 14) => (1, 1 << 6), + () if v < (1 << 30) => (3, 2 << 22), + () if v < (1 << 62) => (7, 3 << 54), + () => panic!("Varint value too large"), }; // Now, we need to encode the high bits after the main block, ... 
self.encode_uint(count, (v >> 8) | bits); diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index 8bb84c856d..41cdf66469 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -118,7 +118,7 @@ impl RealAead { let mut l: c_uint = 0; unsafe { SSL_AeadEncrypt( - *self.ctx.deref(), + *self.ctx, count, aad.as_ptr(), c_uint::try_from(aad.len())?, @@ -150,7 +150,7 @@ impl RealAead { let mut l: c_uint = 0; unsafe { SSL_AeadDecrypt( - *self.ctx.deref(), + *self.ctx, count, aad.as_ptr(), c_uint::try_from(aad.len())?, diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index b3ddb0affc..d8db5c7a89 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -1037,12 +1037,12 @@ impl Server { let Ok(cert) = p11::Certificate::from_ptr(cert_ptr) else { return Err(Error::CertificateLoading); }; - let key_ptr = unsafe { p11::PK11_FindKeyByAnyCert(*cert.deref(), null_mut()) }; + let key_ptr = unsafe { p11::PK11_FindKeyByAnyCert(*cert, null_mut()) }; let Ok(key) = p11::PrivateKey::from_ptr(key_ptr) else { return Err(Error::CertificateLoading); }; secstatus_to_res(unsafe { - ssl::SSL_ConfigServerCert(agent.fd, *cert.deref(), *key.deref(), null(), 0) + ssl::SSL_ConfigServerCert(agent.fd, *cert, *key, null(), 0) })?; } diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index 0ee1301564..e4c1c707bb 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -743,7 +743,7 @@ mod tests { assert_closed(&mut hconn, &Error::HttpStreamCreation); } - //// Test reading of a slowly streamed frame. bytes are received one by one + /// Test reading of a slowly streamed frame. 
bytes are received one by one #[test] fn test_server_frame_reading() { let (mut hconn, mut peer_conn) = connect_and_receive_settings(); diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index 90d4b65709..211a41fc12 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -450,7 +450,7 @@ impl QPackEncoder { if !ref_entries.is_empty() { self.unacked_header_blocks .entry(stream_id) - .or_insert_with(VecDeque::new) + .or_default() .push_front(ref_entries); self.stats.dynamic_table_references += 1; } diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index 036a3adff9..ba73a54e36 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -1140,7 +1140,7 @@ fn connect_w_different_limit(bidi_limit: u64, unidi_limit: u64) { unidi_events += 1; } } - ConnectionEvent::StateChange(state) if state == State::Connected => { + ConnectionEvent::StateChange(State::Connected) => { connected_events += 1; } _ => {} From ac11aeb6cb23628f9633fab99fd381c8ea6b114e Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 13 Oct 2023 03:18:40 +0200 Subject: [PATCH 010/321] Preferred address types (#1458) * Tighten types for preferred addresses * Constrain the types for preferred addresses This won't help with the crash we're seeing in bug 1847415, but it should at least keep things tidy. 
* Clippy --- neqo-http3/src/send_message.rs | 2 +- neqo-server/src/main.rs | 12 ++ neqo-transport/src/connection/mod.rs | 11 +- neqo-transport/src/connection/params.rs | 2 +- .../src/connection/tests/migration.rs | 37 ++--- neqo-transport/src/tparams.rs | 142 ++++++------------ 6 files changed, 85 insertions(+), 121 deletions(-) diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index aaf2e224d2..531f804937 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -310,7 +310,7 @@ impl SendStream for SendMessage { data_frame.encode(&mut enc); self.stream.buffer(enc.as_ref()); self.stream.buffer(buf); - let _ = self.stream.send_buffer(conn)?; + _ = self.stream.send_buffer(conn)?; Ok(()) } } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 6ab0263f64..574d1f8ae1 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -284,6 +284,18 @@ impl QuicParameters { if v4.is_none() && v6.is_none() { None } else { + let v4 = v4.map(|v4| { + let SocketAddr::V4(v4) = v4 else { + unreachable!(); + }; + v4 + }); + let v6 = v6.map(|v6| { + let SocketAddr::V6(v6) = v6 else { + unreachable!(); + }; + v6 + }); Some(PreferredAddress::new(v4, v6)) } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index c04591932a..a9fd93c784 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -12,11 +12,6 @@ use crate::{ ConnectionId, ConnectionIdEntry, ConnectionIdGenerator, ConnectionIdManager, ConnectionIdRef, ConnectionIdStore, LOCAL_ACTIVE_CID_LIMIT, }, -}; - -use crate::recv_stream::RecvStreamStats; -pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; -use crate::{ crypto::{Crypto, CryptoDxState, CryptoSpace}, dump::*, events::{ConnectionEvent, ConnectionEvents, OutgoingDatagramOutcome}, @@ -29,6 +24,7 @@ use crate::{ qlog, quic_datagrams::{DatagramTracking, QuicDatagrams}, 
recovery::{LossRecovery, RecoveryToken, SendProfile}, + recv_stream::RecvStreamStats, rtt::GRANULARITY, stats::{Stats, StatsCell}, stream_id::StreamType, @@ -70,6 +66,7 @@ mod state; #[cfg(test)] pub mod test_internal; +pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; pub use params::{ConnectionParameters, ACK_RATIO_SCALE}; pub use state::{ClosingFrame, State}; @@ -1705,8 +1702,8 @@ impl Connection { // be needed to work out how to get addresses from a different family. let prev = self.paths.primary().borrow().remote_address(); let remote = match prev.ip() { - IpAddr::V4(_) => addr.ipv4(), - IpAddr::V6(_) => addr.ipv6(), + IpAddr::V4(_) => addr.ipv4().map(SocketAddr::V4), + IpAddr::V6(_) => addr.ipv6().map(SocketAddr::V6), }; if let Some(remote) = remote { diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index e6617b5adc..9d0db0f45d 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -30,7 +30,7 @@ const DEFAULT_IDLE_TIMEOUT: Duration = Duration::from_secs(30); const MAX_QUEUED_DATAGRAMS_DEFAULT: usize = 10; /// What to do with preferred addresses. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub enum PreferredAddressConfig { /// Disabled, whether for client or server. Disabled, diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 9e6a2ba90b..dd35412ec2 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -4,26 +4,28 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{Connection, Output, State, StreamType}; use super::{ + super::{Connection, Output, State, StreamType}, connect_fail, connect_force_idle, connect_rtt_idle, default_client, default_server, maybe_authenticate, new_client, new_server, send_something, CountingConnectionIdGenerator, }; -use crate::cid::LOCAL_ACTIVE_CID_LIMIT; -use crate::frame::FRAME_TYPE_NEW_CONNECTION_ID; -use crate::packet::PacketBuilder; -use crate::path::{PATH_MTU_V4, PATH_MTU_V6}; -use crate::tparams::{self, PreferredAddress, TransportParameter}; use crate::{ + cid::LOCAL_ACTIVE_CID_LIMIT, + frame::FRAME_TYPE_NEW_CONNECTION_ID, + packet::PacketBuilder, + path::{PATH_MTU_V4, PATH_MTU_V6}, + tparams::{self, PreferredAddress, TransportParameter}, ConnectionError, ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, EmptyConnectionIdGenerator, Error, }; use neqo_common::{Datagram, Decoder}; -use std::cell::RefCell; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::rc::Rc; -use std::time::{Duration, Instant}; +use std::{ + cell::RefCell, + net::{IpAddr, Ipv6Addr, SocketAddr}, + rc::Rc, + time::{Duration, Instant}, +}; use test_fixture::{ self, addr, addr_v4, assertions::{assert_v4_path, assert_v6_path}, @@ -504,10 +506,9 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So now(), ) .unwrap(); - let spa = if preferred.ip().is_ipv6() { - PreferredAddress::new(None, Some(preferred)) - } else { - PreferredAddress::new(Some(preferred), None) + let spa = match preferred { + SocketAddr::V6(v6) => PreferredAddress::new(None, Some(v6)), + SocketAddr::V4(v4) => PreferredAddress::new(Some(v4), None), }; let mut server = new_server(ConnectionParameters::default().preferred_address(spa)); @@ -605,13 +606,13 @@ fn preferred_address_ignored(spa: PreferredAddress) { /// Using a loopback address in the preferred address is ignored. 
#[test] fn preferred_address_ignore_loopback() { - preferred_address_ignored(PreferredAddress::new(None, Some(loopback()))); + preferred_address_ignored(PreferredAddress::new_any(None, Some(loopback()))); } /// A preferred address in the wrong address family is ignored. #[test] fn preferred_address_ignore_different_family() { - preferred_address_ignored(PreferredAddress::new(Some(addr_v4()), None)); + preferred_address_ignored(PreferredAddress::new_any(Some(addr_v4()), None)); } /// Disabling preferred addresses at the client means that it ignores a perfectly @@ -621,7 +622,7 @@ fn preferred_address_disabled_client() { let mut client = new_client(ConnectionParameters::default().disable_preferred_address()); let mut preferred = addr(); preferred.set_ip(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2))); - let spa = PreferredAddress::new(None, Some(preferred)); + let spa = PreferredAddress::new_any(None, Some(preferred)); let mut server = new_server(ConnectionParameters::default().preferred_address(spa)); expect_no_migration(&mut client, &mut server); @@ -631,7 +632,7 @@ fn preferred_address_disabled_client() { fn preferred_address_empty_cid() { fixture_init(); - let spa = PreferredAddress::new(None, Some(new_port(addr()))); + let spa = PreferredAddress::new_any(None, Some(new_port(addr()))); let res = Connection::new_server( test_fixture::DEFAULT_KEYS, test_fixture::DEFAULT_ALPN, diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index e9a25fd52f..00fe127660 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -23,7 +23,7 @@ use std::{ cell::RefCell, collections::HashMap, convert::TryFrom, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, rc::Rc, }; @@ -61,10 +61,10 @@ tpids! 
{ VERSION_NEGOTIATION = 0xff73db, } -#[derive(Clone, Debug, Copy)] +#[derive(Clone, Debug)] pub struct PreferredAddress { - v4: Option, - v6: Option, + v4: Option, + v6: Option, } impl PreferredAddress { @@ -73,33 +73,46 @@ impl PreferredAddress { /// # Panics /// If neither address is provided, or if either address is of the wrong type. #[must_use] - pub fn new(v4: Option, v6: Option) -> Self { + pub fn new(v4: Option, v6: Option) -> Self { assert!(v4.is_some() || v6.is_some()); if let Some(a) = v4 { - if let IpAddr::V4(addr) = a.ip() { - assert!(!addr.is_unspecified()); - } else { - panic!("invalid address type for v4 address"); - } + assert!(!a.ip().is_unspecified()); assert_ne!(a.port(), 0); } if let Some(a) = v6 { - if let IpAddr::V6(addr) = a.ip() { - assert!(!addr.is_unspecified()); - } else { - panic!("invalid address type for v6 address"); - } + assert!(!a.ip().is_unspecified()); assert_ne!(a.port(), 0); } Self { v4, v6 } } + /// A generic version of `new()` for testing. #[must_use] - pub fn ipv4(&self) -> Option { + #[cfg(test)] + pub fn new_any(v4: Option, v6: Option) -> Self { + use std::net::SocketAddr; + + let v4 = v4.map(|v4| { + let SocketAddr::V4(v4) = v4 else { + panic!("not v4"); + }; + v4 + }); + let v6 = v6.map(|v6| { + let SocketAddr::V6(v6) = v6 else { + panic!("not v6"); + }; + v6 + }); + Self::new(v4, v6) + } + + #[must_use] + pub fn ipv4(&self) -> Option { self.v4 } #[must_use] - pub fn ipv6(&self) -> Option { + pub fn ipv6(&self) -> Option { self.v6 } } @@ -110,8 +123,8 @@ pub enum TransportParameter { Integer(u64), Empty, PreferredAddress { - v4: Option, - v6: Option, + v4: Option, + v6: Option, cid: ConnectionId, srt: [u8; 16], }, @@ -140,23 +153,13 @@ impl TransportParameter { Self::PreferredAddress { v4, v6, cid, srt } => { enc.encode_vvec_with(|enc_inner| { if let Some(v4) = v4 { - debug_assert!(v4.is_ipv4()); - if let IpAddr::V4(a) = v4.ip() { - enc_inner.encode(&a.octets()[..]); - } else { - unreachable!(); - } + 
enc_inner.encode(&v4.ip().octets()[..]); enc_inner.encode_uint(2, v4.port()); } else { enc_inner.encode(&[0; 6]); } if let Some(v6) = v6 { - debug_assert!(v6.is_ipv6()); - if let IpAddr::V6(a) = v6.ip() { - enc_inner.encode(&a.octets()[..]); - } else { - unreachable!(); - } + enc_inner.encode(&v6.ip().octets()[..]); enc_inner.encode_uint(2, v6.port()); } else { enc_inner.encode(&[0; 18]); @@ -188,7 +191,7 @@ impl TransportParameter { let v4 = if v4port == 0 { None } else { - Some(SocketAddr::new(IpAddr::V4(v4ip), v4port)) + Some(SocketAddrV4::new(v4ip, v4port)) }; // IPv6 address (mostly the same as v4) @@ -201,7 +204,7 @@ impl TransportParameter { let v6 = if v6port == 0 { None } else { - Some(SocketAddr::new(IpAddr::V6(v6ip), v6port)) + Some(SocketAddrV6::new(v6ip, v6port, 0, 0)) }; // Need either v4 or v6 to be present. if v4.is_none() && v6.is_none() { @@ -790,13 +793,12 @@ mod tests { fn make_spa() -> TransportParameter { TransportParameter::PreferredAddress { - v4: Some(SocketAddr::new( - IpAddr::V4(Ipv4Addr::from(0xc000_0201)), - 443, - )), - v6: Some(SocketAddr::new( - IpAddr::V6(Ipv6Addr::from(0xfe80_0000_0000_0000_0000_0000_0000_0001)), + v4: Some(SocketAddrV4::new(Ipv4Addr::from(0xc000_0201), 443)), + v6: Some(SocketAddrV6::new( + Ipv6Addr::from(0xfe80_0000_0000_0000_0000_0000_0000_0001), 443, + 0, + 0, )), cid: ConnectionId::from(&[1, 2, 3, 4, 5]), srt: [3; 16], @@ -824,7 +826,7 @@ mod tests { fn mutate_spa(wrecker: F) -> TransportParameter where - F: FnOnce(&mut Option, &mut Option, &mut ConnectionId), + F: FnOnce(&mut Option, &mut Option, &mut ConnectionId), { let mut spa = make_spa(); if let TransportParameter::PreferredAddress { @@ -874,10 +876,10 @@ mod tests { })); // Either IP being zero is bad. 
assert_invalid_spa(mutate_spa(|v4, _, _| { - v4.as_mut().unwrap().set_ip(IpAddr::V4(Ipv4Addr::from(0))); + v4.as_mut().unwrap().set_ip(Ipv4Addr::from(0)); })); assert_invalid_spa(mutate_spa(|_, v6, _| { - v6.as_mut().unwrap().set_ip(IpAddr::V6(Ipv6Addr::from(0))); + v6.as_mut().unwrap().set_ip(Ipv6Addr::from(0)); })); // Either address being absent is OK. assert_valid_spa(mutate_spa(|v4, _, _| { @@ -915,24 +917,6 @@ mod tests { ); } - #[test] - #[should_panic] - fn preferred_address_wrong_family_v4() { - mutate_spa(|v4, _, _| { - v4.as_mut().unwrap().set_ip(IpAddr::V6(Ipv6Addr::from(0))); - }) - .encode(&mut Encoder::new(), PREFERRED_ADDRESS); - } - - #[test] - #[should_panic] - fn preferred_address_wrong_family_v6() { - mutate_spa(|_, v6, _| { - v6.as_mut().unwrap().set_ip(IpAddr::V4(Ipv4Addr::from(0))); - }) - .encode(&mut Encoder::new(), PREFERRED_ADDRESS); - } - #[test] #[should_panic] fn preferred_address_neither() { @@ -942,17 +926,14 @@ mod tests { #[test] #[should_panic] fn preferred_address_v4_unspecified() { - _ = PreferredAddress::new( - Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), 443)), - None, - ); + _ = PreferredAddress::new(Some(SocketAddrV4::new(Ipv4Addr::from(0), 443)), None); } #[test] #[should_panic] fn preferred_address_v4_zero_port() { _ = PreferredAddress::new( - Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0xc000_0201)), 0)), + Some(SocketAddrV4::new(Ipv4Addr::from(0xc000_0201), 0)), None, ); } @@ -960,40 +941,13 @@ mod tests { #[test] #[should_panic] fn preferred_address_v6_unspecified() { - _ = PreferredAddress::new( - None, - Some(SocketAddr::new(IpAddr::V6(Ipv6Addr::from(0)), 443)), - ); + _ = PreferredAddress::new(None, Some(SocketAddrV6::new(Ipv6Addr::from(0), 443, 0, 0))); } #[test] #[should_panic] fn preferred_address_v6_zero_port() { - _ = PreferredAddress::new( - None, - Some(SocketAddr::new(IpAddr::V6(Ipv6Addr::from(1)), 0)), - ); - } - - #[test] - #[should_panic] - fn preferred_address_v4_is_v6() { - _ = 
PreferredAddress::new( - Some(SocketAddr::new(IpAddr::V6(Ipv6Addr::from(1)), 443)), - None, - ); - } - - #[test] - #[should_panic] - fn preferred_address_v6_is_v4() { - _ = PreferredAddress::new( - None, - Some(SocketAddr::new( - IpAddr::V4(Ipv4Addr::from(0xc000_0201)), - 443, - )), - ); + _ = PreferredAddress::new(None, Some(SocketAddrV6::new(Ipv6Addr::from(1), 0, 0, 0))); } #[test] From 892a84a80a43bfe5a2a77c1eb1342e02bf21d17a Mon Sep 17 00:00:00 2001 From: Lucas Pardue Date: Fri, 13 Oct 2023 02:18:59 +0100 Subject: [PATCH 011/321] Bump qlog to 0.9.0 (#1454) * Bump qlog to 0.9.0 * Merge main * Use 'as' here and suppress the warning * clippay --------- Co-authored-by: Martin Thomson --- neqo-client/Cargo.toml | 2 +- neqo-client/src/main.rs | 5 +- neqo-common/Cargo.toml | 2 +- neqo-common/src/qlog.rs | 51 +-- neqo-http3/Cargo.toml | 2 +- neqo-http3/src/qlog.rs | 45 ++- neqo-qpack/Cargo.toml | 2 +- neqo-qpack/src/qlog.rs | 24 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- neqo-transport/src/cc/classic_cc.rs | 14 +- neqo-transport/src/qlog.rs | 521 ++++++++++++++++------------ neqo-transport/src/server.rs | 4 +- 13 files changed, 387 insertions(+), 289 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 4f8c959439..a7c6724e69 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -16,7 +16,7 @@ neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } structopt = "0.3.7" url = "2.0" -qlog = "0.4.0" +qlog = "0.9.0" [features] default = ["deny-warnings"] diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index e68000c2f8..38369653da 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -7,7 +7,7 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use qlog::QlogStreamer; +use qlog::{events::EventImportance, streamer::QlogStreamer}; use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; use 
neqo_crypto::{ @@ -698,7 +698,7 @@ fn client( fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { if let Some(qlog_dir) = &args.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); - let filename = format!("{}-{}.qlog", hostname, cid); + let filename = format!("{}-{}.sqlog", hostname, cid); qlog_path.push(filename); let f = OpenOptions::new() @@ -714,6 +714,7 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { None, std::time::Instant::now(), common::qlog::new_trace(Role::Client), + EventImportance::Base, Box::new(f), ); diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index c7d9c150ff..9ca5c32ac1 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -11,7 +11,7 @@ build = "build.rs" log = {version = "0.4.0", default-features = false} env_logger = {version = "0.10", default-features = false} lazy_static = "1.3.0" -qlog = "0.4.0" +qlog = "0.9.0" time = {version = "=0.3.23", features = ["formatting"]} [features] diff --git a/neqo-common/src/qlog.rs b/neqo-common/src/qlog.rs index 29445cce9d..ac03ecfcb0 100644 --- a/neqo-common/src/qlog.rs +++ b/neqo-common/src/qlog.rs @@ -4,13 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cell::RefCell; -use std::fmt; -use std::path::{Path, PathBuf}; -use std::rc::Rc; +use std::{ + cell::RefCell, + fmt, + path::{Path, PathBuf}, + rc::Rc, +}; use qlog::{ - self, CommonFields, Configuration, QlogStreamer, TimeUnits, Trace, VantagePoint, + self, streamer::QlogStreamer, CommonFields, Configuration, TraceSeq, VantagePoint, VantagePointType, }; @@ -55,7 +57,7 @@ impl NeqoQlog { /// If logging enabled, closure may generate an event to be logged. pub fn add_event(&mut self, f: F) where - F: FnOnce() -> Option, + F: FnOnce() -> Option, { self.add_event_with_stream(|s| { if let Some(evt) = f() { @@ -65,6 +67,19 @@ impl NeqoQlog { }); } + /// If logging enabled, closure may generate an event to be logged. 
+ pub fn add_event_data(&mut self, f: F) + where + F: FnOnce() -> Option, + { + self.add_event_with_stream(|s| { + if let Some(ev_data) = f() { + s.add_event_data_now(ev_data)?; + } + Ok(()) + }); + } + /// If logging enabled, closure is given the Qlog stream to write events and /// frames to. pub fn add_event_with_stream(&mut self, f: F) @@ -99,8 +114,8 @@ impl Drop for NeqoQlogShared { } #[must_use] -pub fn new_trace(role: Role) -> qlog::Trace { - Trace { +pub fn new_trace(role: Role) -> qlog::TraceSeq { + TraceSeq { vantage_point: VantagePoint { name: Some(format!("neqo-{role}")), ty: match role { @@ -112,26 +127,20 @@ pub fn new_trace(role: Role) -> qlog::Trace { title: Some(format!("neqo-{role} trace")), description: Some("Example qlog trace description".to_string()), configuration: Some(Configuration { - time_offset: Some("0".into()), - time_units: Some(TimeUnits::Us), + time_offset: Some(0.0), original_uris: None, }), common_fields: Some(CommonFields { group_id: None, protocol_type: None, reference_time: { - let datetime = time::OffsetDateTime::now_utc(); - datetime - .format(&time::format_description::well_known::Rfc3339) - .ok() // This is expected to never fail. + // It is better to allow this than deal with a conversion from i64 to f64. + // We can't do the obvious two-step conversion with f64::from(i32::try_from(...)), + // because that overflows earlier than is ideal. This should be fine for a while. 
+ #[allow(clippy::cast_precision_loss)] + Some(time::OffsetDateTime::now_utc().unix_timestamp() as f64) }, + time_format: Some("relative".to_string()), }), - event_fields: vec![ - "relative_time".to_string(), - "category".to_string(), - "event".to_string(), - "data".to_string(), - ], - events: Vec::new(), } } diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 46b0835351..d72a7f1a12 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -13,7 +13,7 @@ neqo-transport = { path = "./../neqo-transport" } neqo-qpack = { path = "./../neqo-qpack" } log = {version = "0.4.0", default-features = false} smallvec = "1.0.0" -qlog = "0.4.0" +qlog = "0.9.0" sfv = "0.9.1" url = "2.0" lazy_static = "1.3.0" diff --git a/neqo-http3/src/qlog.rs b/neqo-http3/src/qlog.rs index c77f951739..84c13dad43 100644 --- a/neqo-http3/src/qlog.rs +++ b/neqo-http3/src/qlog.rs @@ -8,33 +8,40 @@ use std::convert::TryFrom; -use qlog::{self, event::Event, H3DataRecipient}; +use qlog::{ + self, + events::{DataRecipient, EventData}, +}; use neqo_common::qlog::NeqoQlog; use neqo_transport::StreamId; pub fn h3_data_moved_up(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) { - qlog.add_event(|| { - Some(Event::h3_data_moved( - stream_id.to_string(), - None, - Some(u64::try_from(amount).unwrap()), - Some(H3DataRecipient::Transport), - Some(H3DataRecipient::Application), - None, - )) + qlog.add_event_data(|| { + let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved { + stream_id: Some(stream_id.as_u64()), + offset: None, + length: Some(u64::try_from(amount).unwrap()), + from: Some(DataRecipient::Transport), + to: Some(DataRecipient::Application), + raw: None, + }); + + Some(ev_data) }); } pub fn h3_data_moved_down(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) { - qlog.add_event(|| { - Some(Event::h3_data_moved( - stream_id.to_string(), - None, - Some(u64::try_from(amount).unwrap()), - Some(H3DataRecipient::Application), - Some(H3DataRecipient::Transport), - 
None, - )) + qlog.add_event_data(|| { + let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved { + stream_id: Some(stream_id.as_u64()), + offset: None, + length: Some(u64::try_from(amount).unwrap()), + from: Some(DataRecipient::Application), + to: Some(DataRecipient::Transport), + raw: None, + }); + + Some(ev_data) }); } diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index e0ba853d3d..ca952e7042 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -12,7 +12,7 @@ neqo-transport = { path = "./../neqo-transport" } neqo-crypto = { path = "./../neqo-crypto" } log = {version = "0.4.0", default-features = false} static_assertions = "1.1.0" -qlog = "0.4.0" +qlog = "0.9.0" lazy_static = "1.3.0" [dev-dependencies] diff --git a/neqo-qpack/src/qlog.rs b/neqo-qpack/src/qlog.rs index 11f9dbc0b3..c6ae6b5d0f 100644 --- a/neqo-qpack/src/qlog.rs +++ b/neqo-qpack/src/qlog.rs @@ -8,21 +8,31 @@ use neqo_common::hex; use neqo_common::qlog::NeqoQlog; -use qlog::{event::Event, QPackInstruction, QpackInstructionTypeName}; +use qlog::events::{ + qpack::QpackInstructionTypeName, + qpack::{QPackInstruction, QpackInstructionParsed}, + EventData, RawInfo, +}; pub fn qpack_read_insert_count_increment_instruction( qlog: &mut NeqoQlog, increment: u64, data: &[u8], ) { - qlog.add_event(|| { - Some(Event::qpack_instruction_received( - QPackInstruction::InsertCountIncrementInstruction { + qlog.add_event_data(|| { + let raw = RawInfo { + length: Some(8), + payload_length: None, + data: Some(hex(data)), + }; + let ev_data = EventData::QpackInstructionParsed(QpackInstructionParsed { + instruction: QPackInstruction::InsertCountIncrementInstruction { instruction_type: QpackInstructionTypeName::InsertCountIncrementInstruction, increment, }, - Some(8.to_string()), - Some(hex(data)), - )) + raw: Some(raw), + }); + + Some(ev_data) }); } diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index cd79a436ed..e75f6c9291 100644 --- a/neqo-server/Cargo.toml +++ 
b/neqo-server/Cargo.toml @@ -17,7 +17,7 @@ regex = "1" mio = "0.6.17" mio-extras = "2.0.5" log = {version = "0.4.0", default-features = false} -qlog = "0.4.0" +qlog = "0.9.0" [features] default = ["deny-warnings"] diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 38a6404580..bb5d87cee3 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -12,7 +12,7 @@ neqo-common = { path = "../neqo-common" } lazy_static = "1.3.0" log = {version = "0.4.0", default-features = false} smallvec = "1.0.0" -qlog = "0.4.0" +qlog = "0.9.0" indexmap = "1.0" [dev-dependencies] diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index bdfafbec96..5867ffe6ec 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -17,6 +17,8 @@ use crate::cc::MAX_DATAGRAM_SIZE; use crate::qlog::{self, QlogMetric}; use crate::sender::PACING_BURST_SIZE; use crate::tracking::SentPacket; +use ::qlog::events::quic::CongestionStateUpdated; +use ::qlog::events::EventData; use neqo_common::{const_max, const_min, qdebug, qinfo, qlog::NeqoQlog, qtrace}; pub const CWND_INITIAL_PKTS: usize = 10; @@ -363,15 +365,17 @@ impl ClassicCongestionControl { if self.state != state { qdebug!([self], "state -> {:?}", state); let old_state = self.state; - self.qlog.add_event(|| { + self.qlog.add_event_data(|| { // No need to tell qlog about exit from transient states. 
if old_state.transient() { None } else { - Some(::qlog::event::Event::congestion_state_updated( - Some(old_state.to_qlog().to_owned()), - state.to_qlog().to_owned(), - )) + let ev_data = EventData::CongestionStateUpdated(CongestionStateUpdated { + old: Some(old_state.to_qlog().to_owned()), + new: state.to_qlog().to_owned(), + trigger: None, + }); + Some(ev_data) } }); self.state = state; diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index ad86ec2b2e..dce27732b0 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -6,73 +6,74 @@ // Functions that handle capturing QLOG traces. -use std::convert::TryFrom; -use std::ops::{Deref, RangeInclusive}; -use std::string::String; -use std::time::Duration; +use std::{ + convert::TryFrom, + ops::{Deref, RangeInclusive}, + string::String, + time::Duration, +}; -use qlog::{self, event::Event, PacketHeader, QuicFrame}; +use qlog::events::{ + connectivity::{ConnectionStarted, ConnectionState, ConnectionStateUpdated}, + quic::{ + AckedRanges, ErrorSpace, MetricsUpdated, PacketDropped, PacketHeader, PacketLost, + PacketReceived, PacketSent, QuicFrame, StreamType, + }, + Event, EventData, RawInfo, +}; use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder}; +use smallvec::SmallVec; -use crate::connection::State; -use crate::frame::{CloseError, Frame}; -use crate::packet::{DecryptedPacket, PacketNumber, PacketType, PublicPacket}; -use crate::path::PathRef; -use crate::stream_id::StreamType as NeqoStreamType; -use crate::tparams::{self, TransportParametersHandler}; -use crate::tracking::SentPacket; -use crate::Version; +use crate::{ + connection::State, + frame::{CloseError, Frame}, + packet::{DecryptedPacket, PacketNumber, PacketType, PublicPacket}, + path::PathRef, + stream_id::StreamType as NeqoStreamType, + tparams::{self, TransportParametersHandler}, + tracking::SentPacket, +}; pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHandler) { qlog.add_event(|| { let 
remote = tph.remote(); - Some(Event::transport_parameters_set( - None, - None, - None, - None, - None, - None, - remote + let ev_data = EventData::TransportParametersSet( + qlog::events::quic::TransportParametersSet { + owner: None, + resumption_allowed: None, + early_data_enabled: None, + tls_cipher: None, + aead_tag_length: None, + original_destination_connection_id: remote .get_bytes(tparams::ORIGINAL_DESTINATION_CONNECTION_ID) .map(hex), - remote.get_bytes(tparams::STATELESS_RESET_TOKEN).map(hex), - if remote.get_empty(tparams::DISABLE_MIGRATION) { - Some(true) - } else { - None - }, - Some(remote.get_integer(tparams::IDLE_TIMEOUT)), - Some(remote.get_integer(tparams::MAX_UDP_PAYLOAD_SIZE)), - Some(remote.get_integer(tparams::ACK_DELAY_EXPONENT)), - Some(remote.get_integer(tparams::MAX_ACK_DELAY)), - // TODO(hawkinsw@obs.cr): We do not yet handle ACTIVE_CONNECTION_ID_LIMIT in tparams yet. - None, - Some(format!("{}", remote.get_integer(tparams::INITIAL_MAX_DATA))), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_LOCAL) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_UNI) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAMS_BIDI) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAMS_UNI) - )), - // TODO(hawkinsw@obs.cr): We do not yet handle PREFERRED_ADDRESS in tparams yet. 
- None, - )) + initial_source_connection_id: None, + retry_source_connection_id: None, + stateless_reset_token: remote.get_bytes(tparams::STATELESS_RESET_TOKEN).map(hex), + disable_active_migration: if remote.get_empty(tparams::DISABLE_MIGRATION) { + Some(true) + } else { + None + }, + max_idle_timeout: Some(remote.get_integer(tparams::IDLE_TIMEOUT)), + max_udp_payload_size: Some(remote.get_integer(tparams::MAX_UDP_PAYLOAD_SIZE) as u32), + ack_delay_exponent: Some(remote.get_integer(tparams::ACK_DELAY_EXPONENT) as u16), + max_ack_delay: Some(remote.get_integer(tparams::MAX_ACK_DELAY) as u16), + // TODO(hawkinsw@obs.cr): We do not yet handle ACTIVE_CONNECTION_ID_LIMIT in tparams yet. + active_connection_id_limit: None, + initial_max_data: Some(remote.get_integer(tparams::INITIAL_MAX_DATA)), + initial_max_stream_data_bidi_local: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_LOCAL)), + initial_max_stream_data_bidi_remote: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE)), + initial_max_stream_data_uni: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_UNI)), + initial_max_streams_bidi: Some(remote.get_integer(tparams::INITIAL_MAX_STREAMS_BIDI)), + initial_max_streams_uni: Some(remote.get_integer(tparams::INITIAL_MAX_STREAMS_UNI)), + // TODO(hawkinsw@obs.cr): We do not yet handle PREFERRED_ADDRESS in tparams yet. + preferred_address: None, + }); + + // This event occurs very early, so just mark the time as 0.0. 
+ Some(Event::with_time(0.0, ev_data)) }) } @@ -85,38 +86,44 @@ pub fn client_connection_started(qlog: &mut NeqoQlog, path: &PathRef) { } fn connection_started(qlog: &mut NeqoQlog, path: &PathRef) { - qlog.add_event(|| { + qlog.add_event_data(|| { let p = path.deref().borrow(); - Some(Event::connection_started( - if p.local_address().ip().is_ipv4() { - "ipv4".into() + let ev_data = EventData::ConnectionStarted(ConnectionStarted { + ip_version: if p.local_address().ip().is_ipv4() { + Some("ipv4".into()) } else { - "ipv6".into() + Some("ipv6".into()) }, - format!("{}", p.local_address().ip()), - format!("{}", p.remote_address().ip()), - Some("QUIC".into()), - p.local_address().port().into(), - p.remote_address().port().into(), - Some(format!("{:x}", Version::default().wire_version())), - Some(format!("{}", p.local_cid())), - Some(format!("{}", p.remote_cid())), - )) + src_ip: format!("{}", p.local_address().ip()), + dst_ip: format!("{}", p.remote_address().ip()), + protocol: Some("QUIC".into()), + src_port: p.local_address().port().into(), + dst_port: p.remote_address().port().into(), + src_cid: Some(format!("{}", p.local_cid())), + dst_cid: Some(format!("{}", p.remote_cid())), + }); + + Some(ev_data) }) } pub fn connection_state_updated(qlog: &mut NeqoQlog, new: &State) { - qlog.add_event(|| { - Some(Event::connection_state_updated_min(match new { - State::Init => qlog::ConnectionState::Attempted, - State::WaitInitial => qlog::ConnectionState::Attempted, - State::WaitVersion | State::Handshaking => qlog::ConnectionState::Handshake, - State::Connected => qlog::ConnectionState::Active, - State::Confirmed => qlog::ConnectionState::Active, - State::Closing { .. } => qlog::ConnectionState::Draining, - State::Draining { .. } => qlog::ConnectionState::Draining, - State::Closed { .. 
} => qlog::ConnectionState::Closed, - })) + qlog.add_event_data(|| { + let ev_data = EventData::ConnectionStateUpdated(ConnectionStateUpdated { + old: None, + new: match new { + State::Init => ConnectionState::Attempted, + State::WaitInitial => ConnectionState::Attempted, + State::WaitVersion | State::Handshaking => ConnectionState::HandshakeStarted, + State::Connected => ConnectionState::HandshakeCompleted, + State::Confirmed => ConnectionState::HandshakeConfirmed, + State::Closing { .. } => ConnectionState::Draining, + State::Draining { .. } => ConnectionState::Draining, + State::Closed { .. } => ConnectionState::Closed, + }, + }); + + Some(ev_data) }) } @@ -129,24 +136,18 @@ pub fn packet_sent( ) { qlog.add_event_with_stream(|stream| { let mut d = Decoder::from(body); + let header = PacketHeader::with_type(to_qlog_pkt_type(pt), pn, None, None, None); + let raw = RawInfo { + length: None, + payload_length: Some(plen as u64), + data: None, + }; - stream.add_event(Event::packet_sent_min( - to_qlog_pkt_type(pt), - PacketHeader::new( - pn, - Some(u64::try_from(plen).unwrap()), - None, - None, - None, - None, - ), - Some(Vec::new()), - ))?; - + let mut frames = SmallVec::new(); while d.remaining() > 0 { match Frame::decode(&mut d) { Ok(f) => { - stream.add_frame(frame_to_qlogframe(&f), false)?; + frames.push(frame_to_qlogframe(&f)); } Err(_) => { qinfo!("qlog: invalid frame"); @@ -155,30 +156,59 @@ pub fn packet_sent( } } - stream.finish_frames() + let ev_data = EventData::PacketSent(PacketSent { + header, + frames: Some(frames), + is_coalesced: None, + retry_token: None, + stateless_reset_token: None, + supported_versions: None, + raw: Some(raw), + datagram_id: None, + send_at_time: None, + trigger: None, + }); + + stream.add_event_data_now(ev_data) }) } pub fn packet_dropped(qlog: &mut NeqoQlog, payload: &PublicPacket) { - qlog.add_event(|| { - Some(Event::packet_dropped( - Some(to_qlog_pkt_type(payload.packet_type())), - Some(u64::try_from(payload.len()).unwrap()), 
- None, - )) + qlog.add_event_data(|| { + // TODO: packet number is optional in the spec but qlog crate doesn't support that, so use a placeholder value of 0 + let header = + PacketHeader::with_type(to_qlog_pkt_type(payload.packet_type()), 0, None, None, None); + let raw = RawInfo { + length: None, + payload_length: Some(payload.len() as u64), + data: None, + }; + + let ev_data = EventData::PacketDropped(PacketDropped { + header: Some(header), + raw: Some(raw), + datagram_id: None, + details: None, + trigger: None, + }); + + Some(ev_data) }) } pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { qlog.add_event_with_stream(|stream| { for pkt in pkts { - stream.add_event(Event::packet_lost_min( - to_qlog_pkt_type(pkt.pt), - pkt.pn.to_string(), - Vec::new(), - ))?; + let header = + PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), pkt.pn, None, None, None); - stream.finish_frames()?; + let ev_data = EventData::PacketLost(PacketLost { + header: Some(header), + trigger: None, + frames: None, + }); + + stream.add_event_data_now(ev_data)?; } Ok(()) }) @@ -192,25 +222,24 @@ pub fn packet_received( qlog.add_event_with_stream(|stream| { let mut d = Decoder::from(&payload[..]); - stream.add_event(Event::packet_received( - to_qlog_pkt_type(payload.packet_type()), - PacketHeader::new( - payload.pn(), - Some(u64::try_from(public_packet.len()).unwrap()), - None, - None, - None, - None, - ), - Some(Vec::new()), + let header = PacketHeader::with_type( + to_qlog_pkt_type(public_packet.packet_type()), + payload.pn(), None, None, None, - ))?; + ); + let raw = RawInfo { + length: None, + payload_length: Some(public_packet.len() as u64), + data: None, + }; + + let mut frames = Vec::new(); while d.remaining() > 0 { match Frame::decode(&mut d) { - Ok(f) => stream.add_frame(frame_to_qlogframe(&f), false)?, + Ok(f) => frames.push(frame_to_qlogframe(&f)), Err(_) => { qinfo!("qlog: invalid frame"); break; @@ -218,7 +247,19 @@ pub fn packet_received( } } - stream.finish_frames() + 
let ev_data = EventData::PacketReceived(PacketReceived { + header, + frames: Some(frames), + is_coalesced: None, + retry_token: None, + stateless_reset_token: None, + supported_versions: None, + raw: Some(raw), + datagram_id: None, + trigger: None, + }); + + stream.add_event_data_now(ev_data) }) } @@ -241,57 +282,50 @@ pub enum QlogMetric { pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { debug_assert!(!updated_metrics.is_empty()); - qlog.add_event(|| { - let mut min_rtt: Option = None; - let mut smoothed_rtt: Option = None; - let mut latest_rtt: Option = None; - let mut rtt_variance: Option = None; - let mut max_ack_delay: Option = None; - let mut pto_count: Option = None; + qlog.add_event_data(|| { + let mut min_rtt: Option = None; + let mut smoothed_rtt: Option = None; + let mut latest_rtt: Option = None; + let mut rtt_variance: Option = None; + let mut pto_count: Option = None; let mut congestion_window: Option = None; let mut bytes_in_flight: Option = None; let mut ssthresh: Option = None; let mut packets_in_flight: Option = None; - let mut in_recovery: Option = None; let mut pacing_rate: Option = None; for metric in updated_metrics { match metric { - QlogMetric::MinRtt(v) => min_rtt = Some(u64::try_from(v.as_millis()).unwrap()), - QlogMetric::SmoothedRtt(v) => { - smoothed_rtt = Some(u64::try_from(v.as_millis()).unwrap()) - } - QlogMetric::LatestRtt(v) => { - latest_rtt = Some(u64::try_from(v.as_millis()).unwrap()) - } - QlogMetric::RttVariance(v) => rtt_variance = Some(*v), - QlogMetric::MaxAckDelay(v) => max_ack_delay = Some(*v), - QlogMetric::PtoCount(v) => pto_count = Some(u64::try_from(*v).unwrap()), + QlogMetric::MinRtt(v) => min_rtt = Some(v.as_secs_f32() * 1000.0), + QlogMetric::SmoothedRtt(v) => smoothed_rtt = Some(v.as_secs_f32() * 1000.0), + QlogMetric::LatestRtt(v) => latest_rtt = Some(v.as_secs_f32() * 1000.0), + QlogMetric::RttVariance(v) => rtt_variance = Some(*v as f32), + QlogMetric::PtoCount(v) => pto_count = 
Some(u16::try_from(*v).unwrap()), QlogMetric::CongestionWindow(v) => { congestion_window = Some(u64::try_from(*v).unwrap()) } QlogMetric::BytesInFlight(v) => bytes_in_flight = Some(u64::try_from(*v).unwrap()), QlogMetric::SsThresh(v) => ssthresh = Some(u64::try_from(*v).unwrap()), QlogMetric::PacketsInFlight(v) => packets_in_flight = Some(*v), - QlogMetric::InRecovery(v) => in_recovery = Some(*v), QlogMetric::PacingRate(v) => pacing_rate = Some(*v), + _ => (), } } - Some(Event::metrics_updated( + let ev_data = EventData::MetricsUpdated(MetricsUpdated { min_rtt, smoothed_rtt, latest_rtt, rtt_variance, - max_ack_delay, pto_count, congestion_window, bytes_in_flight, ssthresh, packets_in_flight, - in_recovery, pacing_rate, - )) + }); + + Some(ev_data) }) } @@ -299,8 +333,8 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { fn frame_to_qlogframe(frame: &Frame) -> QuicFrame { match frame { - Frame::Padding => QuicFrame::padding(), - Frame::Ping => QuicFrame::ping(), + Frame::Padding => QuicFrame::Padding, + Frame::Ping => QuicFrame::Ping, Frame::Ack { largest_acknowledged, ack_delay, @@ -310,129 +344,160 @@ fn frame_to_qlogframe(frame: &Frame) -> QuicFrame { let ranges = Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges).ok(); - QuicFrame::ack( - Some(ack_delay.to_string()), - ranges.map(|all| { + let acked_ranges = ranges.map(|all| { + AckedRanges::Double( all.into_iter() .map(RangeInclusive::into_inner) - .collect::>() - }), - None, - None, - None, - ) + .collect::>(), + ) + }); + + QuicFrame::Ack { + ack_delay: Some(*ack_delay as f32 / 1000.0), + acked_ranges, + ect1: None, + ect0: None, + ce: None, + } } Frame::ResetStream { stream_id, application_error_code, final_size, - } => QuicFrame::reset_stream( - stream_id.as_u64().to_string(), - *application_error_code, - final_size.to_string(), - ), + } => QuicFrame::ResetStream { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, + final_size: 
*final_size, + }, Frame::StopSending { stream_id, application_error_code, - } => QuicFrame::stop_sending(stream_id.as_u64().to_string(), *application_error_code), - Frame::Crypto { offset, data } => { - QuicFrame::crypto(offset.to_string(), data.len().to_string()) - } - Frame::NewToken { token } => QuicFrame::new_token(token.len().to_string(), hex(token)), + } => QuicFrame::StopSending { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, + }, + Frame::Crypto { offset, data } => QuicFrame::Crypto { + offset: *offset, + length: data.len() as u64, + }, + Frame::NewToken { token } => QuicFrame::NewToken { + token: qlog::Token { + ty: Some(qlog::TokenType::Retry), + details: None, + raw: Some(RawInfo { + data: Some(hex(token)), + length: Some(token.len() as u64), + payload_length: None, + }), + }, + }, Frame::Stream { fin, stream_id, offset, data, .. - } => QuicFrame::stream( - stream_id.as_u64().to_string(), - offset.to_string(), - data.len().to_string(), - *fin, - None, - ), - Frame::MaxData { maximum_data } => QuicFrame::max_data(maximum_data.to_string()), + } => QuicFrame::Stream { + stream_id: stream_id.as_u64(), + offset: *offset, + length: data.len() as u64, + fin: Some(*fin), + raw: None, + }, + Frame::MaxData { maximum_data } => QuicFrame::MaxData { + maximum: *maximum_data, + }, Frame::MaxStreamData { stream_id, maximum_stream_data, - } => QuicFrame::max_stream_data( - stream_id.as_u64().to_string(), - maximum_stream_data.to_string(), - ), + } => QuicFrame::MaxStreamData { + stream_id: stream_id.as_u64(), + maximum: *maximum_stream_data, + }, Frame::MaxStreams { stream_type, maximum_streams, - } => QuicFrame::max_streams( - match stream_type { - NeqoStreamType::BiDi => qlog::StreamType::Bidirectional, - NeqoStreamType::UniDi => qlog::StreamType::Unidirectional, + } => QuicFrame::MaxStreams { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, }, - 
maximum_streams.to_string(), - ), - Frame::DataBlocked { data_limit } => QuicFrame::data_blocked(data_limit.to_string()), + maximum: *maximum_streams, + }, + Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit }, Frame::StreamDataBlocked { stream_id, stream_data_limit, - } => QuicFrame::stream_data_blocked( - stream_id.as_u64().to_string(), - stream_data_limit.to_string(), - ), + } => QuicFrame::StreamDataBlocked { + stream_id: stream_id.as_u64(), + limit: *stream_data_limit, + }, Frame::StreamsBlocked { stream_type, stream_limit, - } => QuicFrame::streams_blocked( - match stream_type { - NeqoStreamType::BiDi => qlog::StreamType::Bidirectional, - NeqoStreamType::UniDi => qlog::StreamType::Unidirectional, + } => QuicFrame::StreamsBlocked { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, }, - stream_limit.to_string(), - ), + limit: *stream_limit, + }, Frame::NewConnectionId { sequence_number, retire_prior, connection_id, stateless_reset_token, - } => QuicFrame::new_connection_id( - sequence_number.to_string(), - retire_prior.to_string(), - connection_id.len() as u64, - hex(connection_id), - hex(stateless_reset_token), - ), - Frame::RetireConnectionId { sequence_number } => { - QuicFrame::retire_connection_id(sequence_number.to_string()) - } - Frame::PathChallenge { data } => QuicFrame::path_challenge(Some(hex(data))), - Frame::PathResponse { data } => QuicFrame::path_response(Some(hex(data))), + } => QuicFrame::NewConnectionId { + sequence_number: *sequence_number as u32, + retire_prior_to: *retire_prior as u32, + connection_id_length: Some(connection_id.len() as u8), + connection_id: hex(connection_id), + stateless_reset_token: Some(hex(stateless_reset_token)), + }, + Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId { + sequence_number: *sequence_number as u32, + }, + Frame::PathChallenge { data } => 
QuicFrame::PathChallenge { + data: Some(hex(data)), + }, + Frame::PathResponse { data } => QuicFrame::PathResponse { + data: Some(hex(data)), + }, Frame::ConnectionClose { error_code, frame_type, reason_phrase, - } => QuicFrame::connection_close( - match error_code { - CloseError::Transport(_) => qlog::ErrorSpace::TransportError, - CloseError::Application(_) => qlog::ErrorSpace::ApplicationError, + } => QuicFrame::ConnectionClose { + error_space: match error_code { + CloseError::Transport(_) => Some(ErrorSpace::TransportError), + CloseError::Application(_) => Some(ErrorSpace::ApplicationError), }, - error_code.code(), - 0, - String::from_utf8_lossy(reason_phrase).to_string(), - Some(frame_type.to_string()), - ), - Frame::HandshakeDone => QuicFrame::handshake_done(), - Frame::AckFrequency { .. } => QuicFrame::unknown(frame.get_type()), - Frame::Datagram { .. } => QuicFrame::unknown(frame.get_type()), + error_code: Some(error_code.code()), + error_code_value: Some(0), + reason: Some(String::from_utf8_lossy(reason_phrase).to_string()), + trigger_frame_type: Some(*frame_type), + }, + Frame::HandshakeDone => QuicFrame::HandshakeDone, + Frame::AckFrequency { .. } => QuicFrame::Unknown { + frame_type_value: None, + raw_frame_type: frame.get_type(), + raw: None, + }, + Frame::Datagram { data, .. 
} => QuicFrame::Datagram { + length: data.len() as u64, + raw: None, + }, } } -fn to_qlog_pkt_type(ptype: PacketType) -> qlog::PacketType { +fn to_qlog_pkt_type(ptype: PacketType) -> qlog::events::quic::PacketType { match ptype { - PacketType::Initial => qlog::PacketType::Initial, - PacketType::Handshake => qlog::PacketType::Handshake, - PacketType::ZeroRtt => qlog::PacketType::ZeroRtt, - PacketType::Short => qlog::PacketType::OneRtt, - PacketType::Retry => qlog::PacketType::Retry, - PacketType::VersionNegotiation => qlog::PacketType::VersionNegotiation, - PacketType::OtherVersion => qlog::PacketType::Unknown, + PacketType::Initial => qlog::events::quic::PacketType::Initial, + PacketType::Handshake => qlog::events::quic::PacketType::Handshake, + PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt, + PacketType::Short => qlog::events::quic::PacketType::OneRtt, + PacketType::Retry => qlog::events::quic::PacketType::Retry, + PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation, + PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown, } } diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 6db5e3e8c7..75cc6d42d8 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -14,6 +14,7 @@ use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, ZeroRttChecker, }; +use qlog::streamer::QlogStreamer; pub use crate::addr_valid::ValidateAddress; use crate::addr_valid::{AddressValidation, AddressValidationResult}; @@ -400,13 +401,14 @@ impl Server { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); - let streamer = ::qlog::QlogStreamer::new( + let streamer = QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), + qlog::events::EventImportance::Base, Box::new(f), ); let n_qlog = 
NeqoQlog::enabled(streamer, qlog_path); From fd514d20b20ec7a70d712ffca7a8ead98a2b33ec Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Wed, 18 Oct 2023 22:03:16 +0200 Subject: [PATCH 012/321] Only warn on warnings and don't deny compiling during development (#1468) Denying in the source makes developing neqo a bit cumbersome. It also makes it harder to compile neqo with future versions of Rust. We do have a CI pipeline for clippy denying all warnings. That should be enough to keep all the warnings out. --- neqo-client/Cargo.toml | 1 - neqo-common/Cargo.toml | 1 - neqo-crypto/Cargo.toml | 1 - neqo-http3/Cargo.toml | 1 - neqo-interop/Cargo.toml | 1 - neqo-qpack/Cargo.toml | 1 - neqo-server/Cargo.toml | 1 - neqo-transport/Cargo.toml | 1 - test-fixture/Cargo.toml | 1 - 9 files changed, 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index a7c6724e69..4932658f41 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -19,5 +19,4 @@ url = "2.0" qlog = "0.9.0" [features] -default = ["deny-warnings"] deny-warnings = [] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 9ca5c32ac1..04d04b8caa 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -15,7 +15,6 @@ qlog = "0.9.0" time = {version = "=0.3.23", features = ["formatting"]} [features] -default = ["deny-warnings"] deny-warnings = [] [target."cfg(windows)".dependencies.winapi] diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index abccfad3ef..82f75bab1e 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -22,7 +22,6 @@ mozbuild = {version = "0.1", optional = true} test-fixture = { path = "../test-fixture" } [features] -default = ["deny-warnings"] deny-warnings = [] gecko = ["mozbuild"] fuzzing = [] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index d72a7f1a12..a6f6982d4c 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -23,6 +23,5 @@ enumset = "1.0.8" test-fixture = { path = 
"../test-fixture" } [features] -default = ["deny-warnings"] deny-warnings = [] fuzzing = ["neqo-transport/fuzzing", "neqo-crypto/fuzzing"] diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 2ab98089ec..e4fcd33199 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -17,5 +17,4 @@ structopt = "0.3.7" lazy_static = "1.3.0" [features] -default = ["deny-warnings"] deny-warnings = [] diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index ca952e7042..3af198faf1 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -19,5 +19,4 @@ lazy_static = "1.3.0" test-fixture = { path = "../test-fixture" } [features] -default = ["deny-warnings"] deny-warnings = [] diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index e75f6c9291..d25a814b4c 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -20,5 +20,4 @@ log = {version = "0.4.0", default-features = false} qlog = "0.9.0" [features] -default = ["deny-warnings"] deny-warnings = [] diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index bb5d87cee3..d4e66cb138 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -19,6 +19,5 @@ indexmap = "1.0" test-fixture = { path = "../test-fixture" } [features] -default = ["deny-warnings"] deny-warnings = [] fuzzing = ["neqo-crypto/fuzzing"] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 87b3b2a074..c2cb6a6410 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -16,5 +16,4 @@ log = {version = "0.4.0", default-features = false} lazy_static = "1.3.0" [features] -default = ["deny-warnings"] deny-warnings = [] From b1326c8856c728bb2bf9d7c0125fa3e47c3567c4 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Wed, 18 Oct 2023 22:05:37 +0200 Subject: [PATCH 013/321] Fix detection whether packet was sent after recovery start (#1465) * Fix detection whether packet was sent after recovery start Before this patch, the congestion window was reduced 
multiple times per round trip time (RTT) on multiple packet loss. Now it is capped to one per RTT. The recovery_start variable is only updated after transitioning from RECOVERY_START to RECOVERY when sending out packets after the recovery event. So if the state is RECOVERY_START (transient), all packets are from before the RECOVERY. Therefore we must only check the recovery_start time after transitioning to the RECOVERY state. If we haven't encountered a packet loss, the packets always count as after recovery_start. This is to fix the congestion algorithm to be [spec aligned][1]: > The recovery period aims to limit congestion window reduction to once > per round trip. Therefore, during a recovery period, the congestion > window does not change in response to new losses or increases in the > ECN-CE count. > > A recovery period ends and the sender enters congestion avoidance when > a packet sent during the recovery period is acknowledged. This is > slightly different from TCP's definition of recovery, which ends when > the lost segment that started recovery is acknowledged [RFC5681]. This was found when investigating [Bug 1852924][2], but doesn't fix the upload speed issue. [1]: https://www.rfc-editor.org/rfc/rfc9002.html#section-7.3.2 [2]: https://bugzilla.mozilla.org/show_bug.cgi?id=1852924 * Add test case for after_recovery_start fix --- neqo-transport/src/cc/classic_cc.rs | 11 ++-- neqo-transport/src/cc/tests/new_reno.rs | 82 ++++++++++++++++++++++++- 2 files changed, 88 insertions(+), 5 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 5867ffe6ec..639cfe9358 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -437,10 +437,13 @@ impl ClassicCongestionControl { #[must_use] fn after_recovery_start(&mut self, packet: &SentPacket) -> bool { - // At the start of the first recovery period, if the state is - // transient, all packets will have been sent before recovery. 
- self.recovery_start - .map_or(!self.state.transient(), |t| packet.time_sent >= t) + // At the start of the recovery period, the state is transient and + // all packets will have been sent before recovery. When sending out + // the first packet we transition to the non-transient `Recovery` + // state and update the variable `self.recovery_start`. Before the + // first recovery, all packets were sent after the recovery event, + // allowing to reduce the cwnd on congestion events. + !self.state.transient() && self.recovery_start.map_or(true, |t| packet.time_sent >= t) } /// Handle a congestion event. diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index 0b678ca55e..aacf3363cf 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -9,7 +9,7 @@ use crate::cc::new_reno::NewReno; use crate::cc::{ClassicCongestionControl, CongestionControl, CWND_INITIAL, MAX_DATAGRAM_SIZE}; -use crate::packet::PacketType; +use crate::packet::{PacketNumber, PacketType}; use crate::tracking::SentPacket; use std::time::Duration; use test_fixture::now; @@ -129,3 +129,83 @@ fn issue_876() { cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 4 * MAX_DATAGRAM_SIZE); } + +#[test] +// https://github.com/mozilla/neqo/pull/1465 +fn issue_1465() { + let mut cc = ClassicCongestionControl::new(NewReno::default()); + let mut pn = 0; + let mut now = now(); + let next_packet = |pn: &mut PacketNumber, now| { + let p = SentPacket::new( + PacketType::Short, + *pn, // pn + now, // time_sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size + ); + *pn += 1; + p + }; + let send_next = |cc: &mut ClassicCongestionControl, pn: &mut PacketNumber, now| { + let p = next_packet(pn, now); + cc.on_packet_sent(&p); + p + }; + + let p1 = send_next(&mut cc, &mut pn, now); + let p2 = send_next(&mut cc, &mut pn, now); + let p3 = send_next(&mut cc, &mut pn, now); + + assert_eq!(cc.acked_bytes(), 0); + 
cwnd_is_default(&cc); + assert_eq!(cc.bytes_in_flight(), 3 * MAX_DATAGRAM_SIZE); + + // advance one rtt to detect lost packet there this simplifies the timers, because on_packet_loss + // would only be called after RTO, but that is not relevant to the problem + now += RTT; + cc.on_packets_lost(Some(now), None, PTO, &[p1]); + + // We are now in recovery + assert!(cc.recovery_packet()); + assert_eq!(cc.acked_bytes(), 0); + cwnd_is_halved(&cc); + assert_eq!(cc.bytes_in_flight(), 2 * MAX_DATAGRAM_SIZE); + + // Don't reduce the cwnd again on second packet loss + cc.on_packets_lost(Some(now), None, PTO, &[p3]); + assert_eq!(cc.acked_bytes(), 0); + cwnd_is_halved(&cc); // still the same as after first packet loss + assert_eq!(cc.bytes_in_flight(), MAX_DATAGRAM_SIZE); + + // the acked packets before on_packet_sent were the cause of + // https://github.com/mozilla/neqo/pull/1465 + cc.on_packets_acked(&[p2], RTT, now); + + assert_eq!(cc.bytes_in_flight(), 0); + + // send out recovery packet and get it acked to get out of recovery state + let p4 = send_next(&mut cc, &mut pn, now); + cc.on_packet_sent(&p4); + now += RTT; + cc.on_packets_acked(&[p4], RTT, now); + + // do the same as in the first rtt but now the bug appears + let p5 = send_next(&mut cc, &mut pn, now); + let p6 = send_next(&mut cc, &mut pn, now); + now += RTT; + + let cur_cwnd = cc.cwnd(); + cc.on_packets_lost(Some(now), None, PTO, &[p5]); + + // go back into recovery + assert!(cc.recovery_packet()); + assert_eq!(cc.cwnd(), cur_cwnd / 2); + assert_eq!(cc.acked_bytes(), 0); + assert_eq!(cc.bytes_in_flight(), 2 * MAX_DATAGRAM_SIZE); + + // this shouldn't introduce further cwnd reduction, but it did before https://github.com/mozilla/neqo/pull/1465 + cc.on_packets_lost(Some(now), None, PTO, &[p6]); + assert_eq!(cc.cwnd(), cur_cwnd / 2); +} From 6c05ac330dd4850e2088d9d36bc0db5ac4fda864 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Fri, 20 Oct 2023 01:27:29 +0200 Subject: [PATCH 014/321] Capture packet number in 
closure to simplify congestion control test (#1471) https://github.com/mozilla/neqo/pull/1465#discussion_r1364475488 --- neqo-transport/src/cc/tests/new_reno.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index aacf3363cf..0e4322c08c 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -9,7 +9,7 @@ use crate::cc::new_reno::NewReno; use crate::cc::{ClassicCongestionControl, CongestionControl, CWND_INITIAL, MAX_DATAGRAM_SIZE}; -use crate::packet::{PacketNumber, PacketType}; +use crate::packet::PacketType; use crate::tracking::SentPacket; use std::time::Duration; use test_fixture::now; @@ -136,27 +136,27 @@ fn issue_1465() { let mut cc = ClassicCongestionControl::new(NewReno::default()); let mut pn = 0; let mut now = now(); - let next_packet = |pn: &mut PacketNumber, now| { + let mut next_packet = |now| { let p = SentPacket::new( PacketType::Short, - *pn, // pn + pn, // pn now, // time_sent true, // ack eliciting Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); - *pn += 1; + pn += 1; p }; - let send_next = |cc: &mut ClassicCongestionControl, pn: &mut PacketNumber, now| { - let p = next_packet(pn, now); + let mut send_next = |cc: &mut ClassicCongestionControl, now| { + let p = next_packet(now); cc.on_packet_sent(&p); p }; - let p1 = send_next(&mut cc, &mut pn, now); - let p2 = send_next(&mut cc, &mut pn, now); - let p3 = send_next(&mut cc, &mut pn, now); + let p1 = send_next(&mut cc, now); + let p2 = send_next(&mut cc, now); + let p3 = send_next(&mut cc, now); assert_eq!(cc.acked_bytes(), 0); cwnd_is_default(&cc); @@ -186,14 +186,14 @@ fn issue_1465() { assert_eq!(cc.bytes_in_flight(), 0); // send out recovery packet and get it acked to get out of recovery state - let p4 = send_next(&mut cc, &mut pn, now); + let p4 = send_next(&mut cc, now); cc.on_packet_sent(&p4); now += RTT; 
cc.on_packets_acked(&[p4], RTT, now); // do the same as in the first rtt but now the bug appears - let p5 = send_next(&mut cc, &mut pn, now); - let p6 = send_next(&mut cc, &mut pn, now); + let p5 = send_next(&mut cc, now); + let p6 = send_next(&mut cc, now); now += RTT; let cur_cwnd = cc.cwnd(); From 34b2c3c45dc8c396f82ef5c3a5246f04bf0adad3 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Thu, 26 Oct 2023 12:20:47 +0200 Subject: [PATCH 015/321] neqo v0.6.6 (#1480) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 4932658f41..c6e06bdefb 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.5" +version = "0.6.6" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 04d04b8caa..075e824881 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.5" +version = "0.6.6" authors = ["Bobby Holley "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 82f75bab1e..e13b8aca7e 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.5" +version = "0.6.6" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index a6f6982d4c..4aee991c75 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.5" +version = "0.6.6" authors = ["Dragana Damjanovic "] edition = 
"2018" rust-version = "1.65.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index e4fcd33199..119876d8e7 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.5" +version = "0.6.6" authors = ["EKR "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 3af198faf1..443ee4ead8 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.5" +version = "0.6.6" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index d25a814b4c..3cb2852508 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.5" +version = "0.6.6" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index d4e66cb138..6bb59479e2 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.5" +version = "0.6.6" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.65.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index c2cb6a6410..b9e2bdd909 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.5" +version = "0.6.6" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" From 310a9d14eca683c132d1535a124df6b874816ee9 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Wed, 1 Nov 2023 15:42:48 +0100 Subject: [PATCH 016/321] Add debug log info to make debugging congestion controller easier The following log module can be set to see log output timestamp,neqo_transport::*:3 --- neqo-transport/src/cc/classic_cc.rs | 43 +++++++++++++++++++++-------- 1 file 
changed, 32 insertions(+), 11 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 639cfe9358..55859a841f 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -163,7 +163,18 @@ impl CongestionControl for ClassicCongestionControl { ); let mut new_acked = 0; - for pkt in acked_pkts.iter().filter(|pkt| pkt.cc_outstanding()) { + for pkt in acked_pkts { + qinfo!( + "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}", + self, + pkt.pn, + pkt.size, + i32::from(!pkt.cc_outstanding()), + i32::from(pkt.lost()) + ); + if !pkt.cc_outstanding() { + continue; + } assert!(self.bytes_in_flight >= pkt.size); self.bytes_in_flight -= pkt.size; @@ -183,11 +194,10 @@ impl CongestionControl for ClassicCongestionControl { if is_app_limited { self.cc_algorithm.on_app_limited(); + qinfo!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); return; } - qtrace!([self], "ACK received, acked_bytes = {}", self.acked_bytes); - // Slow start, up to the slow start threshold. if self.congestion_window < self.ssthresh { self.acked_bytes += new_acked; @@ -235,6 +245,7 @@ impl CongestionControl for ClassicCongestionControl { QlogMetric::BytesInFlight(self.bytes_in_flight), ], ); + qinfo!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); } /// Update congestion controller state based on lost packets. 
@@ -250,6 +261,12 @@ impl CongestionControl for ClassicCongestionControl { } for pkt in lost_packets.iter().filter(|pkt| pkt.cc_in_flight()) { + qinfo!( + "packet_lost this={:?}, pn={}, ps={}", + self, + pkt.pn, + pkt.size + ); assert!(self.bytes_in_flight >= pkt.size); self.bytes_in_flight -= pkt.size; } @@ -258,8 +275,6 @@ impl CongestionControl for ClassicCongestionControl { &[QlogMetric::BytesInFlight(self.bytes_in_flight)], ); - qdebug!([self], "Pkts lost {}", lost_packets.len()); - let congestion = self.on_congestion_event(lost_packets.last().unwrap()); let persistent_congestion = self.detect_persistent_congestion( first_rtt_sample_time, @@ -267,6 +282,13 @@ impl CongestionControl for ClassicCongestionControl { pto, lost_packets, ); + qinfo!( + "on_packets_lost this={:p}, bytes_in_flight={}, cwnd={}, state={:?}", + self, + self.bytes_in_flight, + self.congestion_window, + self.state + ); congestion || persistent_congestion } @@ -302,12 +324,11 @@ impl CongestionControl for ClassicCongestionControl { } self.bytes_in_flight += pkt.size; - qdebug!( - [self], - "Pkt Sent len {}, bif {}, cwnd {}", - pkt.size, - self.bytes_in_flight, - self.congestion_window + qinfo!( + "packet_sent this={:p}, pn={}, ps={}", + self, + pkt.pn, + pkt.size ); qlog::metrics_updated( &mut self.qlog, From 887d25646e2f1af5fe14247460129c169b56ef88 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Wed, 1 Nov 2023 14:43:50 +0100 Subject: [PATCH 017/321] Use packet number instead of time sent to identify packets sent after recovery event This doesn't change behavior, simplifies after_recovery_start check by removing the ambiguousness when the recovery packet was sent at the same time as some other packet. 
--- neqo-transport/src/cc/classic_cc.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 55859a841f..39b98e49a5 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -14,6 +14,7 @@ use std::time::{Duration, Instant}; use super::CongestionControl; use crate::cc::MAX_DATAGRAM_SIZE; +use crate::packet::PacketNumber; use crate::qlog::{self, QlogMetric}; use crate::sender::PACING_BURST_SIZE; use crate::tracking::SentPacket; @@ -109,7 +110,7 @@ pub struct ClassicCongestionControl { bytes_in_flight: usize, acked_bytes: usize, ssthresh: usize, - recovery_start: Option, + recovery_start: Option, qlog: NeqoQlog, } @@ -315,7 +316,7 @@ impl CongestionControl for ClassicCongestionControl { fn on_packet_sent(&mut self, pkt: &SentPacket) { // Record the recovery time and exit any transient state. if self.state.transient() { - self.recovery_start = Some(pkt.time_sent); + self.recovery_start = Some(pkt.pn); self.state.update(); } @@ -464,7 +465,7 @@ impl ClassicCongestionControl { // state and update the variable `self.recovery_start`. Before the // first recovery, all packets were sent after the recovery event, // allowing to reduce the cwnd on congestion events. - !self.state.transient() && self.recovery_start.map_or(true, |t| packet.time_sent >= t) + !self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn >= pn) } /// Handle a congestion event. 
From 5624b83920abd5ccaa107d77299179b76d018550 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 6 Nov 2023 09:11:58 +0100 Subject: [PATCH 018/321] Update enumset dependency (#1489) --- neqo-http3/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 4aee991c75..164815eeca 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -17,7 +17,7 @@ qlog = "0.9.0" sfv = "0.9.1" url = "2.0" lazy_static = "1.3.0" -enumset = "1.0.8" +enumset = "1.1.2" [dev-dependencies] test-fixture = { path = "../test-fixture" } From 3b1ee9dcc48bcde2595a8015df0fb7bf6ca89392 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Thu, 2 Nov 2023 14:44:17 +0100 Subject: [PATCH 019/321] Fix classic_cc debug output print pointer of self instead of debug print internal variables. Used to identify same congestion controller when having multiple concurrent connections --- neqo-transport/src/cc/classic_cc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 39b98e49a5..ae814f1711 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -263,7 +263,7 @@ impl CongestionControl for ClassicCongestionControl { for pkt in lost_packets.iter().filter(|pkt| pkt.cc_in_flight()) { qinfo!( - "packet_lost this={:?}, pn={}, ps={}", + "packet_lost this={:p}, pn={}, ps={}", self, pkt.pn, pkt.size From 2582acda8c9c2a1b00304e07f46be43246c56db7 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 7 Nov 2023 14:10:13 +0100 Subject: [PATCH 020/321] ACK immediately after an RTT (#1488) * ACK immediately after an RTT This should improve loss recovery performance if we haven't been active for a short while. 
* Adding a missing test * Semi-colon, thanks clippy --- neqo-transport/src/connection/idle.rs | 11 ++- neqo-transport/src/connection/mod.rs | 10 ++- .../src/connection/tests/ackrate.rs | 12 +++ neqo-transport/src/connection/tests/idle.rs | 30 ++++---- neqo-transport/src/connection/tests/keys.rs | 3 +- .../src/connection/tests/migration.rs | 8 +- neqo-transport/src/connection/tests/mod.rs | 31 ++++++-- .../src/connection/tests/recovery.rs | 46 +++++------- neqo-transport/src/connection/tests/stream.rs | 20 ++--- neqo-transport/src/tracking.rs | 73 ++++++++++++++----- 10 files changed, 164 insertions(+), 80 deletions(-) diff --git a/neqo-transport/src/connection/idle.rs b/neqo-transport/src/connection/idle.rs index 5b1bd857dc..da1c520777 100644 --- a/neqo-transport/src/connection/idle.rs +++ b/neqo-transport/src/connection/idle.rs @@ -5,8 +5,11 @@ // except according to those terms. use crate::recovery::RecoveryToken; -use std::cmp::{max, min}; -use std::time::{Duration, Instant}; +use neqo_common::qtrace; +use std::{ + cmp::{max, min}, + time::{Duration, Instant}, +}; #[derive(Debug, Clone)] /// There's a little bit of different behavior for resetting idle timeout. 
See @@ -53,6 +56,10 @@ impl IdleTimeout { } else { max(self.timeout, pto * 3) }; + qtrace!( + "IdleTimeout::expiry@{now:?} pto={pto:?}, ka={keep_alive} => {t:?}", + t = start + delay + ); start + delay } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index a9fd93c784..abb7e590ad 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -2049,8 +2049,14 @@ impl Connection { if primary { let stats = &mut self.stats.borrow_mut().frame_tx; - self.acks - .write_frame(space, now, builder, &mut tokens, stats)?; + self.acks.write_frame( + space, + now, + path.borrow().rtt().estimate(), + builder, + &mut tokens, + stats, + )?; } let ack_end = builder.len(); diff --git a/neqo-transport/src/connection/tests/ackrate.rs b/neqo-transport/src/connection/tests/ackrate.rs index 5a0c7ae351..8d0f73f154 100644 --- a/neqo-transport/src/connection/tests/ackrate.rs +++ b/neqo-transport/src/connection/tests/ackrate.rs @@ -117,6 +117,11 @@ fn ack_rate_client_one_rtt() { // A single packet from the client will cause the server to engage its delayed // acknowledgment timer, which should now be equal to RTT. + // The first packet will elicit an immediate ACK however, so do this twice. + let d = send_something(&mut client, now); + now += RTT / 2; + let ack = server.process(Some(d), now).dgram(); + assert!(ack.is_some()); let d = send_something(&mut client, now); now += RTT / 2; let delay = server.process(Some(d), now).callback(); @@ -133,6 +138,13 @@ fn ack_rate_server_half_rtt() { let mut server = new_server(ConnectionParameters::default().ack_ratio(ACK_RATIO_SCALE * 2)); let mut now = connect_rtt_idle(&mut client, &mut server, RTT); + // The server now sends something. + let d = send_something(&mut server, now); + now += RTT / 2; + // The client now will acknowledge immediately because it has been more than + // an RTT since it last sent an acknowledgment. 
+ let ack = client.process(Some(d), now); + assert!(ack.as_dgram_ref().is_some()); let d = send_something(&mut server, now); now += RTT / 2; let delay = client.process(Some(d), now).callback(); diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index 947a800244..a9643c284f 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -183,7 +183,7 @@ fn idle_send_packet1() { now += Duration::from_secs(10); let dgram = send_and_receive(&mut client, &mut server, now); - assert!(dgram.is_none()); + assert!(dgram.is_some()); // the server will want to ACK, we can drop that. // Still connected after 39 seconds because idle timer reset by the // outgoing packet. @@ -237,11 +237,13 @@ fn idle_send_packet2() { #[test] fn idle_recv_packet() { + const FUDGE: Duration = Duration::from_millis(10); + let mut client = default_client(); let mut server = default_server(); connect_force_idle(&mut client, &mut server); - let now = now(); + let mut now = now(); let res = client.process(None, now); assert_eq!(res, Output::Callback(default_timeout())); @@ -250,23 +252,25 @@ fn idle_recv_packet() { assert_eq!(stream, 0); assert_eq!(client.stream_send(stream, b"hello").unwrap(), 5); - // Respond with another packet - let out = client.process(None, now + Duration::from_secs(10)); - server.process_input(out.dgram().unwrap(), now + Duration::from_secs(10)); + // Respond with another packet. + // Note that it is important that this not result in the RTT increasing above 0. + // Otherwise, the eventual timeout will be extended (and we're not testing that). 
+ now += Duration::from_secs(10); + let out = client.process(None, now); + server.process_input(out.dgram().unwrap(), now); assert_eq!(server.stream_send(stream, b"world").unwrap(), 5); - let out = server.process_output(now + Duration::from_secs(10)); + let out = server.process_output(now); assert_ne!(out.as_dgram_ref(), None); - - mem::drop(client.process(out.dgram(), now + Duration::from_secs(20))); + mem::drop(client.process(out.dgram(), now)); assert!(matches!(client.state(), State::Confirmed)); - // Still connected after 49 seconds because idle timer reset by received - // packet - mem::drop(client.process(None, now + default_timeout() + Duration::from_secs(19))); + // Add a little less than the idle timeout and we're still connected. + now += default_timeout() - FUDGE; + mem::drop(client.process(None, now)); assert!(matches!(client.state(), State::Confirmed)); - // Not connected after 50 seconds. - mem::drop(client.process(None, now + default_timeout() + Duration::from_secs(20))); + now += FUDGE; + mem::drop(client.process(None, now)); assert!(matches!(client.state(), State::Closed(_))); } diff --git a/neqo-transport/src/connection/tests/keys.rs b/neqo-transport/src/connection/tests/keys.rs index 26a3768b7b..7e04aaf191 100644 --- a/neqo-transport/src/connection/tests/keys.rs +++ b/neqo-transport/src/connection/tests/keys.rs @@ -116,7 +116,8 @@ fn key_update_client() { assert_eq!(client.get_epochs(), (Some(4), Some(3))); // Send something to propagate the update. - assert!(send_and_receive(&mut client, &mut server, now).is_none()); + // Note that the server will acknowledge immediately when RTT is zero. + assert!(send_and_receive(&mut client, &mut server, now).is_some()); // The server should now be waiting to discharge read keys. 
assert_eq!(server.get_epochs(), (Some(4), Some(3))); diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index dd35412ec2..9d662da0b8 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -11,6 +11,7 @@ use super::{ }; use crate::{ cid::LOCAL_ACTIVE_CID_LIMIT, + connection::tests::send_something_paced, frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, path::{PATH_MTU_V4, PATH_MTU_V6}, @@ -177,7 +178,7 @@ fn migrate_immediate() { let mut client = default_client(); let mut server = default_server(); connect_force_idle(&mut client, &mut server); - let mut now = now(); + let now = now(); client .migrate(Some(addr_v4()), Some(addr_v4()), true, now) @@ -206,8 +207,9 @@ fn migrate_immediate() { // Receiving a packet sent by the server before migration doesn't change path. client.process_input(server_delayed, now); - now = skip_pacing(&mut client, now); - let client3 = send_something(&mut client, now); + // The client has sent two unpaced packets and this new path has no RTT estimate + // so this might be paced. + let (client3, _t) = send_something_paced(&mut client, now, true); assert_v4_path(&client3, false); } diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index c12f3576fb..72fe9d1db8 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -501,16 +501,37 @@ fn assert_full_cwnd(packets: &[Datagram], cwnd: usize) { assert_eq!(last.len(), last_packet(cwnd)); } -/// Send something on a stream from `sender` to `receiver`. -/// Return the resulting datagram. +/// Send something on a stream from `sender` to `receiver`, maybe allowing for pacing. +/// Return the resulting datagram and the new time. 
#[must_use] -fn send_something(sender: &mut Connection, now: Instant) -> Datagram { +fn send_something_paced( + sender: &mut Connection, + mut now: Instant, + allow_pacing: bool, +) -> (Datagram, Instant) { let stream_id = sender.stream_create(StreamType::UniDi).unwrap(); assert!(sender.stream_send(stream_id, DEFAULT_STREAM_DATA).is_ok()); assert!(sender.stream_close_send(stream_id).is_ok()); qdebug!([sender], "send_something on {}", stream_id); - let dgram = sender.process(None, now).dgram(); - dgram.expect("should have something to send") + let dgram = match sender.process_output(now) { + Output::Callback(t) => { + assert!(allow_pacing, "send_something: unexpected delay"); + now += t; + sender + .process_output(now) + .dgram() + .expect("send_something: should have something to send") + } + Output::Datagram(d) => d, + Output::None => panic!("send_something: got Output::None"), + }; + (dgram, now) +} + +/// Send something on a stream from `sender` to `receiver`. +/// Return the resulting datagram. +fn send_something(sender: &mut Connection, now: Instant) -> Datagram { + send_something_paced(sender, now, false).0 } /// Send something on a stream from `sender` to `receiver`. diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 910d7470c7..44e77cab1e 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -4,27 +4,29 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{Connection, ConnectionParameters, Output, State}; use super::{ + super::{Connection, ConnectionParameters, Output, State}, assert_full_cwnd, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, cwnd, default_client, default_server, fill_cwnd, maybe_authenticate, new_client, send_and_receive, send_something, AT_LEAST_PTO, DEFAULT_RTT, DEFAULT_STREAM_DATA, POST_HANDSHAKE_CWND, }; -use crate::cc::CWND_MIN; -use crate::path::PATH_MTU_V6; -use crate::recovery::{ - FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MIN_OUTSTANDING_UNACK, PTO_PACKET_COUNT, +use crate::{ + cc::CWND_MIN, + path::PATH_MTU_V6, + recovery::{FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MIN_OUTSTANDING_UNACK, PTO_PACKET_COUNT}, + rtt::GRANULARITY, + stats::MAX_PTO_COUNTS, + tparams::TransportParameter, + tracking::DEFAULT_ACK_DELAY, + StreamType, }; -use crate::rtt::GRANULARITY; -use crate::stats::MAX_PTO_COUNTS; -use crate::tparams::TransportParameter; -use crate::tracking::DEFAULT_ACK_DELAY; -use crate::StreamType; use neqo_common::qdebug; use neqo_crypto::AuthenticationStatus; -use std::mem; -use std::time::{Duration, Instant}; +use std::{ + mem, + time::{Duration, Instant}, +}; use test_fixture::{self, now, split_datagram}; #[test] @@ -96,15 +98,7 @@ fn pto_works_ping() { let mut client = default_client(); let mut server = default_server(); connect_force_idle(&mut client, &mut server); - let mut now = now(); - - let res = client.process(None, now); - assert_eq!( - res, - Output::Callback(ConnectionParameters::default().get_idle_timeout()) - ); - - now += Duration::from_secs(10); + let mut now = now() + Duration::from_secs(10); // Send a few packets from the client. 
let pkt0 = send_something(&mut client, now); @@ -125,22 +119,22 @@ fn pto_works_ping() { now += Duration::from_millis(20); - // process pkt2 (no ack yet) + // process pkt2 (immediate ack because last ack was more than an RTT ago; RTT=0) let srv1 = server.process(Some(pkt2), now).dgram(); - assert!(srv1.is_none()); + assert!(srv1.is_some()); // this is now dropped - // process pkt3 (acked) + now += Duration::from_millis(20); + // process pkt3 (acked for same reason) let srv2 = server.process(Some(pkt3), now).dgram(); // ack client pkt 2 & 3 assert!(srv2.is_some()); - now += Duration::from_millis(20); // client processes ack let pkt4 = client.process(srv2, now).dgram(); // client resends data from pkt0 assert!(pkt4.is_some()); - // server sees ooo pkt0 and generates ack + // server sees ooo pkt0 and generates immediate ack let srv3 = server.process(Some(pkt0), now).dgram(); assert!(srv3.is_some()); diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index ba73a54e36..980077e5aa 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -11,12 +11,16 @@ use super::{ use crate::{ events::ConnectionEvent, recv_stream::RECV_BUFFER_SIZE, - send_stream::OrderGroup, - send_stream::{SendStreamState, SEND_BUFFER_SIZE}, + send_stream::{OrderGroup, SendStreamState, SEND_BUFFER_SIZE}, streams::{SendOrder, StreamOrder}, tparams::{self, TransportParameter}, - tracking::DEFAULT_ACK_PACKET_TOLERANCE, - Connection, ConnectionError, ConnectionParameters, Error, StreamId, StreamType, + // tracking::DEFAULT_ACK_PACKET_TOLERANCE, + Connection, + ConnectionError, + ConnectionParameters, + Error, + StreamId, + StreamType, }; use std::collections::HashMap; @@ -81,12 +85,10 @@ fn transfer() { assert_eq!(*client.state(), State::Confirmed); qdebug!("---- server receives"); - for (d_num, d) in datagrams.into_iter().enumerate() { + for d in datagrams { let out = server.process(Some(d), 
now()); - assert_eq!( - out.as_dgram_ref().is_some(), - (d_num + 1) % usize::try_from(DEFAULT_ACK_PACKET_TOLERANCE + 1).unwrap() == 0 - ); + // With an RTT of zero, the server will acknowledge every packet immediately. + assert!(out.as_dgram_ref().is_some()); qdebug!("Output={:0x?}", out.as_dgram_ref()); } assert_eq!(*server.state(), State::Confirmed); diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 0c3d25b9ed..32f1c8d1b7 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -366,6 +366,8 @@ pub struct RecvdPackets { largest_pn_time: Option, /// The time that we should be sending an ACK. ack_time: Option, + /// The time we last sent an ACK. + last_ack_time: Option, /// The current ACK frequency sequence number. ack_frequency_seqno: u64, /// The time to delay after receiving the first packet that is @@ -391,6 +393,7 @@ impl RecvdPackets { min_tracked: 0, largest_pn_time: None, ack_time: None, + last_ack_time: None, ack_frequency_seqno: 0, ack_delay: DEFAULT_ACK_DELAY, unacknowledged_count: 0, @@ -424,11 +427,13 @@ impl RecvdPackets { } /// Returns true if an ACK frame should be sent now. - fn ack_now(&self, now: Instant) -> bool { - match self.ack_time { - Some(t) => t <= now, - None => false, - } + fn ack_now(&self, now: Instant, rtt: Duration) -> bool { + // If ack_time is Some, then we have something to acknowledge. + // In that case, either ack because `now >= ack_time`, or + // because it is more than an RTT since the last time we sent an ack. + self.ack_time.map_or(false, |next| { + next <= now || self.last_ack_time.map_or(false, |last| last + rtt <= now) + }) } // A simple addition of a packet number to the tracked set. 
@@ -558,6 +563,7 @@ impl RecvdPackets { fn write_frame( &mut self, now: Instant, + rtt: Duration, builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, @@ -567,7 +573,7 @@ impl RecvdPackets { const LONGEST_ACK_HEADER: usize = 1 + 8 + 8 + 1 + 8; // Check that we aren't delaying ACKs. - if !self.ack_now(now) { + if !self.ack_now(now, rtt) { return; } @@ -618,6 +624,7 @@ impl RecvdPackets { // We've sent an ACK, reset the timer. self.ack_time = None; + self.last_ack_time = Some(now); self.unacknowledged_count = 0; tokens.push(RecoveryToken::Ack(AckToken { @@ -714,12 +721,13 @@ impl AckTracker { &mut self, pn_space: PacketNumberSpace, now: Instant, + rtt: Duration, builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, ) -> Res<()> { if let Some(space) = self.get_mut(pn_space) { - space.write_frame(now, builder, tokens, stats); + space.write_frame(now, rtt, builder, tokens, stats); if builder.len() > builder.limit() { return Err(Error::InternalError(24)); } @@ -755,6 +763,7 @@ mod tests { use neqo_common::Encoder; use std::collections::HashSet; + const RTT: Duration = Duration::from_millis(100); lazy_static! { static ref NOW: Instant = Instant::now(); } @@ -838,7 +847,7 @@ mod tests { // Only application data packets are delayed. let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW)); + assert!(!rp.ack_now(*NOW, RTT)); rp.ack_freq(0, COUNT, DELAY, false); @@ -846,14 +855,14 @@ mod tests { for i in 0..COUNT { rp.set_received(*NOW, i, true); assert_eq!(Some(*NOW + DELAY), rp.ack_time()); - assert!(!rp.ack_now(*NOW)); - assert!(rp.ack_now(*NOW + DELAY)); + assert!(!rp.ack_now(*NOW, RTT)); + assert!(rp.ack_now(*NOW + DELAY, RTT)); } // Exceeding COUNT will move the ACK time to now. 
rp.set_received(*NOW, COUNT, true); assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + assert!(rp.ack_now(*NOW, RTT)); } #[test] @@ -861,12 +870,12 @@ mod tests { for space in &[PacketNumberSpace::Initial, PacketNumberSpace::Handshake] { let mut rp = RecvdPackets::new(*space); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW)); + assert!(!rp.ack_now(*NOW, RTT)); // Any packet in these spaces is acknowledged straight away. rp.set_received(*NOW, 0, true); assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + assert!(rp.ack_now(*NOW, RTT)); } } @@ -874,23 +883,27 @@ mod tests { fn ooo_no_ack_delay_new() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW)); + assert!(!rp.ack_now(*NOW, RTT)); // Anything other than packet 0 is acknowledged immediately. rp.set_received(*NOW, 1, true); assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + assert!(rp.ack_now(*NOW, RTT)); } - fn write_frame(rp: &mut RecvdPackets) { + fn write_frame_at(rp: &mut RecvdPackets, now: Instant) { let mut builder = PacketBuilder::short(Encoder::new(), false, []); let mut stats = FrameStats::default(); let mut tokens = Vec::new(); - rp.write_frame(*NOW, &mut builder, &mut tokens, &mut stats); + rp.write_frame(now, RTT, &mut builder, &mut tokens, &mut stats); assert!(!tokens.is_empty()); assert_eq!(stats.ack, 1); } + fn write_frame(rp: &mut RecvdPackets) { + write_frame_at(rp, *NOW); + } + #[test] fn ooo_no_ack_delay_fill() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); @@ -899,8 +912,26 @@ mod tests { // Filling in behind the largest acknowledged causes immediate ACK. rp.set_received(*NOW, 0, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + write_frame(&mut rp); + + // Receiving the next packet won't elicit an ACK. 
+ rp.set_received(*NOW, 2, true); + assert!(!rp.ack_now(*NOW, RTT)); + } + + #[test] + fn immediate_ack_after_rtt() { + let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); + rp.set_received(*NOW, 1, true); + write_frame(&mut rp); + + // Filling in behind the largest acknowledged causes immediate ACK. + rp.set_received(*NOW, 0, true); + write_frame(&mut rp); + + // A new packet ordinarily doesn't result in an ACK, but this time it does. + rp.set_received(*NOW + RTT, 2, true); + write_frame_at(&mut rp, *NOW + RTT); } #[test] @@ -1032,6 +1063,7 @@ mod tests { .write_frame( PacketNumberSpace::Initial, *NOW, + RTT, &mut builder, &mut tokens, &mut stats, @@ -1059,6 +1091,7 @@ mod tests { .write_frame( PacketNumberSpace::Initial, *NOW, + RTT, &mut builder, &mut tokens, &mut stats, @@ -1091,6 +1124,7 @@ mod tests { .write_frame( PacketNumberSpace::Initial, *NOW, + RTT, &mut builder, &mut Vec::new(), &mut stats, @@ -1123,6 +1157,7 @@ mod tests { .write_frame( PacketNumberSpace::Initial, *NOW, + RTT, &mut builder, &mut Vec::new(), &mut stats, From 8a3aaa26c97d41a65a0de0cba885667c930ca61d Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Thu, 9 Nov 2023 17:30:14 +0100 Subject: [PATCH 021/321] neqo v0.6.7 --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index c6e06bdefb..938d5d60c2 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.6" +version = "0.6.7" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 075e824881..512f42f6cd 100644 --- a/neqo-common/Cargo.toml +++ 
b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.6" +version = "0.6.7" authors = ["Bobby Holley "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index e13b8aca7e..d8d244c472 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.6" +version = "0.6.7" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 164815eeca..13879d8547 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.6" +version = "0.6.7" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 119876d8e7..9891c421a3 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.6" +version = "0.6.7" authors = ["EKR "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 443ee4ead8..a595e4cbf7 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.6" +version = "0.6.7" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 3cb2852508..6f2f25afdd 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.6" +version = "0.6.7" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 6bb59479e2..a3c30f4dd1 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.6" +version = "0.6.7" authors = 
["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.65.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index b9e2bdd909..7ec88afdce 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.6" +version = "0.6.7" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" From b0eb12a6f075bf50bbaecd2774219a2c9e0d6fea Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 21 Nov 2023 02:03:42 +1100 Subject: [PATCH 022/321] Rust clippy updates (#1494) --- neqo-common/src/codec.rs | 20 +++++------ neqo-crypto/src/agent.rs | 4 +-- neqo-crypto/tests/hp.rs | 4 +-- neqo-transport/src/cc/classic_cc.rs | 53 +++++++++++++++++------------ neqo-transport/src/fc.rs | 2 +- neqo-transport/src/tparams.rs | 10 +++--- 6 files changed, 52 insertions(+), 41 deletions(-) diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 2df11b3c64..99ba9ec52a 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -37,13 +37,13 @@ impl<'a> Decoder<'a> { /// # Panics /// If the remaining quantity is less than `n`. pub fn skip(&mut self, n: usize) { - assert!(self.remaining() >= n); + assert!(self.remaining() >= n, "insufficient data"); self.offset += n; } /// Skip helper that panics if `n` is `None` or not able to fit in `usize`. fn skip_inner(&mut self, n: Option) { - self.skip(usize::try_from(n.unwrap()).unwrap()); + self.skip(usize::try_from(n.expect("invalid length")).unwrap()); } /// Skip a vector. Panics if there isn't enough space. 
@@ -545,7 +545,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "insufficient data")] fn skip_too_much() { let enc = Encoder::from_hex("ff"); let mut dec = enc.as_decoder(); @@ -561,7 +561,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "insufficient data")] fn skip_vec_too_much() { let enc = Encoder::from_hex("ff1234"); let mut dec = enc.as_decoder(); @@ -569,7 +569,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "invalid length")] fn skip_vec_short_length() { let enc = Encoder::from_hex("ff"); let mut dec = enc.as_decoder(); @@ -584,7 +584,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "insufficient data")] fn skip_vvec_too_much() { let enc = Encoder::from_hex("0f1234"); let mut dec = enc.as_decoder(); @@ -592,7 +592,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "invalid length")] fn skip_vvec_short_length() { let enc = Encoder::from_hex("ff"); let mut dec = enc.as_decoder(); @@ -611,7 +611,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "Varint value too large")] fn encoded_length_oob() { _ = Encoder::varint_len(1 << 62); } @@ -628,7 +628,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "Varint value too large")] fn encoded_vvec_length_oob() { _ = Encoder::vvec_len(1 << 62); } @@ -752,7 +752,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "assertion failed")] fn encode_vec_with_overflow() { let mut enc = Encoder::default(); enc.encode_vec_with(1, |enc_inner| { diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index d8db5c7a89..3612fec7e3 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -540,8 +540,8 @@ impl SecretAgent { /// Install an extension handler. /// /// This can be called multiple times with different values for `ext`. 
The handler is provided as - /// Rc> so that the caller is able to hold a reference to the handler and later access any - /// state that it accumulates. + /// `Rc>` so that the caller is able to hold a reference to the handler and later + /// access any state that it accumulates. /// /// # Errors /// When the extension handler can't be successfully installed. diff --git a/neqo-crypto/tests/hp.rs b/neqo-crypto/tests/hp.rs index 8b2ba7612b..2e0aea6b8a 100644 --- a/neqo-crypto/tests/hp.rs +++ b/neqo-crypto/tests/hp.rs @@ -67,14 +67,14 @@ fn chacha20_ctr() { } #[test] -#[should_panic] +#[should_panic(expected = "out of range")] fn aes_short() { let hp = make_hp(TLS_AES_128_GCM_SHA256); mem::drop(hp.mask(&[0; 15])); } #[test] -#[should_panic] +#[should_panic(expected = "out of range")] fn chacha20_short() { let hp = make_hp(TLS_CHACHA20_POLY1305_SHA256); mem::drop(hp.mask(&[0; 15])); diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index ae814f1711..c9f93163a6 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -7,19 +7,22 @@ // Congestion control #![deny(clippy::pedantic)] -use std::cmp::{max, min}; -use std::fmt::{self, Debug, Display}; -use std::time::{Duration, Instant}; +use std::{ + cmp::{max, min}, + fmt::{self, Debug, Display}, + time::{Duration, Instant}, +}; use super::CongestionControl; -use crate::cc::MAX_DATAGRAM_SIZE; -use crate::packet::PacketNumber; -use crate::qlog::{self, QlogMetric}; -use crate::sender::PACING_BURST_SIZE; -use crate::tracking::SentPacket; -use ::qlog::events::quic::CongestionStateUpdated; -use ::qlog::events::EventData; +use crate::{ + cc::MAX_DATAGRAM_SIZE, + packet::PacketNumber, + qlog::{self, QlogMetric}, + sender::PACING_BURST_SIZE, + tracking::SentPacket, +}; +use ::qlog::events::{quic::CongestionStateUpdated, EventData}; use neqo_common::{const_max, const_min, qdebug, qinfo, qlog::NeqoQlog, qtrace}; pub const CWND_INITIAL_PKTS: usize = 10; @@ -439,7 
+442,11 @@ impl ClassicCongestionControl { continue; } if let Some(t) = start { - if p.time_sent.checked_duration_since(t).unwrap() > pc_period { + let elapsed = p + .time_sent + .checked_duration_since(t) + .expect("time is monotonic"); + if elapsed > pc_period { qinfo!([self], "persistent congestion"); self.congestion_window = CWND_MIN; self.acked_bytes = 0; @@ -523,15 +530,19 @@ mod tests { use super::{ ClassicCongestionControl, WindowAdjustment, CWND_INITIAL, CWND_MIN, PERSISTENT_CONG_THRESH, }; - use crate::cc::cubic::{Cubic, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR}; - use crate::cc::new_reno::NewReno; - use crate::cc::{ - CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, + use crate::{ + cc::{ + cubic::{Cubic, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR}, + new_reno::NewReno, + CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, + }, + packet::{PacketNumber, PacketType}, + tracking::SentPacket, + }; + use std::{ + convert::TryFrom, + time::{Duration, Instant}, }; - use crate::packet::{PacketNumber, PacketType}; - use crate::tracking::SentPacket; - use std::convert::TryFrom; - use std::time::{Duration, Instant}; use test_fixture::now; const PTO: Duration = Duration::from_millis(100); @@ -952,7 +963,7 @@ mod tests { /// The code asserts on ordering errors. #[test] - #[should_panic] + #[should_panic(expected = "time is monotonic")] fn persistent_congestion_unsorted_newreno() { let lost = make_lost(&[PERSISTENT_CONG_THRESH + 2, 1]); assert!(!persistent_congestion_by_pto( @@ -965,7 +976,7 @@ mod tests { /// The code asserts on ordering errors. 
#[test] - #[should_panic] + #[should_panic(expected = "time is monotonic")] fn persistent_congestion_unsorted_cubic() { let lost = make_lost(&[PERSISTENT_CONG_THRESH + 2, 1]); assert!(!persistent_congestion_by_pto( diff --git a/neqo-transport/src/fc.rs b/neqo-transport/src/fc.rs index 8ed6573521..090afdc538 100644 --- a/neqo-transport/src/fc.rs +++ b/neqo-transport/src/fc.rs @@ -858,7 +858,7 @@ mod test { remote_stream_limits(Role::Server, 0, 2); } - #[should_panic] + #[should_panic(expected = ".is_allowed")] #[test] fn remote_stream_limits_asserts_if_limit_exceeded() { let mut fc = RemoteStreamLimits::new(2, 1, Role::Client); diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 00fe127660..82efa8db55 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -918,19 +918,19 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "v4.is_some() || v6.is_some()")] fn preferred_address_neither() { _ = PreferredAddress::new(None, None); } #[test] - #[should_panic] + #[should_panic(expected = ".is_unspecified")] fn preferred_address_v4_unspecified() { _ = PreferredAddress::new(Some(SocketAddrV4::new(Ipv4Addr::from(0), 443)), None); } #[test] - #[should_panic] + #[should_panic(expected = "left != right")] fn preferred_address_v4_zero_port() { _ = PreferredAddress::new( Some(SocketAddrV4::new(Ipv4Addr::from(0xc000_0201), 0)), @@ -939,13 +939,13 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = ".is_unspecified")] fn preferred_address_v6_unspecified() { _ = PreferredAddress::new(None, Some(SocketAddrV6::new(Ipv6Addr::from(0), 443, 0, 0))); } #[test] - #[should_panic] + #[should_panic(expected = "left != right")] fn preferred_address_v6_zero_port() { _ = PreferredAddress::new(None, Some(SocketAddrV6::new(Ipv6Addr::from(1), 0, 0, 0))); } From 67faf0fe5e24ede76f4da28383c77aac5c3ca730 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Mon, 20 Nov 2023 15:47:01 +0100 Subject: [PATCH 023/321] 
Improve app_limit detection by keeping track on app_limited state in on_packet_sent Fixes #1475 --- neqo-transport/src/cc/classic_cc.rs | 229 ++++++++++++++++++---------- 1 file changed, 145 insertions(+), 84 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index c9f93163a6..8465d6b0af 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -114,6 +114,14 @@ pub struct ClassicCongestionControl { acked_bytes: usize, ssthresh: usize, recovery_start: Option, + /// `first_app_limited` indicates the packet number after which the application might be + /// underutilizing the congestion window. When underutilizing the congestion window due to not + /// sending out enough data, we SHOULD NOT increase the congestion window.[1] Packets sent + /// before this point are deemed to fully utilize the congestion window and count towards + /// increasing the congestion window. + /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.8 + first_app_limited: PacketNumber, qlog: NeqoQlog, } @@ -153,19 +161,7 @@ impl CongestionControl for ClassicCongestionControl { // Multi-packet version of OnPacketAckedCC fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant) { - // Check whether we are app limited before acked packets are removed - // from bytes_in_flight. 
- let is_app_limited = self.app_limited(); - qtrace!( - [self], - "limited={}, bytes_in_flight={}, cwnd={}, state={:?} pacing_burst_size={}", - is_app_limited, - self.bytes_in_flight, - self.congestion_window, - self.state, - MAX_DATAGRAM_SIZE * PACING_BURST_SIZE, - ); - + let mut is_app_limited = true; let mut new_acked = 0; for pkt in acked_pkts { qinfo!( @@ -179,6 +175,9 @@ impl CongestionControl for ClassicCongestionControl { if !pkt.cc_outstanding() { continue; } + if pkt.pn < self.first_app_limited { + is_app_limited = false; + } assert!(self.bytes_in_flight >= pkt.size); self.bytes_in_flight -= pkt.size; @@ -326,6 +325,13 @@ impl CongestionControl for ClassicCongestionControl { if !pkt.cc_in_flight() { return; } + if !self.app_limited() { + // Given the current non-app-limited condition, we're fully utilizing the congestion + // window. Assume that all in-flight packets up to this one are NOT app-limited. + // However, subsequent packets might be app-limited. Set `first_app_limited` to the + // next packet number. 
+ self.first_app_limited = pkt.pn + 1; + } self.bytes_in_flight += pkt.size; qinfo!( @@ -357,6 +363,7 @@ impl ClassicCongestionControl { ssthresh: usize::MAX, recovery_start: None, qlog: NeqoQlog::disabled(), + first_app_limited: 0, } } @@ -532,6 +539,7 @@ mod tests { }; use crate::{ cc::{ + classic_cc::State, cubic::{Cubic, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR}, new_reno::NewReno, CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, @@ -539,6 +547,7 @@ mod tests { packet::{PacketNumber, PacketType}, tracking::SentPacket, }; + use neqo_common::qinfo; use std::{ convert::TryFrom, time::{Duration, Instant}, @@ -989,131 +998,183 @@ mod tests { #[test] fn app_limited_slow_start() { - const LESS_THAN_CWND_PKTS: usize = 4; + const BELOW_APP_LIMIT_PKTS: usize = 5; + const ABOVE_APP_LIMIT_PKTS: usize = BELOW_APP_LIMIT_PKTS + 1; let mut cc = ClassicCongestionControl::new(NewReno::default()); - - for i in 0..CWND_INITIAL_PKTS { - let sent = SentPacket::new( - PacketType::Short, - u64::try_from(i).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size - ); - cc.on_packet_sent(&sent); + let cwnd = cc.congestion_window; + let mut now = now(); + let mut next_pn = 0; + + // simulate packet bursts below app_limit + for packet_burst_size in 1..=BELOW_APP_LIMIT_PKTS { + // always stay below app_limit during sent. 
+ let mut pkts = Vec::new(); + for _ in 0..packet_burst_size { + let p = SentPacket::new( + PacketType::Short, + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size + ); + next_pn += 1; + cc.on_packet_sent(&p); + pkts.push(p); + } + assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); + now += RTT; + cc.on_packets_acked(&pkts, RTT, now); + assert_eq!(cc.bytes_in_flight(), 0); + assert_eq!(cc.acked_bytes, 0); + assert_eq!(cwnd, cc.congestion_window); // CWND doesn't grow because we're app limited } - assert_eq!(cc.bytes_in_flight(), CWND_INITIAL); - for i in 0..LESS_THAN_CWND_PKTS { - let acked = SentPacket::new( + // Fully utilize the congestion window by sending enough packets to + // have `bytes_in_flight` above the `app_limited` threshold. + let mut pkts = Vec::new(); + for _ in 0..ABOVE_APP_LIMIT_PKTS { + let p = SentPacket::new( PacketType::Short, - u64::try_from(i).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size ); - cc.on_packets_acked(&[acked], RTT, now()); - - assert_eq!( - cc.bytes_in_flight(), - (CWND_INITIAL_PKTS - i - 1) * MAX_DATAGRAM_SIZE - ); - assert_eq!(cc.cwnd(), (CWND_INITIAL_PKTS + i + 1) * MAX_DATAGRAM_SIZE); + next_pn += 1; + cc.on_packet_sent(&p); + pkts.push(p); } - - // Now we are app limited - for i in 4..CWND_INITIAL_PKTS { - let p = [SentPacket::new( - PacketType::Short, - u64::try_from(i).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size - )]; - cc.on_packets_acked(&p, RTT, now()); + assert_eq!( + cc.bytes_in_flight(), + ABOVE_APP_LIMIT_PKTS * MAX_DATAGRAM_SIZE + ); + now += RTT; + // Check if congestion window gets increased for all packets currently in flight + for (i, pkt) in pkts.into_iter().enumerate() { + 
cc.on_packets_acked(&[pkt], RTT, now); assert_eq!( cc.bytes_in_flight(), - (CWND_INITIAL_PKTS - i - 1) * MAX_DATAGRAM_SIZE + (ABOVE_APP_LIMIT_PKTS - i - 1) * MAX_DATAGRAM_SIZE ); - assert_eq!(cc.cwnd(), (CWND_INITIAL_PKTS + 4) * MAX_DATAGRAM_SIZE); + // increase acked_bytes with each packet + qinfo!("{} {}", cc.congestion_window, cwnd + i * MAX_DATAGRAM_SIZE); + assert_eq!(cc.congestion_window, cwnd + (i + 1) * MAX_DATAGRAM_SIZE); + assert_eq!(cc.acked_bytes, 0); } } #[test] fn app_limited_congestion_avoidance() { const CWND_PKTS_CA: usize = CWND_INITIAL_PKTS / 2; + const BELOW_APP_LIMIT_PKTS: usize = CWND_PKTS_CA - 2; + const ABOVE_APP_LIMIT_PKTS: usize = BELOW_APP_LIMIT_PKTS + 1; let mut cc = ClassicCongestionControl::new(NewReno::default()); + let mut now = now(); // Change state to congestion avoidance by introducing loss. let p_lost = SentPacket::new( PacketType::Short, 1, // pn - now(), // time sent + now, // time sent true, // ack eliciting Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); cc.on_packet_sent(&p_lost); cwnd_is_default(&cc); - cc.on_packets_lost(Some(now()), None, PTO, &[p_lost]); + now += PTO; + cc.on_packets_lost(Some(now), None, PTO, &[p_lost]); cwnd_is_halved(&cc); let p_not_lost = SentPacket::new( PacketType::Short, - 1, // pn - now(), // time sent + 2, // pn + now, // time sent true, // ack eliciting Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); cc.on_packet_sent(&p_not_lost); - cc.on_packets_acked(&[p_not_lost], RTT, now()); + now += RTT; + cc.on_packets_acked(&[p_not_lost], RTT, now); cwnd_is_halved(&cc); // cc is app limited therefore cwnd in not increased. assert_eq!(cc.acked_bytes, 0); // Now we are in the congestion avoidance state. + assert_eq!(cc.state, State::CongestionAvoidance); + // simulate packet bursts below app_limit + let mut next_pn = 3; + for packet_burst_size in 1..=BELOW_APP_LIMIT_PKTS { + // always stay below app_limit during sent. 
+ let mut pkts = Vec::new(); + for _ in 0..packet_burst_size { + let p = SentPacket::new( + PacketType::Short, + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size + ); + next_pn += 1; + cc.on_packet_sent(&p); + pkts.push(p); + } + assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); + now += RTT; + for (i, pkt) in pkts.into_iter().enumerate() { + cc.on_packets_acked(&[pkt], RTT, now); + + assert_eq!( + cc.bytes_in_flight(), + (packet_burst_size - i - 1) * MAX_DATAGRAM_SIZE + ); + cwnd_is_halved(&cc); // CWND doesn't grow because we're app limited + assert_eq!(cc.acked_bytes, 0); + } + } + + // Fully utilize the congestion window by sending enough packets to + // have `bytes_in_flight` above the `app_limited` threshold. let mut pkts = Vec::new(); - for i in 0..CWND_PKTS_CA { + for _ in 0..ABOVE_APP_LIMIT_PKTS { let p = SentPacket::new( PacketType::Short, - u64::try_from(i + 3).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size ); + next_pn += 1; cc.on_packet_sent(&p); pkts.push(p); } - assert_eq!(cc.bytes_in_flight(), CWND_INITIAL / 2); - - for i in 0..CWND_PKTS_CA - 2 { - cc.on_packets_acked(&pkts[i..=i], RTT, now()); - - assert_eq!( - cc.bytes_in_flight(), - (CWND_PKTS_CA - i - 1) * MAX_DATAGRAM_SIZE - ); - assert_eq!(cc.cwnd(), CWND_PKTS_CA * MAX_DATAGRAM_SIZE); - assert_eq!(cc.acked_bytes, MAX_DATAGRAM_SIZE * (i + 1)); - } - - // Now we are app limited - for i in CWND_PKTS_CA - 2..CWND_PKTS_CA { - cc.on_packets_acked(&pkts[i..=i], RTT, now()); + assert_eq!( + cc.bytes_in_flight(), + ABOVE_APP_LIMIT_PKTS * MAX_DATAGRAM_SIZE + ); + now += RTT; + let mut last_acked_bytes = 0; + // Check if congestion window gets increased for all packets currently in flight + for (i, pkt) in pkts.into_iter().enumerate() { + 
cc.on_packets_acked(&[pkt], RTT, now); assert_eq!( cc.bytes_in_flight(), - (CWND_PKTS_CA - i - 1) * MAX_DATAGRAM_SIZE + (ABOVE_APP_LIMIT_PKTS - i - 1) * MAX_DATAGRAM_SIZE ); - assert_eq!(cc.cwnd(), CWND_PKTS_CA * MAX_DATAGRAM_SIZE); - assert_eq!(cc.acked_bytes, MAX_DATAGRAM_SIZE * 3); + // The cwnd doesn't increase, but the acked_bytes do, which will eventually lead to an + // increase, once the number of bytes reaches the necessary level + cwnd_is_halved(&cc); + // increase acked_bytes with each packet + assert_ne!(cc.acked_bytes, last_acked_bytes); + last_acked_bytes = cc.acked_bytes; } } } From 83735a88217a6b3a6a9d3cd5d9243040c5e41319 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Wed, 22 Nov 2023 13:30:10 +0100 Subject: [PATCH 024/321] neqo v0.6.8 --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 938d5d60c2..4dbaa276bb 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.7" +version = "0.6.8" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 512f42f6cd..a23ab3824e 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.7" +version = "0.6.8" authors = ["Bobby Holley "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index d8d244c472..8129ad3e20 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.7" +version = "0.6.8" authors = ["Martin Thomson "] edition = "2018" 
rust-version = "1.65.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 13879d8547..a5981f238e 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.7" +version = "0.6.8" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 9891c421a3..23672050db 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.7" +version = "0.6.8" authors = ["EKR "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index a595e4cbf7..9360385e33 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.7" +version = "0.6.8" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 6f2f25afdd..1aa0798aca 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.7" +version = "0.6.8" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.65.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index a3c30f4dd1..6ae4455ac9 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.7" +version = "0.6.8" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.65.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 7ec88afdce..9ceed28474 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.7" +version = "0.6.8" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.65.0" From b91b5485b67b30fc70e246535b3982c7b2964433 Mon Sep 17 00:00:00 2001 From: Max Inden 
Date: Mon, 11 Dec 2023 01:33:46 +0100 Subject: [PATCH 025/321] docs(README): escape IPv6 address (#1503) Escape IPv6 address with single quotes. Prevents shells from interpreting IPv6 address as file pattern: ``` zsh: no matches found: [::]:1234 ``` Also makes it consistent with `neqo-*` commands in `### QUIC Logging`. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 86beb7e230..1e44f8534a 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ To run test HTTP/3 programs (neqo-client and neqo-server): * `cargo build` -* `./target/debug/neqo-server [::]:12345 --db ./test-fixture/db` +* `./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db` * `./target/debug/neqo-client http://127.0.0.1:12345/` If a "Failure to load dynamic library" error happens at runtime, do From 157d76fecb1266a5256dce1c8dac71d8849d7cc8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Dec 2023 02:34:20 +0200 Subject: [PATCH 026/321] Create dependabot.yml (#1501) To generate PRs to update cargo and GitHub action dependencies. --- .github/dependabot.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..6d7f78e7b3 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,13 @@ +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "weekly" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" From 081a4350990b20cb84279aaaf45828ac1ffde8c3 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 11 Dec 2023 01:35:34 +0100 Subject: [PATCH 027/321] deps(qlog): update to qlog v0.10.0 (#1504) This commit upgrades all `neqo-*` crates to use `qlog` `v0.10.0`. 
See also `qlog` `v0.10.0` release pull request https://github.com/cloudflare/quiche/pull/1647 --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- neqo-transport/src/qlog.rs | 16 ++++++++++------ 7 files changed, 16 insertions(+), 12 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 4dbaa276bb..cf9b86484a 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -16,7 +16,7 @@ neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } structopt = "0.3.7" url = "2.0" -qlog = "0.9.0" +qlog = "0.10.0" [features] deny-warnings = [] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index a23ab3824e..35dff0a529 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -11,7 +11,7 @@ build = "build.rs" log = {version = "0.4.0", default-features = false} env_logger = {version = "0.10", default-features = false} lazy_static = "1.3.0" -qlog = "0.9.0" +qlog = "0.10.0" time = {version = "=0.3.23", features = ["formatting"]} [features] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index a5981f238e..70f323fe76 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -13,7 +13,7 @@ neqo-transport = { path = "./../neqo-transport" } neqo-qpack = { path = "./../neqo-qpack" } log = {version = "0.4.0", default-features = false} smallvec = "1.0.0" -qlog = "0.9.0" +qlog = "0.10.0" sfv = "0.9.1" url = "2.0" lazy_static = "1.3.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 9360385e33..5b8d15a7c7 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -12,7 +12,7 @@ neqo-transport = { path = "./../neqo-transport" } neqo-crypto = { path = "./../neqo-crypto" } log = {version = "0.4.0", default-features = false} static_assertions = "1.1.0" -qlog = "0.9.0" +qlog = "0.10.0" lazy_static = "1.3.0" [dev-dependencies] diff --git 
a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 1aa0798aca..d3b0dd9615 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -17,7 +17,7 @@ regex = "1" mio = "0.6.17" mio-extras = "2.0.5" log = {version = "0.4.0", default-features = false} -qlog = "0.9.0" +qlog = "0.10.0" [features] deny-warnings = [] diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 6ae4455ac9..6cd19a1955 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -12,7 +12,7 @@ neqo-common = { path = "../neqo-common" } lazy_static = "1.3.0" log = {version = "0.4.0", default-features = false} smallvec = "1.0.0" -qlog = "0.9.0" +qlog = "0.10.0" indexmap = "1.0" [dev-dependencies] diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index dce27732b0..a3ce45ea57 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -136,7 +136,7 @@ pub fn packet_sent( ) { qlog.add_event_with_stream(|stream| { let mut d = Decoder::from(body); - let header = PacketHeader::with_type(to_qlog_pkt_type(pt), pn, None, None, None); + let header = PacketHeader::with_type(to_qlog_pkt_type(pt), Some(pn), None, None, None); let raw = RawInfo { length: None, payload_length: Some(plen as u64), @@ -175,9 +175,13 @@ pub fn packet_sent( pub fn packet_dropped(qlog: &mut NeqoQlog, payload: &PublicPacket) { qlog.add_event_data(|| { - // TODO: packet number is optional in the spec but qlog crate doesn't support that, so use a placeholder value of 0 - let header = - PacketHeader::with_type(to_qlog_pkt_type(payload.packet_type()), 0, None, None, None); + let header = PacketHeader::with_type( + to_qlog_pkt_type(payload.packet_type()), + None, + None, + None, + None, + ); let raw = RawInfo { length: None, payload_length: Some(payload.len() as u64), @@ -200,7 +204,7 @@ pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { qlog.add_event_with_stream(|stream| { for pkt in pkts { let header = - 
PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), pkt.pn, None, None, None); + PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), Some(pkt.pn), None, None, None); let ev_data = EventData::PacketLost(PacketLost { header: Some(header), @@ -224,7 +228,7 @@ pub fn packet_received( let header = PacketHeader::with_type( to_qlog_pkt_type(public_packet.packet_type()), - payload.pn(), + Some(payload.pn()), None, None, None, From ac37e742c4f35e5ba94551245d958468c3c2c5f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 00:51:44 +0000 Subject: [PATCH 028/321] Update bindgen requirement from 0.64 to 0.69 Updates the requirements on [bindgen](https://github.com/rust-lang/rust-bindgen) to permit the latest version. - [Release notes](https://github.com/rust-lang/rust-bindgen/releases) - [Changelog](https://github.com/rust-lang/rust-bindgen/blob/main/CHANGELOG.md) - [Commits](https://github.com/rust-lang/rust-bindgen/compare/v0.64.0...v0.69.1) --- updated-dependencies: - dependency-name: bindgen dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- neqo-crypto/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 8129ad3e20..3d54ea0dea 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -12,7 +12,7 @@ neqo-common = { path = "../neqo-common" } log = {version = "0.4.0", default-features = false} [build-dependencies] -bindgen = {version = "0.64", default-features = false, features= ["runtime"]} +bindgen = {version = "0.69", default-features = false, features= ["runtime"]} serde = "1.0" serde_derive = "1.0" toml = "0.5" From 5422767bf81e966f2ab4f29b83b5653cb254ee08 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Dec 2023 10:48:51 +0200 Subject: [PATCH 029/321] Expand the CI matrix and generate coverage information (#1500) * Expand the CI matrix and generate coverage information. 
TODO: Support Windows. * Undo modification to neqo-common/src/hrtime.rs * Remove debug commands * Minimize diff * Update .github/workflows/check.yml Co-authored-by: Martin Thomson * Fixes * A lot of fixes * Run tests and coverage at the same time. Set Windows DLL path. * Re-enable stable --------- Co-authored-by: Martin Thomson --- .github/workflows/check.yml | 146 ++++++++++++++++++++---------------- neqo-crypto/build.rs | 10 ++- 2 files changed, 90 insertions(+), 66 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index bc545a6a91..748b6b55c2 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -5,72 +5,67 @@ env: jobs: check: - name: Neqo Build and Test - runs-on: ubuntu-22.04 - env: - RUSTFLAGS: -C link-arg=-fuse-ld=lld + name: Build & test strategy: + fail-fast: false matrix: - rust-toolchain: [stable, 1.65.0] + os: [ubuntu-latest, macos-latest, windows-latest] + # TODO: add beta (and nightly?) but don't fail the test if they fail + rust-toolchain: [1.65.0, stable] + runs-on: ${{ matrix.os }} + defaults: + run: + shell: bash steps: - - name: Install Packages + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust-toolchain }} + components: rustfmt, clippy + + - name: Install dependencies (Linux) + if: runner.os == 'Linux' env: DEBIAN_FRONTEND: noninteractive - llvm_version: 14 run: | - sudo apt-get update - sudo apt-get install -y \ - build-essential \ - clang \ - coreutils \ - curl \ - git \ - gyp \ - libclang-dev \ - libssl-dev \ - lld \ - llvm-"$llvm_version" \ - llvm-"$llvm_version"-runtime \ - make \ - mercurial \ - ninja-build \ - pkg-config \ - ssh \ - zlib1g-dev \ - --no-install-recommends - [ -d "/usr/lib/llvm-$llvm_version/lib" ] - echo "LIBCLANG_DIR=/usr/lib/llvm-$llvm_version/lib" >> "$GITHUB_ENV" - - # Rust installation cribbed from Dockerfiles at https://github.com/rust-lang/docker-rust - - name: Install Rust + sudo apt-get install -y 
--no-install-recommends gyp mercurial ninja-build + curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + + - name: Install dependencies (MacOS) + if: runner.os == 'MacOS' run: | - export RUSTUP_HOME=~/.rustup - export CARGO_HOME=~/.cargo - rustup_dir=`mktemp -d /tmp/rustup-XXXXX` - mkdir -p "$rustup_dir" - rustup_init="$rustup_dir/rustup-init" - rustup_url="https://static.rust-lang.org/rustup/archive/$rustup_version/$rustup_host/rustup-init" - curl -SsLf "$rustup_url" -o "$rustup_init" - echo "${rustup_hash} $rustup_init" | sha256sum -c - - chmod +x "$rustup_init" - "$rustup_init" -y -q --no-modify-path --profile minimal \ - --default-toolchain ${{ matrix.rust-toolchain }} \ - --default-host "$rustup_host" \ - --component clippy --component rustfmt - rm -rf "$rustup_dir" - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" - "$CARGO_HOME/bin/rustup" default ${{ matrix.rust-toolchain }} - echo "RUSTUP_HOME=$RUSTUP_HOME" >> "$GITHUB_ENV" - echo "CARGO_HOME=$CARGO_HOME" >> "$GITHUB_ENV" - echo "$CARGO_HOME/bin" >> "$GITHUB_PATH" - env: - rustup_version: 1.26.0 - rustup_host: x86_64-unknown-linux-gnu - rustup_hash: 0b2f6c8f85a3d02fde2efc0ced4657869d73fccfce59defb4e8d29233116e6db + brew install ninja mercurial cargo-binstall + # python3 -m pip install gyp-next + # Above does not work, since pypi only has gyp 0.15.0, which is too old + # for the homebrew python3. Install from source instead. 
+ python3 -m pip install git+https://github.com/nodejs/gyp-next + python3 -m pip install packaging + echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" + + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: Set-ExecutionPolicy Unrestricted -Scope Process; iex (iwr "https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.ps1").Content + + - name: Use MSYS2 environment and install more dependencies (Windows) + if: runner.os == 'Windows' + run: | + echo "C:\\msys64\\usr\\bin" >> "$GITHUB_PATH" + echo "C:\\msys64\\mingw64\\bin" >> "$GITHUB_PATH" + /c/msys64/usr/bin/pacman -S --noconfirm nsinstall + python3 -m pip install git+https://github.com/nodejs/gyp-next + echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" + + - name: Set up Windows build environment + if: runner.os == 'Windows' + uses: ilammy/msvc-dev-cmd@v1 + + - name: Install Rust tools + run: cargo +${{ matrix.rust-toolchain }} binstall --no-confirm cargo-llvm-cov cargo-nextest - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 # This step might be removed if the distro included a recent enough # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. 
@@ -82,24 +77,47 @@ jobs: git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" + echo "NSS_JOBS=$NUMBER_OF_PROCESSORS" >> "$GITHUB_ENV" env: NSS_DIR: ${{ github.workspace }}/nss NSPR_DIR: ${{ github.workspace }}/nspr + - name: Set up environment for NSS build.sh (Windows) + if: runner.os == 'Windows' + run: | + echo "GYP_MSVS_OVERRIDE_PATH=$VSINSTALLDIR" >> "$GITHUB_ENV" + echo "GYP_MSVS_VERSION=2022" >> "$GITHUB_ENV" + echo "BASH=$SHELL" >> "$GITHUB_ENV" + # See https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash + rm /usr/bin/link.exe + - name: Build - run: cargo +${{ matrix.rust-toolchain }} build -v --tests + run: | + cargo +${{ matrix.rust-toolchain }} build -v --all-targets + echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" + echo "DYLD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" + echo "${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_PATH" + env: + RUST_BACKTRACE: 1 - - name: Run Tests - run: cargo +${{ matrix.rust-toolchain }} test -v + - name: Run tests and determine coverage + run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --all-targets --no-fail-fast --lcov --output-path lcov.info env: - LD_LIBRARY_PATH: ${{ github.workspace }}/dist/Debug/lib - RUST_BACKTRACE: 1 - RUST_LOG: neqo=debug + RUST_BACKTRACE: 1 + RUST_LOG: neqo=debug - name: Check formatting run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check - if: ${{ success() || failure() }} + if: success() || failure() - name: Clippy run: cargo +${{ matrix.rust-toolchain }} clippy -v --tests -- -D warnings - if: ${{ success() || failure() }} + if: success() || failure() + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + file: lcov.info + fail_ci_if_error: true + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index 
e19f197710..c462b3db19 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -60,7 +60,8 @@ fn is_debug() -> bool { // On windows, this doesn't just work, you have to set LIBCLANG_PATH. // Rather than download the 400Mb+ files, like gecko does, let's just reuse their work. fn setup_clang() { - if env::consts::OS != "windows" { + // If this isn't Windows, or we're in CI, then we don't need to do anything. + if env::consts::OS != "windows" || env::var("GITHUB_WORKFLOW").unwrap() == "CI" { return; } println!("rerun-if-env-changed=LIBCLANG_PATH"); @@ -130,6 +131,11 @@ fn nss_dir() -> PathBuf { } fn get_bash() -> PathBuf { + // If BASH is set, use that. + if let Ok(bash) = env::var("BASH") { + return PathBuf::from(bash); + } + // When running under MOZILLABUILD, we need to make sure not to invoke // another instance of bash that might be sitting around (like WSL). match env::var("MOZILLABUILD") { @@ -257,7 +263,7 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool builder = builder.clang_arg("-DANDROID"); } if bindings.cplusplus { - builder = builder.clang_args(&["-x", "c++", "-std=c++11"]); + builder = builder.clang_args(&["-x", "c++", "-std=c++14"]); } } From 88a203f3608fa73ec911a56d54b4d4ef486820c0 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Dec 2023 23:55:17 +0200 Subject: [PATCH 030/321] Don't run twice on pushes to PRs (#1513) * Don't run twice on pushes to PRs Also ignore changes to some paths/files that don't need to trigger CI. 
* Force a push to check that CI runs once (again) --- .github/workflows/check.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 748b6b55c2..5524942767 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -1,5 +1,11 @@ name: CI -on: [push, pull_request] +on: + push: + branches: [ "main" ] + paths-ignore: [ "*.md", "*.png", "*.svg", "LICENSE-*" ] + pull_request: + branches: [ "main" ] + paths-ignore: [ "*.md", "*.png", "*.svg", "LICENSE-*" ] env: CARGO_TERM_COLOR: always From 0853d9d8c977b248d1e261804d0f822bc02f40b8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 13 Dec 2023 00:01:40 +0200 Subject: [PATCH 031/321] Fix clippy issues on MacOS and Windows (#1512) * Fix clippy issues on MacOS and Windows I.e. the ones found via the new expanded CI matrix. * Fix for Windows * More fixes --- neqo-common/src/hrtime.rs | 24 +++++++++++++----------- neqo-crypto/src/ext.rs | 4 ++-- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index 2ac0a08cdd..3b9703e073 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -27,12 +27,12 @@ impl Period { const MIN: Period = Period(1); #[cfg(windows)] - fn as_uint(&self) -> UINT { + fn as_uint(self) -> UINT { UINT::from(self.0) } #[cfg(target_os = "macos")] - fn scaled(&self, scale: f64) -> f64 { + fn scaled(self, scale: f64) -> f64 { scale * f64::from(self.0) } } @@ -119,13 +119,15 @@ mod mac { #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct thread_time_constraint_policy { - period: u32, - computation: u32, - constraint: u32, + period: f64, + computation: f64, + constraint: f64, preemptible: boolean_t, } const THREAD_TIME_CONSTRAINT_POLICY: thread_policy_flavor_t = 2; + + #[allow(clippy::cast_possible_truncation)] const THREAD_TIME_CONSTRAINT_POLICY_COUNT: mach_msg_type_number_t = (size_of::() / size_of::()) as 
mach_msg_type_number_t; @@ -163,7 +165,7 @@ mod mac { thread_policy_set( pthread_mach_thread_np(pthread_self()), THREAD_TIME_CONSTRAINT_POLICY, - addr_of_mut!(policy) as _, // horror! + addr_of_mut!(policy).cast(), // horror! THREAD_TIME_CONSTRAINT_POLICY_COUNT, ) }; @@ -181,9 +183,9 @@ mod mac { /// Create a realtime policy and set it. pub fn set_realtime(base: f64) { let policy = thread_time_constraint_policy { - period: base as u32, // Base interval - computation: (base * 0.5) as u32, - constraint: (base * 1.0) as u32, + period: base, // Base interval + computation: base * 0.5, + constraint: base, preemptible: 1, }; set_thread_policy(policy); @@ -198,7 +200,7 @@ mod mac { thread_policy_get( pthread_mach_thread_np(pthread_self()), THREAD_TIME_CONSTRAINT_POLICY, - addr_of_mut!(policy) as _, // horror! + addr_of_mut!(policy).cast(), // horror! &mut count, &mut get_default, ) @@ -292,7 +294,7 @@ impl Time { if let Some(p) = self.active { mac::set_realtime(p.scaled(self.scale)); } else { - mac::set_thread_policy(self.deflt.clone()); + mac::set_thread_policy(self.deflt); } } diff --git a/neqo-crypto/src/ext.rs b/neqo-crypto/src/ext.rs index aa89677b98..010b9f120e 100644 --- a/neqo-crypto/src/ext.rs +++ b/neqo-crypto/src/ext.rs @@ -74,7 +74,7 @@ impl ExtensionTracker { f(&mut *rc.borrow_mut()) } - #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] unsafe extern "C" fn extension_writer( _fd: *mut PRFileDesc, message: SSLHandshakeType::Type, @@ -105,7 +105,7 @@ impl ExtensionTracker { arg: *mut c_void, ) -> SECStatus { let d = std::slice::from_raw_parts(data, len as usize); - #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] Self::wrap_handler_call(arg, |handler| { // Cast is safe here because the message type is always part of the enum match handler.handle(message as HandshakeMessage, d) { From 33f2c2311e8f5ce9c3facfe122ed1fcee962fbbc Mon Sep 17 
00:00:00 2001 From: Lars Eggert Date: Thu, 14 Dec 2023 23:28:03 +0200 Subject: [PATCH 032/321] Don't fail CI if Codecov upload fails (#1518) Codecov seems flaky often. --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 5524942767..76de5286fb 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -124,6 +124,6 @@ jobs: uses: codecov/codecov-action@v3 with: file: lcov.info - fail_ci_if_error: true + fail_ci_if_error: false env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} From d1825f421db059c227cf628f27ecf04ffe1b48c9 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 14 Dec 2023 23:31:40 +0200 Subject: [PATCH 033/321] Implement the final RFC9369 version of QUIC v2 (#1515) * Implement the final RFC9369 version of QUIC v2 * Renumber and rename the v2 VERSION_INFORMATION transport parameter. * Update test vectors --- neqo-transport/src/connection/tests/vn.rs | 6 +- neqo-transport/src/packet/mod.rs | 8 +- neqo-transport/src/tparams.rs | 29 ++--- neqo-transport/src/version.rs | 24 ++-- neqo-transport/tests/conn_vectors.rs | 150 +++++++++++----------- 5 files changed, 107 insertions(+), 110 deletions(-) diff --git a/neqo-transport/src/connection/tests/vn.rs b/neqo-transport/src/connection/tests/vn.rs index 416128f74e..6f8bd15614 100644 --- a/neqo-transport/src/connection/tests/vn.rs +++ b/neqo-transport/src/connection/tests/vn.rs @@ -358,7 +358,7 @@ fn invalid_current_version_client() { assert_ne!(OTHER_VERSION, client.version()); client .set_local_tparam( - tparams::VERSION_NEGOTIATION, + tparams::VERSION_INFORMATION, TransportParameter::Versions { current: OTHER_VERSION.wire_version(), other: Version::all() @@ -394,7 +394,7 @@ fn invalid_current_version_server() { assert!(!Version::default().is_compatible(OTHER_VERSION)); server .set_local_tparam( - tparams::VERSION_NEGOTIATION, + tparams::VERSION_INFORMATION, 
TransportParameter::Versions { current: OTHER_VERSION.wire_version(), other: vec![OTHER_VERSION.wire_version()], @@ -420,7 +420,7 @@ fn no_compatible_version() { assert_ne!(OTHER_VERSION, client.version()); client .set_local_tparam( - tparams::VERSION_NEGOTIATION, + tparams::VERSION_INFORMATION, TransportParameter::Versions { current: Version::default().wire_version(), other: vec![OTHER_VERSION.wire_version()], diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 631bf84795..98fadc1cd3 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -1170,9 +1170,9 @@ mod tests { } const SAMPLE_RETRY_V2: &[u8] = &[ - 0xcf, 0x70, 0x9a, 0x50, 0xc4, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x1d, 0xc7, 0x11, 0x30, 0xcd, 0x1e, 0xd3, 0x9d, 0x6e, 0xfc, - 0xee, 0x5c, 0x85, 0x80, 0x65, 0x01, + 0xcf, 0x6b, 0x33, 0x43, 0xcf, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0xc8, 0x64, 0x6c, 0xe8, 0xbf, 0xe3, 0x39, 0x52, 0xd9, 0x55, + 0x54, 0x36, 0x65, 0xdc, 0xc7, 0xb6, ]; const SAMPLE_RETRY_V1: &[u8] = &[ @@ -1353,7 +1353,7 @@ mod tests { const SAMPLE_VN: &[u8] = &[ 0x80, 0x00, 0x00, 0x00, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5, 0x08, - 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x70, 0x9a, 0x50, 0xc4, 0x00, 0x00, 0x00, + 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x6b, 0x33, 0x43, 0xcf, 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x20, 0xff, 0x00, 0x00, 0x1f, 0xff, 0x00, 0x00, 0x1e, 0xff, 0x00, 0x00, 0x1d, 0x0a, 0x0a, 0x0a, 0x0a, ]; diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 82efa8db55..df2f1b34c4 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -55,10 +55,10 @@ tpids! 
{ ACTIVE_CONNECTION_ID_LIMIT = 0x0e, INITIAL_SOURCE_CONNECTION_ID = 0x0f, RETRY_SOURCE_CONNECTION_ID = 0x10, + VERSION_INFORMATION = 0x11, GREASE_QUIC_BIT = 0x2ab2, MIN_ACK_DELAY = 0xff02_de1a, MAX_DATAGRAM_FRAME_SIZE = 0x0020, - VERSION_NEGOTIATION = 0xff73db, } #[derive(Clone, Debug)] @@ -298,7 +298,7 @@ impl TransportParameter { _ => return Err(Error::TransportParameterError), }, - VERSION_NEGOTIATION => Self::decode_versions(&mut d)?, + VERSION_INFORMATION => Self::decode_versions(&mut d)?, // Skip. _ => return Ok(None), @@ -450,7 +450,7 @@ impl TransportParameters { } let current = versions.initial().wire_version(); self.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, TransportParameter::Versions { current, other }, ); } @@ -458,7 +458,7 @@ impl TransportParameters { fn compatible_upgrade(&mut self, v: Version) { if let Some(TransportParameter::Versions { ref mut current, .. - }) = self.params.get_mut(&VERSION_NEGOTIATION) + }) = self.params.get_mut(&VERSION_INFORMATION) { *current = v.wire_version(); } else { @@ -543,7 +543,7 @@ impl TransportParameters { #[must_use] pub fn get_versions(&self) -> Option<(WireVersion, &[WireVersion])> { if let Some(TransportParameter::Versions { current, other }) = - self.params.get(&VERSION_NEGOTIATION) + self.params.get(&VERSION_INFORMATION) { Some((*current, other)) } else { @@ -1043,8 +1043,7 @@ mod tests { #[test] fn versions_encode_decode() { const ENCODED: &[u8] = &[ - 0x80, 0xff, 0x73, 0xdb, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x1a, 0x2a, 0x3a, 0x4a, 0x5a, - 0x6a, 0x7a, 0x8a, + 0x11, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x1a, 0x2a, 0x3a, 0x4a, 0x5a, 0x6a, 0x7a, 0x8a, ]; let vn = TransportParameter::Versions { current: Version::Version1.wire_version(), @@ -1052,12 +1051,12 @@ mod tests { }; let mut enc = Encoder::new(); - vn.encode(&mut enc, VERSION_NEGOTIATION); + vn.encode(&mut enc, VERSION_INFORMATION); assert_eq!(enc.as_ref(), ENCODED); let mut dec = enc.as_decoder(); let (id, decoded) = TransportParameter::decode(&mut 
dec).unwrap().unwrap(); - assert_eq!(id, VERSION_NEGOTIATION); + assert_eq!(id, VERSION_INFORMATION); assert_eq!(decoded, vn); } @@ -1076,10 +1075,8 @@ mod tests { #[test] fn versions_zero() { - const ZERO1: &[u8] = &[0x80, 0xff, 0x73, 0xdb, 0x04, 0x00, 0x00, 0x00, 0x00]; - const ZERO2: &[u8] = &[ - 0x80, 0xff, 0x73, 0xdb, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - ]; + const ZERO1: &[u8] = &[0x11, 0x04, 0x00, 0x00, 0x00, 0x00]; + const ZERO2: &[u8] = &[0x11, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00]; let mut dec = Decoder::from(&ZERO1); assert_eq!( @@ -1097,7 +1094,7 @@ mod tests { fn versions_equal_0rtt() { let mut current = TransportParameters::default(); current.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, TransportParameter::Versions { current: Version::Version1.wire_version(), other: vec![0x1a2a_3a4a], @@ -1112,7 +1109,7 @@ mod tests { // If the version matches, it's OK to use 0-RTT. remembered.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, TransportParameter::Versions { current: Version::Version1.wire_version(), other: vec![0x5a6a_7a8a, 0x9aaa_baca], @@ -1123,7 +1120,7 @@ mod tests { // An apparent "upgrade" is still cause to reject 0-RTT. 
remembered.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, TransportParameter::Versions { current: Version::Version1.wire_version() + 1, other: vec![], diff --git a/neqo-transport/src/version.rs b/neqo-transport/src/version.rs index 71a1d7a8e6..b628ba2769 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -23,7 +23,7 @@ pub enum Version { impl Version { pub const fn wire_version(self) -> WireVersion { match self { - Self::Version2 => 0x709a50c4, + Self::Version2 => 0x6b3343cf, Self::Version1 => 1, Self::Draft29 => 0xff00_0000 + 29, Self::Draft30 => 0xff00_0000 + 30, @@ -34,8 +34,8 @@ impl Version { pub(crate) fn initial_salt(self) -> &'static [u8] { const INITIAL_SALT_V2: &[u8] = &[ - 0xa7, 0x07, 0xc2, 0x03, 0xa5, 0x9b, 0x47, 0x18, 0x4a, 0x1d, 0x62, 0xca, 0x57, 0x04, - 0x06, 0xea, 0x7a, 0xe3, 0xe5, 0xd3, + 0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb, 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, + 0x9d, 0xcb, 0xf9, 0xbd, 0x2e, 0xd9, ]; const INITIAL_SALT_V1: &[u8] = &[ 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, @@ -62,20 +62,20 @@ impl Version { } pub(crate) fn retry_secret(self) -> &'static [u8] { - const RETRY_SECRET_29: &[u8] = &[ - 0x8b, 0x0d, 0x37, 0xeb, 0x85, 0x35, 0x02, 0x2e, 0xbc, 0x8d, 0x76, 0xa2, 0x07, 0xd8, - 0x0d, 0xf2, 0x26, 0x46, 0xec, 0x06, 0xdc, 0x80, 0x96, 0x42, 0xc3, 0x0a, 0x8b, 0xaa, - 0x2b, 0xaa, 0xff, 0x4c, + const RETRY_SECRET_V2: &[u8] = &[ + 0xc4, 0xdd, 0x24, 0x84, 0xd6, 0x81, 0xae, 0xfa, 0x4f, 0xf4, 0xd6, 0x9c, 0x2c, 0x20, + 0x29, 0x99, 0x84, 0xa7, 0x65, 0xa5, 0xd3, 0xc3, 0x19, 0x82, 0xf3, 0x8f, 0xc7, 0x41, + 0x62, 0x15, 0x5e, 0x9f, ]; const RETRY_SECRET_V1: &[u8] = &[ 0xd9, 0xc9, 0x94, 0x3e, 0x61, 0x01, 0xfd, 0x20, 0x00, 0x21, 0x50, 0x6b, 0xcc, 0x02, 0x81, 0x4c, 0x73, 0x03, 0x0f, 0x25, 0xc7, 0x9d, 0x71, 0xce, 0x87, 0x6e, 0xca, 0x87, 0x6e, 0x6f, 0xca, 0x8e, ]; - const RETRY_SECRET_V2: &[u8] = &[ - 0x34, 0x25, 0xc2, 0x0c, 0xf8, 0x87, 0x79, 0xdf, 0x2f, 0xf7, 0x1e, 0x8a, 0xbf, 
0xa7, - 0x82, 0x49, 0x89, 0x1e, 0x76, 0x3b, 0xbe, 0xd2, 0xf1, 0x3c, 0x04, 0x83, 0x43, 0xd3, - 0x48, 0xc0, 0x60, 0xe2, + const RETRY_SECRET_29: &[u8] = &[ + 0x8b, 0x0d, 0x37, 0xeb, 0x85, 0x35, 0x02, 0x2e, 0xbc, 0x8d, 0x76, 0xa2, 0x07, 0xd8, + 0x0d, 0xf2, 0x26, 0x46, 0xec, 0x06, 0xdc, 0x80, 0x96, 0x42, 0xc3, 0x0a, 0x8b, 0xaa, + 0x2b, 0xaa, 0xff, 0x4c, ]; match self { Self::Version2 => RETRY_SECRET_V2, @@ -131,7 +131,7 @@ impl TryFrom for Version { fn try_from(wire: WireVersion) -> Res { if wire == 1 { Ok(Self::Version1) - } else if wire == 0x709a50c4 { + } else if wire == 0x6b3343cf { Ok(Self::Version2) } else if wire == 0xff00_0000 + 29 { Ok(Self::Draft29) diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index 83de136d91..3dcebde168 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -18,81 +18,81 @@ use std::cell::RefCell; use std::rc::Rc; const INITIAL_PACKET_V2: &[u8] = &[ - 0xdd, 0x70, 0x9a, 0x50, 0xc4, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00, - 0x44, 0x9e, 0x43, 0x91, 0xd8, 0x48, 0x23, 0xb8, 0xe6, 0x10, 0x58, 0x9c, 0x83, 0xc9, 0x2d, 0x0e, - 0x97, 0xeb, 0x7a, 0x6e, 0x50, 0x03, 0xf5, 0x77, 0x64, 0xc5, 0xc7, 0xf0, 0x09, 0x5b, 0xa5, 0x4b, - 0x90, 0x81, 0x8f, 0x1b, 0xfe, 0xec, 0xc1, 0xc9, 0x7c, 0x54, 0xfc, 0x73, 0x1e, 0xdb, 0xd2, 0xa2, - 0x44, 0xe3, 0xb1, 0xe6, 0x39, 0xa9, 0xbc, 0x75, 0xed, 0x54, 0x5b, 0x98, 0x64, 0x93, 0x43, 0xb2, - 0x53, 0x61, 0x5e, 0xc6, 0xb3, 0xe4, 0xdf, 0x0f, 0xd2, 0xe7, 0xfe, 0x9d, 0x69, 0x1a, 0x09, 0xe6, - 0xa1, 0x44, 0xb4, 0x36, 0xd8, 0xa2, 0xc0, 0x88, 0xa4, 0x04, 0x26, 0x23, 0x40, 0xdf, 0xd9, 0x95, - 0xec, 0x38, 0x65, 0x69, 0x4e, 0x30, 0x26, 0xec, 0xd8, 0xc6, 0xd2, 0x56, 0x1a, 0x5a, 0x36, 0x67, - 0x2a, 0x10, 0x05, 0x01, 0x81, 0x68, 0xc0, 0xf0, 0x81, 0xc1, 0x0e, 0x2b, 0xf1, 0x4d, 0x55, 0x0c, - 0x97, 0x7e, 0x28, 0xbb, 0x9a, 0x75, 0x9c, 0x57, 0xd0, 0xf7, 0xff, 0xb1, 0xcd, 0xfb, 0x40, 0xbd, - 0x77, 0x4d, 0xec, 0x58, 0x96, 0x57, 0x54, 
0x20, 0x47, 0xdf, 0xfe, 0xfa, 0x56, 0xfc, 0x80, 0x89, - 0xa4, 0xd1, 0xef, 0x37, 0x9c, 0x81, 0xba, 0x3d, 0xf7, 0x1a, 0x05, 0xdd, 0xc7, 0x92, 0x83, 0x40, - 0x77, 0x59, 0x10, 0xfe, 0xb3, 0xce, 0x4c, 0xbc, 0xfd, 0x8d, 0x25, 0x3e, 0xdd, 0x05, 0xf1, 0x61, - 0x45, 0x8f, 0x9d, 0xc4, 0x4b, 0xea, 0x01, 0x7c, 0x31, 0x17, 0xcc, 0xa7, 0x06, 0x5a, 0x31, 0x5d, - 0xed, 0xa9, 0x46, 0x4e, 0x67, 0x2e, 0xc8, 0x0c, 0x3f, 0x79, 0xac, 0x99, 0x34, 0x37, 0xb4, 0x41, - 0xef, 0x74, 0x22, 0x7e, 0xcc, 0x4d, 0xc9, 0xd5, 0x97, 0xf6, 0x6a, 0xb0, 0xab, 0x8d, 0x21, 0x4b, - 0x55, 0x84, 0x0c, 0x70, 0x34, 0x9d, 0x76, 0x16, 0xcb, 0xe3, 0x8e, 0x5e, 0x1d, 0x05, 0x2d, 0x07, - 0xf1, 0xfe, 0xdb, 0x3d, 0xd3, 0xc4, 0xd8, 0xce, 0x29, 0x57, 0x24, 0x94, 0x5e, 0x67, 0xed, 0x2e, - 0xef, 0xcd, 0x9f, 0xb5, 0x24, 0x72, 0x38, 0x7f, 0x31, 0x8e, 0x3d, 0x9d, 0x23, 0x3b, 0xe7, 0xdf, - 0xc7, 0x9d, 0x6b, 0xf6, 0x08, 0x0d, 0xcb, 0xbb, 0x41, 0xfe, 0xb1, 0x80, 0xd7, 0x85, 0x88, 0x49, - 0x7c, 0x3e, 0x43, 0x9d, 0x38, 0xc3, 0x34, 0x74, 0x8d, 0x2b, 0x56, 0xfd, 0x19, 0xab, 0x36, 0x4d, - 0x05, 0x7a, 0x9b, 0xd5, 0xa6, 0x99, 0xae, 0x14, 0x5d, 0x7f, 0xdb, 0xc8, 0xf5, 0x77, 0x75, 0x18, - 0x1b, 0x0a, 0x97, 0xc3, 0xbd, 0xed, 0xc9, 0x1a, 0x55, 0x5d, 0x6c, 0x9b, 0x86, 0x34, 0xe1, 0x06, - 0xd8, 0xc9, 0xca, 0x45, 0xa9, 0xd5, 0x45, 0x0a, 0x76, 0x79, 0xed, 0xc5, 0x45, 0xda, 0x91, 0x02, - 0x5b, 0xc9, 0x3a, 0x7c, 0xf9, 0xa0, 0x23, 0xa0, 0x66, 0xff, 0xad, 0xb9, 0x71, 0x7f, 0xfa, 0xf3, - 0x41, 0x4c, 0x3b, 0x64, 0x6b, 0x57, 0x38, 0xb3, 0xcc, 0x41, 0x16, 0x50, 0x2d, 0x18, 0xd7, 0x9d, - 0x82, 0x27, 0x43, 0x63, 0x06, 0xd9, 0xb2, 0xb3, 0xaf, 0xc6, 0xc7, 0x85, 0xce, 0x3c, 0x81, 0x7f, - 0xeb, 0x70, 0x3a, 0x42, 0xb9, 0xc8, 0x3b, 0x59, 0xf0, 0xdc, 0xef, 0x12, 0x45, 0xd0, 0xb3, 0xe4, - 0x02, 0x99, 0x82, 0x1e, 0xc1, 0x95, 0x49, 0xce, 0x48, 0x97, 0x14, 0xfe, 0x26, 0x11, 0xe7, 0x2c, - 0xd8, 0x82, 0xf4, 0xf7, 0x0d, 0xce, 0x7d, 0x36, 0x71, 0x29, 0x6f, 0xc0, 0x45, 0xaf, 0x5c, 0x9f, - 0x63, 0x0d, 0x7b, 0x49, 0xa3, 0xeb, 0x82, 0x1b, 0xbc, 0xa6, 0x0f, 0x19, 0x84, 
0xdc, 0xe6, 0x64, - 0x91, 0x71, 0x3b, 0xfe, 0x06, 0x00, 0x1a, 0x56, 0xf5, 0x1b, 0xb3, 0xab, 0xe9, 0x2f, 0x79, 0x60, - 0x54, 0x7c, 0x4d, 0x0a, 0x70, 0xf4, 0xa9, 0x62, 0xb3, 0xf0, 0x5d, 0xc2, 0x5a, 0x34, 0xbb, 0xe8, - 0x30, 0xa7, 0xea, 0x47, 0x36, 0xd3, 0xb0, 0x16, 0x17, 0x23, 0x50, 0x0d, 0x82, 0xbe, 0xda, 0x9b, - 0xe3, 0x32, 0x7a, 0xf2, 0xaa, 0x41, 0x38, 0x21, 0xff, 0x67, 0x8b, 0x2a, 0x87, 0x6e, 0xc4, 0xb0, - 0x0b, 0xb6, 0x05, 0xff, 0xcc, 0x39, 0x17, 0xff, 0xdc, 0x27, 0x9f, 0x18, 0x7d, 0xaa, 0x2f, 0xce, - 0x8c, 0xde, 0x12, 0x19, 0x80, 0xbb, 0xa8, 0xec, 0x8f, 0x44, 0xca, 0x56, 0x2b, 0x0f, 0x13, 0x19, - 0x14, 0xc9, 0x01, 0xcf, 0xbd, 0x84, 0x74, 0x08, 0xb7, 0x78, 0xe6, 0x73, 0x8c, 0x7b, 0xb5, 0xb1, - 0xb3, 0xf9, 0x7d, 0x01, 0xb0, 0xa2, 0x4d, 0xcc, 0xa4, 0x0e, 0x3b, 0xed, 0x29, 0x41, 0x1b, 0x1b, - 0xa8, 0xf6, 0x08, 0x43, 0xc4, 0xa2, 0x41, 0x02, 0x1b, 0x23, 0x13, 0x2b, 0x95, 0x00, 0x50, 0x9b, - 0x9a, 0x35, 0x16, 0xd4, 0xa9, 0xdd, 0x41, 0xd3, 0xba, 0xcb, 0xcd, 0x42, 0x6b, 0x45, 0x13, 0x93, - 0x52, 0x18, 0x28, 0xaf, 0xed, 0xcf, 0x20, 0xfa, 0x46, 0xac, 0x24, 0xf4, 0x4a, 0x8e, 0x29, 0x73, - 0x30, 0xb1, 0x67, 0x05, 0xd5, 0xd5, 0xf7, 0x98, 0xef, 0xf9, 0xe9, 0x13, 0x4a, 0x06, 0x59, 0x79, - 0x87, 0xa1, 0xdb, 0x46, 0x17, 0xca, 0xa2, 0xd9, 0x38, 0x37, 0x73, 0x08, 0x29, 0xd4, 0xd8, 0x9e, - 0x16, 0x41, 0x3b, 0xe4, 0xd8, 0xa8, 0xa3, 0x8a, 0x7e, 0x62, 0x26, 0x62, 0x3b, 0x64, 0xa8, 0x20, - 0x17, 0x8e, 0xc3, 0xa6, 0x69, 0x54, 0xe1, 0x07, 0x10, 0xe0, 0x43, 0xae, 0x73, 0xdd, 0x3f, 0xb2, - 0x71, 0x5a, 0x05, 0x25, 0xa4, 0x63, 0x43, 0xfb, 0x75, 0x90, 0xe5, 0xea, 0xc7, 0xee, 0x55, 0xfc, - 0x81, 0x0e, 0x0d, 0x8b, 0x4b, 0x8f, 0x7b, 0xe8, 0x2c, 0xd5, 0xa2, 0x14, 0x57, 0x5a, 0x1b, 0x99, - 0x62, 0x9d, 0x47, 0xa9, 0xb2, 0x81, 0xb6, 0x13, 0x48, 0xc8, 0x62, 0x7c, 0xab, 0x38, 0xe2, 0xa6, - 0x4d, 0xb6, 0x62, 0x6e, 0x97, 0xbb, 0x8f, 0x77, 0xbd, 0xcb, 0x0f, 0xee, 0x47, 0x6a, 0xed, 0xd7, - 0xba, 0x8f, 0x54, 0x41, 0xac, 0xaa, 0xb0, 0x0f, 0x44, 0x32, 0xed, 0xab, 0x37, 0x91, 0x04, 0x7d, - 0x90, 0x91, 0xb2, 
0xa7, 0x53, 0xf0, 0x35, 0x64, 0x84, 0x31, 0xf6, 0xd1, 0x2f, 0x7d, 0x6a, 0x68, - 0x1e, 0x64, 0xc8, 0x61, 0xf4, 0xac, 0x91, 0x1a, 0x0f, 0x7d, 0x6e, 0xc0, 0x49, 0x1a, 0x78, 0xc9, - 0xf1, 0x92, 0xf9, 0x6b, 0x3a, 0x5e, 0x75, 0x60, 0xa3, 0xf0, 0x56, 0xbc, 0x1c, 0xa8, 0x59, 0x83, - 0x67, 0xad, 0x6a, 0xcb, 0x6f, 0x2e, 0x03, 0x4c, 0x7f, 0x37, 0xbe, 0xeb, 0x9e, 0xd4, 0x70, 0xc4, - 0x30, 0x4a, 0xf0, 0x10, 0x7f, 0x0e, 0xb9, 0x19, 0xbe, 0x36, 0xa8, 0x6f, 0x68, 0xf3, 0x7f, 0xa6, - 0x1d, 0xae, 0x7a, 0xff, 0x14, 0xde, 0xcd, 0x67, 0xec, 0x31, 0x57, 0xa1, 0x14, 0x88, 0xa1, 0x4f, - 0xed, 0x01, 0x42, 0x82, 0x83, 0x48, 0xf5, 0xf6, 0x08, 0xb0, 0xfe, 0x03, 0xe1, 0xf3, 0xc0, 0xaf, - 0x3a, 0xcc, 0xa0, 0xce, 0x36, 0x85, 0x2e, 0xd4, 0x2e, 0x22, 0x0a, 0xe9, 0xab, 0xf8, 0xf8, 0x90, - 0x6f, 0x00, 0xf1, 0xb8, 0x6b, 0xff, 0x85, 0x04, 0xc8, 0xf1, 0x6c, 0x78, 0x4f, 0xd5, 0x2d, 0x25, - 0xe0, 0x13, 0xff, 0x4f, 0xda, 0x90, 0x3e, 0x9e, 0x1e, 0xb4, 0x53, 0xc1, 0x46, 0x4b, 0x11, 0x96, - 0x6d, 0xb9, 0xb2, 0x8e, 0x8f, 0x26, 0xa3, 0xfc, 0x41, 0x9e, 0x6a, 0x60, 0xa4, 0x8d, 0x4c, 0x72, - 0x14, 0xee, 0x9c, 0x6c, 0x6a, 0x12, 0xb6, 0x8a, 0x32, 0xca, 0xc8, 0xf6, 0x15, 0x80, 0xc6, 0x4f, - 0x29, 0xcb, 0x69, 0x22, 0x40, 0x87, 0x83, 0xc6, 0xd1, 0x2e, 0x72, 0x5b, 0x01, 0x4f, 0xe4, 0x85, - 0xcd, 0x17, 0xe4, 0x84, 0xc5, 0x95, 0x2b, 0xf9, 0x9b, 0xc9, 0x49, 0x41, 0xd4, 0xb1, 0x91, 0x9d, - 0x04, 0x31, 0x7b, 0x8a, 0xa1, 0xbd, 0x37, 0x54, 0xec, 0xba, 0xa1, 0x0e, 0xc2, 0x27, 0xde, 0x85, - 0x40, 0x69, 0x5b, 0xf2, 0xfb, 0x8e, 0xe5, 0x6f, 0x6d, 0xc5, 0x26, 0xef, 0x36, 0x66, 0x25, 0xb9, - 0x1a, 0xa4, 0x97, 0x0b, 0x6f, 0xfa, 0x5c, 0x82, 0x84, 0xb9, 0xb5, 0xab, 0x85, 0x2b, 0x90, 0x5f, - 0x9d, 0x83, 0xf5, 0x66, 0x9c, 0x05, 0x35, 0xbc, 0x37, 0x7b, 0xcc, 0x05, 0xad, 0x5e, 0x48, 0xe2, - 0x81, 0xec, 0x0e, 0x19, 0x17, 0xca, 0x3c, 0x6a, 0x47, 0x1f, 0x8d, 0xa0, 0x89, 0x4b, 0xc8, 0x2a, - 0xc2, 0xa8, 0x96, 0x54, 0x05, 0xd6, 0xee, 0xf3, 0xb5, 0xe2, 0x93, 0xa8, 0x8f, 0xda, 0x20, 0x3f, - 0x09, 0xbd, 0xc7, 0x27, 0x57, 0xb1, 0x07, 0xab, 0x14, 
0x88, 0x0e, 0xaa, 0x3e, 0xf7, 0x04, 0x5b, - 0x58, 0x0f, 0x48, 0x21, 0xce, 0x6d, 0xd3, 0x25, 0xb5, 0xa9, 0x06, 0x55, 0xd8, 0xc5, 0xb5, 0x5f, - 0x76, 0xfb, 0x84, 0x62, 0x79, 0xa9, 0xb5, 0x18, 0xc5, 0xe9, 0xb9, 0xa2, 0x11, 0x65, 0xc5, 0x09, - 0x3e, 0xd4, 0x9b, 0xaa, 0xac, 0xad, 0xf1, 0xf2, 0x18, 0x73, 0x26, 0x6c, 0x76, 0x7f, 0x67, 0x69, + 0xd7, 0x6b, 0x33, 0x43, 0xcf, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00, + 0x44, 0x9e, 0xa0, 0xc9, 0x5e, 0x82, 0xff, 0xe6, 0x7b, 0x6a, 0xbc, 0xdb, 0x42, 0x98, 0xb4, 0x85, + 0xdd, 0x04, 0xde, 0x80, 0x60, 0x71, 0xbf, 0x03, 0xdc, 0xee, 0xbf, 0xa1, 0x62, 0xe7, 0x5d, 0x6c, + 0x96, 0x05, 0x8b, 0xdb, 0xfb, 0x12, 0x7c, 0xdf, 0xcb, 0xf9, 0x03, 0x38, 0x8e, 0x99, 0xad, 0x04, + 0x9f, 0x9a, 0x3d, 0xd4, 0x42, 0x5a, 0xe4, 0xd0, 0x99, 0x2c, 0xff, 0xf1, 0x8e, 0xcf, 0x0f, 0xdb, + 0x5a, 0x84, 0x2d, 0x09, 0x74, 0x70, 0x52, 0xf1, 0x7a, 0xc2, 0x05, 0x3d, 0x21, 0xf5, 0x7c, 0x5d, + 0x25, 0x0f, 0x2c, 0x4f, 0x0e, 0x02, 0x02, 0xb7, 0x07, 0x85, 0xb7, 0x94, 0x6e, 0x99, 0x2e, 0x58, + 0xa5, 0x9a, 0xc5, 0x2d, 0xea, 0x67, 0x74, 0xd4, 0xf0, 0x3b, 0x55, 0x54, 0x52, 0x43, 0xcf, 0x1a, + 0x12, 0x83, 0x4e, 0x3f, 0x24, 0x9a, 0x78, 0xd3, 0x95, 0xe0, 0xd1, 0x8f, 0x4d, 0x76, 0x60, 0x04, + 0xf1, 0xa2, 0x67, 0x48, 0x02, 0xa7, 0x47, 0xea, 0xa9, 0x01, 0xc3, 0xf1, 0x0c, 0xda, 0x55, 0x00, + 0xcb, 0x91, 0x22, 0xfa, 0xa9, 0xf1, 0xdf, 0x66, 0xc3, 0x92, 0x07, 0x9a, 0x1b, 0x40, 0xf0, 0xde, + 0x1c, 0x60, 0x54, 0x19, 0x6a, 0x11, 0xcb, 0xea, 0x40, 0xaf, 0xb6, 0xef, 0x52, 0x53, 0xcd, 0x68, + 0x18, 0xf6, 0x62, 0x5e, 0xfc, 0xe3, 0xb6, 0xde, 0xf6, 0xba, 0x7e, 0x4b, 0x37, 0xa4, 0x0f, 0x77, + 0x32, 0xe0, 0x93, 0xda, 0xa7, 0xd5, 0x21, 0x90, 0x93, 0x5b, 0x8d, 0xa5, 0x89, 0x76, 0xff, 0x33, + 0x12, 0xae, 0x50, 0xb1, 0x87, 0xc1, 0x43, 0x3c, 0x0f, 0x02, 0x8e, 0xdc, 0xc4, 0xc2, 0x83, 0x8b, + 0x6a, 0x9b, 0xfc, 0x22, 0x6c, 0xa4, 0xb4, 0x53, 0x0e, 0x7a, 0x4c, 0xce, 0xe1, 0xbf, 0xa2, 0xa3, + 0xd3, 0x96, 0xae, 0x5a, 0x3f, 0xb5, 0x12, 0x38, 0x4b, 0x2f, 0xdd, 0x85, 0x1f, 0x78, 0x4a, 
0x65, + 0xe0, 0x3f, 0x2c, 0x4f, 0xbe, 0x11, 0xa5, 0x3c, 0x77, 0x77, 0xc0, 0x23, 0x46, 0x22, 0x39, 0xdd, + 0x6f, 0x75, 0x21, 0xa3, 0xf6, 0xc7, 0xd5, 0xdd, 0x3e, 0xc9, 0xb3, 0xf2, 0x33, 0x77, 0x3d, 0x4b, + 0x46, 0xd2, 0x3c, 0xc3, 0x75, 0xeb, 0x19, 0x8c, 0x63, 0x30, 0x1c, 0x21, 0x80, 0x1f, 0x65, 0x20, + 0xbc, 0xfb, 0x79, 0x66, 0xfc, 0x49, 0xb3, 0x93, 0xf0, 0x06, 0x1d, 0x97, 0x4a, 0x27, 0x06, 0xdf, + 0x8c, 0x4a, 0x94, 0x49, 0xf1, 0x1d, 0x7f, 0x3d, 0x2d, 0xcb, 0xb9, 0x0c, 0x6b, 0x87, 0x70, 0x45, + 0x63, 0x6e, 0x7c, 0x0c, 0x0f, 0xe4, 0xeb, 0x0f, 0x69, 0x75, 0x45, 0x46, 0x0c, 0x80, 0x69, 0x10, + 0xd2, 0xc3, 0x55, 0xf1, 0xd2, 0x53, 0xbc, 0x9d, 0x24, 0x52, 0xaa, 0xa5, 0x49, 0xe2, 0x7a, 0x1f, + 0xac, 0x7c, 0xf4, 0xed, 0x77, 0xf3, 0x22, 0xe8, 0xfa, 0x89, 0x4b, 0x6a, 0x83, 0x81, 0x0a, 0x34, + 0xb3, 0x61, 0x90, 0x17, 0x51, 0xa6, 0xf5, 0xeb, 0x65, 0xa0, 0x32, 0x6e, 0x07, 0xde, 0x7c, 0x12, + 0x16, 0xcc, 0xce, 0x2d, 0x01, 0x93, 0xf9, 0x58, 0xbb, 0x38, 0x50, 0xa8, 0x33, 0xf7, 0xae, 0x43, + 0x2b, 0x65, 0xbc, 0x5a, 0x53, 0x97, 0x5c, 0x15, 0x5a, 0xa4, 0xbc, 0xb4, 0xf7, 0xb2, 0xc4, 0xe5, + 0x4d, 0xf1, 0x6e, 0xfa, 0xf6, 0xdd, 0xea, 0x94, 0xe2, 0xc5, 0x0b, 0x4c, 0xd1, 0xdf, 0xe0, 0x60, + 0x17, 0xe0, 0xe9, 0xd0, 0x29, 0x00, 0xcf, 0xfe, 0x19, 0x35, 0xe0, 0x49, 0x1d, 0x77, 0xff, 0xb4, + 0xfd, 0xf8, 0x52, 0x90, 0xfd, 0xd8, 0x93, 0xd5, 0x77, 0xb1, 0x13, 0x1a, 0x61, 0x0e, 0xf6, 0xa5, + 0xc3, 0x2b, 0x2e, 0xe0, 0x29, 0x36, 0x17, 0xa3, 0x7c, 0xbb, 0x08, 0xb8, 0x47, 0x74, 0x1c, 0x3b, + 0x80, 0x17, 0xc2, 0x5c, 0xa9, 0x05, 0x2c, 0xa1, 0x07, 0x9d, 0x8b, 0x78, 0xae, 0xbd, 0x47, 0x87, + 0x6d, 0x33, 0x0a, 0x30, 0xf6, 0xa8, 0xc6, 0xd6, 0x1d, 0xd1, 0xab, 0x55, 0x89, 0x32, 0x9d, 0xe7, + 0x14, 0xd1, 0x9d, 0x61, 0x37, 0x0f, 0x81, 0x49, 0x74, 0x8c, 0x72, 0xf1, 0x32, 0xf0, 0xfc, 0x99, + 0xf3, 0x4d, 0x76, 0x6c, 0x69, 0x38, 0x59, 0x70, 0x40, 0xd8, 0xf9, 0xe2, 0xbb, 0x52, 0x2f, 0xf9, + 0x9c, 0x63, 0xa3, 0x44, 0xd6, 0xa2, 0xae, 0x8a, 0xa8, 0xe5, 0x1b, 0x7b, 0x90, 0xa4, 0xa8, 0x06, + 0x10, 0x5f, 0xcb, 0xca, 0x31, 
0x50, 0x6c, 0x44, 0x61, 0x51, 0xad, 0xfe, 0xce, 0xb5, 0x1b, 0x91, + 0xab, 0xfe, 0x43, 0x96, 0x09, 0x77, 0xc8, 0x74, 0x71, 0xcf, 0x9a, 0xd4, 0x07, 0x4d, 0x30, 0xe1, + 0x0d, 0x6a, 0x7f, 0x03, 0xc6, 0x3b, 0xd5, 0xd4, 0x31, 0x7f, 0x68, 0xff, 0x32, 0x5b, 0xa3, 0xbd, + 0x80, 0xbf, 0x4d, 0xc8, 0xb5, 0x2a, 0x0b, 0xa0, 0x31, 0x75, 0x80, 0x22, 0xeb, 0x02, 0x5c, 0xdd, + 0x77, 0x0b, 0x44, 0xd6, 0xd6, 0xcf, 0x06, 0x70, 0xf4, 0xe9, 0x90, 0xb2, 0x23, 0x47, 0xa7, 0xdb, + 0x84, 0x82, 0x65, 0xe3, 0xe5, 0xeb, 0x72, 0xdf, 0xe8, 0x29, 0x9a, 0xd7, 0x48, 0x1a, 0x40, 0x83, + 0x22, 0xca, 0xc5, 0x57, 0x86, 0xe5, 0x2f, 0x63, 0x3b, 0x2f, 0xb6, 0xb6, 0x14, 0xea, 0xed, 0x18, + 0xd7, 0x03, 0xdd, 0x84, 0x04, 0x5a, 0x27, 0x4a, 0xe8, 0xbf, 0xa7, 0x33, 0x79, 0x66, 0x13, 0x88, + 0xd6, 0x99, 0x1f, 0xe3, 0x9b, 0x0d, 0x93, 0xde, 0xbb, 0x41, 0x70, 0x0b, 0x41, 0xf9, 0x0a, 0x15, + 0xc4, 0xd5, 0x26, 0x25, 0x02, 0x35, 0xdd, 0xcd, 0x67, 0x76, 0xfc, 0x77, 0xbc, 0x97, 0xe7, 0xa4, + 0x17, 0xeb, 0xcb, 0x31, 0x60, 0x0d, 0x01, 0xe5, 0x7f, 0x32, 0x16, 0x2a, 0x85, 0x60, 0xca, 0xcc, + 0x7e, 0x27, 0xa0, 0x96, 0xd3, 0x7a, 0x1a, 0x86, 0x95, 0x2e, 0xc7, 0x1b, 0xd8, 0x9a, 0x3e, 0x9a, + 0x30, 0xa2, 0xa2, 0x61, 0x62, 0x98, 0x4d, 0x77, 0x40, 0xf8, 0x11, 0x93, 0xe8, 0x23, 0x8e, 0x61, + 0xf6, 0xb5, 0xb9, 0x84, 0xd4, 0xd3, 0xdf, 0xa0, 0x33, 0xc1, 0xbb, 0x7e, 0x4f, 0x00, 0x37, 0xfe, + 0xbf, 0x40, 0x6d, 0x91, 0xc0, 0xdc, 0xcf, 0x32, 0xac, 0xf4, 0x23, 0xcf, 0xa1, 0xe7, 0x07, 0x10, + 0x10, 0xd3, 0xf2, 0x70, 0x12, 0x1b, 0x49, 0x3c, 0xe8, 0x50, 0x54, 0xef, 0x58, 0xba, 0xda, 0x42, + 0x31, 0x01, 0x38, 0xfe, 0x08, 0x1a, 0xdb, 0x04, 0xe2, 0xbd, 0x90, 0x1f, 0x2f, 0x13, 0x45, 0x8b, + 0x3d, 0x67, 0x58, 0x15, 0x81, 0x97, 0x10, 0x7c, 0x14, 0xeb, 0xb1, 0x93, 0x23, 0x0c, 0xd1, 0x15, + 0x73, 0x80, 0xaa, 0x79, 0xca, 0xe1, 0x37, 0x4a, 0x7c, 0x1e, 0x5b, 0xbc, 0xb8, 0x0e, 0xe2, 0x3e, + 0x06, 0xeb, 0xfd, 0xe2, 0x06, 0xbf, 0xb0, 0xfc, 0xbc, 0x0e, 0xdc, 0x4e, 0xbe, 0xc3, 0x09, 0x66, + 0x1b, 0xdd, 0x90, 0x8d, 0x53, 0x2e, 0xb0, 0xc6, 0xad, 0xc3, 0x8b, 
0x7c, 0xa7, 0x33, 0x1d, 0xce, + 0x8d, 0xfc, 0xe3, 0x9a, 0xb7, 0x1e, 0x7c, 0x32, 0xd3, 0x18, 0xd1, 0x36, 0xb6, 0x10, 0x06, 0x71, + 0xa1, 0xae, 0x6a, 0x66, 0x00, 0xe3, 0x89, 0x9f, 0x31, 0xf0, 0xee, 0xd1, 0x9e, 0x34, 0x17, 0xd1, + 0x34, 0xb9, 0x0c, 0x90, 0x58, 0xf8, 0x63, 0x2c, 0x79, 0x8d, 0x44, 0x90, 0xda, 0x49, 0x87, 0x30, + 0x7c, 0xba, 0x92, 0x2d, 0x61, 0xc3, 0x98, 0x05, 0xd0, 0x72, 0xb5, 0x89, 0xbd, 0x52, 0xfd, 0xf1, + 0xe8, 0x62, 0x15, 0xc2, 0xd5, 0x4e, 0x66, 0x70, 0xe0, 0x73, 0x83, 0xa2, 0x7b, 0xbf, 0xfb, 0x5a, + 0xdd, 0xf4, 0x7d, 0x66, 0xaa, 0x85, 0xa0, 0xc6, 0xf9, 0xf3, 0x2e, 0x59, 0xd8, 0x5a, 0x44, 0xdd, + 0x5d, 0x3b, 0x22, 0xdc, 0x2b, 0xe8, 0x09, 0x19, 0xb4, 0x90, 0x43, 0x7a, 0xe4, 0xf3, 0x6a, 0x0a, + 0xe5, 0x5e, 0xdf, 0x1d, 0x0b, 0x5c, 0xb4, 0xe9, 0xa3, 0xec, 0xab, 0xee, 0x93, 0xdf, 0xc6, 0xe3, + 0x8d, 0x20, 0x9d, 0x0f, 0xa6, 0x53, 0x6d, 0x27, 0xa5, 0xd6, 0xfb, 0xb1, 0x76, 0x41, 0xcd, 0xe2, + 0x75, 0x25, 0xd6, 0x10, 0x93, 0xf1, 0xb2, 0x80, 0x72, 0xd1, 0x11, 0xb2, 0xb4, 0xae, 0x5f, 0x89, + 0xd5, 0x97, 0x4e, 0xe1, 0x2e, 0x5c, 0xf7, 0xd5, 0xda, 0x4d, 0x6a, 0x31, 0x12, 0x30, 0x41, 0xf3, + 0x3e, 0x61, 0x40, 0x7e, 0x76, 0xcf, 0xfc, 0xdc, 0xfd, 0x7e, 0x19, 0xba, 0x58, 0xcf, 0x4b, 0x53, + 0x6f, 0x4c, 0x49, 0x38, 0xae, 0x79, 0x32, 0x4d, 0xc4, 0x02, 0x89, 0x4b, 0x44, 0xfa, 0xf8, 0xaf, + 0xba, 0xb3, 0x52, 0x82, 0xab, 0x65, 0x9d, 0x13, 0xc9, 0x3f, 0x70, 0x41, 0x2e, 0x85, 0xcb, 0x19, + 0x9a, 0x37, 0xdd, 0xec, 0x60, 0x05, 0x45, 0x47, 0x3c, 0xfb, 0x5a, 0x05, 0xe0, 0x8d, 0x0b, 0x20, + 0x99, 0x73, 0xb2, 0x17, 0x2b, 0x4d, 0x21, 0xfb, 0x69, 0x74, 0x5a, 0x26, 0x2c, 0xcd, 0xe9, 0x6b, + 0xa1, 0x8b, 0x2f, 0xaa, 0x74, 0x5b, 0x6f, 0xe1, 0x89, 0xcf, 0x77, 0x2a, 0x9f, 0x84, 0xcb, 0xfc, ]; const INITIAL_PACKET_V1: &[u8] = &[ From d3489dfd913309200519e253e84233e728c38480 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 14 Dec 2023 23:34:17 +0200 Subject: [PATCH 034/321] Fix hrtimer tests on MacOS (#1517) Whatever I did to this file in #1512 made the tests on MacOS fail (because 
timer accuracy got bad.) Restoring the code and adding clippy ignores fixes this. --- neqo-common/src/hrtime.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index 3b9703e073..d1f42d41c5 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -119,14 +119,13 @@ mod mac { #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct thread_time_constraint_policy { - period: f64, - computation: f64, - constraint: f64, + period: u32, + computation: u32, + constraint: u32, preemptible: boolean_t, } const THREAD_TIME_CONSTRAINT_POLICY: thread_policy_flavor_t = 2; - #[allow(clippy::cast_possible_truncation)] const THREAD_TIME_CONSTRAINT_POLICY_COUNT: mach_msg_type_number_t = (size_of::() / size_of::()) @@ -182,10 +181,11 @@ mod mac { /// Create a realtime policy and set it. pub fn set_realtime(base: f64) { + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] let policy = thread_time_constraint_policy { - period: base, // Base interval - computation: base * 0.5, - constraint: base, + period: base as u32, // Base interval + computation: (base * 0.5) as u32, + constraint: (base * 1.0) as u32, preemptible: 1, }; set_thread_policy(policy); From 69d243909701ed0579983cc13b97dd35cca3a53c Mon Sep 17 00:00:00 2001 From: Kershaw Date: Tue, 19 Dec 2023 07:53:41 +0100 Subject: [PATCH 035/321] Make neqo-client support upload test (#1502) * Make neqo-client support upload test * address comments * format --- neqo-client/src/main.rs | 349 +++++++++++++++++++++++++++++++++------- neqo-server/src/main.rs | 45 +++++- 2 files changed, 328 insertions(+), 66 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 38369653da..a72d78890a 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -238,6 +238,14 @@ pub struct Args { #[structopt(name = "ipv6-only", short = "6", long)] /// Connect only over IPv6 ipv6_only: bool, + + /// The 
test that this client will run. Currently, we only support "upload". + #[structopt(name = "test", long)] + test: Option, + + /// The request size that will be used for upload test. + #[structopt(name = "upload-size", long, default_value = "100")] + upload_size: usize, } impl Args { @@ -443,35 +451,169 @@ fn process_loop( } } -struct Handler<'a> { - streams: HashMap>, +trait StreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
); + fn process_data_readable( + &mut self, + stream_id: StreamId, + fin: bool, + data: Vec, + sz: usize, + output_read_data: bool, + ) -> Res; + fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId); +} + +enum StreamHandlerType { + Download, + Upload, +} + +impl StreamHandlerType { + fn make_handler( + handler_type: &Self, + url: &Url, + args: &Args, + all_paths: &mut Vec, + ) -> Box { + match handler_type { + Self::Download => { + let out_file = get_output_file(url, &args.output_dir, all_paths); + Box::new(DownloadStreamHandler { out_file }) + } + Self::Upload => Box::new(UploadStreamHandler { + data: vec![42; args.upload_size], + offset: 0, + chunk_size: 32768, + start: Instant::now(), + }), + } + } +} + +struct DownloadStreamHandler { + out_file: Option, +} + +impl StreamHandler for DownloadStreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { + if self.out_file.is_none() { + println!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); + } + } + + fn process_data_readable( + &mut self, + stream_id: StreamId, + fin: bool, + data: Vec, + sz: usize, + output_read_data: bool, + ) -> Res { + if let Some(out_file) = &mut self.out_file { + if sz > 0 { + out_file.write_all(&data[..sz])?; + } + return Ok(true); + } else if !output_read_data { + println!("READ[{}]: {} bytes", stream_id, sz); + } else if let Ok(txt) = String::from_utf8(data.clone()) { + println!("READ[{}]: {}", stream_id, txt); + } else { + println!("READ[{}]: 0x{}", stream_id, hex(&data)); + } + + if fin && self.out_file.is_none() { + println!("", stream_id); + } + + Ok(true) + } + + fn process_data_writable(&mut self, _client: &mut Http3Client, _stream_id: StreamId) {} +} + +struct UploadStreamHandler { + data: Vec, + offset: usize, + chunk_size: usize, + start: Instant, +} + +impl StreamHandler for UploadStreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { + println!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); + } + + fn process_data_readable( + &mut self, + stream_id: StreamId, + _fin: bool, + data: Vec, + _sz: usize, + _output_read_data: bool, + ) -> Res { + if let Ok(txt) = String::from_utf8(data.clone()) { + let trimmed_txt = txt.trim_end_matches(char::from(0)); + let parsed: usize = trimmed_txt.parse().unwrap(); + if parsed == self.data.len() { + let upload_time = Instant::now().duration_since(self.start); + println!("Stream ID: {:?}, Upload time: {:?}", stream_id, upload_time); + } + } else { + panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data)); + } + Ok(true) + } + + fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId) { + while self.offset < self.data.len() { + let end = self.offset + self.chunk_size.min(self.data.len() - self.offset); + let chunk = &self.data[self.offset..end]; + match client.send_data(stream_id, chunk) { + Ok(amount) => { + if amount == 0 { + break; + } + self.offset += amount; + if self.offset == self.data.len() { + client.stream_close_send(stream_id).unwrap(); + } + } + Err(_) => break, + }; + } + } +} + +struct URLHandler<'a> { url_queue: VecDeque, + stream_handlers: HashMap>, all_paths: Vec, + handler_type: StreamHandlerType, args: &'a Args, - key_update: KeyUpdateState, - token: Option, } -impl<'a> Handler<'a> { - fn download_urls(&mut self, client: &mut Http3Client) { +impl<'a> URLHandler<'a> { + fn stream_handler(&mut self, stream_id: &StreamId) -> Option<&mut Box> { + self.stream_handlers.get_mut(stream_id) + } + + fn process_urls(&mut self, client: &mut Http3Client) { loop { if self.url_queue.is_empty() { break; } - if self.streams.len() >= self.args.concurrency { + if self.stream_handlers.len() >= self.args.concurrency { break; } - if !self.download_next(client) { + if !self.next_url(client) { break; } } } - fn download_next(&mut self, client: &mut Http3Client) -> bool { - if self.key_update.needed() { - println!("Deferring 
requests until first key update"); - return false; - } + fn next_url(&mut self, client: &mut Http3Client) -> bool { let url = self .url_queue .pop_front() @@ -488,13 +630,14 @@ impl<'a> Handler<'a> { "Successfully created stream id {} for {}", client_stream_id, url ); - client - .stream_close_send(client_stream_id) - .expect("failed to close send stream"); - let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths); - - self.streams.insert(client_stream_id, out_file); + let handler: Box = StreamHandlerType::make_handler( + &self.handler_type, + &url, + self.args, + &mut self.all_paths, + ); + self.stream_handlers.insert(client_stream_id, handler); true } Err(Error::TransportError(TransportError::StreamLimitError)) @@ -509,25 +652,47 @@ impl<'a> Handler<'a> { } } - fn maybe_key_update(&mut self, c: &mut Http3Client) -> Res<()> { - self.key_update.maybe_update(|| c.initiate_key_update())?; - self.download_urls(c); - Ok(()) - } - fn done(&mut self) -> bool { - self.streams.is_empty() && self.url_queue.is_empty() + self.stream_handlers.is_empty() && self.url_queue.is_empty() } fn on_stream_fin(&mut self, client: &mut Http3Client, stream_id: StreamId) -> bool { - self.streams.remove(&stream_id); - self.download_urls(client); + self.stream_handlers.remove(&stream_id); + self.process_urls(client); if self.done() { client.close(Instant::now(), 0, "kthxbye!"); return false; } true } +} + +struct Handler<'a> { + url_handler: URLHandler<'a>, + key_update: KeyUpdateState, + token: Option, + output_read_data: bool, +} + +impl<'a> Handler<'a> { + pub fn new( + url_handler: URLHandler<'a>, + key_update: KeyUpdateState, + output_read_data: bool, + ) -> Self { + Self { + url_handler, + key_update, + token: None, + output_read_data, + } + } + + fn maybe_key_update(&mut self, c: &mut Http3Client) -> Res<()> { + self.key_update.maybe_update(|| c.initiate_key_update())?; + self.url_handler.process_urls(c); + Ok(()) + } fn handle(&mut self, client: &mut 
Http3Client) -> Res { while let Some(event) = client.next_event() { @@ -541,11 +706,9 @@ impl<'a> Handler<'a> { fin, .. } => { - match self.streams.get(&stream_id) { - Some(out_file) => { - if out_file.is_none() { - println!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); - } + match self.url_handler.stream_handler(&stream_id) { + Some(handler) => { + handler.process_header_ready(stream_id, fin, headers); } None => { println!("Data on unexpected stream: {}", stream_id); @@ -553,38 +716,31 @@ impl<'a> Handler<'a> { } } if fin { - return Ok(self.on_stream_fin(client, stream_id)); + return Ok(self.url_handler.on_stream_fin(client, stream_id)); } } Http3ClientEvent::DataReadable { stream_id } => { let mut stream_done = false; - match self.streams.get_mut(&stream_id) { + match self.url_handler.stream_handler(&stream_id) { None => { println!("Data on unexpected stream: {}", stream_id); return Ok(false); } - Some(out_file) => loop { + Some(handler) => loop { let mut data = vec![0; 4096]; let (sz, fin) = client .read_data(Instant::now(), stream_id, &mut data) .expect("Read should succeed"); - if let Some(out_file) = out_file { - if sz > 0 { - out_file.write_all(&data[..sz])?; - } - } else if !self.args.output_read_data { - println!("READ[{}]: {} bytes", stream_id, sz); - } else if let Ok(txt) = String::from_utf8(data.clone()) { - println!("READ[{}]: {}", stream_id, txt); - } else { - println!("READ[{}]: 0x{}", stream_id, hex(&data)); - } + handler.process_data_readable( + stream_id, + fin, + data, + sz, + self.output_read_data, + )?; if fin { - if out_file.is_none() { - println!("", stream_id); - } stream_done = true; break; } @@ -596,12 +752,24 @@ impl<'a> Handler<'a> { } if stream_done { - return Ok(self.on_stream_fin(client, stream_id)); + return Ok(self.url_handler.on_stream_fin(client, stream_id)); + } + } + Http3ClientEvent::DataWritable { stream_id } => { + match self.url_handler.stream_handler(&stream_id) { + None => { + println!("Data on unexpected 
stream: {}", stream_id); + return Ok(false); + } + Some(handler) => { + handler.process_data_writable(client, stream_id); + return Ok(true); + } } } Http3ClientEvent::StateChange(Http3State::Connected) | Http3ClientEvent::RequestsCreatable => { - self.download_urls(client); + self.url_handler.process_urls(client); } Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), _ => { @@ -629,8 +797,10 @@ fn to_headers(values: &[impl AsRef]) -> Vec
{ .collect() } -fn client( - args: &Args, +#[allow(clippy::too_many_arguments)] +fn handle_test( + testcase: &String, + args: &mut Args, socket: &UdpSocket, local_addr: SocketAddr, remote_addr: SocketAddr, @@ -638,6 +808,39 @@ fn client( urls: &[Url], resumption_token: Option, ) -> Res> { + let key_update = KeyUpdateState(args.key_update); + match testcase.as_str() { + "upload" => { + let mut client = + create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) + .expect("failed to create client"); + args.method = String::from("POST"); + let url_handler = URLHandler { + url_queue: VecDeque::from(urls.to_vec()), + stream_handlers: HashMap::new(), + all_paths: Vec::new(), + handler_type: StreamHandlerType::Upload, + args, + }; + let mut h = Handler::new(url_handler, key_update, args.output_read_data); + process_loop(&local_addr, socket, &mut client, &mut h)?; + } + _ => { + eprintln!("Unsupported test case: {}", testcase); + exit(127) + } + } + + Ok(None) +} + +fn create_http3_client( + args: &mut Args, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + resumption_token: Option, +) -> Res { let mut transport = Connection::new_client( hostname, &[&args.alpn], @@ -671,15 +874,43 @@ fn client( .expect("enable resumption"); } + Ok(client) +} + +fn client( + args: &mut Args, + socket: &UdpSocket, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + urls: &[Url], + resumption_token: Option, +) -> Res> { + let testcase = args.test.clone(); + if let Some(testcase) = testcase { + return handle_test( + &testcase, + args, + socket, + local_addr, + remote_addr, + hostname, + urls, + resumption_token, + ); + } + + let mut client = create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) + .expect("failed to create client"); let key_update = KeyUpdateState(args.key_update); - let mut h = Handler { - streams: HashMap::new(), + let url_handler = URLHandler { url_queue: VecDeque::from(urls.to_vec()), 
+ stream_handlers: HashMap::new(), all_paths: Vec::new(), + handler_type: StreamHandlerType::Download, args, - key_update, - token: None, }; + let mut h = Handler::new(url_handler, key_update, args.output_read_data); process_loop(&local_addr, socket, &mut client, &mut h)?; @@ -848,7 +1079,7 @@ fn main() -> Res<()> { )? } else { client( - &args, + &mut args, &socket, real_local, remote_addr, diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 574d1f8ae1..6eb85f17cf 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -420,6 +420,7 @@ struct SimpleServer { server: Http3Server, /// Progress writing to each stream. remaining_data: HashMap, + posts: HashMap, } impl SimpleServer { @@ -454,6 +455,7 @@ impl SimpleServer { Self { server, remaining_data: HashMap::new(), + posts: HashMap::new(), } } } @@ -479,6 +481,17 @@ impl HttpServer for SimpleServer { } => { println!("Headers (request={} fin={}): {:?}", stream, fin, headers); + let post = if let Some(method) = headers.iter().find(|&h| h.name() == ":method") + { + method.value() == "POST" + } else { + false + }; + if post { + self.posts.insert(stream, 0); + continue; + } + let mut response = if let Some(path) = headers.iter().find(|&h| h.name() == ":path") { if args.qns_test.is_some() { @@ -515,17 +528,35 @@ impl HttpServer for SimpleServer { } } Http3ServerEvent::DataWritable { mut stream } => { - if let Some(remaining) = self.remaining_data.get_mut(&stream.stream_id()) { - remaining.send(&mut stream); - if remaining.done() { - self.remaining_data.remove(&stream.stream_id()); - stream.stream_close_send().unwrap(); + if self.posts.get_mut(&stream).is_none() { + if let Some(remaining) = self.remaining_data.get_mut(&stream.stream_id()) { + remaining.send(&mut stream); + if remaining.done() { + self.remaining_data.remove(&stream.stream_id()); + stream.stream_close_send().unwrap(); + } } } } - Http3ServerEvent::Data { stream, data, fin } => { - println!("Data (request={} fin={}): {:?}", 
stream, fin, data); + Http3ServerEvent::Data { + mut stream, + data, + fin, + } => { + if let Some(received) = self.posts.get_mut(&stream) { + *received += data.len(); + } + if fin { + if let Some(received) = self.posts.remove(&stream) { + let msg = received.to_string().as_bytes().to_vec(); + stream + .send_headers(&[Header::new(":status", "200")]) + .unwrap(); + stream.send_data(&msg).unwrap(); + stream.stream_close_send().unwrap(); + } + } } _ => {} } From dc7cb9b4428ac4d850787d505d89cc3735e5d69b Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 19 Dec 2023 12:35:53 +0200 Subject: [PATCH 036/321] Replace nextest with test To see if this addresses #1521 --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 76de5286fb..5120bf1bc7 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -107,7 +107,7 @@ jobs: RUST_BACKTRACE: 1 - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --all-targets --no-fail-fast --lcov --output-path lcov.info + run: cargo +${{ matrix.rust-toolchain }} llvm-cov test --all-targets --no-fail-fast --lcov --output-path lcov.info env: RUST_BACKTRACE: 1 RUST_LOG: neqo=debug From 1d28ba86e0b9baa0382bbf4f2390e11090cd661a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 19 Dec 2023 14:03:51 +0200 Subject: [PATCH 037/321] Don't install nextest --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 5120bf1bc7..8c9a08d232 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -68,7 +68,7 @@ jobs: uses: ilammy/msvc-dev-cmd@v1 - name: Install Rust tools - run: cargo +${{ matrix.rust-toolchain }} binstall --no-confirm cargo-llvm-cov cargo-nextest + run: cargo +${{ matrix.rust-toolchain }} binstall --no-confirm cargo-llvm-cov - name: Checkout 
uses: actions/checkout@v4 From cf7ac312c60e776cc877c2d1dea1eb523c4bd088 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 19 Dec 2023 19:59:18 +0200 Subject: [PATCH 038/321] Bump MSRV to 1.70.0 Per https://searchfox.org/mozilla-central/source/docs/writing-rust-code/update-policy.md#150 --- .github/workflows/check.yml | 2 +- clippy.toml | 2 +- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 76de5286fb..5afd73b197 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -17,7 +17,7 @@ jobs: matrix: os: [ubuntu-latest, macos-latest, windows-latest] # TODO: add beta (and nightly?) but don't fail the test if they fail - rust-toolchain: [1.65.0, stable] + rust-toolchain: [1.70.0, stable] runs-on: ${{ matrix.os }} defaults: run: diff --git a/clippy.toml b/clippy.toml index e034672c76..1645c19f32 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1 @@ -msrv = "1.65.0" +msrv = "1.70.0" diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index cf9b86484a..06b131a6a2 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 35dff0a529..b3486e7220 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -3,7 +3,7 @@ name = "neqo-common" version = "0.6.8" authors = ["Bobby Holley "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" build = "build.rs" diff --git 
a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 3d54ea0dea..79d6dc21d9 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -3,7 +3,7 @@ name = "neqo-crypto" version = "0.6.8" authors = ["Martin Thomson "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" build = "build.rs" license = "MIT OR Apache-2.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 70f323fe76..e83ced739e 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -3,7 +3,7 @@ name = "neqo-http3" version = "0.6.8" authors = ["Dragana Damjanovic "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 23672050db..7660b0f1d0 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -3,7 +3,7 @@ name = "neqo-interop" version = "0.6.8" authors = ["EKR "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 5b8d15a7c7..d9af0abaf3 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -3,7 +3,7 @@ name = "neqo-qpack" version = "0.6.8" authors = ["Dragana Damjanovic "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index d3b0dd9615..1517f7d53d 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -3,7 +3,7 @@ name = "neqo-server" version = "0.6.8" authors = ["Dragana Damjanovic "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 6cd19a1955..6425fdbdcb 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -3,7 +3,7 @@ name = "neqo-transport" version = "0.6.8" authors = ["EKR ", "Andy 
Grover "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 9ceed28474..99bdd41cb5 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -3,7 +3,7 @@ name = "test-fixture" version = "0.6.8" authors = ["Martin Thomson "] edition = "2018" -rust-version = "1.65.0" +rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] From 72f9cf8a566c1adb212c34375291a390aba141cd Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 20 Dec 2023 08:24:38 +0200 Subject: [PATCH 039/321] More CI fixes --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 3bde763a65..fdeabfaa5e 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -28,7 +28,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust-toolchain }} - components: rustfmt, clippy + components: rustfmt, clippy, llvm-tools-preview - name: Install dependencies (Linux) if: runner.os == 'Linux' From d1639a4ad15c933714485ace71f6008b08115b33 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 20 Dec 2023 11:14:12 +0200 Subject: [PATCH 040/321] Fix hrtimer CI (#1523) * Fix hrtimer CI Or try to. * Add min and max * See if using q10 is more robust * Don't run `hrtimer` tests in CI on Windows and MacOS * Fix formatting * Add clippy allows for useless_conversion, to be removed when we bump the MSRV to 1.74.0. * Fix formatting * One more clippy allow * Debug Mac test failure * MacOS failure seems spurious? Sigh. 
--- .github/workflows/check.yml | 3 +-- neqo-common/Cargo.toml | 1 + neqo-common/src/hrtime.rs | 7 ++++++- neqo-crypto/src/ech.rs | 2 ++ neqo-crypto/src/hkdf.rs | 1 + neqo-crypto/src/hp.rs | 5 +++++ neqo-crypto/src/p11.rs | 1 + 7 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index fdeabfaa5e..94b7ff2447 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -83,7 +83,6 @@ jobs: git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" - echo "NSS_JOBS=$NUMBER_OF_PROCESSORS" >> "$GITHUB_ENV" env: NSS_DIR: ${{ github.workspace }}/nss NSPR_DIR: ${{ github.workspace }}/nspr @@ -107,7 +106,7 @@ jobs: RUST_BACKTRACE: 1 - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov test --all-targets --no-fail-fast --lcov --output-path lcov.info + run: cargo +${{ matrix.rust-toolchain }} llvm-cov test --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info env: RUST_BACKTRACE: 1 RUST_LOG: neqo=debug diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index b3486e7220..68583e21dc 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -16,6 +16,7 @@ time = {version = "=0.3.23", features = ["formatting"]} [features] deny-warnings = [] +ci = [] [target."cfg(windows)".dependencies.winapi] version = "0.3" diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index d1f42d41c5..682c097035 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -372,7 +372,12 @@ impl Drop for Time { } } -#[cfg(test)] +// Only run these tests in CI on platforms other than MacOS and Windows, where the timer +// inaccuracies are too high to pass the tests. 
+#[cfg(all( + test, + not(all(any(target_os = "macos", target_os = "windows"), feature = "ci")) +))] mod test { use super::Time; use std::{ diff --git a/neqo-crypto/src/ech.rs b/neqo-crypto/src/ech.rs index 5425e1a64c..c4b33b0bee 100644 --- a/neqo-crypto/src/ech.rs +++ b/neqo-crypto/src/ech.rs @@ -109,6 +109,7 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { // If we have tracing on, try to ensure that key data can be read. let insensitive_secret_ptr = if log::log_enabled!(log::Level::Trace) { + #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. unsafe { p11::PK11_GenerateKeyPairWithOpFlags( *slot, @@ -126,6 +127,7 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { }; assert_eq!(insensitive_secret_ptr.is_null(), public_ptr.is_null()); let secret_ptr = if insensitive_secret_ptr.is_null() { + #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. unsafe { p11::PK11_GenerateKeyPairWithOpFlags( *slot, diff --git a/neqo-crypto/src/hkdf.rs b/neqo-crypto/src/hkdf.rs index 3745d646d5..44df30ecfd 100644 --- a/neqo-crypto/src/hkdf.rs +++ b/neqo-crypto/src/hkdf.rs @@ -68,6 +68,7 @@ pub fn import_key(version: Version, buf: &[u8]) -> Res { return Err(Error::UnsupportedVersion); } let slot = Slot::internal()?; + #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. let key_ptr = unsafe { PK11_ImportDataKey( *slot, diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index fea67e9953..2409521903 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -72,6 +72,7 @@ impl HpKey { let l = label.as_bytes(); let mut secret: *mut PK11SymKey = null_mut(); + #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. 
let (mech, key_size) = match cipher { TLS_AES_128_GCM_SHA256 => (CK_MECHANISM_TYPE::from(CKM_AES_ECB), 16), TLS_AES_256_GCM_SHA384 => (CK_MECHANISM_TYPE::from(CKM_AES_ECB), 32), @@ -99,6 +100,8 @@ impl HpKey { let res = match cipher { TLS_AES_128_GCM_SHA256 | TLS_AES_256_GCM_SHA384 => { + // TODO: Remove when we bump the MSRV to 1.74.0. + #[allow(clippy::useless_conversion)] let context_ptr = unsafe { PK11_CreateContextBySymKey( mech, @@ -171,6 +174,8 @@ impl HpKey { }; let mut output_len: c_uint = 0; let mut param_item = Item::wrap_struct(¶ms); + // TODO: Remove when we bump the MSRV to 1.74.0. + #[allow(clippy::useless_conversion)] secstatus_to_res(unsafe { PK11_Encrypt( **key, diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index c7e47cbf15..ebd641c17e 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -130,6 +130,7 @@ impl PrivateKey { /// When the values are too large to fit. So never. pub fn key_data(&self) -> Res> { let mut key_item = Item::make_empty(); + #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. 
secstatus_to_res(unsafe { PK11_ReadRawAttribute( PK11ObjectType::PK11_TypePrivKey, From 5d1bca0728f7cc965439808434840246a5dc1654 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 2 Jan 2024 11:54:16 +0200 Subject: [PATCH 041/321] Add beta toolchain to CI (#1520) * Fix beta clippy issues * Undo * Try to ignore beta clippy errors * Syntax * Syntax * Trye * test * again * test * verbose * shorten * finalize --- .github/workflows/check.yml | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 94b7ff2447..8313f77d0c 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -1,11 +1,11 @@ name: CI on: push: - branches: [ "main" ] - paths-ignore: [ "*.md", "*.png", "*.svg", "LICENSE-*" ] + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] pull_request: - branches: [ "main" ] - paths-ignore: [ "*.md", "*.png", "*.svg", "LICENSE-*" ] + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] env: CARGO_TERM_COLOR: always @@ -15,10 +15,21 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - # TODO: add beta (and nightly?) but don't fail the test if they fail + os: [ubuntu, macos, windows] rust-toolchain: [1.70.0, stable] - runs-on: ${{ matrix.os }} + ignore_clippy_errors: [false] + # Add beta toolchain, but don't fail CI on clippy errors. 
+ include: + - os: ubuntu + rust-toolchain: beta + ignore_clippy_errors: true + - os: macos + rust-toolchain: beta + ignore_clippy_errors: true + - os: windows + rust-toolchain: beta + ignore_clippy_errors: true + runs-on: ${{ matrix.os }}-latest defaults: run: shell: bash @@ -118,6 +129,7 @@ jobs: - name: Clippy run: cargo +${{ matrix.rust-toolchain }} clippy -v --tests -- -D warnings if: success() || failure() + continue-on-error: ${{ matrix.ignore_clippy_errors }} - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 From c26af08db047669f79f7546e422a58a1c95a06bd Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Jan 2024 10:55:27 +0100 Subject: [PATCH 042/321] fix(clippy): latest lints (#1534) --- neqo-http3/src/push_controller.rs | 4 +--- neqo-qpack/src/decoder.rs | 2 +- neqo-qpack/src/prefix.rs | 2 ++ neqo-transport/src/cc/classic_cc.rs | 1 + neqo-transport/src/cc/cubic.rs | 4 ++-- neqo-transport/src/cc/mod.rs | 4 +++- neqo-transport/src/cc/tests/cubic.rs | 4 +--- neqo-transport/src/connection/mod.rs | 4 +++- neqo-transport/src/connection/tests/stream.rs | 2 +- neqo-transport/src/path.rs | 2 ++ 10 files changed, 17 insertions(+), 12 deletions(-) diff --git a/neqo-http3/src/push_controller.rs b/neqo-http3/src/push_controller.rs index 79ebab4efc..62171039e3 100644 --- a/neqo-http3/src/push_controller.rs +++ b/neqo-http3/src/push_controller.rs @@ -93,9 +93,7 @@ impl ActivePushStreams { None | Some(PushState::Closed) => None, Some(s) => { let res = mem::replace(s, PushState::Closed); - while self.push_streams.get(0).is_some() - && *self.push_streams.get(0).unwrap() == PushState::Closed - { + while let Some(PushState::Closed) = self.push_streams.front() { self.push_streams.pop_front(); self.first_push_id += 1; } diff --git a/neqo-qpack/src/decoder.rs b/neqo-qpack/src/decoder.rs index 5971545938..b4f9a7284a 100644 --- a/neqo-qpack/src/decoder.rs +++ b/neqo-qpack/src/decoder.rs @@ -50,7 +50,7 @@ impl QPackDecoder { send_buf, local_stream_id: 
None, max_table_size: qpack_settings.max_table_size_decoder, - max_blocked_streams: usize::try_from(qpack_settings.max_blocked_streams).unwrap(), + max_blocked_streams: usize::from(qpack_settings.max_blocked_streams), blocked_streams: Vec::new(), stats: Stats::default(), } diff --git a/neqo-qpack/src/prefix.rs b/neqo-qpack/src/prefix.rs index ee0826850d..5019dd7d6d 100644 --- a/neqo-qpack/src/prefix.rs +++ b/neqo-qpack/src/prefix.rs @@ -6,6 +6,8 @@ #[derive(Copy, Clone, Debug)] pub struct Prefix { + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] prefix: u8, len: u8, mask: u8, diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 8465d6b0af..000d9bf4d5 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -22,6 +22,7 @@ use crate::{ sender::PACING_BURST_SIZE, tracking::SentPacket, }; +#[rustfmt::skip] // to keep `::` and thus prevent conflict with `crate::qlog` use ::qlog::events::{quic::CongestionStateUpdated, EventData}; use neqo_common::{const_max, const_min, qdebug, qinfo, qlog::NeqoQlog, qtrace}; diff --git a/neqo-transport/src/cc/cubic.rs b/neqo-transport/src/cc/cubic.rs index a7d7b845fd..1a2928cdf7 100644 --- a/neqo-transport/src/cc/cubic.rs +++ b/neqo-transport/src/cc/cubic.rs @@ -39,9 +39,9 @@ const EXPONENTIAL_GROWTH_REDUCTION: f64 = 2.0; /// This has the effect of reducing larger values to `1<<53`. /// If you have a congestion window that large, something is probably wrong. 
fn convert_to_f64(v: usize) -> f64 { - let mut f_64 = f64::try_from(u32::try_from(v >> 21).unwrap_or(u32::MAX)).unwrap(); + let mut f_64 = f64::from(u32::try_from(v >> 21).unwrap_or(u32::MAX)); f_64 *= 2_097_152.0; // f_64 <<= 21 - f_64 += f64::try_from(u32::try_from(v & 0x1f_ffff).unwrap()).unwrap(); + f_64 += f64::from(u32::try_from(v & 0x1f_ffff).unwrap()); f_64 } diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index 5cd5676747..675168367a 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -20,7 +20,9 @@ mod classic_cc; mod cubic; mod new_reno; -pub use classic_cc::{ClassicCongestionControl, CWND_INITIAL, CWND_INITIAL_PKTS, CWND_MIN}; +pub use classic_cc::ClassicCongestionControl; +#[cfg(test)] +pub use classic_cc::{CWND_INITIAL, CWND_INITIAL_PKTS, CWND_MIN}; pub use cubic::Cubic; pub use new_reno::NewReno; diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index d93643583d..1229e6307f 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -76,9 +76,7 @@ fn packet_lost(cc: &mut ClassicCongestionControl, pn: u64) { } fn expected_tcp_acks(cwnd_rtt_start: usize) -> u64 { - (f64::try_from(i32::try_from(cwnd_rtt_start).unwrap()).unwrap() - / MAX_DATAGRAM_SIZE_F64 - / CUBIC_ALPHA) + (f64::from(i32::try_from(cwnd_rtt_start).unwrap()) / MAX_DATAGRAM_SIZE_F64 / CUBIC_ALPHA) .round() as u64 } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index abb7e590ad..3db127c4e7 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -67,7 +67,9 @@ mod state; pub mod test_internal; pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; -pub use params::{ConnectionParameters, ACK_RATIO_SCALE}; +pub use params::ConnectionParameters; +#[cfg(test)] +pub use params::ACK_RATIO_SCALE; pub use state::{ClosingFrame, State}; use 
idle::IdleTimeout; diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index 980077e5aa..5cc5e9594d 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -156,7 +156,7 @@ fn sendorder_test(order_of_sendorder: &[Option]) { assert_eq!(*client.state(), State::Confirmed); qdebug!("---- server receives"); - for (_, d) in datagrams.into_iter().enumerate() { + for d in datagrams { let out = server.process(Some(d), now()); qdebug!("Output={:0x?}", out.as_dgram_ref()); } diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 3a25a1bea9..9be3d4c966 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -56,6 +56,8 @@ pub type PathRef = Rc>; #[derive(Debug, Default)] pub struct Paths { /// All of the paths. All of these paths will be permanent. + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] paths: Vec, /// This is the primary path. This will only be `None` initially, so /// care needs to be taken regarding that only during the handshake. From eb75aef9e8fb7b0626b6abee9464f24a6439ceb5 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Jan 2024 23:16:54 +0100 Subject: [PATCH 043/321] fix(transport/qlog): map State::Closing onto ConnectionState::Closing (#1533) Map `State::Closing` to `ConnectionState::Closing` instead of `ConnectionState::Draining`. 
--- neqo-transport/src/qlog.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index a3ce45ea57..35944d5694 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -117,7 +117,7 @@ pub fn connection_state_updated(qlog: &mut NeqoQlog, new: &State) { State::WaitVersion | State::Handshaking => ConnectionState::HandshakeStarted, State::Connected => ConnectionState::HandshakeCompleted, State::Confirmed => ConnectionState::HandshakeConfirmed, - State::Closing { .. } => ConnectionState::Draining, + State::Closing { .. } => ConnectionState::Closing, State::Draining { .. } => ConnectionState::Draining, State::Closed { .. } => ConnectionState::Closed, }, From 096c46b73a68683bf8db83fb4791f8a36790d7d9 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 3 Jan 2024 13:27:45 +0200 Subject: [PATCH 044/321] Debug MacOS intermittent CI failure (#1528) * Debug MacOS intermittent CI failure * DYLD_PRINT_LIBRARIES=true * More testing * Again * Potential fix * Fix conditional * Different try * Try harder * Try * DYLD_FALLBACK_LIBRARY_PATH * More * env * cp * ls * cp * sudo * local * No sudo * sudo needed * Tweaks * Debug DYLD * Fix * Try canary * Again * Again * Not yet * DYLD_FALLBACK_LIBRARY_PATH * Add comment * Properly skip * grep * Reword comment * true * Use filter expression * Expr * Fix filter * Update .github/workflows/check.yml Co-authored-by: Martin Thomson * csrutil status * System Integrity Protection status: enabled. * 113 * Simplify * Fix * x * Finalize * Good to go? 
--------- Co-authored-by: Martin Thomson --- .github/workflows/check.yml | 42 ++++++++++++++----------------------- 1 file changed, 16 insertions(+), 26 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 8313f77d0c..bc4a5dc9c8 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -8,6 +8,7 @@ on: paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] env: CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 jobs: check: @@ -15,21 +16,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu, macos, windows] - rust-toolchain: [1.70.0, stable] - ignore_clippy_errors: [false] - # Add beta toolchain, but don't fail CI on clippy errors. - include: - - os: ubuntu - rust-toolchain: beta - ignore_clippy_errors: true - - os: macos - rust-toolchain: beta - ignore_clippy_errors: true - - os: windows - rust-toolchain: beta - ignore_clippy_errors: true - runs-on: ${{ matrix.os }}-latest + os: [ubuntu-latest, macos-13, windows-latest] + rust-toolchain: [1.70.0, stable, beta] + runs-on: ${{ matrix.os }} defaults: run: shell: bash @@ -49,9 +38,15 @@ jobs: sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + # In addition to installing dependencies, first make sure System Integrity Protection (SIP) + # is disabled on this MacOS runner. This is needed to allow the NSS libraries to be loaded + # from the build directory and avoid various other test failures. This seems to always be + # the case on any macos-13 runner, but not consistently on macos-latest (which is currently + # macos-12, FWIW). 
- name: Install dependencies (MacOS) if: runner.os == 'MacOS' run: | + csrutil status | grep disabled brew install ninja mercurial cargo-binstall # python3 -m pip install gyp-next # Above does not work, since pypi only has gyp 0.15.0, which is too old @@ -74,12 +69,12 @@ jobs: python3 -m pip install git+https://github.com/nodejs/gyp-next echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" - - name: Set up Windows build environment + - name: Set up MSVC build environment (Windows) if: runner.os == 'Windows' uses: ilammy/msvc-dev-cmd@v1 - name: Install Rust tools - run: cargo +${{ matrix.rust-toolchain }} binstall --no-confirm cargo-llvm-cov + run: cargo +${{ matrix.rust-toolchain }} binstall --no-confirm cargo-llvm-cov cargo-nextest - name: Checkout uses: actions/checkout@v4 @@ -98,7 +93,7 @@ jobs: NSS_DIR: ${{ github.workspace }}/nss NSPR_DIR: ${{ github.workspace }}/nspr - - name: Set up environment for NSS build.sh (Windows) + - name: Set up NSS/NSPR build environment (Windows) if: runner.os == 'Windows' run: | echo "GYP_MSVS_OVERRIDE_PATH=$VSINSTALLDIR" >> "$GITHUB_ENV" @@ -111,16 +106,11 @@ jobs: run: | cargo +${{ matrix.rust-toolchain }} build -v --all-targets echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" - echo "DYLD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" + echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_PATH" - env: - RUST_BACKTRACE: 1 - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov test --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info - env: - RUST_BACKTRACE: 1 - RUST_LOG: neqo=debug + run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info - name: Check formatting run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check @@ -129,7 +119,7 @@ 
jobs: - name: Clippy run: cargo +${{ matrix.rust-toolchain }} clippy -v --tests -- -D warnings if: success() || failure() - continue-on-error: ${{ matrix.ignore_clippy_errors }} + continue-on-error: ${{ matrix.rust-toolchain == 'beta' }} - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 From 0c421d2247243a07f5da3dc9ad71779a28b6f17e Mon Sep 17 00:00:00 2001 From: Kershaw Date: Fri, 5 Jan 2024 09:15:27 +0100 Subject: [PATCH 045/321] Enhance socket read to handle multiple packets (#1530) * Enhance socket read to handle multiple packets * Enhance socket read to handle multiple packets --- neqo-client/Cargo.toml | 1 + neqo-client/src/main.rs | 180 +++++++++++++++++---------- neqo-http3/src/connection_client.rs | 9 ++ neqo-transport/src/connection/mod.rs | 13 ++ 4 files changed, 137 insertions(+), 66 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 06b131a6a2..4d4f1b67db 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -17,6 +17,7 @@ neqo-qpack = { path = "./../neqo-qpack" } structopt = "0.3.7" url = "2.0" qlog = "0.10.0" +mio = "0.6.17" [features] deny-warnings = [] diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index a72d78890a..6bcbfae9be 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -9,6 +9,8 @@ use qlog::{events::EventImportance, streamer::QlogStreamer}; +use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; + use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -30,7 +32,7 @@ use std::{ fmt::{self, Display}, fs::{create_dir_all, File, OpenOptions}, io::{self, ErrorKind, Write}, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs, UdpSocket}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}, path::PathBuf, process::exit, rc::Rc, @@ -338,8 +340,8 @@ impl QuicParameters { } } 
-fn emit_datagram(socket: &UdpSocket, d: Datagram) -> io::Result<()> { - let sent = socket.send_to(&d[..], d.destination())?; +fn emit_datagram(socket: &mio::net::UdpSocket, d: Datagram) -> io::Result<()> { + let sent = socket.send_to(&d[..], &d.destination())?; if sent != d.len() { eprintln!("Unable to send all {} bytes of datagram", d.len()); } @@ -391,36 +393,73 @@ fn get_output_file( fn process_loop( local_addr: &SocketAddr, socket: &UdpSocket, + poll: &Poll, client: &mut Http3Client, handler: &mut Handler, ) -> Res { let buf = &mut [0u8; 2048]; + let mut events = Events::with_capacity(1024); + let mut timeout: Option = None; loop { + poll.poll( + &mut events, + timeout.or_else(|| Some(Duration::from_millis(0))), + )?; + + let mut datagrams: Vec = Vec::new(); + 'read: loop { + match socket.recv_from(&mut buf[..]) { + Err(ref err) + if err.kind() == ErrorKind::WouldBlock + || err.kind() == ErrorKind::Interrupted => + { + break 'read + } + Err(ref err) => { + eprintln!("UDP error: {}", err); + exit(1); + } + Ok((sz, remote)) => { + if sz == buf.len() { + eprintln!("Received more than {} bytes", buf.len()); + break 'read; + } + if sz > 0 { + let d = Datagram::new(remote, *local_addr, &buf[..sz]); + datagrams.push(d); + } + } + }; + } + if !datagrams.is_empty() { + client.process_multiple_input(datagrams, Instant::now()); + handler.maybe_key_update(client)?; + } + if let Http3State::Closed(..) 
= client.state() { return Ok(client.state()); } let mut exiting = !handler.handle(client)?; - loop { + 'write: loop { match client.process_output(Instant::now()) { Output::Datagram(dgram) => { if let Err(e) = emit_datagram(socket, dgram) { eprintln!("UDP write error: {}", e); client.close(Instant::now(), 0, e.to_string()); exiting = true; - break; + break 'write; } } - Output::Callback(duration) => { - socket.set_read_timeout(Some(duration)).unwrap(); - break; + Output::Callback(new_timeout) => { + timeout = Some(new_timeout); + break 'write; } Output::None => { // Not strictly necessary, since we're about to exit - socket.set_read_timeout(None).unwrap(); exiting = true; - break; + break 'write; } } } @@ -428,26 +467,6 @@ fn process_loop( if exiting { return Ok(client.state()); } - - match socket.recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == ErrorKind::WouldBlock || err.kind() == ErrorKind::Interrupted => {} - Err(err) => { - eprintln!("UDP error: {}", err); - exit(1) - } - Ok((sz, remote)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let d = Datagram::new(remote, *local_addr, &buf[..sz]); - client.process_input(d, Instant::now()); - handler.maybe_key_update(client)?; - } - } - }; } } @@ -802,6 +821,7 @@ fn handle_test( testcase: &String, args: &mut Args, socket: &UdpSocket, + poll: &Poll, local_addr: SocketAddr, remote_addr: SocketAddr, hostname: &str, @@ -823,7 +843,7 @@ fn handle_test( args, }; let mut h = Handler::new(url_handler, key_update, args.output_read_data); - process_loop(&local_addr, socket, &mut client, &mut h)?; + process_loop(&local_addr, socket, poll, &mut client, &mut h)?; } _ => { eprintln!("Unsupported test case: {}", testcase); @@ -877,9 +897,11 @@ fn create_http3_client( Ok(client) } +#[allow(clippy::too_many_arguments)] fn client( args: &mut Args, socket: &UdpSocket, + poll: &Poll, local_addr: SocketAddr, remote_addr: SocketAddr, hostname: &str, @@ -892,6 
+914,7 @@ fn client( &testcase, args, socket, + poll, local_addr, remote_addr, hostname, @@ -912,7 +935,7 @@ fn client( }; let mut h = Handler::new(url_handler, key_update, args.output_read_data); - process_loop(&local_addr, socket, &mut client, &mut h)?; + process_loop(&local_addr, socket, poll, &mut client, &mut h)?; let token = if args.resume { // If we haven't received an event, take a token if there is one. @@ -1026,7 +1049,7 @@ fn main() -> Res<()> { SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), }; - let socket = match UdpSocket::bind(local_addr) { + let socket = match UdpSocket::bind(&local_addr) { Err(e) => { eprintln!("Unable to bind UDP socket: {}", e); exit(1) @@ -1034,6 +1057,14 @@ fn main() -> Res<()> { Ok(s) => s, }; + let poll = Poll::new()?; + poll.register( + &socket, + Token(0), + Ready::readable() | Ready::writable(), + PollOpt::edge(), + )?; + let real_local = socket.local_addr().unwrap(); println!( "{} Client connecting: {:?} -> {:?}", @@ -1071,6 +1102,7 @@ fn main() -> Res<()> { old::old_client( &args, &socket, + &poll, real_local, remote_addr, &hostname, @@ -1081,6 +1113,7 @@ fn main() -> Res<()> { client( &mut args, &socket, + &poll, real_local, remote_addr, &hostname, @@ -1100,17 +1133,17 @@ mod old { collections::{HashMap, VecDeque}, fs::File, io::{ErrorKind, Write}, - net::{SocketAddr, UdpSocket}, + net::SocketAddr, path::PathBuf, process::exit, rc::Rc, - time::Instant, + time::{Duration, Instant}, }; use url::Url; use super::{qlog_new, KeyUpdateState, Res}; - + use mio::{Events, Poll}; use neqo_common::{event::Provider, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ @@ -1304,37 +1337,70 @@ mod old { fn process_loop_old( local_addr: &SocketAddr, - socket: &UdpSocket, + socket: &mio::net::UdpSocket, + poll: &Poll, client: &mut Connection, handler: &mut HandlerOld, ) -> Res { let buf = &mut [0u8; 2048]; + let mut events = Events::with_capacity(1024); + let mut 
timeout: Option = None; loop { + poll.poll( + &mut events, + timeout.or_else(|| Some(Duration::from_millis(0))), + )?; + + 'read: loop { + match socket.recv_from(&mut buf[..]) { + Err(ref err) + if err.kind() == ErrorKind::WouldBlock + || err.kind() == ErrorKind::Interrupted => + { + break 'read + } + Err(ref err) => { + eprintln!("UDP error: {}", err); + exit(1); + } + Ok((sz, remote)) => { + if sz == buf.len() { + eprintln!("Received more than {} bytes", buf.len()); + break 'read; + } + if sz > 0 { + let d = Datagram::new(remote, *local_addr, &buf[..sz]); + client.process_input(d, Instant::now()); + handler.maybe_key_update(client)?; + } + } + }; + } + if let State::Closed(..) = client.state() { return Ok(client.state().clone()); } let mut exiting = !handler.handle(client)?; - loop { + 'write: loop { match client.process_output(Instant::now()) { Output::Datagram(dgram) => { if let Err(e) = emit_datagram(socket, dgram) { eprintln!("UDP write error: {}", e); client.close(Instant::now(), 0, e.to_string()); exiting = true; - break; + break 'write; } } - Output::Callback(duration) => { - socket.set_read_timeout(Some(duration)).unwrap(); - break; + Output::Callback(new_timeout) => { + timeout = Some(new_timeout); + break 'write; } Output::None => { // Not strictly necessary, since we're about to exit - socket.set_read_timeout(None).unwrap(); exiting = true; - break; + break 'write; } } } @@ -1342,32 +1408,14 @@ mod old { if exiting { return Ok(client.state().clone()); } - - match socket.recv_from(&mut buf[..]) { - Err(err) => { - if err.kind() != ErrorKind::WouldBlock && err.kind() != ErrorKind::Interrupted { - eprintln!("UDP error: {}", err); - exit(1); - } - } - Ok((sz, addr)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let d = Datagram::new(addr, *local_addr, &buf[..sz]); - client.process_input(d, Instant::now()); - handler.maybe_key_update(client)?; - } - } - } } } + 
#[allow(clippy::too_many_arguments)] pub fn old_client( args: &Args, - socket: &UdpSocket, + socket: &mio::net::UdpSocket, + poll: &Poll, local_addr: SocketAddr, remote_addr: SocketAddr, origin: &str, @@ -1410,7 +1458,7 @@ mod old { key_update, }; - process_loop_old(&local_addr, socket, &mut client, &mut h)?; + process_loop_old(&local_addr, socket, poll, &mut client, &mut h)?; let token = if args.resume { // If we haven't received an event, take a token if there is one. diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 51cd8e2935..3cb6d94c8a 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -828,6 +828,15 @@ impl Http3Client { self.process_http3(now); } + pub fn process_multiple_input(&mut self, dgrams: Vec, now: Instant) { + qtrace!([self], "Process multiple datagrams, len={}", dgrams.len()); + if dgrams.is_empty() { + return; + } + self.conn.process_multiple_input(dgrams, now); + self.process_http3(now); + } + /// This should not be used because it gives access to functionalities that may disrupt the /// proper functioning of the HTTP/3 session. /// Only used by `neqo-interop`. diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 3db127c4e7..e30b120129 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -923,6 +923,19 @@ impl Connection { self.streams.cleanup_closed_streams(); } + /// Process new input datagrams on the connection. + pub fn process_multiple_input(&mut self, dgrams: Vec, now: Instant) { + if dgrams.is_empty() { + return; + } + + for d in dgrams { + self.input(d, now, now); + } + self.process_saved(now); + self.streams.cleanup_closed_streams(); + } + /// Get the time that we next need to be called back, relative to `now`. 
fn next_delay(&mut self, now: Instant, paced: bool) -> Duration { qtrace!([self], "Get callback delay {:?}", now); From b3341cff87d066b93c46c8c0f2428ce154abe79e Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 5 Jan 2024 22:18:13 +0100 Subject: [PATCH 046/321] refactor: inline format args (#1540) Inline variables into format strings wherever possible. https://rust-lang.github.io/rust-clippy/master/index.html#/uninlined_format_args --- neqo-client/src/main.rs | 86 +++++++++++++--------------- neqo-interop/src/main.rs | 40 ++++++------- neqo-server/src/main.rs | 17 +++--- neqo-server/src/old_https.rs | 10 ++-- neqo-transport/src/connection/mod.rs | 4 +- neqo-transport/src/frame.rs | 2 +- neqo-transport/src/lib.rs | 2 +- neqo-transport/src/tparams.rs | 2 +- 8 files changed, 77 insertions(+), 86 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 6bcbfae9be..8d85eff9b0 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -78,7 +78,7 @@ impl From for ClientError { impl Display for ClientError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Error: {:?}", self)?; + write!(f, "Error: {self:?}")?; Ok(()) } } @@ -370,7 +370,7 @@ fn get_output_file( return None; } - eprintln!("Saving {} to {:?}", url, out_path); + eprintln!("Saving {url} to {out_path:?}"); if let Some(parent) = out_path.parent() { create_dir_all(parent).ok()?; @@ -416,7 +416,7 @@ fn process_loop( break 'read } Err(ref err) => { - eprintln!("UDP error: {}", err); + eprintln!("UDP error: {err}"); exit(1); } Ok((sz, remote)) => { @@ -446,7 +446,7 @@ fn process_loop( match client.process_output(Instant::now()) { Output::Datagram(dgram) => { if let Err(e) = emit_datagram(socket, dgram) { - eprintln!("UDP write error: {}", e); + eprintln!("UDP write error: {e}"); client.close(Instant::now(), 0, e.to_string()); exiting = true; break 'write; @@ -517,7 +517,7 @@ struct DownloadStreamHandler { impl StreamHandler for DownloadStreamHandler { fn 
process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { if self.out_file.is_none() { - println!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); + println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); } } @@ -535,15 +535,15 @@ impl StreamHandler for DownloadStreamHandler { } return Ok(true); } else if !output_read_data { - println!("READ[{}]: {} bytes", stream_id, sz); + println!("READ[{stream_id}]: {sz} bytes"); } else if let Ok(txt) = String::from_utf8(data.clone()) { - println!("READ[{}]: {}", stream_id, txt); + println!("READ[{stream_id}]: {txt}"); } else { println!("READ[{}]: 0x{}", stream_id, hex(&data)); } if fin && self.out_file.is_none() { - println!("", stream_id); + println!(""); } Ok(true) @@ -561,7 +561,7 @@ struct UploadStreamHandler { impl StreamHandler for UploadStreamHandler { fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { - println!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); + println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); } fn process_data_readable( @@ -577,7 +577,7 @@ impl StreamHandler for UploadStreamHandler { let parsed: usize = trimmed_txt.parse().unwrap(); if parsed == self.data.len() { let upload_time = Instant::now().duration_since(self.start); - println!("Stream ID: {:?}, Upload time: {:?}", stream_id, upload_time); + println!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); } } else { panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data)); @@ -645,10 +645,7 @@ impl<'a> URLHandler<'a> { Priority::default(), ) { Ok(client_stream_id) => { - println!( - "Successfully created stream id {} for {}", - client_stream_id, url - ); + println!("Successfully created stream id {client_stream_id} for {url}"); let handler: Box = StreamHandlerType::make_handler( &self.handler_type, @@ -730,7 +727,7 @@ impl<'a> Handler<'a> { handler.process_header_ready(stream_id, fin, headers); } None => { - println!("Data on unexpected stream: {}", stream_id); + println!("Data on unexpected stream: {stream_id}"); return Ok(false); } } @@ -742,7 +739,7 @@ impl<'a> Handler<'a> { let mut stream_done = false; match self.url_handler.stream_handler(&stream_id) { None => { - println!("Data on unexpected stream: {}", stream_id); + println!("Data on unexpected stream: {stream_id}"); return Ok(false); } Some(handler) => loop { @@ -777,7 +774,7 @@ impl<'a> Handler<'a> { Http3ClientEvent::DataWritable { stream_id } => { match self.url_handler.stream_handler(&stream_id) { None => { - println!("Data on unexpected stream: {}", stream_id); + println!("Data on unexpected stream: {stream_id}"); return Ok(false); } Some(handler) => { @@ -792,7 +789,7 @@ impl<'a> Handler<'a> { } Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), _ => { - println!("Unhandled event {:?}", event); + println!("Unhandled event {event:?}"); } } } @@ -846,7 +843,7 @@ fn handle_test( 
process_loop(&local_addr, socket, poll, &mut client, &mut h)?; } _ => { - eprintln!("Unsupported test case: {}", testcase); + eprintln!("Unsupported test case: {testcase}"); exit(127) } } @@ -952,7 +949,7 @@ fn client( fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { if let Some(qlog_dir) = &args.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); - let filename = format!("{}-{}.sqlog", hostname, cid); + let filename = format!("{hostname}-{cid}.sqlog"); qlog_path.push(filename); let f = OpenOptions::new() @@ -1024,22 +1021,20 @@ fn main() -> Res<()> { for ((_scheme, host, port), urls) in urls_by_origin.into_iter().filter_map(|(k, v)| match k { Origin::Tuple(s, h, p) => Some(((s, h, p), v)), Origin::Opaque(x) => { - eprintln!("Opaque origin {:?}", x); + eprintln!("Opaque origin {x:?}"); None } }) { - let remote_addr = format!("{}:{}", host, port) - .to_socket_addrs()? - .find(|addr| { - !matches!( - (addr, args.ipv4_only, args.ipv6_only), - (SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false) - ) - }); + let remote_addr = format!("{host}:{port}").to_socket_addrs()?.find(|addr| { + !matches!( + (addr, args.ipv4_only, args.ipv6_only), + (SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false) + ) + }); let remote_addr = match remote_addr { Some(a) => a, None => { - eprintln!("No compatible address found for: {}", host); + eprintln!("No compatible address found for: {host}"); exit(1); } }; @@ -1051,7 +1046,7 @@ fn main() -> Res<()> { let socket = match UdpSocket::bind(&local_addr) { Err(e) => { - eprintln!("Unable to bind UDP socket: {}", e); + eprintln!("Unable to bind UDP socket: {e}"); exit(1) } Ok(s) => s, @@ -1073,7 +1068,7 @@ fn main() -> Res<()> { remote_addr, ); - let hostname = format!("{}", host); + let hostname = format!("{host}"); let mut token: Option = None; let mut remaining = &urls[..]; let mut first = true; @@ -1084,8 +1079,7 @@ fn main() -> Res<()> { remaining = &remaining[1..]; if args.resume && 
first && remaining.is_empty() { println!( - "Error: resumption to {} cannot work without at least 2 URLs.", - hostname + "Error: resumption to {hostname} cannot work without at least 2 URLs." ); exit(127); } @@ -1188,7 +1182,7 @@ mod old { .expect("download_next called with empty queue"); match client.stream_create(StreamType::BiDi) { Ok(client_stream_id) => { - println!("Created stream {} for {}", client_stream_id, url); + println!("Created stream {client_stream_id} for {url}"); let req = format!("GET {}\r\n", url.path()); _ = client .stream_send(client_stream_id, req.as_bytes()) @@ -1200,7 +1194,7 @@ mod old { true } Err(e @ Error::StreamLimitError) | Err(e @ Error::ConnectionState) => { - println!("Cannot create stream {:?}", e); + println!("Cannot create stream {e:?}"); self.url_queue.push_front(url); false } @@ -1228,7 +1222,7 @@ mod old { if let Some(out_file) = maybe_out_file { out_file.write_all(&data[..sz])?; } else if !output_read_data { - println!("READ[{}]: {} bytes", stream_id, sz); + println!("READ[{stream_id}]: {sz} bytes"); } else { println!( "READ[{}]: {}", @@ -1252,7 +1246,7 @@ mod old { let mut maybe_maybe_out_file = self.streams.get_mut(&stream_id); match &mut maybe_maybe_out_file { None => { - println!("Data on unexpected stream: {}", stream_id); + println!("Data on unexpected stream: {stream_id}"); return Ok(false); } Some(maybe_out_file) => { @@ -1265,7 +1259,7 @@ mod old { if fin_recvd { if maybe_out_file.is_none() { - println!("", stream_id); + println!(""); } self.streams.remove(&stream_id); self.download_urls(client); @@ -1302,13 +1296,13 @@ mod old { }; } ConnectionEvent::SendStreamWritable { stream_id } => { - println!("stream {} writable", stream_id) + println!("stream {stream_id} writable") } ConnectionEvent::SendStreamComplete { stream_id } => { - println!("stream {} complete", stream_id); + println!("stream {stream_id} complete"); } ConnectionEvent::SendStreamCreatable { stream_type } => { - println!("stream {:?} creatable", 
stream_type); + println!("stream {stream_type:?} creatable"); if stream_type == StreamType::BiDi { self.download_urls(client); } @@ -1316,7 +1310,7 @@ mod old { ConnectionEvent::StateChange(State::WaitInitial) | ConnectionEvent::StateChange(State::Handshaking) | ConnectionEvent::StateChange(State::Connected) => { - println!("{:?}", event); + println!("{event:?}"); self.download_urls(client); } ConnectionEvent::StateChange(State::Confirmed) => { @@ -1326,7 +1320,7 @@ mod old { self.token = Some(token); } _ => { - println!("Unhandled event {:?}", event); + println!("Unhandled event {event:?}"); } } } @@ -1360,7 +1354,7 @@ mod old { break 'read } Err(ref err) => { - eprintln!("UDP error: {}", err); + eprintln!("UDP error: {err}"); exit(1); } Ok((sz, remote)) => { @@ -1387,7 +1381,7 @@ mod old { match client.process_output(Instant::now()) { Output::Datagram(dgram) => { if let Err(e) = emit_datagram(socket, dgram) { - eprintln!("UDP write error: {}", e); + eprintln!("UDP write error: {e}"); client.close(Instant::now(), 0, e.to_string()); exiting = true; break 'write; diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index 0469c7bab0..bb8f38c76b 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -178,11 +178,11 @@ impl Handler for H9Handler { fn handle(&mut self, client: &mut Connection) -> bool { let mut data = vec![0; 4000]; while let Some(event) = client.next_event() { - eprintln!("Event: {:?}", event); + eprintln!("Event: {event:?}"); match event { ConnectionEvent::RecvStreamReadable { stream_id } => { if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {}", stream_id); + eprintln!("Data on unexpected stream: {stream_id}"); return false; } @@ -190,20 +190,20 @@ impl Handler for H9Handler { .stream_recv(stream_id, &mut data) .expect("Read should succeed"); data.truncate(sz); - eprintln!("Length={}", sz); + eprintln!("Length={sz}"); self.rbytes += sz; if fin { - eprintln!("", stream_id); + eprintln!(""); 
client.close(Instant::now(), 0, "kthxbye!"); self.rsfin = true; return false; } } ConnectionEvent::SendStreamWritable { stream_id } => { - eprintln!("stream {} writable", stream_id) + eprintln!("stream {stream_id} writable") } _ => { - eprintln!("Unexpected event {:?}", event); + eprintln!("Unexpected event {event:?}"); } } } @@ -328,15 +328,15 @@ impl H3Handler { .. } => { if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {}", stream_id); + eprintln!("Data on unexpected stream: {stream_id}"); return false; } - eprintln!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); + eprintln!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); } Http3ClientEvent::DataReadable { stream_id } => { if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {}", stream_id); + eprintln!("Data on unexpected stream: {stream_id}"); return false; } @@ -345,12 +345,12 @@ impl H3Handler { .read_data(Instant::now(), stream_id, &mut data) .expect("Read should succeed"); if let Ok(txt) = String::from_utf8(data.clone()) { - eprintln!("READ[{}]: {}", stream_id, txt); + eprintln!("READ[{stream_id}]: {txt}"); } else { eprintln!("READ[{}]: 0x{}", stream_id, hex(&data)); } if fin { - eprintln!("", stream_id); + eprintln!(""); if close { self.h3.close(Instant::now(), 0, "kthxbye!"); } @@ -472,14 +472,14 @@ fn test_connect(nctx: &NetworkCtx, test: &Test, peer: &Peer) -> Result st, Err(e) => { - return Err(format!("ERROR: {}", e)); + return Err(format!("ERROR: {e}")); } }; if st.connected() { Ok(client) } else { - Err(format!("{:?}", st)) + Err(format!("{st:?}")) } } @@ -494,7 +494,7 @@ fn test_h9(nctx: &NetworkCtx, client: &mut Connection) -> Result<(), String> { let res = process_loop(nctx, client, &mut hc); if let Err(e) = res { - return Err(format!("ERROR: {}", e)); + return Err(format!("ERROR: {e}")); } if hc.rbytes == 0 { return Err(String::from("Empty response")); @@ -522,7 +522,7 @@ fn connect_h3(nctx: &NetworkCtx, peer: &Peer, 
client: Connection) -> Result R hc.streams.insert(client_stream_id); if let Err(e) = process_loop_h3(nctx, &mut hc, false, *test != Test::D) { - return Err(format!("ERROR: {}", e)); + return Err(format!("ERROR: {e}")); } if *test == Test::D { @@ -562,7 +562,7 @@ fn test_h3(nctx: &NetworkCtx, peer: &Peer, client: Connection, test: &Test) -> R hc.h3.stream_close_send(client_stream_id).unwrap(); hc.streams.insert(client_stream_id); if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {}", e)); + return Err(format!("ERROR: {e}")); } if hc.h3.qpack_decoder_stats().dynamic_table_references == 0 { @@ -600,7 +600,7 @@ fn test_h3_rz( hc.streams.insert(client_stream_id); if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {}", e)); + return Err(format!("ERROR: {e}")); } // get resumption ticket @@ -652,7 +652,7 @@ fn test_h3_rz( mem::drop(hc.h3.stream_close_send(client_stream_id)); hc.streams.insert(client_stream_id); if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {}", e)); + return Err(format!("ERROR: {e}")); } let recvd_0rtt_reject = |e| e == Http3ClientEvent::ZeroRttRejected; @@ -662,7 +662,7 @@ fn test_h3_rz( } else { println!("Test resumption"); if let Err(e) = process_loop_h3(nctx, &mut hc, true, true) { - return Err(format!("ERROR: {}", e)); + return Err(format!("ERROR: {e}")); } } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 6eb85f17cf..3eda9189b3 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -85,7 +85,7 @@ impl From for ServerError { impl Display for ServerError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Error: {:?}", self)?; + write!(f, "Error: {self:?}")?; Ok(()) } } @@ -343,7 +343,7 @@ fn qns_read_response(filename: &str) -> Option> { Some(data) } Err(e) => { - eprintln!("Error reading data: {:?}", e); + eprintln!("Error reading data: {e:?}"); None } } @@ -479,7 +479,7 
@@ impl HttpServer for SimpleServer { headers, fin, } => { - println!("Headers (request={} fin={}): {:?}", stream, fin, headers); + println!("Headers (request={stream} fin={fin}): {headers:?}"); let post = if let Some(method) = headers.iter().find(|&h| h.name() == ":method") { @@ -592,7 +592,7 @@ fn read_dgram( let (sz, remote_addr) = match socket.recv_from(&mut buf[..]) { Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return Ok(None), Err(err) => { - eprintln!("UDP recv error: {:?}", err); + eprintln!("UDP recv error: {err:?}"); return Err(err); } Ok(res) => res, @@ -652,7 +652,7 @@ impl ServersRunner { for (i, host) in self.hosts.iter().enumerate() { let socket = match UdpSocket::bind(host) { Err(err) => { - eprintln!("Unable to bind UDP socket: {}", err); + eprintln!("Unable to bind UDP socket: {err}"); return Err(err); } Ok(s) => s, @@ -660,7 +660,7 @@ impl ServersRunner { let local_addr = match socket.local_addr() { Err(err) => { - eprintln!("Socket local address not bound: {}", err); + eprintln!("Socket local address not bound: {err}"); return Err(err); } Ok(s) => s, @@ -671,10 +671,7 @@ impl ServersRunner { } else { " as well as V4" }; - println!( - "Server waiting for connection on: {:?}{}", - local_addr, also_v4 - ); + println!("Server waiting for connection on: {local_addr:?}{also_v4}"); self.poll.register( &socket, diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index 1cb1d59fc7..c88464ff31 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -158,7 +158,7 @@ impl Http09Server { } Some(path) => { let path = path.as_str(); - eprintln!("Path = '{}'", path); + eprintln!("Path = '{path}'"); if args.qns_test.is_some() { qns_read_response(path) } else { @@ -173,7 +173,7 @@ impl Http09Server { fn stream_writable(&mut self, stream_id: StreamId, conn: &mut ActiveConnectionRef) { match self.write_state.get_mut(&stream_id) { None => { - eprintln!("Unknown stream {}, ignoring event", stream_id); + 
eprintln!("Unknown stream {stream_id}, ignoring event"); } Some(stream_state) => { stream_state.writable = true; @@ -186,7 +186,7 @@ impl Http09Server { *offset += sent; self.server.add_to_waiting(conn.clone()); if *offset == data.len() { - eprintln!("Sent {} on {}, closing", sent, stream_id); + eprintln!("Sent {sent} on {stream_id}, closing"); conn.borrow_mut().stream_close_send(stream_id).unwrap(); self.write_state.remove(&stream_id); } else { @@ -211,7 +211,7 @@ impl HttpServer for Http09Server { None => break, Some(e) => e, }; - eprintln!("Event {:?}", event); + eprintln!("Event {event:?}"); match event { ConnectionEvent::NewStream { stream_id } => { self.write_state @@ -231,7 +231,7 @@ impl HttpServer for Http09Server { } ConnectionEvent::StateChange(_) | ConnectionEvent::SendStreamComplete { .. } => (), - e => eprintln!("unhandled event {:?}", e), + e => eprintln!("unhandled event {e:?}"), } } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index e30b120129..7ceb0c48f0 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -421,7 +421,7 @@ impl Connection { #[cfg(test)] test_frame_writer: None, }; - c.stats.borrow_mut().init(format!("{}", c)); + c.stats.borrow_mut().init(format!("{c}")); Ok(c) } @@ -817,7 +817,7 @@ impl Connection { ) -> Res { if let Err(v) = &res { #[cfg(debug_assertions)] - let msg = format!("{:?}", v); + let msg = format!("{v:?}"); #[cfg(not(debug_assertions))] let msg = ""; let error = ConnectionError::Transport(v.clone()); diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 8d56fd3000..18222f8c62 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -368,7 +368,7 @@ impl<'a> Frame<'a> { )), Self::Padding => None, Self::Datagram { data, .. 
} => Some(format!("Datagram {{ len: {} }}", data.len())), - _ => Some(format!("{:?}", self)), + _ => Some(format!("{self:?}")), } } diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index daff7e73c2..608caeddc6 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -186,7 +186,7 @@ impl ::std::error::Error for Error { impl ::std::fmt::Display for Error { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "Transport error: {:?}", self) + write!(f, "Transport error: {self:?}") } } diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index df2f1b34c4..fca54d8208 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -771,7 +771,7 @@ mod tests { let tps2 = TransportParameters::decode(&mut enc.as_decoder()).expect("Couldn't decode"); assert_eq!(tps, tps2); - println!("TPS = {:?}", tps); + println!("TPS = {tps:?}"); assert_eq!(tps2.get_integer(IDLE_TIMEOUT), 0); // Default assert_eq!(tps2.get_integer(MAX_ACK_DELAY), 25); // Default assert_eq!(tps2.get_integer(ACTIVE_CONNECTION_ID_LIMIT), 2); // Default From 28b5c9a669c5f028b425bf96fcb02374c88d454e Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 5 Jan 2024 22:28:09 +0100 Subject: [PATCH 047/321] refactor: match single variant over wildcard (#1541) Replace match wildcard whereever it is matching a single variant only. 
https://rust-lang.github.io/rust-clippy/master/index.html#/match_wildcard_for_single_variants --- neqo-transport/src/lib.rs | 2 +- neqo-transport/src/send_stream.rs | 4 ++-- neqo-transport/src/server.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index 608caeddc6..e3e50102d1 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -202,7 +202,7 @@ impl ConnectionError { pub fn app_code(&self) -> Option { match self { Self::Application(e) => Some(*e), - _ => None, + Self::Transport(_) => None, } } } diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 21877ab23d..8ade4b4c7f 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -751,7 +751,7 @@ impl SendStream { final_written, .. } => *final_retired + *final_written, - _ => 0, + SendStreamState::Ready { .. } => 0, } } @@ -763,7 +763,7 @@ impl SendStream { SendStreamState::DataRecvd { retired, .. } => *retired, SendStreamState::ResetSent { final_retired, .. } | SendStreamState::ResetRecvd { final_retired, .. } => *final_retired, - _ => 0, + SendStreamState::Ready { .. } => 0, } } diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 75cc6d42d8..506e90ad14 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -278,7 +278,7 @@ impl Server { self.timers.add(next, Rc::clone(&c)); } } - _ => { + Output::None => { self.remove_timer(&c); } } From d2fa0bd6bc9d3d3a90ec1921fc8bce5b484f3190 Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Wed, 10 Jan 2024 13:37:08 +0900 Subject: [PATCH 048/321] Undo the restriction on the time dependency. (#1545) It was done because of time's MSRV, but neqo's MSRV was raised to 1.70.0, which now more recent than what any version of time requires. 
--- neqo-common/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 68583e21dc..bb3c7c2a15 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -12,7 +12,7 @@ log = {version = "0.4.0", default-features = false} env_logger = {version = "0.10", default-features = false} lazy_static = "1.3.0" qlog = "0.10.0" -time = {version = "=0.3.23", features = ["formatting"]} +time = {version = "0.3", features = ["formatting"]} [features] deny-warnings = [] From 4d98da3cd87d6d197206fc4fb5042e0b90142707 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 10 Jan 2024 21:02:42 +1100 Subject: [PATCH 049/321] Ensure that too much crypto data is fatal for a connection (#1546) This is a denial of service opportunity, but one that only is available to an authenticated peer or while we are willing to receive Initial packets. This fits the existing DoS threat model. --- neqo-transport/src/connection/mod.rs | 2 +- neqo-transport/src/crypto.rs | 13 ++++- neqo-transport/src/lib.rs | 38 ++++++++------ neqo-transport/src/packet/mod.rs | 30 ++++++----- neqo-transport/src/recovery.rs | 68 +++++++++++++----------- neqo-transport/src/recv_stream.rs | 36 +++++++------ neqo-transport/tests/connection.rs | 78 +++++++++++++++++++++++++++- 7 files changed, 187 insertions(+), 78 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 7ceb0c48f0..3a30c0f4d3 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -2645,7 +2645,7 @@ impl Connection { &data ); self.stats.borrow_mut().frame_rx.crypto += 1; - self.crypto.streams.inbound_frame(space, offset, data); + self.crypto.streams.inbound_frame(space, offset, data)?; if self.crypto.streams.data_ready(space) { let mut buf = Vec::new(); let read = self.crypto.streams.read_to_end(space, &mut buf); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs 
index e97f8fc9b7..363ed097ef 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -1400,6 +1400,9 @@ pub enum CryptoStreams { } impl CryptoStreams { + /// Keep around 64k if a server wants to push excess data at us. + const BUFFER_LIMIT: u64 = 65536; + pub fn discard(&mut self, space: PacketNumberSpace) { match space { PacketNumberSpace::Initial => { @@ -1434,8 +1437,14 @@ impl CryptoStreams { self.get_mut(space).unwrap().tx.send(data); } - pub fn inbound_frame(&mut self, space: PacketNumberSpace, offset: u64, data: &[u8]) { - self.get_mut(space).unwrap().rx.inbound_frame(offset, data); + pub fn inbound_frame(&mut self, space: PacketNumberSpace, offset: u64, data: &[u8]) -> Res<()> { + let rx = &mut self.get_mut(space).unwrap().rx; + rx.inbound_frame(offset, data); + if rx.received() - rx.retired() <= Self::BUFFER_LIMIT { + Ok(()) + } else { + Err(Error::CryptoBufferExceeded) + } } pub fn data_ready(&self, space: PacketNumberSpace) -> bool { diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index e3e50102d1..35bdd7d34a 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -38,26 +38,32 @@ pub mod tparams; mod tracking; pub mod version; -pub use self::cc::CongestionControlAlgorithm; -pub use self::cid::{ - ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef, - EmptyConnectionIdGenerator, RandomConnectionIdGenerator, +pub use self::{ + cc::CongestionControlAlgorithm, + cid::{ + ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef, + EmptyConnectionIdGenerator, RandomConnectionIdGenerator, + }, + connection::{ + params::{ConnectionParameters, ACK_RATIO_SCALE}, + Connection, Output, State, ZeroRttState, + }, + events::{ConnectionEvent, ConnectionEvents}, + frame::CloseError, + quic_datagrams::DatagramTracking, + stats::Stats, + stream_id::{StreamId, StreamType}, + version::Version, }; -pub use self::connection::{ - params::ConnectionParameters, 
params::ACK_RATIO_SCALE, Connection, Output, State, ZeroRttState, -}; -pub use self::events::{ConnectionEvent, ConnectionEvents}; -pub use self::frame::CloseError; -pub use self::quic_datagrams::DatagramTracking; -pub use self::stats::Stats; -pub use self::stream_id::{StreamId, StreamType}; -pub use self::version::Version; -pub use self::recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}; -pub use self::send_stream::{SendStreamStats, SEND_BUFFER_SIZE}; +pub use self::{ + recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}, + send_stream::{SendStreamStats, SEND_BUFFER_SIZE}, +}; pub type TransportError = u64; const ERROR_APPLICATION_CLOSE: TransportError = 12; +const ERROR_CRYPTO_BUFFER_EXCEEDED: TransportError = 13; const ERROR_AEAD_LIMIT_REACHED: TransportError = 15; #[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)] @@ -76,6 +82,7 @@ pub enum Error { ProtocolViolation, InvalidToken, ApplicationError, + CryptoBufferExceeded, CryptoError(CryptoError), QlogError, CryptoAlert(u8), @@ -142,6 +149,7 @@ impl Error { Self::KeysExhausted => ERROR_AEAD_LIMIT_REACHED, Self::ApplicationError => ERROR_APPLICATION_CLOSE, Self::NoAvailablePath => 16, + Self::CryptoBufferExceeded => ERROR_CRYPTO_BUFFER_EXCEEDED, Self::CryptoAlert(a) => 0x100 + u64::from(*a), // As we have a special error code for ECH fallbacks, we lose the alert. // Send the server "ech_required" directly. diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 98fadc1cd3..ac4765f75d 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -5,20 +5,24 @@ // except according to those terms. // Encoding and decoding packets off the wire. 
-use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}; -use crate::crypto::{CryptoDxState, CryptoSpace, CryptoStates}; -use crate::version::{Version, WireVersion}; -use crate::{Error, Res}; +use crate::{ + cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, + crypto::{CryptoDxState, CryptoSpace, CryptoStates}, + version::{Version, WireVersion}, + Error, Res, +}; use neqo_common::{hex, hex_with_len, qtrace, qwarn, Decoder, Encoder}; use neqo_crypto::random; -use std::cmp::min; -use std::convert::TryFrom; -use std::fmt; -use std::iter::ExactSizeIterator; -use std::ops::{Deref, DerefMut, Range}; -use std::time::Instant; +use std::{ + cmp::min, + convert::TryFrom, + fmt, + iter::ExactSizeIterator, + ops::{Deref, DerefMut, Range}, + time::Instant, +}; pub const PACKET_BIT_LONG: u8 = 0x80; const PACKET_BIT_SHORT: u8 = 0x00; @@ -865,8 +869,10 @@ impl Deref for DecryptedPacket { #[cfg(all(test, not(feature = "fuzzing")))] mod tests { use super::*; - use crate::crypto::{CryptoDxState, CryptoStates}; - use crate::{EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version}; + use crate::{ + crypto::{CryptoDxState, CryptoStates}, + EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version, + }; use neqo_common::Encoder; use test_fixture::{fixture_init, now}; diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 9256a0727c..c625f18fdf 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -8,29 +8,33 @@ #![deny(clippy::pedantic)] -use std::cmp::{max, min}; -use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::mem; -use std::ops::RangeInclusive; -use std::time::{Duration, Instant}; +use std::{ + cmp::{max, min}, + collections::BTreeMap, + convert::TryFrom, + mem, + ops::RangeInclusive, + time::{Duration, Instant}, +}; use smallvec::{smallvec, SmallVec}; use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn}; -use 
crate::ackrate::AckRate; -use crate::cid::ConnectionIdEntry; -use crate::crypto::CryptoRecoveryToken; -use crate::packet::PacketNumber; -use crate::path::{Path, PathRef}; -use crate::qlog::{self, QlogMetric}; -use crate::quic_datagrams::DatagramTracking; -use crate::rtt::RttEstimate; -use crate::send_stream::SendStreamRecoveryToken; -use crate::stats::{Stats, StatsCell}; -use crate::stream_id::{StreamId, StreamType}; -use crate::tracking::{AckToken, PacketNumberSpace, PacketNumberSpaceSet, SentPacket}; +use crate::{ + ackrate::AckRate, + cid::ConnectionIdEntry, + crypto::CryptoRecoveryToken, + packet::PacketNumber, + path::{Path, PathRef}, + qlog::{self, QlogMetric}, + quic_datagrams::DatagramTracking, + rtt::RttEstimate, + send_stream::SendStreamRecoveryToken, + stats::{Stats, StatsCell}, + stream_id::{StreamId, StreamType}, + tracking::{AckToken, PacketNumberSpace, PacketNumberSpaceSet, SentPacket}, +}; pub(crate) const PACKET_THRESHOLD: u64 = 3; /// `ACK_ONLY_SIZE_LIMIT` is the minimum size of the congestion window. 
@@ -806,7 +810,7 @@ impl LossRecovery { (Some(loss_time), Some(pto_time)) => Some(min(loss_time, pto_time)), (Some(loss_time), None) => Some(loss_time), (None, Some(pto_time)) => Some(pto_time), - _ => None, + (None, None) => None, } } @@ -997,18 +1001,22 @@ mod tests { use super::{ LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket, FAST_PTO_SCALE, }; - use crate::cc::CongestionControlAlgorithm; - use crate::cid::{ConnectionId, ConnectionIdEntry}; - use crate::packet::PacketType; - use crate::path::{Path, PathRef}; - use crate::rtt::RttEstimate; - use crate::stats::{Stats, StatsCell}; + use crate::{ + cc::CongestionControlAlgorithm, + cid::{ConnectionId, ConnectionIdEntry}, + packet::PacketType, + path::{Path, PathRef}, + rtt::RttEstimate, + stats::{Stats, StatsCell}, + }; use neqo_common::qlog::NeqoQlog; - use std::cell::RefCell; - use std::convert::TryInto; - use std::ops::{Deref, DerefMut, RangeInclusive}; - use std::rc::Rc; - use std::time::{Duration, Instant}; + use std::{ + cell::RefCell, + convert::TryInto, + ops::{Deref, DerefMut, RangeInclusive}, + rc::Rc, + time::{Duration, Instant}, + }; use test_fixture::{addr, now}; // Shorthand for a time in milliseconds. diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index ff7b497a5a..659627f7cf 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -7,23 +7,27 @@ // Building a stream of ordered bytes to give the application from a series of // incoming STREAM frames. 
-use std::cmp::max; -use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::mem; -use std::rc::{Rc, Weak}; +use std::{ + cmp::max, + collections::BTreeMap, + convert::TryFrom, + mem, + rc::{Rc, Weak}, +}; use smallvec::SmallVec; -use crate::events::ConnectionEvents; -use crate::fc::ReceiverFlowControl; -use crate::frame::FRAME_TYPE_STOP_SENDING; -use crate::packet::PacketBuilder; -use crate::recovery::{RecoveryToken, StreamRecoveryToken}; -use crate::send_stream::SendStreams; -use crate::stats::FrameStats; -use crate::stream_id::StreamId; -use crate::{AppError, Error, Res}; +use crate::{ + events::ConnectionEvents, + fc::ReceiverFlowControl, + frame::FRAME_TYPE_STOP_SENDING, + packet::PacketBuilder, + recovery::{RecoveryToken, StreamRecoveryToken}, + send_stream::SendStreams, + stats::FrameStats, + stream_id::StreamId, + AppError, Error, Res, +}; use neqo_common::{qtrace, Role}; use std::cell::RefCell; @@ -278,11 +282,11 @@ impl RxStreamOrderer { } /// Bytes read by the application. 
- fn retired(&self) -> u64 { + pub fn retired(&self) -> u64 { self.retired } - fn received(&self) -> u64 { + pub fn received(&self) -> u64 { self.received } diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 6dd3d263cd..1c95a4ad2d 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -12,8 +12,9 @@ mod common; use common::{ apply_header_protection, decode_initial_header, initial_aead_and_hp, remove_header_protection, }; -use neqo_common::{Datagram, Decoder, Role}; -use neqo_transport::{ConnectionParameters, State, Version}; +use neqo_common::{Datagram, Decoder, Encoder, Role}; +use neqo_transport::{ConnectionError, ConnectionParameters, Error, State, Version}; +use std::convert::TryFrom; use test_fixture::{self, default_client, default_server, new_client, now, split_datagram}; #[test] @@ -125,3 +126,76 @@ fn reorder_server_initial() { client.process_input(done.unwrap(), now()); assert_eq!(*client.state(), State::Confirmed); } + +/// Overflow the crypto buffer. +#[test] +fn overflow_crypto() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()).dgram(); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_ref().unwrap(), Role::Client); + let client_dcid = client_dcid.to_owned(); + + let server_packet = server.process(client_initial, now()).dgram(); + let (server_initial, _) = split_datagram(server_packet.as_ref().unwrap()); + + // Now decrypt the server packet to get AEAD and HP instances. + // We won't be using the packet, but making new ones. 
+ let (aead, hp) = initial_aead_and_hp(&client_dcid, Role::Server); + let (_, server_dcid, server_scid, _) = decode_initial_header(&server_initial, Role::Server); + + // Send in 100 packets, each with 1000 bytes of crypto frame data each, + // eventually this will overrun the buffer we keep for crypto data. + let mut payload = Encoder::with_capacity(1024); + for pn in 0..100_u64 { + payload.truncate(0); + payload + .encode_varint(0x06_u64) // CRYPTO frame type. + .encode_varint(pn * 1000 + 1) // offset + .encode_varint(1000_u64); // length + let plen = payload.len(); + payload.pad_to(plen + 1000, 44); + + let mut packet = Encoder::with_capacity(1200); + packet + .encode_byte(0xc1) // Initial with packet number length of 2. + .encode_uint(4, Version::Version1.wire_version()) + .encode_vec(1, server_dcid) + .encode_vec(1, server_scid) + .encode_vvec(&[]) // token + .encode_varint(u64::try_from(2 + payload.len() + aead.expansion()).unwrap()); // length + let pn_offset = packet.len(); + packet.encode_uint(2, pn); + + let mut packet = Vec::from(packet); + let header = packet.clone(); + packet.resize(header.len() + payload.len() + aead.expansion(), 0); + aead.encrypt(pn, &header, payload.as_ref(), &mut packet[header.len()..]) + .unwrap(); + apply_header_protection(&hp, &mut packet, pn_offset..(pn_offset + 2)); + packet.resize(1200, 0); // Initial has to be 1200 bytes! + + let dgram = Datagram::new( + server_initial.source(), + server_initial.destination(), + packet, + ); + client.process_input(dgram, now()); + if let State::Closing { error, .. 
} = client.state() { + assert!( + matches!( + error, + ConnectionError::Transport(Error::CryptoBufferExceeded), + ), + "the connection need to abort on crypto buffer" + ); + assert!(pn > 64, "at least 64000 bytes of data is buffered"); + return; + } + } + panic!("Was not able to overflow the crypto buffer"); +} From 74f5a5d3308a09fb74ef1442f3890e4640b8a505 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 11 Jan 2024 02:28:16 +0200 Subject: [PATCH 050/321] chore: Update qlog to 0.11.0 (#1547) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 4d4f1b67db..43ff45d9e5 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -16,7 +16,7 @@ neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } structopt = "0.3.7" url = "2.0" -qlog = "0.10.0" +qlog = "0.11.0" mio = "0.6.17" [features] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index bb3c7c2a15..25d72980ca 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -11,7 +11,7 @@ build = "build.rs" log = {version = "0.4.0", default-features = false} env_logger = {version = "0.10", default-features = false} lazy_static = "1.3.0" -qlog = "0.10.0" +qlog = "0.11.0" time = {version = "0.3", features = ["formatting"]} [features] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index e83ced739e..bea90f159c 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -13,7 +13,7 @@ neqo-transport = { path = "./../neqo-transport" } neqo-qpack = { path = "./../neqo-qpack" } log = {version = "0.4.0", default-features = false} smallvec = "1.0.0" -qlog = "0.10.0" +qlog = "0.11.0" sfv = "0.9.1" url = "2.0" lazy_static = "1.3.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 
d9af0abaf3..0cc6cb8c2e 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -12,7 +12,7 @@ neqo-transport = { path = "./../neqo-transport" } neqo-crypto = { path = "./../neqo-crypto" } log = {version = "0.4.0", default-features = false} static_assertions = "1.1.0" -qlog = "0.10.0" +qlog = "0.11.0" lazy_static = "1.3.0" [dev-dependencies] diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 1517f7d53d..09ac930d50 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -17,7 +17,7 @@ regex = "1" mio = "0.6.17" mio-extras = "2.0.5" log = {version = "0.4.0", default-features = false} -qlog = "0.10.0" +qlog = "0.11.0" [features] deny-warnings = [] diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 6425fdbdcb..a4da735a8a 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -12,7 +12,7 @@ neqo-common = { path = "../neqo-common" } lazy_static = "1.3.0" log = {version = "0.4.0", default-features = false} smallvec = "1.0.0" -qlog = "0.10.0" +qlog = "0.11.0" indexmap = "1.0" [dev-dependencies] From a5d2df6b21b2976a6581a01700438fecf45eef67 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 11 Jan 2024 10:22:46 +0100 Subject: [PATCH 051/321] refactor: accept Datagrams by reference, not value (#1543) * refactor: have process_* take &Datagram * Use IntoIterator * Remove TODO on datagram clone * Reduce diff Revert to previous control flow. 
--- neqo-client/src/main.rs | 4 +- neqo-http3/src/connection_client.rs | 357 +++++++++--------- .../tests/webtransport/mod.rs | 28 +- .../tests/webtransport/negotiation.rs | 6 +- .../tests/webtransport/sessions.rs | 16 +- neqo-http3/src/frames/tests/mod.rs | 10 +- neqo-http3/src/frames/tests/reader.rs | 14 +- neqo-http3/src/server.rs | 104 ++--- neqo-http3/src/stream_type_reader.rs | 6 +- neqo-http3/tests/httpconn.rs | 58 +-- neqo-http3/tests/priority.rs | 22 +- neqo-http3/tests/send_message.rs | 4 +- neqo-http3/tests/webtransport.rs | 16 +- neqo-interop/src/main.rs | 4 +- neqo-qpack/src/decoder.rs | 4 +- neqo-qpack/src/encoder.rs | 10 +- neqo-server/src/main.rs | 8 +- neqo-server/src/old_https.rs | 2 +- neqo-transport/src/connection/mod.rs | 29 +- .../src/connection/tests/ackrate.rs | 14 +- neqo-transport/src/connection/tests/cc.rs | 26 +- neqo-transport/src/connection/tests/close.rs | 26 +- .../src/connection/tests/datagram.rs | 22 +- .../src/connection/tests/fuzzing.rs | 4 +- .../src/connection/tests/handshake.rs | 196 +++++----- neqo-transport/src/connection/tests/idle.rs | 71 ++-- neqo-transport/src/connection/tests/keys.rs | 32 +- .../src/connection/tests/migration.rs | 74 ++-- neqo-transport/src/connection/tests/mod.rs | 31 +- .../src/connection/tests/priority.rs | 40 +- .../src/connection/tests/recovery.rs | 100 ++--- .../src/connection/tests/resumption.rs | 8 +- neqo-transport/src/connection/tests/stream.rs | 110 +++--- neqo-transport/src/connection/tests/vn.rs | 34 +- .../src/connection/tests/zerortt.rs | 22 +- neqo-transport/src/server.rs | 14 +- neqo-transport/tests/common/mod.rs | 36 +- neqo-transport/tests/conn_vectors.rs | 2 +- neqo-transport/tests/connection.rs | 40 +- neqo-transport/tests/retry.rs | 90 ++--- neqo-transport/tests/server.rs | 122 +++--- neqo-transport/tests/sim/connection.rs | 2 +- test-fixture/src/lib.rs | 2 +- 43 files changed, 926 insertions(+), 894 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 
8d85eff9b0..d422f51b7f 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -432,7 +432,7 @@ fn process_loop( }; } if !datagrams.is_empty() { - client.process_multiple_input(datagrams, Instant::now()); + client.process_multiple_input(&datagrams, Instant::now()); handler.maybe_key_update(client)?; } @@ -1364,7 +1364,7 @@ mod old { } if sz > 0 { let d = Datagram::new(remote, *local_addr, &buf[..sz]); - client.process_input(d, Instant::now()); + client.process_input(&d, Instant::now()); handler.maybe_key_update(client)?; } } diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 3cb6d94c8a..13a3c4a47c 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -804,7 +804,7 @@ impl Http3Client { } /// This function combines `process_input` and `process_output` function. - pub fn process(&mut self, dgram: Option, now: Instant) -> Output { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { qtrace!([self], "Process."); if let Some(d) = dgram { self.process_input(d, now); @@ -822,15 +822,20 @@ impl Http3Client { /// packets need to be sent or if a timer needs to be updated. 
/// /// [1]: ../neqo_transport/enum.ConnectionEvent.html - pub fn process_input(&mut self, dgram: Datagram, now: Instant) { + pub fn process_input(&mut self, dgram: &Datagram, now: Instant) { qtrace!([self], "Process input."); self.conn.process_input(dgram, now); self.process_http3(now); } - pub fn process_multiple_input(&mut self, dgrams: Vec, now: Instant) { + pub fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let dgrams = dgrams.into_iter(); qtrace!([self], "Process multiple datagrams, len={}", dgrams.len()); - if dgrams.is_empty() { + if dgrams.len() == 0 { return; } self.conn.process_multiple_input(dgrams, now); @@ -1589,11 +1594,11 @@ mod tests { assert_eq!(client.state(), Http3State::Initializing); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); - let out = server.conn.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); @@ -1606,12 +1611,14 @@ mod tests { fn connect_only_transport_with(client: &mut Http3Client, server: &mut TestServer) { let out = handshake_only(client, server); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); assert_eq!(client.state(), Http3State::Connected); - server.conn.process_input(out.dgram().unwrap(), now()); + server + .conn + .process_input(out.as_dgram_ref().unwrap(), now()); assert!(server.conn.state().connected()); } @@ -1625,8 +1632,10 @@ 
mod tests { fn send_and_receive_client_settings(client: &mut Http3Client, server: &mut TestServer) { // send and receive client settings - let dgram = client.process(None, now()).dgram(); - server.conn.process_input(dgram.unwrap(), now()); + let out = client.process(None, now()); + server + .conn + .process_input(out.as_dgram_ref().unwrap(), now()); server.check_client_control_qpack_streams_no_resumption(); } @@ -1640,8 +1649,8 @@ mod tests { server.create_qpack_streams(); // Send the server's control and qpack streams data. - let dgram = server.conn.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let out = server.conn.process(None, now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); // assert no error occured. assert_eq!(client.state(), Http3State::Connected); @@ -1783,8 +1792,10 @@ mod tests { ) -> StreamId { let request_stream_id = make_request(client, close_sending_side, &[]); - let dgram = client.process(None, now()).dgram(); - server.conn.process_input(dgram.unwrap(), now()); + let out = client.process(None, now()); + server + .conn + .process_input(out.as_dgram_ref().unwrap(), now()); // find the new request/response stream and send frame v on it. 
while let Some(e) = server.conn.next_event() { @@ -1814,7 +1825,7 @@ mod tests { } let dgram = server.conn.process_output(now()).dgram(); if let Some(d) = dgram { - client.process_input(d, now()); + client.process_input(&d, now()); } request_stream_id } @@ -1843,8 +1854,8 @@ mod tests { server.conn.stream_close_send(stream_id).unwrap(); } let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); } const PUSH_PROMISE_DATA: &[u8] = &[ @@ -1882,8 +1893,8 @@ mod tests { let push_stream_id = send_push_data(&mut server.conn, push_id, close_push_stream); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); push_stream_id } @@ -1897,8 +1908,8 @@ mod tests { send_push_promise(&mut server.conn, stream_id, push_id); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); } fn send_cancel_push_and_exchange_packets( @@ -1915,8 +1926,8 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); } const PUSH_DATA: &[u8] = &[ @@ -2083,7 +2094,7 @@ mod tests { .stream_close_send(server.control_stream_id.unwrap()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); 
assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2097,7 +2108,7 @@ mod tests { .stream_reset_send(server.control_stream_id.unwrap(), Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2111,7 +2122,7 @@ mod tests { .stream_reset_send(server.encoder_stream_id.unwrap(), Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2125,7 +2136,7 @@ mod tests { .stream_reset_send(server.decoder_stream_id.unwrap(), Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2139,7 +2150,7 @@ mod tests { .stream_stop_sending(CLIENT_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2153,7 +2164,7 @@ mod tests { .stream_stop_sending(CLIENT_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2167,7 +2178,7 @@ mod tests { .stream_stop_sending(CLIENT_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2184,7 +2195,7 @@ mod tests { .stream_send(control_stream, 
&[0x0, 0x1, 0x3, 0x0, 0x1, 0x2]); assert_eq!(sent, Ok(6)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpMissingSettings); } @@ -2200,7 +2211,7 @@ mod tests { ); assert_eq!(sent, Ok(8)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2214,7 +2225,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2263,8 +2274,8 @@ mod tests { _ = server.conn.stream_send(push_stream_id, v).unwrap(); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2323,8 +2334,8 @@ mod tests { .stream_send(new_stream_id, &[0x41, 0x19, 0x4, 0x4, 0x6, 0x0, 0x8, 0x0]) .unwrap(); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // check for stop-sending with Error::HttpStreamCreation. let mut stop_sending_event_found = false; @@ -2352,7 +2363,7 @@ mod tests { // Generate packet with the above bad h3 input let out = server.conn.process(None, now()); // Process bad input and close the connection. 
- mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2399,38 +2410,38 @@ mod tests { let mut sent = server.conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // start sending SETTINGS frame sent = server.conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x6]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x8]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); @@ -2438,37 +2449,37 @@ mod tests { sent = server.conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = 
server.conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x61]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x62]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x63]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x64]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // PUSH_PROMISE on a control stream will cause an error assert_closed(&client, &Error::HttpFrameUnexpected); @@ -2544,14 +2555,14 @@ mod tests { let d1 = dgram(&mut client.conn); let d2 = dgram(&mut client.conn); - server.conn.process_input(d2, now()); - server.conn.process_input(d1, now()); + server.conn.process_input(&d2, now()); + server.conn.process_input(&d1, now()); let d3 = dgram(&mut server.conn); let d4 = dgram(&mut server.conn); - client.process_input(d4, now()); - client.process_input(d3, now()); + client.process_input(&d4, now()); + client.process_input(&d3, now()); let ack = client.process_output(now()).dgram(); - server.conn.process_input(ack.unwrap(), now()); + server.conn.process_input(&ack.unwrap(), now()); } /// The client 
should keep a connection alive if it has unanswered requests. @@ -2571,7 +2582,7 @@ mod tests { request_stream_id: StreamId, ) { let out = server.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); while let Some(e) = client.next_event() { match e { @@ -2627,7 +2638,7 @@ mod tests { client.stream_close_send(request_stream_id).unwrap(); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // find the new request/response stream and send response on it. while let Some(e) = server.conn.next_event() { @@ -2675,8 +2686,8 @@ mod tests { // We need to loop a bit until all data has been sent. let mut out = client.process(None, now()); for _i in 0..20 { - out = server.conn.process(out.dgram(), now()); - out = client.process(out.dgram(), now()); + out = server.conn.process(out.as_dgram_ref(), now()); + out = client.process(out.as_dgram_ref(), now()); } // check request body is received. @@ -2768,8 +2779,8 @@ mod tests { // We need to loop a bit until all data has been sent. Once for every 1K // of data. for _i in 0..SEND_BUFFER_SIZE / 1000 { - out = server.conn.process(out.dgram(), now()); - out = client.process(out.dgram(), now()); + out = server.conn.process(out.as_dgram_ref(), now()); + out = client.process(out.as_dgram_ref(), now()); } // check received frames and send a response. 
@@ -2983,7 +2994,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut reset = false; let mut stop_sending = false; @@ -3039,7 +3050,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut stop_sending = false; @@ -3103,7 +3114,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut reset = false; @@ -3170,7 +3181,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut stop_sending = false; let mut header_ready = false; @@ -3222,7 +3233,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut reset = false; @@ -3312,7 +3323,7 @@ mod tests { assert_eq!(request_stream_id_3, 8); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); _ = server .conn @@ -3334,7 +3345,7 @@ mod tests { } } let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut stream_reset = false; while let Some(e) = client.next_event() { @@ -3396,7 +3407,7 @@ mod tests { assert_eq!(request_stream_id_3, 8); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // First send a Goaway frame with an higher number _ = server @@ -3405,7 +3416,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Check that there is one reset for stream_id 8 let 
mut stream_reset_1 = 0; @@ -3491,7 +3502,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::GoingAway(StreamId::new(4))); @@ -3502,7 +3513,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpGeneralProtocol); } @@ -3517,7 +3528,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpId); } @@ -3530,7 +3541,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv HeaderReady wo headers with fin. let e = client.events().next().unwrap(); @@ -3628,7 +3639,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv DataReadable wo data with fin while let Some(e) = client.next_event() { @@ -3675,7 +3686,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv HeaderReady with fin. 
while let Some(e) = client.next_event() { @@ -3726,7 +3737,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv headers wo fin while let Some(e) = client.next_event() { @@ -3753,7 +3764,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv no data, but do get fin while let Some(e) = client.next_event() { @@ -3823,7 +3834,7 @@ mod tests { // ok NOW send fin server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // fin wo data should generate DataReadable let e = client.events().next().unwrap(); @@ -3968,10 +3979,10 @@ mod tests { assert!(!client.events().any(header_ready_event)); // Let client receive the encoder instructions. - mem::drop(client.process(encoder_inst_pkt.dgram(), now())); + mem::drop(client.process(encoder_inst_pkt.as_dgram_ref(), now())); let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); mem::drop(client.process(None, now())); let mut recv_header = false; @@ -4033,7 +4044,7 @@ mod tests { assert!(!hconn.events().any(header_ready_event)); // Let client receive the encoder instructions. - let _out = hconn.process(encoder_inst_pkt.dgram(), now()); + let _out = hconn.process(encoder_inst_pkt.as_dgram_ref(), now()); let mut recv_header = false; // Now the stream is unblocked. After headers we will receive a fin. 
@@ -4061,7 +4072,7 @@ mod tests { server.send_ticket(now(), &[]).expect("can send ticket"); let out = server.process_output(now()); assert!(out.as_dgram_ref().is_some()); - client.process_input(out.dgram().unwrap(), now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); // We do not have a token so we need to wait for a resumption token timer to trigger. client.process_output(now() + Duration::from_millis(250)); assert_eq!(client.state(), Http3State::Connected); @@ -4105,7 +4116,7 @@ mod tests { assert_eq!(client.state(), Http3State::ZeroRtt); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // Check that control and qpack streams are received and a // SETTINGS frame has been received. @@ -4118,10 +4129,10 @@ mod tests { ); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert!(server.conn.state().connected()); assert!(client.tls_info().unwrap().resumed()); @@ -4140,7 +4151,7 @@ mod tests { assert_eq!(client.state(), Http3State::ZeroRtt); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // Check that control and qpack streams are received and a // SETTINGS frame has been received. 
@@ -4153,11 +4164,11 @@ mod tests { ); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); assert!(server.conn.state().connected()); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); // After the server has been connected, send a response. @@ -4224,9 +4235,9 @@ mod tests { let client_0rtt = client.process(None, now()); assert!(client_0rtt.as_dgram_ref().is_some()); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // Should produce ServerHello etc... - let server_ignored = server.process(client_0rtt.dgram(), now()); + let server_ignored = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_ignored.as_dgram_ref().is_none()); // The server shouldn't receive that 0-RTT data. @@ -4234,7 +4245,7 @@ mod tests { assert!(!server.events().any(recvd_stream_evt)); // Client should get a rejection. - let client_out = client.process(server_hs.dgram(), now()); + let client_out = client.process(server_hs.as_dgram_ref(), now()); assert!(client_out.as_dgram_ref().is_some()); let recvd_0rtt_reject = |e| e == Http3ClientEvent::ZeroRttRejected; assert!(client.events().any(recvd_0rtt_reject)); @@ -4245,7 +4256,7 @@ mod tests { assert_eq!(res.unwrap_err(), Error::InvalidStreamId); // Client will send Setting frame and open new qpack streams. 
- mem::drop(server.process(client_out.dgram(), now())); + mem::drop(server.process(client_out.as_dgram_ref(), now())); TestServer::new_with_conn(server).check_client_control_qpack_streams_no_resumption(); // Check that we can send a request and that the stream_id starts again from 0. @@ -4276,7 +4287,7 @@ mod tests { assert_eq!(client.state(), Http3State::ZeroRtt); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // Check that control and qpack streams anda SETTINGS frame are received. // Also qpack encoder stream will send "change capacity" instruction because it has @@ -4288,10 +4299,10 @@ mod tests { ); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert!(server.conn.state().connected()); assert!(client.tls_info().unwrap().resumed()); @@ -4307,7 +4318,7 @@ mod tests { assert_eq!(sent.unwrap(), enc.len()); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_eq!(&client.state(), expected_client_state); assert!(server.conn.state().connected()); @@ -4667,7 +4678,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let events: Vec = client.events().collect(); @@ -4882,7 +4893,7 @@ mod tests { _ = server.conn.stream_send(request_stream_id, &[0, 0]).unwrap(); server.conn.stream_close_send(request_stream_id).unwrap(); let dgram = server.conn.process_output(now()).dgram(); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), 
now()); let data_readable_event = |e: &_| matches!(e, Http3ClientEvent::DataReadable { stream_id } if *stream_id == request_stream_id); assert_eq!(client.events().filter(data_readable_event).count(), 1); @@ -4906,7 +4917,7 @@ mod tests { server.create_control_stream(); // Send the server's control stream data. let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); server.create_qpack_streams(); let qpack_pkt1 = server.conn.process(None, now()); @@ -4914,7 +4925,7 @@ mod tests { let request_stream_id = make_request(&mut client, true, &[]); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); setup_server_side_encoder(&mut client, &mut server); @@ -4934,7 +4945,7 @@ mod tests { // Send the encoder instructions, let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Send response let mut d = Encoder::default(); @@ -4949,13 +4960,13 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); let header_ready_event = |e| matches!(e, Http3ClientEvent::HeaderReady { .. }); assert!(!client.events().any(header_ready_event)); // Let client receive the encoder instructions. - mem::drop(client.process(qpack_pkt1.dgram(), now())); + mem::drop(client.process(qpack_pkt1.as_dgram_ref(), now())); assert!(client.events().any(header_ready_event)); } @@ -5030,8 +5041,8 @@ mod tests { // Reading push data will stop the client from being idle. 
_ = send_push_data(&mut server.conn, 0, false); - let dgram = server.conn.process_output(now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let out = server.conn.process_output(now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); let mut buf = [0; 16]; let (read, fin) = client.push_read_data(now(), 0, &mut buf).unwrap(); @@ -5340,7 +5351,7 @@ mod tests { assert_eq!(request_stream_id_2, 4); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id_2, 5); @@ -5376,7 +5387,7 @@ mod tests { assert_eq!(request_stream_id_2, 4); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id_2, 5); @@ -5424,7 +5435,7 @@ mod tests { assert_eq!(request_stream_id_2, 4); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id_2, 5); @@ -5517,7 +5528,7 @@ mod tests { ); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check max_push_id frame has been received let control_stream_readable = @@ -5535,8 +5546,8 @@ mod tests { send_push_data(&mut server.conn, 8, true); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert_eq!(client.state(), Http3State::Connected); @@ -5698,8 +5709,8 @@ mod tests { .conn 
.stream_reset_send(push_stream_id, Error::HttpRequestCancelled.code()) .unwrap(); - let out = server.conn.process(None, now()).dgram(); - client.process(out, now()); + let out = server.conn.process(None, now()); + client.process(out.as_dgram_ref(), now()); // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); @@ -5724,8 +5735,8 @@ mod tests { .conn .stream_reset_send(push_stream_id, Error::HttpRequestCancelled.code()) .unwrap(); - let out = server.conn.process(None, now()).dgram(); - client.process(out, now()); + let out = server.conn.process(None, now()); + client.process(out.as_dgram_ref(), now()); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id, 0); @@ -5768,8 +5779,8 @@ mod tests { send_push_data_and_exchange_packets(&mut client, &mut server, 0, false); assert!(client.cancel_push(0).is_ok()); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Assert that we do not have any push event. 
assert!(!check_push_events(&mut client)); @@ -5798,8 +5809,8 @@ mod tests { send_push_data_and_exchange_packets(&mut client, &mut server, 0, false); assert!(client.cancel_push(0).is_ok()); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id, 0); @@ -5840,7 +5851,7 @@ mod tests { .send_encoder_updates(&mut server.conn) .unwrap(); let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); } fn setup_server_side_encoder(client: &mut Http3Client, server: &mut TestServer) { @@ -5912,7 +5923,7 @@ mod tests { assert!(!check_push_events(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt, now()); + let _out = client.process(encoder_inst_pkt.as_ref(), now()); // PushPromise is blocked wathing for encoder instructions. assert!(check_push_events(&mut client)); @@ -5952,7 +5963,7 @@ mod tests { assert!(check_data_readable(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt, now()); + let _out = client.process(encoder_inst_pkt.as_ref(), now()); // PushPromise is blocked wathing for encoder instructions. assert!(check_push_events(&mut client)); @@ -5994,7 +6005,7 @@ mod tests { assert!(check_header_ready(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt, now()); + let _out = client.process(encoder_inst_pkt.as_ref(), now()); // PushPromise is blocked wathing for encoder instructions. 
assert!(check_push_events(&mut client)); @@ -6015,7 +6026,7 @@ mod tests { .send_and_insert(&mut server.conn, b"content-length", b"1234") .unwrap(); let encoder_inst_pkt1 = server.conn.process(None, now()).dgram(); - let _out = client.process(encoder_inst_pkt1, now()); + let _out = client.process(encoder_inst_pkt1.as_ref(), now()); // Send a PushPromise that is blocked until encoder_inst_pkt2 is process by the client. let encoder_inst_pkt2 = @@ -6050,7 +6061,7 @@ mod tests { assert!(!check_header_ready(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt2, now()); + let _out = client.process(encoder_inst_pkt2.as_ref(), now()); // The response headers are blocked. assert!(check_header_ready_and_push_promise(&mut client)); @@ -6123,12 +6134,12 @@ mod tests { assert!(!check_header_ready(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt1, now()); + let _out = client.process(encoder_inst_pkt1.as_ref(), now()); assert!(check_push_events(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt2, now()); + let _out = client.process(encoder_inst_pkt2.as_ref(), now()); assert!(check_header_ready_and_push_promise(&mut client)); } @@ -6163,7 +6174,7 @@ mod tests { .unwrap(); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check that encoder got stream_canceled instruction. let mut inst = [0_u8; 100]; let (amount, fin) = server @@ -6227,7 +6238,7 @@ mod tests { ); // Now read headers. 
- mem::drop(client.process(encoder_insts.dgram(), now())); + mem::drop(client.process(encoder_insts.as_dgram_ref(), now())); } #[test] @@ -6238,7 +6249,7 @@ mod tests { mem::drop(client.cancel_fetch(request_stream_id, Error::HttpRequestCancelled.code())); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn)); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6286,8 +6297,8 @@ mod tests { .unwrap(); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn)); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6323,7 +6334,7 @@ mod tests { assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6347,7 +6358,7 @@ mod tests { ); // Exchange encoder instructions - mem::drop(client.process(encoder_instruct, now())); + mem::drop(client.process(encoder_instruct.as_ref(), now())); let header_ready_event = |e| matches!(e, Http3ClientEvent::HeaderReady { .. 
}); assert!(client.events().any(header_ready_event)); @@ -6360,7 +6371,7 @@ mod tests { assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); } @@ -6386,7 +6397,7 @@ mod tests { // Send the encoder instructions. let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); // Send PushPromise that will be blocked waiting for decoder instructions. mem::drop( @@ -6416,7 +6427,7 @@ mod tests { .unwrap(); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6430,7 +6441,7 @@ mod tests { .unwrap(); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); } @@ -6483,7 +6494,7 @@ mod tests { assert!(!client.events().any(header_ready_event)); // Now make the encoder instructions available. - mem::drop(client.process(encoder_insts.dgram(), now())); + mem::drop(client.process(encoder_insts.as_dgram_ref(), now())); // Header blocks for both streams should be ready. 
let mut count_responses = 0; @@ -6527,7 +6538,7 @@ mod tests { let sent = server.conn.stream_send(control_stream, enc.as_ref()); assert_eq!(sent, Ok(4)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpSettings); } } @@ -6627,8 +6638,8 @@ mod tests { } ); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check that server has received a reset. let stop_sending_event = |e| { @@ -6760,8 +6771,8 @@ mod tests { assert!(client.events().any(push_reset_event)); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check that server has received a reset. let stop_sending_event = |e| { @@ -6775,7 +6786,7 @@ mod tests { fn handshake_client_error(client: &mut Http3Client, server: &mut TestServer, error: &Error) { let out = handshake_only(client, server); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(client, error); } @@ -6936,14 +6947,14 @@ mod tests { let is_done = |c: &Http3Client| matches!(c.state(), Http3State::Connected); while !is_done(&mut client) { maybe_authenticate(&mut client); - datagram = client.process(datagram, now()).dgram(); - datagram = server.process(datagram, now()).dgram(); + datagram = client.process(datagram.as_ref(), now()).dgram(); + datagram = server.process(datagram.as_ref(), now()).dgram(); } // exchange qpack settings, server will send a token as well. 
- datagram = client.process(datagram, now()).dgram(); - datagram = server.process(datagram, now()).dgram(); - mem::drop(client.process(datagram, now()).dgram()); + datagram = client.process(datagram.as_ref(), now()).dgram(); + datagram = server.process(datagram.as_ref(), now()).dgram(); + mem::drop(client.process(datagram.as_ref(), now()).dgram()); client .events() @@ -6997,15 +7008,15 @@ mod tests { // Exchange packets until header-ack is received. // These many packet exchange is needed, to get a header-ack. // TODO this may be optimize at Http3Server. - let out = client.process(None, now()).dgram(); - let out = server.process(out, now()).dgram(); - let out = client.process(out, now()).dgram(); - let out = server.process(out, now()).dgram(); - let out = client.process(out, now()).dgram(); - let out = server.process(out, now()).dgram(); - let out = client.process(out, now()).dgram(); - let out = server.process(out, now()).dgram(); - mem::drop(client.process(out, now())); + let out = client.process(None, now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + mem::drop(client.process(out.as_dgram_ref(), now())); // The header ack for the first request has been received. 
assert_eq!(client.qpack_encoder_stats().header_acks_recv, 1); @@ -7061,7 +7072,7 @@ mod tests { _ = server.conn.stream_send(push_stream_id, &[0]).unwrap(); server.conn.stream_close_send(push_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpGeneralProtocol); } @@ -7085,14 +7096,14 @@ mod tests { let md_before = server.conn.stats().frame_tx.max_data; // sending the http request and most most of the request data - let out = client.process(None, now()).dgram(); - let out = server.conn.process(out, now()).dgram(); + let out = client.process(None, now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // the server responses with an ack, but the max_data didn't change assert_eq!(md_before, server.conn.stats().frame_tx.max_data); - let out = client.process(out, now()).dgram(); - let out = server.conn.process(out, now()).dgram(); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // the server increased the max_data during the second read if that isn't the case // in the future and therefore this asserts fails, the request data on stream 0 could be read @@ -7107,8 +7118,10 @@ mod tests { ); // the client now sends the priority update - let out = client.process(out, now()).dgram(); - server.conn.process_input(out.unwrap(), now()); + let out = client.process(out.as_dgram_ref(), now()); + server + .conn + .process_input(out.as_dgram_ref().unwrap(), now()); // check that the priority_update arrived at the client control stream let num_read = server.conn.stream_recv(StreamId::new(2), &mut buf).unwrap(); @@ -7154,7 +7167,7 @@ mod tests { ); // Let client receive the encoder instructions. 
- client.process_input(encoder_inst_pkt.dgram().unwrap(), now()); + client.process_input(encoder_inst_pkt.as_dgram_ref().unwrap(), now()); let reset_event = |e| matches!(e, Http3ClientEvent::Reset { stream_id, .. } if stream_id == request_stream_id); assert!(client.events().any(reset_event)); diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs index 4ac5f72b0f..fcdcff0fe1 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs @@ -64,8 +64,8 @@ pub fn default_http3_server(server_params: Http3Parameters) -> Http3Server { fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } @@ -78,28 +78,28 @@ fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { let out = client.process(None, now()); assert_eq!(client.state(), Http3State::Initializing); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(client.events().any(authentication_needed)); client.authenticated(AuthenticationStatus::Ok, now()); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); 
assert_eq!(client.state(), Http3State::Connected); // Exchange H3 setttings - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); - std::mem::drop(client.process(out.dgram(), now())); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + std::mem::drop(client.process(out.as_dgram_ref(), now())); } fn connect( @@ -201,10 +201,10 @@ impl WtTest { let mut now = now(); loop { now += RTT / 2; - out = self.client.process(out, now).dgram(); + out = self.client.process(out.as_ref(), now).dgram(); let client_none = out.is_none(); now += RTT / 2; - out = self.server.process(out, now).dgram(); + out = self.server.process(out.as_ref(), now).dgram(); if client_none && out.is_none() { break; } diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs index 23784e5609..e838646ab2 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs @@ -86,7 +86,7 @@ fn zero_rtt( // exchange token let out = server.process(None, now()); // We do not have a token so we need to wait for a resumption token timer to trigger. 
- std::mem::drop(client.process(out.dgram(), now() + Duration::from_millis(250))); + std::mem::drop(client.process(out.as_dgram_ref(), now() + Duration::from_millis(250))); assert_eq!(client.state(), Http3State::Connected); let token = client .events() @@ -234,8 +234,8 @@ fn zero_rtt_wt_settings() { fn exchange_packets2(client: &mut Http3Client, server: &mut Connection) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs index 65572a1c2a..06d9318b87 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs @@ -419,18 +419,18 @@ fn wt_close_session_cannot_be_sent_at_once() { Err(Error::InvalidStreamId) ); - let out = wt.server.process(None, now()).dgram(); - let out = wt.client.process(out, now()).dgram(); + let out = wt.server.process(None, now()); + let out = wt.client.process(out.as_dgram_ref(), now()); // Client has not received the full CloseSession frame and it can create more streams. 
let unidi_client = wt.create_wt_stream_client(wt_session.stream_id(), StreamType::UniDi); - let out = wt.server.process(out, now()).dgram(); - let out = wt.client.process(out, now()).dgram(); - let out = wt.server.process(out, now()).dgram(); - let out = wt.client.process(out, now()).dgram(); - let out = wt.server.process(out, now()).dgram(); - let _out = wt.client.process(out, now()).dgram(); + let out = wt.server.process(out.as_dgram_ref(), now()); + let out = wt.client.process(out.as_dgram_ref(), now()); + let out = wt.server.process(out.as_dgram_ref(), now()); + let out = wt.client.process(out.as_dgram_ref(), now()); + let out = wt.server.process(out.as_dgram_ref(), now()); + let _out = wt.client.process(out.as_dgram_ref(), now()); wt.check_events_after_closing_session_client( &[], diff --git a/neqo-http3/src/frames/tests/mod.rs b/neqo-http3/src/frames/tests/mod.rs index 092b3039ec..086af90300 100644 --- a/neqo-http3/src/frames/tests/mod.rs +++ b/neqo-http3/src/frames/tests/mod.rs @@ -22,12 +22,12 @@ pub(crate) fn enc_dec>(d: &Encoder, st: &str, remaining: usiz let mut conn_c = default_client(); let mut conn_s = default_server(); let out = conn_c.process(None, now()); - let out = conn_s.process(out.dgram(), now()); - let out = conn_c.process(out.dgram(), now()); - mem::drop(conn_s.process(out.dgram(), now())); + let out = conn_s.process(out.as_dgram_ref(), now()); + let out = conn_c.process(out.as_dgram_ref(), now()); + mem::drop(conn_s.process(out.as_dgram_ref(), now())); conn_c.authenticated(AuthenticationStatus::Ok, now()); let out = conn_c.process(None, now()); - mem::drop(conn_s.process(out.dgram(), now())); + mem::drop(conn_s.process(out.as_dgram_ref(), now())); // create a stream let stream_id = conn_s.stream_create(StreamType::BiDi).unwrap(); @@ -38,7 +38,7 @@ pub(crate) fn enc_dec>(d: &Encoder, st: &str, remaining: usiz let buf = Encoder::from_hex(st); conn_s.stream_send(stream_id, buf.as_ref()).unwrap(); let out = conn_s.process(None, now()); - 
mem::drop(conn_c.process(out.dgram(), now())); + mem::drop(conn_c.process(out.as_dgram_ref(), now())); let (frame, fin) = fr .receive::(&mut StreamReaderConnectionWrapper::new( diff --git a/neqo-http3/src/frames/tests/reader.rs b/neqo-http3/src/frames/tests/reader.rs index f694e4dbe3..8923a0994b 100644 --- a/neqo-http3/src/frames/tests/reader.rs +++ b/neqo-http3/src/frames/tests/reader.rs @@ -39,7 +39,7 @@ impl FrameReaderTest { fn process>(&mut self, v: &[u8]) -> Option { self.conn_s.stream_send(self.stream_id, v).unwrap(); let out = self.conn_s.process(None, now()); - mem::drop(self.conn_c.process(out.dgram(), now())); + mem::drop(self.conn_c.process(out.as_dgram_ref(), now())); let (frame, fin) = self .fr .receive::(&mut StreamReaderConnectionWrapper::new( @@ -230,12 +230,12 @@ fn test_reading_frame + PartialEq + Debug>( } let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); if let FrameReadingTestSend::DataThenFin = test_to_send { fr.conn_s.stream_close_send(fr.stream_id).unwrap(); let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); } let rv = fr.fr.receive::(&mut StreamReaderConnectionWrapper::new( @@ -478,11 +478,11 @@ fn test_frame_reading_when_stream_is_closed_before_sending_data() { fr.conn_s.stream_send(fr.stream_id, &[0x00]).unwrap(); let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); assert_eq!(Ok(()), fr.conn_c.stream_close_send(fr.stream_id)); let out = fr.conn_c.process(None, now()); - mem::drop(fr.conn_s.process(out.dgram(), now())); + mem::drop(fr.conn_s.process(out.as_dgram_ref(), now())); assert_eq!( Ok((None, true)), fr.fr @@ -501,11 +501,11 @@ fn test_wt_frame_reading_when_stream_is_closed_before_sending_data() { 
fr.conn_s.stream_send(fr.stream_id, &[0x00]).unwrap(); let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); assert_eq!(Ok(()), fr.conn_c.stream_close_send(fr.stream_id)); let out = fr.conn_c.process(None, now()); - mem::drop(fr.conn_s.process(out.dgram(), now())); + mem::drop(fr.conn_s.process(out.as_dgram_ref(), now())); assert_eq!( Ok((None, true)), fr.fr diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index e4c1c707bb..c432039972 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -109,7 +109,7 @@ impl Http3Server { self.server.ech_config() } - pub fn process(&mut self, dgram: Option, now: Instant) -> Output { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { qtrace!([self], "Process."); let out = self.server.process(dgram, now); self.process_http3(now); @@ -119,7 +119,7 @@ impl Http3Server { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) } - _ => self.server.process(None, now), + _ => self.server.process(Option::<&Datagram>::None, now), } } @@ -399,29 +399,29 @@ mod tests { const SERVER_SIDE_DECODER_STREAM_ID: StreamId = StreamId::new(11); fn connect_transport(server: &mut Http3Server, client: &mut Connection, resume: bool) { - let c1 = client.process(None, now()).dgram(); - let s1 = server.process(c1, now()).dgram(); - let c2 = client.process(s1, now()).dgram(); + let c1 = client.process(None, now()); + let s1 = server.process(c1.as_dgram_ref(), now()); + let c2 = client.process(s1.as_dgram_ref(), now()); let needs_auth = client .events() .any(|e| e == ConnectionEvent::AuthenticationNeeded); let c2 = if needs_auth { assert!(!resume); // c2 should just be an ACK, so absorb that. 
- let s_ack = server.process(c2, now()).dgram(); - assert!(s_ack.is_none()); + let s_ack = server.process(c2.as_dgram_ref(), now()); + assert!(s_ack.as_dgram_ref().is_none()); client.authenticated(AuthenticationStatus::Ok, now()); - client.process(None, now()).dgram() + client.process(None, now()) } else { assert!(resume); c2 }; assert!(client.state().connected()); - let s2 = server.process(c2, now()).dgram(); + let s2 = server.process(c2.as_dgram_ref(), now()); assert_connected(server); - let c3 = client.process(s2, now()).dgram(); - assert!(c3.is_none()); + let c3 = client.process(s2.as_dgram_ref(), now()); + assert!(c3.as_dgram_ref().is_none()); } // Start a client/server and check setting frame. @@ -556,8 +556,8 @@ mod tests { sent = neqo_trans_conn.stream_send(decoder_stream, &[0x3]); assert_eq!(sent, Ok(1)); let out1 = neqo_trans_conn.process(None, now()); - let out2 = server.process(out1.dgram(), now()); - mem::drop(neqo_trans_conn.process(out2.dgram(), now())); + let out2 = server.process(out1.as_dgram_ref(), now()); + mem::drop(neqo_trans_conn.process(out2.as_dgram_ref(), now())); // assert no error occured. assert_not_closed(server); @@ -588,7 +588,7 @@ mod tests { let control = peer_conn.control_stream_id; peer_conn.stream_close_send(control).unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -603,7 +603,7 @@ mod tests { let sent = neqo_trans_conn.stream_send(control_stream, &[0x0, 0xd, 0x1, 0xf]); assert_eq!(sent, Ok(4)); let out = neqo_trans_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpMissingSettings); } @@ -615,7 +615,7 @@ mod tests { // send the second SETTINGS frame. 
peer_conn.control_send(&[0x4, 0x6, 0x1, 0x40, 0x64, 0x7, 0x40, 0x64]); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpFrameUnexpected); } @@ -630,7 +630,7 @@ mod tests { frame.encode(&mut e); peer_conn.control_send(e.as_ref()); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // check if the given connection got closed on invalid stream ids if valid { assert_not_closed(&mut hconn); @@ -673,7 +673,7 @@ mod tests { peer_conn.control_send(v); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpFrameUnexpected); } @@ -707,10 +707,10 @@ mod tests { .stream_send(new_stream_id, &[0x41, 0x19, 0x4, 0x4, 0x6, 0x0, 0x8, 0x0]) .unwrap(); let out = peer_conn.process(None, now()); - let out = hconn.process(out.dgram(), now()); - mem::drop(peer_conn.process(out.dgram(), now())); + let out = hconn.process(out.as_dgram_ref(), now()); + mem::drop(peer_conn.process(out.as_dgram_ref(), now())); let out = hconn.process(None, now()); - mem::drop(peer_conn.process(out.dgram(), now())); + mem::drop(peer_conn.process(out.as_dgram_ref(), now())); // check for stop-sending with Error::HttpStreamCreation. 
let mut stop_sending_event_found = false; @@ -738,8 +738,8 @@ mod tests { let push_stream_id = peer_conn.stream_create(StreamType::UniDi).unwrap(); _ = peer_conn.stream_send(push_stream_id, &[0x1]).unwrap(); let out = peer_conn.process(None, now()); - let out = hconn.process(out.dgram(), now()); - mem::drop(peer_conn.conn.process(out.dgram(), now())); + let out = hconn.process(out.as_dgram_ref(), now()); + mem::drop(peer_conn.conn.process(out.as_dgram_ref(), now())); assert_closed(&mut hconn, &Error::HttpStreamCreation); } @@ -755,38 +755,38 @@ mod tests { let mut sent = peer_conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // start sending SETTINGS frame sent = peer_conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x6]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x8]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + 
hconn.process(out.as_dgram_ref(), now()); assert_not_closed(&mut hconn); @@ -794,37 +794,37 @@ mod tests { sent = peer_conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x61]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x62]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x63]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x64]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // PUSH_PROMISE on a control stream will cause an error assert_closed(&mut hconn, &Error::HttpFrameUnexpected); @@ -840,7 +840,7 @@ mod tests { peer_conn.stream_close_send(stream_id).unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpFrame); } @@ -892,7 +892,7 @@ mod tests { peer_conn.stream_close_send(stream_id).unwrap(); let out = 
peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check connection event. There should be 1 Header and 2 data events. let mut headers_frames = 0; @@ -943,7 +943,7 @@ mod tests { .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check connection event. There should be 1 Header and no data events. let mut headers_frames = 0; @@ -987,8 +987,8 @@ mod tests { .unwrap(); peer_conn.stream_close_send(stream_id).unwrap(); - let out = peer_conn.process(out.dgram(), now()); - hconn.process(out.dgram(), now()); + let out = peer_conn.process(out.as_dgram_ref(), now()); + hconn.process(out.as_dgram_ref(), now()); while let Some(event) = hconn.next_event() { match event { @@ -1020,7 +1020,7 @@ mod tests { .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check connection event. There should be 1 Header and no data events. // The server will reset the stream. @@ -1052,8 +1052,8 @@ mod tests { } let out = hconn.process(None, now()); - let out = peer_conn.process(out.dgram(), now()); - hconn.process(out.dgram(), now()); + let out = peer_conn.process(out.as_dgram_ref(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check that STOP_SENDING and REET has been received. 
let mut reset = 0; @@ -1085,7 +1085,7 @@ mod tests { .stream_reset_send(CLIENT_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1098,7 +1098,7 @@ mod tests { .stream_reset_send(CLIENT_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1111,7 +1111,7 @@ mod tests { .stream_reset_send(CLIENT_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1125,7 +1125,7 @@ mod tests { .stream_stop_sending(SERVER_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1138,7 +1138,7 @@ mod tests { .stream_stop_sending(SERVER_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1151,7 +1151,7 @@ mod tests { .stream_stop_sending(SERVER_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1259,7 +1259,7 @@ mod tests { .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + 
hconn.process(out.as_dgram_ref(), now()); let mut requests = HashMap::new(); while let Some(event) = hconn.next_event() { diff --git a/neqo-http3/src/stream_type_reader.rs b/neqo-http3/src/stream_type_reader.rs index 364064f26b..775d8dc233 100644 --- a/neqo-http3/src/stream_type_reader.rs +++ b/neqo-http3/src/stream_type_reader.rs @@ -262,7 +262,7 @@ mod tests { // create a stream let stream_id = conn_s.stream_create(stream_type).unwrap(); let out = conn_s.process(None, now()); - mem::drop(conn_c.process(out.dgram(), now())); + mem::drop(conn_c.process(out.as_dgram_ref(), now())); Self { conn_c, @@ -285,7 +285,7 @@ mod tests { .stream_send(self.stream_id, &enc[i..=i]) .unwrap(); let out = self.conn_s.process(None, now()); - mem::drop(self.conn_c.process(out.dgram(), now())); + mem::drop(self.conn_c.process(out.as_dgram_ref(), now())); assert_eq!( self.decoder.receive(&mut self.conn_c).unwrap(), (ReceiveOutput::NoOutput, false) @@ -299,7 +299,7 @@ mod tests { self.conn_s.stream_close_send(self.stream_id).unwrap(); } let out = self.conn_s.process(None, now()); - mem::drop(self.conn_c.process(out.dgram(), now())); + mem::drop(self.conn_c.process(out.dgram().as_ref(), now())); assert_eq!(&self.decoder.receive(&mut self.conn_c), outcome); assert_eq!(self.decoder.done(), done); } diff --git a/neqo-http3/tests/httpconn.rs b/neqo-http3/tests/httpconn.rs index c78b3f0be8..facc1c00fe 100644 --- a/neqo-http3/tests/httpconn.rs +++ b/neqo-http3/tests/httpconn.rs @@ -94,19 +94,19 @@ fn process_client_events(conn: &mut Http3Client) { fn connect_peers(hconn_c: &mut Http3Client, hconn_s: &mut Http3Server) -> Option { assert_eq!(hconn_c.state(), Http3State::Initializing); let out = hconn_c.process(None, now()); // Initial - let out = hconn_s.process(out.dgram(), now()); // Initial + Handshake - let out = hconn_c.process(out.dgram(), now()); // ACK - mem::drop(hconn_s.process(out.dgram(), now())); //consume ACK + let out = hconn_s.process(out.as_dgram_ref(), now()); // Initial + 
Handshake + let out = hconn_c.process(out.as_dgram_ref(), now()); // ACK + mem::drop(hconn_s.process(out.as_dgram_ref(), now())); //consume ACK let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); hconn_c.authenticated(AuthenticationStatus::Ok, now()); let out = hconn_c.process(None, now()); // Handshake assert_eq!(hconn_c.state(), Http3State::Connected); - let out = hconn_s.process(out.dgram(), now()); // Handshake - let out = hconn_c.process(out.dgram(), now()); - let out = hconn_s.process(out.dgram(), now()); + let out = hconn_s.process(out.as_dgram_ref(), now()); // Handshake + let out = hconn_c.process(out.as_dgram_ref(), now()); + let out = hconn_s.process(out.as_dgram_ref(), now()); // assert!(hconn_s.settings_received); - let out = hconn_c.process(out.dgram(), now()); + let out = hconn_c.process(out.as_dgram_ref(), now()); // assert!(hconn_c.settings_received); out.dgram() @@ -122,11 +122,11 @@ fn connect_peers_with_network_propagation_delay( let mut now = now(); let out = hconn_c.process(None, now); // Initial now += net_delay; - let out = hconn_s.process(out.dgram(), now); // Initial + Handshake + let out = hconn_s.process(out.as_dgram_ref(), now); // Initial + Handshake now += net_delay; - let out = hconn_c.process(out.dgram(), now); // ACK + let out = hconn_c.process(out.as_dgram_ref(), now); // ACK now += net_delay; - let out = hconn_s.process(out.dgram(), now); //consume ACK + let out = hconn_s.process(out.as_dgram_ref(), now); //consume ACK assert!(out.dgram().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); @@ -135,13 +135,13 @@ fn connect_peers_with_network_propagation_delay( let out = hconn_c.process(None, now); // Handshake assert_eq!(hconn_c.state(), Http3State::Connected); now += net_delay; - let out = hconn_s.process(out.dgram(), now); // HANDSHAKE_DONE + let 
out = hconn_s.process(out.as_dgram_ref(), now); // HANDSHAKE_DONE now += net_delay; - let out = hconn_c.process(out.dgram(), now); // Consume HANDSHAKE_DONE, send control streams. + let out = hconn_c.process(out.as_dgram_ref(), now); // Consume HANDSHAKE_DONE, send control streams. now += net_delay; - let out = hconn_s.process(out.dgram(), now); // consume and send control streams. + let out = hconn_s.process(out.as_dgram_ref(), now); // consume and send control streams. now += net_delay; - let out = hconn_c.process(out.dgram(), now); // consume control streams. + let out = hconn_c.process(out.as_dgram_ref(), now); // consume control streams. (out.dgram(), now) } @@ -156,8 +156,8 @@ fn connect() -> (Http3Client, Http3Server, Option) { fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server, out_ex: Option) { let mut out = out_ex; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } @@ -185,17 +185,17 @@ fn test_fetch() { .unwrap(); assert_eq!(req, 0); hconn_c.stream_close_send(req).unwrap(); - let out = hconn_c.process(dgram, now()); + let out = hconn_c.process(dgram.as_ref(), now()); qtrace!("-----server"); - let out = hconn_s.process(out.dgram(), now()); - mem::drop(hconn_c.process(out.dgram(), now())); + let out = hconn_s.process(out.as_dgram_ref(), now()); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); process_server_events(&mut hconn_s); let out = hconn_s.process(None, now()); qtrace!("-----client"); - mem::drop(hconn_c.process(out.dgram(), now())); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); let out = hconn_s.process(None, now()); - mem::drop(hconn_c.process(out.dgram(), now())); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); process_client_events(&mut hconn_c); } @@ -214,10 +214,10 @@ fn test_103_response() { .unwrap(); assert_eq!(req, 
0); hconn_c.stream_close_send(req).unwrap(); - let out = hconn_c.process(dgram, now()); + let out = hconn_c.process(dgram.as_ref(), now()); - let out = hconn_s.process(out.dgram(), now()); - mem::drop(hconn_c.process(out.dgram(), now())); + let out = hconn_s.process(out.as_dgram_ref(), now()); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); let mut request = receive_request(&mut hconn_s).unwrap(); let info_headers = [ @@ -228,7 +228,7 @@ fn test_103_response() { request.send_headers(&info_headers).unwrap(); let out = hconn_s.process(None, now()); - mem::drop(hconn_c.process(out.dgram(), now())); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); let info_headers_event = |e| { matches!(e, Http3ClientEvent::HeaderReady { headers, @@ -239,7 +239,7 @@ fn test_103_response() { set_response(&mut request); let out = hconn_s.process(None, now()); - mem::drop(hconn_c.process(out.dgram(), now())); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); process_client_events(&mut hconn_c) } @@ -371,8 +371,8 @@ fn zerortt() { .unwrap(); hconn_c.stream_close_send(req).unwrap(); - let out = hconn_c.process(dgram, now()); - let out = hconn_s.process(out.dgram(), now()); + let out = hconn_c.process(dgram.as_ref(), now()); + let out = hconn_s.process(out.as_dgram_ref(), now()); let mut request_stream = None; let mut zerortt_state_change = false; @@ -436,7 +436,7 @@ fn fetch_noresponse_will_idletimeout() { .unwrap(); assert_eq!(req, 0); hconn_c.stream_close_send(req).unwrap(); - let _out = hconn_c.process(dgram, now); + let _out = hconn_c.process(dgram.as_ref(), now); qtrace!("-----server"); let mut done = false; diff --git a/neqo-http3/tests/priority.rs b/neqo-http3/tests/priority.rs index df9259ad4b..4ecd2e7a40 100644 --- a/neqo-http3/tests/priority.rs +++ b/neqo-http3/tests/priority.rs @@ -17,9 +17,9 @@ use test_fixture::*; fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, 
now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); let client_done = out.is_none(); - out = server.process(out, now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() && client_done { break; } @@ -32,26 +32,26 @@ fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { let out = client.process(None, now()); assert_eq!(client.state(), Http3State::Initializing); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(client.events().any(authentication_needed)); client.authenticated(AuthenticationStatus::Ok, now()); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); assert_eq!(client.state(), Http3State::Connected); // Exchange H3 setttings - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - _ = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + _ = server.process(out.as_dgram_ref(), now()); } fn connect() -> (Http3Client, Http3Server) { diff --git a/neqo-http3/tests/send_message.rs b/neqo-http3/tests/send_message.rs index ef4a571dff..507c4bd552 100644 --- a/neqo-http3/tests/send_message.rs +++ 
b/neqo-http3/tests/send_message.rs @@ -28,8 +28,8 @@ lazy_static! { fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index e0556708f1..fb82350dd3 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -44,16 +44,16 @@ fn connect() -> (Http3Client, Http3Server) { let out = client.process(None, now()); assert_eq!(client.state(), Http3State::Initializing); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(client.events().any(authentication_needed)); client.authenticated(AuthenticationStatus::Ok, now()); - let mut out = client.process(out.dgram(), now()).dgram(); + let mut out = client.process(out.as_dgram_ref(), now()).dgram(); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); @@ -61,9 +61,9 @@ fn connect() -> (Http3Client, Http3Server) { // Exchange H3 setttings loop { - out = server.process(out, now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); let dgram_present = out.is_some(); - out = client.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); if out.is_none() && !dgram_present { break; } @@ -74,8 +74,8 @@ fn connect() -> (Http3Client, Http3Server) { fn 
exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index bb8f38c76b..0f6e665cf2 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -149,7 +149,7 @@ fn process_loop( } if sz > 0 { let received = Datagram::new(nctx.remote_addr, nctx.local_addr, &buf[..sz]); - client.process_input(received, Instant::now()); + client.process_input(&received, Instant::now()); } } } @@ -310,7 +310,7 @@ fn process_loop_h3( } if sz > 0 { let received = Datagram::new(nctx.remote_addr, nctx.local_addr, &buf[..sz]); - handler.h3.process_input(received, Instant::now()); + handler.h3.process_input(&received, Instant::now()); } } } diff --git a/neqo-qpack/src/decoder.rs b/neqo-qpack/src/decoder.rs index b4f9a7284a..5b3b93dcee 100644 --- a/neqo-qpack/src/decoder.rs +++ b/neqo-qpack/src/decoder.rs @@ -319,7 +319,7 @@ mod tests { .stream_send(decoder.recv_stream_id, encoder_instruction) .unwrap(); let out = decoder.peer_conn.process(None, now()); - mem::drop(decoder.conn.process(out.dgram(), now())); + mem::drop(decoder.conn.process(out.as_dgram_ref(), now())); assert_eq!( decoder .decoder @@ -331,7 +331,7 @@ mod tests { fn send_instructions_and_check(decoder: &mut TestDecoder, decoder_instruction: &[u8]) { decoder.decoder.send(&mut decoder.conn).unwrap(); let out = decoder.conn.process(None, now()); - mem::drop(decoder.peer_conn.process(out.dgram(), now())); + mem::drop(decoder.peer_conn.process(out.as_dgram_ref(), now())); let mut buf = [0_u8; 100]; let (amount, fin) = decoder .peer_conn diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index 211a41fc12..9893229dbc 100644 --- a/neqo-qpack/src/encoder.rs +++ 
b/neqo-qpack/src/encoder.rs @@ -556,8 +556,8 @@ mod tests { pub fn send_instructions(&mut self, encoder_instruction: &[u8]) { self.encoder.send_encoder_updates(&mut self.conn).unwrap(); let out = self.conn.process(None, now()); - let out2 = self.peer_conn.process(out.dgram(), now()); - mem::drop(self.conn.process(out2.dgram(), now())); + let out2 = self.peer_conn.process(out.as_dgram_ref(), now()); + mem::drop(self.conn.process(out2.as_dgram_ref(), now())); let mut buf = [0_u8; 100]; let (amount, fin) = self .peer_conn @@ -619,7 +619,7 @@ mod tests { .stream_send(encoder.recv_stream_id, decoder_instruction) .unwrap(); let out = encoder.peer_conn.process(None, now()); - mem::drop(encoder.conn.process(out.dgram(), now())); + mem::drop(encoder.conn.process(out.as_dgram_ref(), now())); assert!(encoder .encoder .read_instructions(&mut encoder.conn, encoder.recv_stream_id) @@ -1540,7 +1540,7 @@ mod tests { // exchange a flow control update. let out = encoder.peer_conn.process(None, now()); - mem::drop(encoder.conn.process(out.dgram(), now())); + mem::drop(encoder.conn.process(out.as_dgram_ref(), now())); // Try writing a new header block. Now, headers will be added to the dynamic table again, because // instructions can be sent. @@ -1587,7 +1587,7 @@ mod tests { .send_encoder_updates(&mut encoder.conn) .unwrap(); let out = encoder.conn.process(None, now()); - mem::drop(encoder.peer_conn.process(out.dgram(), now())); + mem::drop(encoder.peer_conn.process(out.as_dgram_ref(), now())); // receive an insert count increment. 
recv_instruction(&mut encoder, &[0x01]); diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 3eda9189b3..3e91b5cc28 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -351,7 +351,7 @@ fn qns_read_response(filename: &str) -> Option> { } trait HttpServer: Display { - fn process(&mut self, dgram: Option, now: Instant) -> Output; + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output; fn process_events(&mut self, args: &Args, now: Instant); fn set_qlog_dir(&mut self, dir: Option); fn set_ciphers(&mut self, ciphers: &[Cipher]); @@ -467,7 +467,7 @@ impl Display for SimpleServer { } impl HttpServer for SimpleServer { - fn process(&mut self, dgram: Option, now: Instant) -> Output { + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { self.server.process(dgram, now) } @@ -734,7 +734,7 @@ impl ServersRunner { .unwrap_or(first) } - fn process(&mut self, inx: usize, dgram: Option) -> bool { + fn process(&mut self, inx: usize, dgram: Option<&Datagram>) -> bool { match self.server.process(dgram, self.args.now()) { Output::Datagram(dgram) => { let socket = self.find_socket(dgram.source()); @@ -770,7 +770,7 @@ impl ServersRunner { if dgram.is_none() { break; } - _ = self.process(inx, dgram); + _ = self.process(inx, dgram.as_ref()); } } else { _ = self.process(inx, None); diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index c88464ff31..01a097a914 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -199,7 +199,7 @@ impl Http09Server { } impl HttpServer for Http09Server { - fn process(&mut self, dgram: Option, now: Instant) -> Output { + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { self.server.process(dgram, now) } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 3a30c0f4d3..4e50176831 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ 
-917,15 +917,20 @@ impl Connection { } /// Process new input datagrams on the connection. - pub fn process_input(&mut self, d: Datagram, now: Instant) { + pub fn process_input(&mut self, d: &Datagram, now: Instant) { self.input(d, now, now); self.process_saved(now); self.streams.cleanup_closed_streams(); } /// Process new input datagrams on the connection. - pub fn process_multiple_input(&mut self, dgrams: Vec, now: Instant) { - if dgrams.is_empty() { + pub fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let dgrams = dgrams.into_iter(); + if dgrams.len() == 0 { return; } @@ -1035,7 +1040,7 @@ impl Connection { /// Process input and generate output. #[must_use = "Output of the process function must be handled"] - pub fn process(&mut self, dgram: Option, now: Instant) -> Output { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { if let Some(d) = dgram { self.input(d, now, now); self.process_saved(now); @@ -1134,18 +1139,18 @@ impl Connection { debug_assert!(self.crypto.states.rx_hp(self.version, cspace).is_some()); for saved in self.saved_datagrams.take_saved() { qtrace!([self], "input saved @{:?}: {:?}", saved.t, saved.d); - self.input(saved.d, saved.t, now); + self.input(&saved.d, saved.t, now); } } } /// In case a datagram arrives that we can only partially process, save any /// part that we don't have keys for. - fn save_datagram(&mut self, cspace: CryptoSpace, d: Datagram, remaining: usize, now: Instant) { + fn save_datagram(&mut self, cspace: CryptoSpace, d: &Datagram, remaining: usize, now: Instant) { let d = if remaining < d.len() { Datagram::new(d.source(), d.destination(), &d[d.len() - remaining..]) } else { - d + d.clone() }; self.saved_datagrams.save(cspace, d, now); self.stats.borrow_mut().saved_datagrams += 1; @@ -1371,7 +1376,7 @@ impl Connection { /// Take a datagram as input. This reports an error if the packet was bad. 
/// This takes two times: when the datagram was received, and the current time. - fn input(&mut self, d: Datagram, received: Instant, now: Instant) { + fn input(&mut self, d: &Datagram, received: Instant, now: Instant) { // First determine the path. let path = self.paths.find_path_with_rebinding( d.destination(), @@ -1384,7 +1389,7 @@ impl Connection { self.capture_error(Some(path), now, 0, res).ok(); } - fn input_path(&mut self, path: &PathRef, d: Datagram, now: Instant) -> Res<()> { + fn input_path(&mut self, path: &PathRef, d: &Datagram, now: Instant) -> Res<()> { let mut slc = &d[..]; let mut dcid = None; @@ -1432,7 +1437,7 @@ impl Connection { self.stats.borrow_mut().dups_rx += 1; } else { match self.process_packet(path, &payload, now) { - Ok(migrate) => self.postprocess_packet(path, &d, &packet, migrate, now), + Ok(migrate) => self.postprocess_packet(path, d, &packet, migrate, now), Err(e) => { self.ensure_error_path(path, &packet, now); return Err(e); @@ -1464,7 +1469,7 @@ impl Connection { // Decryption failure, or not having keys is not fatal. // If the state isn't available, or we can't decrypt the packet, drop // the rest of the datagram on the floor, but don't generate an error. 
- self.check_stateless_reset(path, &d, dcid.is_none(), now)?; + self.check_stateless_reset(path, d, dcid.is_none(), now)?; self.stats.borrow_mut().pkt_dropped("Decryption failure"); qlog::packet_dropped(&mut self.qlog, &packet); } @@ -1472,7 +1477,7 @@ impl Connection { slc = remainder; dcid = Some(ConnectionId::from(packet.dcid())); } - self.check_stateless_reset(path, &d, dcid.is_none(), now)?; + self.check_stateless_reset(path, d, dcid.is_none(), now)?; Ok(()) } diff --git a/neqo-transport/src/connection/tests/ackrate.rs b/neqo-transport/src/connection/tests/ackrate.rs index 8d0f73f154..3c909bcc70 100644 --- a/neqo-transport/src/connection/tests/ackrate.rs +++ b/neqo-transport/src/connection/tests/ackrate.rs @@ -71,7 +71,7 @@ fn ack_rate_exit_slow_start() { // and to send ACK_FREQUENCY. now += DEFAULT_RTT / 2; assert_eq!(client.stats().frame_tx.ack_frequency, 0); - let af = client.process(Some(ack), now).dgram(); + let af = client.process(Some(&ack), now).dgram(); assert!(af.is_some()); assert_eq!(client.stats().frame_tx.ack_frequency, 1); } @@ -120,11 +120,11 @@ fn ack_rate_client_one_rtt() { // The first packet will elicit an immediate ACK however, so do this twice. let d = send_something(&mut client, now); now += RTT / 2; - let ack = server.process(Some(d), now).dgram(); + let ack = server.process(Some(&d), now).dgram(); assert!(ack.is_some()); let d = send_something(&mut client, now); now += RTT / 2; - let delay = server.process(Some(d), now).callback(); + let delay = server.process(Some(&d), now).callback(); assert_eq!(delay, RTT); assert_eq!(client.stats().frame_tx.ack_frequency, 1); @@ -143,11 +143,11 @@ fn ack_rate_server_half_rtt() { now += RTT / 2; // The client now will acknowledge immediately because it has been more than // an RTT since it last sent an acknowledgment. 
- let ack = client.process(Some(d), now); + let ack = client.process(Some(&d), now); assert!(ack.as_dgram_ref().is_some()); let d = send_something(&mut server, now); now += RTT / 2; - let delay = client.process(Some(d), now).callback(); + let delay = client.process(Some(&d), now).callback(); assert_eq!(delay, RTT / 2); assert_eq!(server.stats().frame_tx.ack_frequency, 1); @@ -171,7 +171,7 @@ fn migrate_ack_delay() { let client2 = send_something(&mut client, now); assertions::assert_v4_path(&client2, false); // Doesn't. Is dropped. now += DEFAULT_RTT / 2; - server.process_input(client1, now); + server.process_input(&client1, now); let stream = client.stream_create(StreamType::UniDi).unwrap(); let now = increase_cwnd(&mut client, &mut server, stream, now); @@ -187,7 +187,7 @@ fn migrate_ack_delay() { // After noticing this new loss, the client sends ACK_FREQUENCY. // It has sent a few before (as we dropped `client2`), so ignore those. let ad_before = client.stats().frame_tx.ack_frequency; - let af = client.process(Some(ack), now).dgram(); + let af = client.process(Some(&ack), now).dgram(); assert!(af.is_some()); assert_eq!(client.stats().frame_tx.ack_frequency, ad_before + 1); } diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index 26e4dbd014..f974fd94a0 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -66,7 +66,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { // Client: Process ack now += DEFAULT_RTT / 2; - client.process_input(s_ack, now); + client.process_input(&s_ack, now); assert_eq!( client.stats().frame_rx.largest_acknowledged, flight1_largest @@ -88,7 +88,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { // Client: Process ack now += DEFAULT_RTT / 2; - client.process_input(s_ack, now); + client.process_input(&s_ack, now); assert_eq!( client.stats().frame_rx.largest_acknowledged, flight2_largest @@ -118,7 +118,7 @@ fn 
cc_cong_avoidance_recovery_period_unchanged() { // Server: Receive and generate ack let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams, now); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); let cwnd1 = cwnd(&client); @@ -126,7 +126,7 @@ fn cc_cong_avoidance_recovery_period_unchanged() { let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams2, now); // ACK more packets but they were sent before end of recovery period - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // cwnd should not have changed since ACKed packets were sent before // recovery period expired @@ -156,12 +156,12 @@ fn single_packet_on_recovery() { // Acknowledge just one packet and cause one packet to be declared lost. // The length is the amount of credit the client should have. - let ack = server.process(Some(delivered), now).dgram(); + let ack = server.process(Some(&delivered), now).dgram(); assert!(ack.is_some()); // The client should see the loss and enter recovery. // As there are many outstanding packets, there should be no available cwnd. - client.process_input(ack.unwrap(), now); + client.process_input(&ack.unwrap(), now); assert_eq!(cwnd_avail(&client), 0); // The client should send one packet, ignoring the cwnd. @@ -193,7 +193,7 @@ fn cc_cong_avoidance_recovery_period_to_cong_avoidance() { // Client: Process ack now += DEFAULT_RTT / 2; - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // Should be in CARP now. now += DEFAULT_RTT / 2; @@ -227,7 +227,7 @@ fn cc_cong_avoidance_recovery_period_to_cong_avoidance() { let most = c_tx_dgrams.len() - usize::try_from(DEFAULT_ACK_PACKET_TOLERANCE).unwrap() - 1; let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams.drain(..most), now); assert_eq!(cwnd(&client), expected_cwnd); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // make sure to fill cwnd again. 
let (mut new_pkts, next_now) = fill_cwnd(&mut client, stream_id, now); now = next_now; @@ -235,7 +235,7 @@ fn cc_cong_avoidance_recovery_period_to_cong_avoidance() { let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams, now); assert_eq!(cwnd(&client), expected_cwnd); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // make sure to fill cwnd again. let (mut new_pkts, next_now) = fill_cwnd(&mut client, stream_id, now); now = next_now; @@ -287,7 +287,7 @@ fn cc_slow_start_to_persistent_congestion_some_acks() { let s_ack = ack_bytes(&mut server, stream, c_tx_dgrams, now); now += Duration::from_millis(100); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // send bytes that will be lost let (_, next_now) = fill_cwnd(&mut client, stream, now); @@ -333,7 +333,7 @@ fn cc_persistent_congestion_to_slow_start() { // No longer in CARP. (pkts acked from after start of CARP) // Should be in slow start now. - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // ACKing 2 packets should let client send 4. let (c_tx_dgrams, _) = fill_cwnd(&mut client, stream, now); @@ -371,11 +371,11 @@ fn ack_are_not_cc() { // The client can ack the server packet even if cc windows is full. 
qdebug!([client], "Process ack-eliciting"); - let ack_pkt = client.process(ack_eliciting_packet, now).dgram(); + let ack_pkt = client.process(ack_eliciting_packet.as_ref(), now).dgram(); assert!(ack_pkt.is_some()); qdebug!([server], "Handle ACK"); let prev_ack_count = server.stats().frame_rx.ack; - server.process_input(ack_pkt.unwrap(), now); + server.process_input(&ack_pkt.unwrap(), now); assert_eq!(server.stats().frame_rx.ack, prev_ack_count + 1); } diff --git a/neqo-transport/src/connection/tests/close.rs b/neqo-transport/src/connection/tests/close.rs index a9f1fafa25..6efbb6e24f 100644 --- a/neqo-transport/src/connection/tests/close.rs +++ b/neqo-transport/src/connection/tests/close.rs @@ -38,7 +38,7 @@ fn connection_close() { let out = client.process(None, now); - server.process_input(out.dgram().unwrap(), now); + server.process_input(&out.dgram().unwrap(), now); assert_draining(&server, &Error::PeerApplicationError(42)); } @@ -55,7 +55,7 @@ fn connection_close_with_long_reason_string() { let out = client.process(None, now); - server.process_input(out.dgram().unwrap(), now); + server.process_input(&out.dgram().unwrap(), now); assert_draining(&server, &Error::PeerApplicationError(42)); } @@ -68,7 +68,7 @@ fn early_application_close() { // One flight each. 
let dgram = client.process(None, now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); server.close(now(), 77, String::new()); @@ -76,7 +76,7 @@ fn early_application_close() { let dgram = server.process(None, now()).dgram(); assert!(dgram.is_some()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert_draining(&client, &Error::PeerError(ERROR_APPLICATION_CLOSE)); } @@ -93,13 +93,13 @@ fn bad_tls_version() { let dgram = client.process(None, now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert_eq!( *server.state(), State::Closed(ConnectionError::Transport(Error::ProtocolViolation)) ); assert!(dgram.is_some()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert_draining(&client, &Error::PeerError(Error::ProtocolViolation.code())); } @@ -116,11 +116,11 @@ fn closing_timers_interation() { // We're going to induce time-based loss recovery so that timer is set. let _p1 = send_something(&mut client, now); let p2 = send_something(&mut client, now); - let ack = server.process(Some(p2), now).dgram(); + let ack = server.process(Some(&p2), now).dgram(); assert!(ack.is_some()); // This is an ACK. // After processing the ACK, we should be on the loss recovery timer. - let cb = client.process(ack, now).callback(); + let cb = client.process(ack.as_ref(), now).callback(); assert_ne!(cb, Duration::from_secs(0)); now += cb; @@ -153,7 +153,7 @@ fn closing_and_draining() { // The client will spit out the same packet in response to anything it receives. 
let p3 = send_something(&mut server, now()); - let client_close2 = client.process(Some(p3), now()).dgram(); + let client_close2 = client.process(Some(&p3), now()).dgram(); assert_eq!( client_close.as_ref().unwrap().len(), client_close2.as_ref().unwrap().len() @@ -168,14 +168,14 @@ fn closing_and_draining() { ); // When the server receives the close, it too should generate CONNECTION_CLOSE. - let server_close = server.process(client_close, now()).dgram(); + let server_close = server.process(client_close.as_ref(), now()).dgram(); assert!(server.state().closed()); assert!(server_close.is_some()); // .. but it ignores any further close packets. - let server_close_timer = server.process(client_close2, now()).callback(); + let server_close_timer = server.process(client_close2.as_ref(), now()).callback(); assert_ne!(server_close_timer, Duration::from_secs(0)); // Even a legitimate packet without a close in it. - let server_close_timer2 = server.process(Some(p1), now()).callback(); + let server_close_timer2 = server.process(Some(&p1), now()).callback(); assert_eq!(server_close_timer, server_close_timer2); let end = server.process(None, now() + server_close_timer); @@ -201,6 +201,6 @@ fn stateless_reset_client() { .unwrap(); connect_force_idle(&mut client, &mut server); - client.process_input(Datagram::new(addr(), addr(), vec![77; 21]), now()); + client.process_input(&Datagram::new(addr(), addr(), vec![77; 21]), now()); assert_draining(&client, &Error::StatelessReset); } diff --git a/neqo-transport/src/connection/tests/datagram.rs b/neqo-transport/src/connection/tests/datagram.rs index 4348f2dd3b..891773ddaa 100644 --- a/neqo-transport/src/connection/tests/datagram.rs +++ b/neqo-transport/src/connection/tests/datagram.rs @@ -80,7 +80,7 @@ fn datagram_enabled_on_client() { let out = server.process_output(now()).dgram().unwrap(); assert_eq!(server.stats().frame_tx.datagram, dgram_sent + 1); - client.process_input(out, now()); + client.process_input(&out, now()); 
assert!(matches!( client.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU @@ -108,7 +108,7 @@ fn datagram_enabled_on_server() { let out = client.process_output(now()).dgram().unwrap(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); - server.process_input(out, now()); + server.process_input(&out, now()); assert!(matches!( server.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU @@ -205,7 +205,7 @@ fn datagram_acked() { assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); let dgram_received = server.stats().frame_rx.datagram; - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); assert_eq!(server.stats().frame_rx.datagram, dgram_received + 1); let now = now() + AT_LEAST_PTO; // Ack should be sent @@ -218,7 +218,7 @@ fn datagram_acked() { ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU )); - client.process_input(out.unwrap(), now); + client.process_input(&out.unwrap(), now); assert!(matches!( client.next_event().unwrap(), ConnectionEvent::OutgoingDatagramOutcome { id, outcome } if id == 1 && outcome == OutgoingDatagramOutcome::Acked @@ -230,7 +230,7 @@ fn send_packet_and_get_server_event( server: &mut Connection, ) -> ConnectionEvent { let out = client.process_output(now()).dgram(); - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); let mut events: Vec<_> = server .events() .filter_map(|evt| match evt { @@ -358,7 +358,7 @@ fn dgram_no_allowed() { let out = server.process_output(now()).dgram().unwrap(); server.test_frame_writer = None; - client.process_input(out, now()); + client.process_input(&out, now()); assert_error( &client, @@ -379,7 +379,7 @@ fn dgram_too_big() { let out = server.process_output(now()).dgram().unwrap(); server.test_frame_writer = None; - client.process_input(out, now()); + client.process_input(&out, now()); assert_error( &client, @@ -414,7 +414,7 @@ fn 
outgoing_datagram_queue_full() { // Send DATA_SMALLER_THAN_MTU_2 datagram let out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); assert!(matches!( server.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU_2 @@ -424,7 +424,7 @@ fn outgoing_datagram_queue_full() { let dgram_sent2 = client.stats().frame_tx.datagram; let out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent2 + 1); - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); assert!(matches!( server.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_MTU @@ -438,7 +438,7 @@ fn send_datagram(sender: &mut Connection, receiver: &mut Connection, data: &[u8] assert_eq!(sender.stats().frame_tx.datagram, dgram_sent + 1); let dgram_received = receiver.stats().frame_rx.datagram; - receiver.process_input(out, now()); + receiver.process_input(&out, now()); assert_eq!(receiver.stats().frame_rx.datagram, dgram_received + 1); } @@ -552,7 +552,7 @@ fn multiple_quic_datagrams_in_one_packet() { let out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 2); - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); let datagram = |e: &_| matches!(e, ConnectionEvent::Datagram(..)); assert_eq!(server.events().filter(datagram).count(), 2); } diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/fuzzing.rs index 24201eff26..75caa7e857 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/fuzzing.rs @@ -27,7 +27,7 @@ fn no_encryption() { let client_pkt = client.process_output(now()).dgram().unwrap(); assert!(client_pkt[..client_pkt.len() - 
FIXED_TAG_FUZZING.len()].ends_with(DATA_CLIENT)); - server.process_input(client_pkt, now()); + server.process_input(&client_pkt, now()); let mut buf = vec![0; 100]; let (len, _) = server.stream_recv(stream_id, &mut buf).unwrap(); assert_eq!(len, DATA_CLIENT.len()); @@ -36,7 +36,7 @@ fn no_encryption() { let server_pkt = server.process_output(now()).dgram().unwrap(); assert!(server_pkt[..server_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_SERVER)); - client.process_input(server_pkt, now()); + client.process_input(&server_pkt, now()); let (len, _) = client.stream_recv(stream_id, &mut buf).unwrap(); assert_eq!(len, DATA_SERVER.len()); assert_eq!(&buf[..len], DATA_SERVER); diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 5083ee7dcb..602611d34f 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -45,31 +45,31 @@ fn full_handshake() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_eq!(out.as_dgram_ref().unwrap().len(), PATH_MTU_V6); qdebug!("---- client: cert verification"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); assert!(maybe_authenticate(&mut client)); qdebug!("---- client: SH..FIN -> FIN"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_eq!(*client.state(), State::Connected); qdebug!("---- server: FIN -> ACKS"); - let out = server.process(out.dgram(), now()); + let out = 
server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_eq!(*server.state(), State::Confirmed); qdebug!("---- client: ACKS -> 0"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); assert_eq!(*client.state(), State::Confirmed); } @@ -83,14 +83,14 @@ fn handshake_failed_authentication() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); qdebug!("---- client: cert verification"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, ConnectionEvent::AuthenticationNeeded); @@ -103,7 +103,7 @@ fn handshake_failed_authentication() { assert!(out.as_dgram_ref().is_some()); qdebug!("---- server: Alert(certificate_revoked)"); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_error(&client, &ConnectionError::Transport(Error::CryptoAlert(44))); assert_error(&server, &ConnectionError::Transport(Error::PeerError(300))); @@ -145,16 +145,16 @@ fn dup_server_flight1() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let out_to_rep = server.process(out.dgram(), now()); + let out_to_rep = server.process(out.as_dgram_ref(), now()); assert!(out_to_rep.as_dgram_ref().is_some()); qdebug!("Output={:0x?}", out_to_rep.as_dgram_ref()); qdebug!("---- client: cert verification"); - let out = client.process(Some(out_to_rep.as_dgram_ref().unwrap().clone()), now()); + let out = 
client.process(Some(out_to_rep.as_dgram_ref().unwrap()), now()); assert!(out.as_dgram_ref().is_some()); qdebug!("Output={:0x?}", out.as_dgram_ref()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); assert!(maybe_authenticate(&mut client)); @@ -169,7 +169,7 @@ fn dup_server_flight1() { assert_eq!(1, client.stats().dropped_rx); qdebug!("---- Dup, ignored"); - let out = client.process(out_to_rep.dgram(), now()); + let out = client.process(out_to_rep.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); qdebug!("Output={:0x?}", out.as_dgram_ref()); @@ -199,12 +199,12 @@ fn crypto_frame_split() { // The entire server flight doesn't fit in a single packet because the // certificate is large, therefore the server will produce 2 packets. - let server1 = server.process(client1.dgram(), now()); + let server1 = server.process(client1.as_dgram_ref(), now()); assert!(server1.as_dgram_ref().is_some()); let server2 = server.process(None, now()); assert!(server2.as_dgram_ref().is_some()); - let client2 = client.process(server1.dgram(), now()); + let client2 = client.process(server1.as_dgram_ref(), now()); // This is an ack. assert!(client2.as_dgram_ref().is_some()); // The client might have the certificate now, so we can't guarantee that @@ -213,11 +213,11 @@ fn crypto_frame_split() { assert_eq!(*client.state(), State::Handshaking); // let server process the ack for the first packet. - let server3 = server.process(client2.dgram(), now()); + let server3 = server.process(client2.as_dgram_ref(), now()); assert!(server3.as_dgram_ref().is_none()); // Consume the second packet from the server. - let client3 = client.process(server2.dgram(), now()); + let client3 = client.process(server2.as_dgram_ref(), now()); // Check authentication. let auth2 = maybe_authenticate(&mut client); @@ -225,13 +225,13 @@ fn crypto_frame_split() { // Now client has all data to finish handshake. 
assert_eq!(*client.state(), State::Connected); - let client4 = client.process(server3.dgram(), now()); + let client4 = client.process(server3.as_dgram_ref(), now()); // One of these will contain data depending on whether Authentication was completed // after the first or second server packet. assert!(client3.as_dgram_ref().is_some() ^ client4.as_dgram_ref().is_some()); - mem::drop(server.process(client3.dgram(), now())); - mem::drop(server.process(client4.dgram(), now())); + mem::drop(server.process(client3.as_dgram_ref(), now())); + mem::drop(server.process(client4.as_dgram_ref(), now())); assert_eq!(*client.state(), State::Connected); assert_eq!(*server.state(), State::Confirmed); @@ -263,19 +263,19 @@ fn send_05rtt() { let c1 = client.process(None, now()).dgram(); assert!(c1.is_some()); - let s1 = server.process(c1, now()).dgram().unwrap(); + let s1 = server.process(c1.as_ref(), now()).dgram().unwrap(); assert_eq!(s1.len(), PATH_MTU_V6); // The server should accept writes at this point. let s2 = send_something(&mut server, now()); // Complete the handshake at the client. - client.process_input(s1, now()); + client.process_input(&s1, now()); maybe_authenticate(&mut client); assert_eq!(*client.state(), State::Connected); // The client should receive the 0.5-RTT data now. - client.process_input(s2, now()); + client.process_input(&s2, now()); let mut buf = vec![0; DEFAULT_STREAM_DATA.len() + 1]; let stream_id = client .events() @@ -300,19 +300,19 @@ fn reorder_05rtt() { let c1 = client.process(None, now()).dgram(); assert!(c1.is_some()); - let s1 = server.process(c1, now()).dgram().unwrap(); + let s1 = server.process(c1.as_ref(), now()).dgram().unwrap(); // The server should accept writes at this point. let s2 = send_something(&mut server, now()); // We can't use the standard facility to complete the handshake, so // drive it as aggressively as possible. 
- client.process_input(s2, now()); + client.process_input(&s2, now()); assert_eq!(client.stats().saved_datagrams, 1); // After processing the first packet, the client should go back and // process the 0.5-RTT packet data, which should make data available. - client.process_input(s1, now()); + client.process_input(&s1, now()); // We can't use `maybe_authenticate` here as that consumes events. client.authenticated(AuthenticationStatus::Ok, now()); assert_eq!(*client.state(), State::Connected); @@ -350,7 +350,7 @@ fn reorder_05rtt_with_0rtt() { server.send_ticket(now, &[]).unwrap(); let ticket = server.process_output(now).dgram().unwrap(); now += RTT / 2; - client.process_input(ticket, now); + client.process_input(&ticket, now); let token = get_tokens(&mut client).pop().unwrap(); let mut client = default_client(); @@ -367,14 +367,14 @@ fn reorder_05rtt_with_0rtt() { // Handle the first packet and send 0.5-RTT in response. Drop the response. now += RTT / 2; - mem::drop(server.process(Some(c1), now).dgram().unwrap()); + mem::drop(server.process(Some(&c1), now).dgram().unwrap()); // The gap in 0-RTT will result in this 0.5 RTT containing an ACK. - server.process_input(c2, now); + server.process_input(&c2, now); let s2 = send_something(&mut server, now); // Save the 0.5 RTT. now += RTT / 2; - client.process_input(s2, now); + client.process_input(&s2, now); assert_eq!(client.stats().saved_datagrams, 1); // Now PTO at the client and cause the server to re-send handshake packets. @@ -382,20 +382,20 @@ fn reorder_05rtt_with_0rtt() { let c3 = client.process(None, now).dgram(); now += RTT / 2; - let s3 = server.process(c3, now).dgram().unwrap(); + let s3 = server.process(c3.as_ref(), now).dgram().unwrap(); assertions::assert_no_1rtt(&s3[..]); // The client should be able to process the 0.5 RTT now. // This should contain an ACK, so we are processing an ACK from the past. 
now += RTT / 2; - client.process_input(s3, now); + client.process_input(&s3, now); maybe_authenticate(&mut client); let c4 = client.process(None, now).dgram(); assert_eq!(*client.state(), State::Connected); assert_eq!(client.paths.rtt(), RTT); now += RTT / 2; - server.process_input(c4.unwrap(), now); + server.process_input(&c4.unwrap(), now); assert_eq!(*server.state(), State::Confirmed); // Don't check server RTT as it will be massively inflated by a // poor initial estimate received when the server dropped the @@ -416,7 +416,7 @@ fn coalesce_05rtt() { let c1 = client.process(None, now).dgram(); assert!(c1.is_some()); now += RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); assert!(s1.is_some()); // Drop the server flight. Then send some data. @@ -431,7 +431,7 @@ fn coalesce_05rtt() { let c2 = client.process(None, now).dgram(); assert!(c2.is_some()); now += RTT / 2; - let s2 = server.process(c2, now).dgram(); + let s2 = server.process(c2.as_ref(), now).dgram(); // Even though there is a 1-RTT packet at the end of the datagram, the // flight should be padded to full size. assert_eq!(s2.as_ref().unwrap().len(), PATH_MTU_V6); @@ -440,7 +440,7 @@ fn coalesce_05rtt() { // packet until authentication completes though. So it saves it. now += RTT / 2; assert_eq!(client.stats().dropped_rx, 0); - mem::drop(client.process(s2, now).dgram()); + mem::drop(client.process(s2.as_ref(), now).dgram()); // This packet will contain an ACK, but we can ignore it. assert_eq!(client.stats().dropped_rx, 0); assert_eq!(client.stats().packets_rx, 3); @@ -457,11 +457,11 @@ fn coalesce_05rtt() { // Allow the handshake to complete. 
now += RTT / 2; - let s3 = server.process(c3, now).dgram(); + let s3 = server.process(c3.as_ref(), now).dgram(); assert!(s3.is_some()); assert_eq!(*server.state(), State::Confirmed); now += RTT / 2; - mem::drop(client.process(s3, now).dgram()); + mem::drop(client.process(s3.as_ref(), now).dgram()); assert_eq!(*client.state(), State::Confirmed); assert_eq!(client.stats().dropped_rx, 0); // No dropped packets. @@ -478,7 +478,7 @@ fn reorder_handshake() { assert!(c1.is_some()); now += RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); assert!(s1.is_some()); // Drop the Initial packet from this. @@ -488,7 +488,7 @@ fn reorder_handshake() { // Pass just the handshake packet in and the client can't handle it yet. // It can only send another Initial packet. now += RTT / 2; - let dgram = client.process(s_hs, now).dgram(); + let dgram = client.process(s_hs.as_ref(), now).dgram(); assertions::assert_initial(dgram.as_ref().unwrap(), false); assert_eq!(client.stats().saved_datagrams, 1); assert_eq!(client.stats().packets_rx, 1); @@ -499,7 +499,7 @@ fn reorder_handshake() { now += AT_LEAST_PTO; let c2 = client.process(None, now).dgram(); now += RTT / 2; - let s2 = server.process(c2, now).dgram(); + let s2 = server.process(c2.as_ref(), now).dgram(); assert!(s2.is_some()); let (s_init, s_hs) = split_datagram(&s2.unwrap()); @@ -507,11 +507,11 @@ fn reorder_handshake() { // Processing the Handshake packet first should save it. now += RTT / 2; - client.process_input(s_hs.unwrap(), now); + client.process_input(&s_hs.unwrap(), now); assert_eq!(client.stats().saved_datagrams, 2); assert_eq!(client.stats().packets_rx, 2); - client.process_input(s_init, now); + client.process_input(&s_init, now); // Each saved packet should now be "received" again. 
assert_eq!(client.stats().packets_rx, 7); maybe_authenticate(&mut client); @@ -521,14 +521,14 @@ fn reorder_handshake() { // Note that though packets were saved and processed very late, // they don't cause the RTT to change. now += RTT / 2; - let s3 = server.process(c3, now).dgram(); + let s3 = server.process(c3.as_ref(), now).dgram(); assert_eq!(*server.state(), State::Confirmed); // Don't check server RTT estimate as it will be inflated due to // it making a guess based on retransmissions when it dropped // the Initial packet number space. now += RTT / 2; - client.process_input(s3.unwrap(), now); + client.process_input(&s3.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(client.paths.rtt(), RTT); } @@ -545,11 +545,11 @@ fn reorder_1rtt() { assert!(c1.is_some()); now += RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); assert!(s1.is_some()); now += RTT / 2; - client.process_input(s1.unwrap(), now); + client.process_input(&s1.unwrap(), now); maybe_authenticate(&mut client); let c2 = client.process(None, now).dgram(); assert!(c2.is_some()); @@ -558,7 +558,7 @@ fn reorder_1rtt() { // Give them to the server before giving it `c2`. for _ in 0..PACKETS { let d = send_something(&mut client, now); - server.process_input(d, now + RTT / 2); + server.process_input(&d, now + RTT / 2); } // The server has now received those packets, and saved them. // The two extra received are Initial + the junk we use for padding. @@ -567,7 +567,7 @@ fn reorder_1rtt() { assert_eq!(server.stats().dropped_rx, 1); now += RTT / 2; - let s2 = server.process(c2, now).dgram(); + let s2 = server.process(c2.as_ref(), now).dgram(); // The server has now received those packets, and saved them. // The two additional are a Handshake and a 1-RTT (w/ NEW_CONNECTION_ID). 
assert_eq!(server.stats().packets_rx, PACKETS * 2 + 4); @@ -577,7 +577,7 @@ fn reorder_1rtt() { assert_eq!(server.paths.rtt(), RTT); now += RTT / 2; - client.process_input(s2.unwrap(), now); + client.process_input(&s2.unwrap(), now); assert_eq!(client.paths.rtt(), RTT); // All the stream data that was sent should now be available. @@ -616,7 +616,7 @@ fn corrupted_initial() { .unwrap(); corrupted[idx] ^= 0x76; let dgram = Datagram::new(d.source(), d.destination(), corrupted); - server.process_input(dgram, now()); + server.process_input(&dgram, now()); // The server should have received two packets, // the first should be dropped, the second saved. assert_eq!(server.stats().packets_rx, 2); @@ -654,7 +654,7 @@ fn extra_initial_hs() { let c_init = client.process(None, now).dgram(); assert!(c_init.is_some()); now += DEFAULT_RTT / 2; - let s_init = server.process(c_init, now).dgram(); + let s_init = server.process(c_init.as_ref(), now).dgram(); assert!(s_init.is_some()); now += DEFAULT_RTT / 2; @@ -666,13 +666,13 @@ fn extra_initial_hs() { // Do that EXTRA_INITIALS times and each time the client will emit // another Initial packet. for _ in 0..=super::super::EXTRA_INITIALS { - let c_init = client.process(undecryptable.clone(), now).dgram(); + let c_init = client.process(undecryptable.as_ref(), now).dgram(); assertions::assert_initial(c_init.as_ref().unwrap(), false); now += DEFAULT_RTT / 10; } // After EXTRA_INITIALS, the client stops sending Initial packets. - let nothing = client.process(undecryptable, now).dgram(); + let nothing = client.process(undecryptable.as_ref(), now).dgram(); assert!(nothing.is_none()); // Until PTO, where another Initial can be used to complete the handshake. 
@@ -680,14 +680,14 @@ fn extra_initial_hs() { let c_init = client.process(None, now).dgram(); assertions::assert_initial(c_init.as_ref().unwrap(), false); now += DEFAULT_RTT / 2; - let s_init = server.process(c_init, now).dgram(); + let s_init = server.process(c_init.as_ref(), now).dgram(); now += DEFAULT_RTT / 2; - client.process_input(s_init.unwrap(), now); + client.process_input(&s_init.unwrap(), now); maybe_authenticate(&mut client); let c_fin = client.process_output(now).dgram(); assert_eq!(*client.state(), State::Connected); now += DEFAULT_RTT / 2; - server.process_input(c_fin.unwrap(), now); + server.process_input(&c_fin.unwrap(), now); assert_eq!(*server.state(), State::Confirmed); } @@ -700,7 +700,7 @@ fn extra_initial_invalid_cid() { let c_init = client.process(None, now).dgram(); assert!(c_init.is_some()); now += DEFAULT_RTT / 2; - let s_init = server.process(c_init, now).dgram(); + let s_init = server.process(c_init.as_ref(), now).dgram(); assert!(s_init.is_some()); now += DEFAULT_RTT / 2; @@ -712,7 +712,7 @@ fn extra_initial_invalid_cid() { assert_ne!(copy[5], 0); // The DCID should be non-zero length. 
copy[6] ^= 0xc4; let dgram_copy = Datagram::new(hs.destination(), hs.source(), copy); - let nothing = client.process(Some(dgram_copy), now).dgram(); + let nothing = client.process(Some(&dgram_copy), now).dgram(); assert!(nothing.is_none()); } @@ -761,7 +761,7 @@ fn anti_amplification() { let c_init = client.process_output(now).dgram(); now += DEFAULT_RTT / 2; - let s_init1 = server.process(c_init, now).dgram().unwrap(); + let s_init1 = server.process(c_init.as_ref(), now).dgram().unwrap(); assert_eq!(s_init1.len(), PATH_MTU_V6); let s_init2 = server.process_output(now).dgram().unwrap(); assert_eq!(s_init2.len(), PATH_MTU_V6); @@ -777,11 +777,11 @@ fn anti_amplification() { assert_ne!(cb, Duration::new(0, 0)); now += DEFAULT_RTT / 2; - client.process_input(s_init1, now); - client.process_input(s_init2, now); + client.process_input(&s_init1, now); + client.process_input(&s_init2, now); let ack_count = client.stats().frame_tx.ack; let frame_count = client.stats().frame_tx.all; - let ack = client.process(Some(s_init3), now).dgram().unwrap(); + let ack = client.process(Some(&s_init3), now).dgram().unwrap(); assert!(!maybe_authenticate(&mut client)); // No need yet. // The client sends a padded datagram, with just ACK for Handshake. @@ -790,16 +790,16 @@ fn anti_amplification() { assert_ne!(ack.len(), PATH_MTU_V6); // Not padded (it includes Handshake). now += DEFAULT_RTT / 2; - let remainder = server.process(Some(ack), now).dgram(); + let remainder = server.process(Some(&ack), now).dgram(); now += DEFAULT_RTT / 2; - client.process_input(remainder.unwrap(), now); + client.process_input(&remainder.unwrap(), now); assert!(maybe_authenticate(&mut client)); // OK, we have all of it. 
let fin = client.process_output(now).dgram(); assert_eq!(*client.state(), State::Connected); now += DEFAULT_RTT / 2; - server.process_input(fin.unwrap(), now); + server.process_input(&fin.unwrap(), now); assert_eq!(*server.state(), State::Confirmed); } @@ -815,7 +815,7 @@ fn garbage_initial() { corrupted.push(initial[initial.len() - 1] ^ 0xb7); corrupted.extend_from_slice(rest.as_ref().map_or(&[], |r| &r[..])); let garbage = Datagram::new(addr(), addr(), corrupted); - assert_eq!(Output::None, server.process(Some(garbage), now())); + assert_eq!(Output::None, server.process(Some(&garbage), now())); } #[test] @@ -825,7 +825,7 @@ fn drop_initial_packet_from_wrong_address() { assert!(out.as_dgram_ref().is_some()); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); let p = out.dgram().unwrap(); @@ -835,7 +835,7 @@ fn drop_initial_packet_from_wrong_address() { &p[..], ); - let out = client.process(Some(dgram), now()); + let out = client.process(Some(&dgram), now()); assert!(out.as_dgram_ref().is_none()); } @@ -846,13 +846,13 @@ fn drop_handshake_packet_from_wrong_address() { assert!(out.as_dgram_ref().is_some()); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); let (s_in, s_hs) = split_datagram(&out.dgram().unwrap()); // Pass the initial packet. 
- mem::drop(client.process(Some(s_in), now()).dgram()); + mem::drop(client.process(Some(&s_in), now()).dgram()); let p = s_hs.unwrap(); let dgram = Datagram::new( @@ -861,7 +861,7 @@ fn drop_handshake_packet_from_wrong_address() { &p[..], ); - let out = client.process(Some(dgram), now()); + let out = client.process(Some(&dgram), now()); assert!(out.as_dgram_ref().is_none()); } @@ -910,8 +910,8 @@ fn ech_retry() { .unwrap(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); let auth_event = ConnectionEvent::EchFallbackAuthenticationNeeded { public_name: String::from(ECH_PUBLIC_NAME), }; @@ -921,7 +921,7 @@ fn ech_retry() { // Tell the server about the error. let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server.state().error(), Some(&ConnectionError::Transport(Error::PeerError(0x100 + 121))) @@ -965,8 +965,8 @@ fn ech_retry_fallback_rejected() { .unwrap(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); let auth_event = ConnectionEvent::EchFallbackAuthenticationNeeded { public_name: String::from(ECH_PUBLIC_NAME), }; @@ -980,7 +980,7 @@ fn ech_retry_fallback_rejected() { // Pass the error on. 
let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server.state().error(), Some(&ConnectionError::Transport(Error::PeerError(298))) @@ -999,13 +999,13 @@ fn bad_min_ack_delay() { let mut client = default_client(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); client.authenticated(AuthenticationStatus::Ok, now()); assert_eq!(client.state().error(), Some(&EXPECTED_ERROR)); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server.state().error(), Some(&ConnectionError::Transport(Error::PeerError( @@ -1025,7 +1025,7 @@ fn only_server_initial() { let client_dgram = client.process_output(now).dgram(); // Now fetch two flights of messages from the server. - let server_dgram1 = server.process(client_dgram, now).dgram(); + let server_dgram1 = server.process(client_dgram.as_ref(), now).dgram(); let server_dgram2 = server.process_output(now + AT_LEAST_PTO).dgram(); // Only pass on the Initial from the first. We should get a Handshake in return. @@ -1035,7 +1035,7 @@ fn only_server_initial() { // The client will not acknowledge the Initial as it discards keys. // It sends a Handshake probe instead, containing just a PING frame. 
assert_eq!(client.stats().frame_tx.ping, 0); - let probe = client.process(Some(initial), now).dgram(); + let probe = client.process(Some(&initial), now).dgram(); assertions::assert_handshake(&probe.unwrap()); assert_eq!(client.stats().dropped_rx, 0); assert_eq!(client.stats().frame_tx.ping, 1); @@ -1047,17 +1047,17 @@ fn only_server_initial() { now += AT_LEAST_PTO; assert_eq!(client.stats().frame_tx.ping, 1); let discarded = client.stats().dropped_rx; - let probe = client.process(Some(initial), now).dgram(); + let probe = client.process(Some(&initial), now).dgram(); assertions::assert_handshake(&probe.unwrap()); assert_eq!(client.stats().frame_tx.ping, 2); assert_eq!(client.stats().dropped_rx, discarded + 1); // Pass the Handshake packet and complete the handshake. - client.process_input(handshake.unwrap(), now); + client.process_input(&handshake.unwrap(), now); maybe_authenticate(&mut client); let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); @@ -1083,25 +1083,25 @@ fn no_extra_probes_after_confirmed() { // Finally, run the handshake. now += AT_LEAST_PTO * 2; let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); // The server should have dropped the Initial keys now, so passing in the Initial // should elicit a retransmit rather than having it completely ignored. 
- let spare_handshake = server.process(Some(replay_initial), now).dgram(); + let spare_handshake = server.process(Some(&replay_initial), now).dgram(); assert!(spare_handshake.is_some()); - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); - let probe = server.process(spare_initial, now).dgram(); + let probe = server.process(spare_initial.as_ref(), now).dgram(); assert!(probe.is_none()); - let probe = client.process(spare_handshake, now).dgram(); + let probe = client.process(spare_handshake.as_ref(), now).dgram(); assert!(probe.is_none()); } @@ -1114,12 +1114,12 @@ fn implicit_rtt_server() { let dgram = client.process_output(now).dgram(); now += RTT / 2; - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); now += RTT / 2; - let dgram = client.process(dgram, now).dgram(); + let dgram = client.process(dgram.as_ref(), now).dgram(); assertions::assert_handshake(dgram.as_ref().unwrap()); now += RTT / 2; - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); // The server doesn't receive any acknowledgments, but it can infer // an RTT estimate from having discarded the Initial packet number space. 
diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index a9643c284f..1b7dac9de9 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -107,15 +107,18 @@ fn asymmetric_idle_timeout() { connect(&mut client, &mut server); let c1 = send_something(&mut client, now()); let c2 = send_something(&mut client, now()); - server.process_input(c2, now()); - server.process_input(c1, now()); + server.process_input(&c2, now()); + server.process_input(&c1, now()); let s1 = send_something(&mut server, now()); let s2 = send_something(&mut server, now()); - client.process_input(s2, now()); - let ack = client.process(Some(s1), now()).dgram(); + client.process_input(&s2, now()); + let ack = client.process(Some(&s1), now()).dgram(); assert!(ack.is_some()); // Now both should have received ACK frames so should be idle. - assert_eq!(server.process(ack, now()), Output::Callback(LOWER_TIMEOUT)); + assert_eq!( + server.process(ack.as_ref(), now()), + Output::Callback(LOWER_TIMEOUT) + ); assert_eq!(client.process(None, now()), Output::Callback(LOWER_TIMEOUT)); } @@ -144,13 +147,13 @@ fn tiny_idle_timeout() { let c1 = send_something(&mut client, now); let c2 = send_something(&mut client, now); now += RTT / 2; - server.process_input(c2, now); - server.process_input(c1, now); + server.process_input(&c2, now); + server.process_input(&c1, now); let s1 = send_something(&mut server, now); let s2 = send_something(&mut server, now); now += RTT / 2; - client.process_input(s2, now); - let ack = client.process(Some(s1), now).dgram(); + client.process_input(&s2, now); + let ack = client.process(Some(&s1), now).dgram(); assert!(ack.is_some()); // The client should be idle now, but with a different timer. @@ -162,7 +165,7 @@ fn tiny_idle_timeout() { // The server should go idle after the ACK, but again with a larger timeout. 
now += RTT / 2; - if let Output::Callback(t) = client.process(ack, now) { + if let Output::Callback(t) = client.process(ack.as_ref(), now) { assert!(t > LOWER_TIMEOUT); } else { panic!("Client not idle"); @@ -257,11 +260,11 @@ fn idle_recv_packet() { // Otherwise, the eventual timeout will be extended (and we're not testing that). now += Duration::from_secs(10); let out = client.process(None, now); - server.process_input(out.dgram().unwrap(), now); + server.process_input(&out.dgram().unwrap(), now); assert_eq!(server.stream_send(stream, b"world").unwrap(), 5); let out = server.process_output(now); assert_ne!(out.as_dgram_ref(), None); - mem::drop(client.process(out.dgram(), now)); + mem::drop(client.process(out.as_dgram_ref(), now)); assert!(matches!(client.state(), State::Confirmed)); // Add a little less than the idle timeout and we're still connected. @@ -288,9 +291,9 @@ fn idle_caching() { // Perform the first round trip, but drop the Initial from the server. // The client then caches the Handshake packet. let dgram = client.process_output(start).dgram(); - let dgram = server.process(dgram, start).dgram(); + let dgram = server.process(dgram.as_ref(), start).dgram(); let (_, handshake) = split_datagram(&dgram.unwrap()); - client.process_input(handshake.unwrap(), start); + client.process_input(&handshake.unwrap(), start); // Perform an exchange and keep the connection alive. // Only allow a packet containing a PING to pass. @@ -303,7 +306,7 @@ fn idle_caching() { // Now let the server process the client PING. This causes the server // to send CRYPTO frames again, so manually extract and discard those. 
let ping_before_s = server.stats().frame_rx.ping; - server.process_input(dgram.unwrap(), middle); + server.process_input(&dgram.unwrap(), middle); assert_eq!(server.stats().frame_rx.ping, ping_before_s + 1); let mut tokens = Vec::new(); server @@ -336,7 +339,7 @@ fn idle_caching() { let (initial, _) = split_datagram(&dgram.unwrap()); let ping_before_c = client.stats().frame_rx.ping; let ack_before = client.stats().frame_rx.ack; - client.process_input(initial, middle); + client.process_input(&initial, middle); assert_eq!(client.stats().frame_rx.ping, ping_before_c + 1); assert_eq!(client.stats().frame_rx.ack, ack_before + 1); @@ -345,11 +348,11 @@ fn idle_caching() { let dgram = server.process_output(end).dgram(); let (initial, _) = split_datagram(&dgram.unwrap()); neqo_common::qwarn!("client ingests initial, finally"); - mem::drop(client.process(Some(initial), end)); + mem::drop(client.process(Some(&initial), end)); maybe_authenticate(&mut client); let dgram = client.process_output(end).dgram(); - let dgram = server.process(dgram, end).dgram(); - client.process_input(dgram.unwrap(), end); + let dgram = server.process(dgram.as_ref(), end).dgram(); + client.process_input(&dgram.unwrap(), end); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); } @@ -378,7 +381,7 @@ fn create_stream_idle_rtt( _ = initiator.stream_send(stream, DEFAULT_STREAM_DATA).unwrap(); let req = initiator.process_output(now).dgram(); now += rtt / 2; - responder.process_input(req.unwrap(), now); + responder.process_input(&req.unwrap(), now); // Reordering two packets from the responder forces the initiator to be idle. 
_ = responder.stream_send(stream, DEFAULT_STREAM_DATA).unwrap(); @@ -387,15 +390,15 @@ fn create_stream_idle_rtt( let resp2 = responder.process_output(now).dgram(); now += rtt / 2; - initiator.process_input(resp2.unwrap(), now); - initiator.process_input(resp1.unwrap(), now); + initiator.process_input(&resp2.unwrap(), now); + initiator.process_input(&resp1.unwrap(), now); let ack = initiator.process_output(now).dgram(); assert!(ack.is_some()); check_idle(initiator, now); // Receiving the ACK should return the responder to idle too. now += rtt / 2; - responder.process_input(ack.unwrap(), now); + responder.process_input(&ack.unwrap(), now); check_idle(responder, now); (now, stream) @@ -431,9 +434,9 @@ fn keep_alive_initiator() { assert_eq!(server.stats().frame_tx.ping, pings_before + 1); // Exchange ack for the PING. - let out = client.process(ping, now).dgram(); - let out = server.process(out, now).dgram(); - assert!(client.process(out, now).dgram().is_none()); + let out = client.process(ping.as_ref(), now).dgram(); + let out = server.process(out.as_ref(), now).dgram(); + assert!(client.process(out.as_ref(), now).dgram().is_none()); // Check that there will be next keep-alive ping after default_timeout() / 2. assert_idle(&mut server, now, default_timeout() / 2); @@ -473,12 +476,12 @@ fn keep_alive_lost() { assert_eq!(server.stats().frame_tx.ping, pings_before2 + 1); // Exchange ack for the PING. 
- let out = client.process(ping, now).dgram(); + let out = client.process(ping.as_ref(), now).dgram(); now += Duration::from_millis(20); - let out = server.process(out, now).dgram(); + let out = server.process(out.as_ref(), now).dgram(); - assert!(client.process(out, now).dgram().is_none()); + assert!(client.process(out.as_ref(), now).dgram().is_none()); // TODO: if we run server.process with current value of now, the server will // return some small timeout for the recovry although it does not have @@ -531,10 +534,10 @@ fn keep_alive_unmark() { fn transfer_force_idle(sender: &mut Connection, receiver: &mut Connection) { let dgram = sender.process_output(now()).dgram(); let chaff = send_something(sender, now()); - receiver.process_input(chaff, now()); - receiver.process_input(dgram.unwrap(), now()); + receiver.process_input(&chaff, now()); + receiver.process_input(&dgram.unwrap(), now()); let ack = receiver.process_output(now()).dgram(); - sender.process_input(ack.unwrap(), now()); + sender.process_input(&ack.unwrap(), now()); } /// Receiving the end of the stream stops keep-alives for that stream. @@ -602,7 +605,7 @@ fn keep_alive_stop_sending() { // The server will have sent RESET_STREAM, which the client will // want to acknowledge, so force that out. let junk = send_something(&mut server, now()); - let ack = client.process(Some(junk), now()).dgram(); + let ack = client.process(Some(&junk), now()).dgram(); assert!(ack.is_some()); // Now the client should be idle. 
@@ -665,7 +668,7 @@ fn keep_alive_uni() { _ = client.stream_send(stream, DEFAULT_STREAM_DATA).unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); server.stream_keep_alive(stream, true).unwrap(); } diff --git a/neqo-transport/src/connection/tests/keys.rs b/neqo-transport/src/connection/tests/keys.rs index 7e04aaf191..a0e3b6596e 100644 --- a/neqo-transport/src/connection/tests/keys.rs +++ b/neqo-transport/src/connection/tests/keys.rs @@ -20,7 +20,7 @@ use test_fixture::{self, now}; fn check_discarded( peer: &mut Connection, - pkt: Datagram, + pkt: &Datagram, response: bool, dropped: usize, dups: usize, @@ -59,11 +59,11 @@ fn discarded_initial_keys() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let init_pkt_s = server.process(init_pkt_c.clone(), now()).dgram(); + let init_pkt_s = server.process(init_pkt_c.as_ref(), now()).dgram(); assert!(init_pkt_s.is_some()); qdebug!("---- client: cert verification"); - let out = client.process(init_pkt_s.clone(), now()).dgram(); + let out = client.process(init_pkt_s.as_ref(), now()).dgram(); assert!(out.is_some()); // The client has received a handshake packet. It will remove the Initial keys. @@ -71,7 +71,7 @@ fn discarded_initial_keys() { // The initial packet should be dropped. The packet contains a Handshake packet as well, which // will be marked as dup. And it will contain padding, which will be "dropped". // The client will generate a Handshake packet here to avoid stalling. - check_discarded(&mut client, init_pkt_s.unwrap(), true, 2, 1); + check_discarded(&mut client, &init_pkt_s.unwrap(), true, 2, 1); assert!(maybe_authenticate(&mut client)); @@ -79,7 +79,7 @@ fn discarded_initial_keys() { // packet from the client. // We will check this by processing init_pkt_c a second time. // The dropped packet is padding. The Initial packet has been mark dup. 
- check_discarded(&mut server, init_pkt_c.clone().unwrap(), false, 1, 1); + check_discarded(&mut server, &init_pkt_c.clone().unwrap(), false, 1, 1); qdebug!("---- client: SH..FIN -> FIN"); let out = client.process(None, now()).dgram(); @@ -87,14 +87,14 @@ fn discarded_initial_keys() { // The server will process the first Handshake packet. // After this the Initial keys will be dropped. - let out = server.process(out, now()).dgram(); + let out = server.process(out.as_ref(), now()).dgram(); assert!(out.is_some()); // Check that the Initial keys are dropped at the server // We will check this by processing init_pkt_c a third time. // The Initial packet has been dropped and padding that follows it. // There is no dups, everything has been dropped. - check_discarded(&mut server, init_pkt_c.unwrap(), false, 1, 0); + check_discarded(&mut server, &init_pkt_c.unwrap(), false, 1, 0); } #[test] @@ -151,7 +151,7 @@ fn key_update_client() { // The previous PTO packet (see above) was dropped, so we should get an ACK here. let dgram = send_and_receive(&mut client, &mut server, now); assert!(dgram.is_some()); - let res = client.process(dgram, now); + let res = client.process(dgram.as_ref(), now); // This is the first packet that the client has received from the server // with new keys, so its read timer just started. if let Output::Callback(t) = res { @@ -190,7 +190,7 @@ fn key_update_consecutive() { assert_eq!(client.get_epochs(), (Some(4), Some(3))); // Have the server process the ACK. - if let Output::Callback(_) = server.process(dgram, now) { + if let Output::Callback(_) = server.process(dgram.as_ref(), now) { assert_eq!(server.get_epochs(), (Some(4), Some(3))); // Now move the server temporarily into the future so that it // rotates the keys. The client stays in the present. @@ -208,7 +208,7 @@ fn key_update_consecutive() { // However, as the server didn't wait long enough to update again, the // client hasn't rotated its keys, so the packet gets dropped. 
- check_discarded(&mut client, dgram, false, 1, 0); + check_discarded(&mut client, &dgram, false, 1, 0); } // Key updates can't be initiated too early. @@ -225,12 +225,12 @@ fn key_update_before_confirmed() { assert_update_blocked(&mut client); // Server Initial + Handshake - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); assert_update_blocked(&mut server); // Client Handshake - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert_update_blocked(&mut client); assert!(maybe_authenticate(&mut client)); @@ -241,12 +241,12 @@ fn key_update_before_confirmed() { assert_update_blocked(&mut client); // Server HANDSHAKE_DONE - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); assert!(server.initiate_key_update().is_ok()); // Client receives HANDSHAKE_DONE - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_none()); assert!(client.initiate_key_update().is_ok()); } @@ -277,13 +277,13 @@ fn exhaust_read_keys() { let dgram = send_something(&mut client, now()); overwrite_invocations(0); - let dgram = server.process(Some(dgram), now()).dgram(); + let dgram = server.process(Some(&dgram), now()).dgram(); assert!(matches!( server.state(), State::Closed(ConnectionError::Transport(Error::KeysExhausted)) )); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert!(matches!( client.state(), State::Draining { diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 9d662da0b8..7dd5e50d13 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -81,7 +81,7 @@ fn rebinding_port() { let dgram = send_something(&mut client, 
now()); let dgram = change_source_port(&dgram); - server.process_input(dgram, now()); + server.process_input(&dgram, now()); // Have the server send something so that it generates a packet. let stream_id = server.stream_create(StreamType::UniDi).unwrap(); server.stream_close_send(stream_id).unwrap(); @@ -103,7 +103,7 @@ fn path_forwarding_attack() { let dgram = send_something(&mut client, now); let dgram = change_path(&dgram, addr_v4()); - server.process_input(dgram, now); + server.process_input(&dgram, now); // The server now probes the new (primary) path. let new_probe = server.process_output(now).dgram().unwrap(); @@ -123,14 +123,14 @@ fn path_forwarding_attack() { // The client should respond to the challenge on the new path. // The server couldn't pad, so the client is also amplification limited. - let new_resp = client.process(Some(new_probe), now).dgram().unwrap(); + let new_resp = client.process(Some(&new_probe), now).dgram().unwrap(); assert_eq!(client.stats().frame_rx.path_challenge, 1); assert_eq!(client.stats().frame_tx.path_challenge, 1); assert_eq!(client.stats().frame_tx.path_response, 1); assert_v4_path(&new_resp, false); // The client also responds to probes on the old path. - let old_resp = client.process(Some(old_probe), now).dgram().unwrap(); + let old_resp = client.process(Some(&old_probe), now).dgram().unwrap(); assert_eq!(client.stats().frame_rx.path_challenge, 2); assert_eq!(client.stats().frame_tx.path_challenge, 1); assert_eq!(client.stats().frame_tx.path_response, 2); @@ -143,12 +143,12 @@ fn path_forwarding_attack() { // Receiving the PATH_RESPONSE from the client opens the amplification // limit enough for the server to respond. // This is padded because it includes PATH_CHALLENGE. 
- let server_data1 = server.process(Some(new_resp), now).dgram().unwrap(); + let server_data1 = server.process(Some(&new_resp), now).dgram().unwrap(); assert_v4_path(&server_data1, true); assert_eq!(server.stats().frame_tx.path_challenge, 3); // The client responds to this probe on the new path. - client.process_input(server_data1, now); + client.process_input(&server_data1, now); let stream_before = client.stats().frame_tx.stream; let padded_resp = send_something(&mut client, now); assert_eq!(stream_before, client.stats().frame_tx.stream); @@ -164,7 +164,7 @@ fn path_forwarding_attack() { assert_v4_path(&server_data2, false); // Until new data is received from the client on the old path. - server.process_input(client_data2, now); + server.process_input(&client_data2, now); // The server sends a probe on the "old" path. let server_data3 = send_something(&mut server, now); assert_v4_path(&server_data3, true); @@ -192,7 +192,7 @@ fn migrate_immediate() { let server_delayed = send_something(&mut server, now); // The server accepts the first packet and migrates (but probes). - let server1 = server.process(Some(client1), now).dgram().unwrap(); + let server1 = server.process(Some(&client1), now).dgram().unwrap(); assert_v4_path(&server1, true); let server2 = server.process_output(now).dgram().unwrap(); assert_v6_path(&server2, true); @@ -200,13 +200,13 @@ fn migrate_immediate() { // The second packet has no real effect, it just elicits an ACK. let all_before = server.stats().frame_tx.all; let ack_before = server.stats().frame_tx.ack; - let server3 = server.process(Some(client2), now).dgram(); + let server3 = server.process(Some(&client2), now).dgram(); assert!(server3.is_some()); assert_eq!(server.stats().frame_tx.all, all_before + 1); assert_eq!(server.stats().frame_tx.ack, ack_before + 1); // Receiving a packet sent by the server before migration doesn't change path. 
- client.process_input(server_delayed, now); + client.process_input(&server_delayed, now); // The client has sent two unpaced packets and this new path has no RTT estimate // so this might be paced. let (client3, _t) = send_something_paced(&mut client, now, true); @@ -293,13 +293,13 @@ fn migrate_same() { assert_v6_path(&probe, true); // Contains PATH_CHALLENGE. assert_eq!(client.stats().frame_tx.path_challenge, 1); - let resp = server.process(Some(probe), now).dgram().unwrap(); + let resp = server.process(Some(&probe), now).dgram().unwrap(); assert_v6_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 0); // Everything continues happily. - client.process_input(resp, now); + client.process_input(&resp, now); let contd = send_something(&mut client, now); assert_v6_path(&contd, false); } @@ -376,7 +376,7 @@ fn migration(mut client: Connection) { assert_eq!(client.stats().frame_tx.path_challenge, 1); let probe_cid = ConnectionId::from(&get_cid(&probe)); - let resp = server.process(Some(probe), now).dgram().unwrap(); + let resp = server.process(Some(&probe), now).dgram().unwrap(); assert_v4_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 1); @@ -385,12 +385,12 @@ fn migration(mut client: Connection) { let client_data = send_something(&mut client, now); assert_ne!(get_cid(&client_data), probe_cid); assert_v6_path(&client_data, false); - server.process_input(client_data, now); + server.process_input(&client_data, now); let server_data = send_something(&mut server, now); assert_v6_path(&server_data, false); // Once the client receives the probe response, it migrates to the new path. 
- client.process_input(resp, now); + client.process_input(&resp, now); assert_eq!(client.stats().frame_rx.path_challenge, 1); let migrate_client = send_something(&mut client, now); assert_v4_path(&migrate_client, true); // Responds to server probe. @@ -399,7 +399,7 @@ fn migration(mut client: Connection) { // However, it will probe the old path again, even though it has just // received a response to its last probe, because it needs to verify // that the migration is genuine. - server.process_input(migrate_client, now); + server.process_input(&migrate_client, now); let stream_before = server.stats().frame_tx.stream; let probe_old_server = send_something(&mut server, now); // This is just the double-check probe; no STREAM frames. @@ -414,8 +414,8 @@ fn migration(mut client: Connection) { assert_eq!(server.stats().frame_tx.stream, stream_before + 1); // The client receives these checks and responds to the probe, but uses the new path. - client.process_input(migrate_server, now); - client.process_input(probe_old_server, now); + client.process_input(&migrate_server, now); + client.process_input(&probe_old_server, now); let old_probe_resp = send_something(&mut client, now); assert_v6_path(&old_probe_resp, true); let client_confirmation = client.process_output(now).dgram().unwrap(); @@ -455,11 +455,11 @@ fn migration_client_empty_cid() { /// Returns the packet containing `HANDSHAKE_DONE` from the server. 
fn fast_handshake(client: &mut Connection, server: &mut Connection) -> Option { let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); assert!(maybe_authenticate(client)); let dgram = client.process_output(now()).dgram(); - server.process(dgram, now()).dgram() + server.process(dgram.as_ref(), now()).dgram() } fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: SocketAddr) { @@ -518,7 +518,7 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So // The client is about to process HANDSHAKE_DONE. // It should start probing toward the server's preferred address. - let probe = client.process(dgram, now()).dgram().unwrap(); + let probe = client.process(dgram.as_ref(), now()).dgram().unwrap(); assert_toward_spa(&probe, true); assert_eq!(client.stats().frame_tx.path_challenge, 1); assert_ne!(client.process_output(now()).callback(), Duration::new(0, 0)); @@ -528,26 +528,26 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So assert_orig_path(&data, false); // The server responds to the probe. - let resp = server.process(Some(probe), now()).dgram().unwrap(); + let resp = server.process(Some(&probe), now()).dgram().unwrap(); assert_from_spa(&resp, true); assert_eq!(server.stats().frame_tx.path_challenge, 1); assert_eq!(server.stats().frame_tx.path_response, 1); // Data continues on the main path for the server. - server.process_input(data, now()); + server.process_input(&data, now()); let data = send_something(&mut server, now()); assert_orig_path(&data, false); // Client gets the probe response back and it migrates. 
- client.process_input(resp, now()); - client.process_input(data, now()); + client.process_input(&resp, now()); + client.process_input(&data, now()); let data = send_something(&mut client, now()); assert_toward_spa(&data, true); assert_eq!(client.stats().frame_tx.stream, 2); assert_eq!(client.stats().frame_tx.path_response, 1); // The server sees the migration and probes the old path. - let probe = server.process(Some(data), now()).dgram().unwrap(); + let probe = server.process(Some(&data), now()).dgram().unwrap(); assert_orig_path(&probe, true); assert_eq!(server.stats().frame_tx.path_challenge, 2); @@ -589,7 +589,7 @@ fn expect_no_migration(client: &mut Connection, server: &mut Connection) { let dgram = fast_handshake(client, server); // The client won't probe now, though it could; it remains idle. - let out = client.process(dgram, now()); + let out = client.process(dgram.as_ref(), now()); assert_ne!(out.callback(), Duration::new(0, 0)); // Data continues on the main path for the client. 
@@ -716,12 +716,12 @@ fn migration_invalid_state() { .is_err()); let close = client.process(None, now()).dgram(); - let dgram = server.process(close, now()).dgram(); + let dgram = server.process(close.as_ref(), now()).dgram(); assert!(server .migrate(Some(addr()), Some(addr()), false, now()) .is_err()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert!(client .migrate(Some(addr()), Some(addr()), false, now()) .is_err()); @@ -822,7 +822,7 @@ fn retire_all() { let new_cid_before = client.stats().frame_rx.new_connection_id; let retire_cid_before = client.stats().frame_tx.retire_connection_id; - client.process_input(ncid, now()); + client.process_input(&ncid, now()); let retire = send_something(&mut client, now()); assert_eq!( client.stats().frame_rx.new_connection_id, @@ -871,17 +871,17 @@ fn retire_prior_to_migration_failure() { let retire_all = send_something(&mut server, now()); server.test_frame_writer = None; - let resp = server.process(Some(probe), now()).dgram().unwrap(); + let resp = server.process(Some(&probe), now()).dgram().unwrap(); assert_v4_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 1); // Have the client receive the NEW_CONNECTION_ID with Retire Prior To. - client.process_input(retire_all, now()); + client.process_input(&retire_all, now()); // This packet contains the probe response, which should be fine, but it // also includes PATH_CHALLENGE for the new path, and the client can't // respond without a connection ID. We treat this as a connection error. 
- client.process_input(resp, now()); + client.process_input(&resp, now()); assert!(matches!( client.state(), State::Closing { @@ -926,15 +926,15 @@ fn retire_prior_to_migration_success() { let retire_all = send_something(&mut server, now()); server.test_frame_writer = None; - let resp = server.process(Some(probe), now()).dgram().unwrap(); + let resp = server.process(Some(&probe), now()).dgram().unwrap(); assert_v4_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 1); // Have the client receive the NEW_CONNECTION_ID with Retire Prior To second. // As this occurs in a very specific order, migration succeeds. - client.process_input(resp, now()); - client.process_input(retire_all, now()); + client.process_input(&resp, now()); + client.process_input(&retire_all, now()); // Migration succeeds and the new path gets the last connection ID. let dgram = send_something(&mut client, now()); diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index 72fe9d1db8..a244efca53 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -168,7 +168,7 @@ fn handshake( while !is_done(a) { _ = maybe_authenticate(a); let had_input = input.is_some(); - let output = a.process(input, now).dgram(); + let output = a.process(input.as_ref(), now).dgram(); assert!(had_input || output.is_some()); input = output; qtrace!("handshake: t += {:?}", rtt / 2); @@ -176,7 +176,7 @@ fn handshake( mem::swap(&mut a, &mut b); } if let Some(d) = input { - a.process_input(d, now); + a.process_input(&d, now); } now } @@ -237,7 +237,7 @@ fn exchange_ticket( server.send_ticket(now, &[]).expect("can send ticket"); let ticket = server.process_output(now).dgram(); assert!(ticket.is_some()); - client.process_input(ticket.unwrap(), now); + client.process_input(&ticket.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); 
get_tokens(client).pop().expect("should have token") } @@ -257,8 +257,8 @@ fn force_idle( let c1 = send_something(client, now); let c2 = send_something(client, now); now += rtt / 2; - server.process_input(c2, now); - server.process_input(c1, now); + server.process_input(&c2, now); + server.process_input(&c1, now); // Now do the same for the server. (The ACK is in the first one.) qtrace!("force_idle: send reordered server packets"); @@ -266,17 +266,20 @@ fn force_idle( let s2 = send_something(server, now); now += rtt / 2; // Delivering s2 first at the client causes it to want to ACK. - client.process_input(s2, now); + client.process_input(&s2, now); // Delivering s1 should not have the client change its mind about the ACK. - let ack = client.process(Some(s1), now).dgram(); - assert!(ack.is_some()); + let ack = client.process(Some(&s1), now); + assert!(ack.as_dgram_ref().is_some()); let idle_timeout = min( client.conn_params.get_idle_timeout(), server.conn_params.get_idle_timeout(), ); assert_eq!(client.process_output(now), Output::Callback(idle_timeout)); now += rtt / 2; - assert_eq!(server.process(ack, now), Output::Callback(idle_timeout)); + assert_eq!( + server.process(ack.as_dgram_ref(), now), + Output::Callback(idle_timeout) + ); now } @@ -359,7 +362,7 @@ fn increase_cwnd( let pkt = sender.process_output(now); match pkt { Output::Datagram(dgram) => { - receiver.process_input(dgram, now + DEFAULT_RTT / 2); + receiver.process_input(&dgram, now + DEFAULT_RTT / 2); } Output::Callback(t) => { if t < DEFAULT_RTT { @@ -376,7 +379,7 @@ fn increase_cwnd( now += DEFAULT_RTT / 2; let ack = receiver.process_output(now).dgram(); now += DEFAULT_RTT / 2; - sender.process_input(ack.unwrap(), now); + sender.process_input(&ack.unwrap(), now); now } @@ -395,7 +398,7 @@ where let in_dgrams = in_dgrams.into_iter(); qdebug!([dest], "ack_bytes {} datagrams", in_dgrams.len()); for dgram in in_dgrams { - dest.process_input(dgram, now); + dest.process_input(&dgram, now); } loop { @@ 
-461,7 +464,7 @@ fn induce_persistent_congestion( // An ACK for the third PTO causes persistent congestion. let s_ack = ack_bytes(server, stream, c_tx_dgrams, now); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); assert_eq!(cwnd(client), CWND_MIN); now } @@ -542,7 +545,7 @@ fn send_and_receive( now: Instant, ) -> Option { let dgram = send_something(sender, now); - receiver.process(Some(dgram), now).dgram() + receiver.process(Some(&dgram), now).dgram() } fn get_tokens(client: &mut Connection) -> Vec { diff --git a/neqo-transport/src/connection/tests/priority.rs b/neqo-transport/src/connection/tests/priority.rs index 2b0b5ecdc2..5fb27b3a4d 100644 --- a/neqo-transport/src/connection/tests/priority.rs +++ b/neqo-transport/src/connection/tests/priority.rs @@ -40,7 +40,7 @@ fn receive_stream() { assert_eq!(MESSAGE.len(), client.stream_send(id, MESSAGE).unwrap()); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server .stream_priority( @@ -82,7 +82,7 @@ fn relative() { .unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); // The "id_normal" stream will get a `NewStream` event, but no data. for e in server.events() { @@ -113,7 +113,7 @@ fn reprioritize() { .unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); // The "id_normal" stream will get a `NewStream` event, but no data. 
for e in server.events() { @@ -132,7 +132,7 @@ fn reprioritize() { ) .unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); for e in server.events() { if let ConnectionEvent::RecvStreamReadable { stream_id } = e { @@ -163,7 +163,7 @@ fn repairing_loss() { let _lost = client.process_output(now).dgram(); for _ in 0..5 { match client.process_output(now) { - Output::Datagram(d) => server.process_input(d, now), + Output::Datagram(d) => server.process_input(&d, now), Output::Callback(delay) => now += delay, Output::None => unreachable!(), } @@ -176,9 +176,9 @@ fn repairing_loss() { let id_normal = client.stream_create(StreamType::UniDi).unwrap(); fill_stream(&mut client, id_normal); - let dgram = client.process(ack, now).dgram(); + let dgram = client.process(ack.as_ref(), now).dgram(); assert_eq!(client.stats().lost, 1); // Client should have noticed the loss. - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); // Only the low priority stream has data as the retransmission of the data from // the lost packet is now more important than new data from the high priority stream. @@ -194,7 +194,7 @@ fn repairing_loss() { // the retransmitted data into a second packet, it will also contain data from the // normal priority stream. let dgram = client.process_output(now).dgram(); - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); assert!(server.events().any( |e| matches!(e, ConnectionEvent::RecvStreamReadable { stream_id } if stream_id == id_normal), )); @@ -209,8 +209,8 @@ fn critical() { // Rather than connect, send stream data in 0.5-RTT. // That allows this to test that critical streams pre-empt most frame types. 
let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let id = server.stream_create(StreamType::UniDi).unwrap(); @@ -237,8 +237,8 @@ fn critical() { assert_eq!(stats_after.handshake_done, 0); // Complete the handshake. - let dgram = client.process(dgram, now).dgram(); - server.process_input(dgram.unwrap(), now); + let dgram = client.process(dgram.as_ref(), now).dgram(); + server.process_input(&dgram.unwrap(), now); // Critical beats everything but HANDSHAKE_DONE. let stats_before = server.stats().frame_tx; @@ -260,8 +260,8 @@ fn important() { // Rather than connect, send stream data in 0.5-RTT. // That allows this to test that important streams pre-empt most frame types. let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let id = server.stream_create(StreamType::UniDi).unwrap(); @@ -289,8 +289,8 @@ fn important() { assert_eq!(stats_after.stream, stats_before.stream + 1); // Complete the handshake. - let dgram = client.process(dgram, now).dgram(); - server.process_input(dgram.unwrap(), now); + let dgram = client.process(dgram.as_ref(), now).dgram(); + server.process_input(&dgram.unwrap(), now); // Important beats everything but flow control. let stats_before = server.stats().frame_tx; @@ -313,8 +313,8 @@ fn high_normal() { // Rather than connect, send stream data in 0.5-RTT. // That allows this to test that important streams pre-empt most frame types. 
let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let id = server.stream_create(StreamType::UniDi).unwrap(); @@ -342,8 +342,8 @@ fn high_normal() { assert_eq!(stats_after.stream, stats_before.stream + 1); // Complete the handshake. - let dgram = client.process(dgram, now).dgram(); - server.process_input(dgram.unwrap(), now); + let dgram = client.process(dgram.as_ref(), now).dgram(); + server.process_input(&dgram.unwrap(), now); // High or Normal doesn't beat NEW_CONNECTION_ID, // but they beat CRYPTO/NEW_TOKEN. diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 44e77cab1e..073f1ca156 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -63,7 +63,7 @@ fn pto_works_basic() { let out = client.process(None, now); let stream_before = server.stats().frame_rx.stream; - server.process_input(out.dgram().unwrap(), now); + server.process_input(&out.dgram().unwrap(), now); assert_eq!(server.stats().frame_rx.stream, stream_before + 2); } @@ -88,7 +88,7 @@ fn pto_works_full_cwnd() { // Both datagrams contain one or more STREAM frames. 
for d in dgrams { let stream_before = server.stats().frame_rx.stream; - server.process_input(d, now); + server.process_input(&d, now); assert!(server.stats().frame_rx.stream > stream_before); } } @@ -114,32 +114,32 @@ fn pto_works_ping() { assert_eq!(cb, GRANULARITY * 2); // Process these by server, skipping pkt0 - let srv0 = server.process(Some(pkt1), now).dgram(); + let srv0 = server.process(Some(&pkt1), now).dgram(); assert!(srv0.is_some()); // ooo, ack client pkt1 now += Duration::from_millis(20); // process pkt2 (immediate ack because last ack was more than an RTT ago; RTT=0) - let srv1 = server.process(Some(pkt2), now).dgram(); + let srv1 = server.process(Some(&pkt2), now).dgram(); assert!(srv1.is_some()); // this is now dropped now += Duration::from_millis(20); // process pkt3 (acked for same reason) - let srv2 = server.process(Some(pkt3), now).dgram(); + let srv2 = server.process(Some(&pkt3), now).dgram(); // ack client pkt 2 & 3 assert!(srv2.is_some()); // client processes ack - let pkt4 = client.process(srv2, now).dgram(); + let pkt4 = client.process(srv2.as_ref(), now).dgram(); // client resends data from pkt0 assert!(pkt4.is_some()); // server sees ooo pkt0 and generates immediate ack - let srv3 = server.process(Some(pkt0), now).dgram(); + let srv3 = server.process(Some(&pkt0), now).dgram(); assert!(srv3.is_some()); // Accept the acknowledgment. - let pkt5 = client.process(srv3, now).dgram(); + let pkt5 = client.process(srv3.as_ref(), now).dgram(); assert!(pkt5.is_none()); now += Duration::from_millis(70); @@ -149,7 +149,7 @@ fn pto_works_ping() { assert_eq!(client.stats().frame_tx.ping, client_pings + 1); let server_pings = server.stats().frame_rx.ping; - server.process_input(pkt6.unwrap(), now); + server.process_input(&pkt6.unwrap(), now); assert_eq!(server.stats().frame_rx.ping, server_pings + 1); } @@ -183,14 +183,14 @@ fn pto_initial() { // Server process the first initial pkt. 
let mut server = default_server(); - let out = server.process(pkt1, now).dgram(); + let out = server.process(pkt1.as_ref(), now).dgram(); assert!(out.is_some()); // Client receives ack for the first initial packet as well a Handshake packet. // After the handshake packet the initial keys and the crypto stream for the initial // packet number space will be discarded. // Here only an ack for the Handshake packet will be sent. - let out = client.process(out, now).dgram(); + let out = client.process(out.as_ref(), now).dgram(); assert!(out.is_some()); // We do not have PTO for the resent initial packet any more, but @@ -216,10 +216,10 @@ fn pto_handshake_complete() { assert_eq!(cb, Duration::from_millis(300)); now += HALF_RTT; - let pkt = server.process(pkt, now).dgram(); + let pkt = server.process(pkt.as_ref(), now).dgram(); now += HALF_RTT; - let pkt = client.process(pkt, now).dgram(); + let pkt = client.process(pkt.as_ref(), now).dgram(); let cb = client.process(None, now).callback(); // The client now has a single RTT estimate (20ms), so @@ -227,7 +227,7 @@ fn pto_handshake_complete() { assert_eq!(cb, HALF_RTT * 6); now += HALF_RTT; - let pkt = server.process(pkt, now).dgram(); + let pkt = server.process(pkt.as_ref(), now).dgram(); assert!(pkt.is_none()); now += HALF_RTT; @@ -276,8 +276,8 @@ fn pto_handshake_complete() { // This should remove the 1-RTT PTO from messing this test up. let server_acks = server.stats().frame_tx.ack; let server_done = server.stats().frame_tx.handshake_done; - server.process_input(pkt3_1rtt.unwrap(), now); - let ack = server.process(pkt1, now).dgram(); + server.process_input(&pkt3_1rtt.unwrap(), now); + let ack = server.process(pkt1.as_ref(), now).dgram(); assert!(ack.is_some()); assert_eq!(server.stats().frame_tx.ack, server_acks + 2); assert_eq!(server.stats().frame_tx.handshake_done, server_done + 1); @@ -287,12 +287,12 @@ fn pto_handshake_complete() { // Note that these don't include 1-RTT packets, because 1-RTT isn't send on PTO. 
let dropped_before1 = server.stats().dropped_rx; let server_frames = server.stats().frame_rx.all; - server.process_input(pkt2.unwrap(), now); + server.process_input(&pkt2.unwrap(), now); assert_eq!(1, server.stats().dropped_rx - dropped_before1); assert_eq!(server.stats().frame_rx.all, server_frames); let dropped_before2 = server.stats().dropped_rx; - server.process_input(pkt3_hs, now); + server.process_input(&pkt3_hs, now); assert_eq!(1, server.stats().dropped_rx - dropped_before2); assert_eq!(server.stats().frame_rx.all, server_frames); @@ -300,7 +300,7 @@ fn pto_handshake_complete() { // Let the client receive the ACK. // It should now be wait to acknowledge the HANDSHAKE_DONE. - let cb = client.process(ack, now).callback(); + let cb = client.process(ack.as_ref(), now).callback(); // The default ack delay is the RTT divided by the default ACK ratio of 4. let expected_ack_delay = HALF_RTT * 2 / 4; assert_eq!(cb, expected_ack_delay); @@ -329,14 +329,14 @@ fn pto_handshake_frames() { now += Duration::from_millis(10); qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let pkt = server.process(pkt.dgram(), now); + let pkt = server.process(pkt.as_dgram_ref(), now); now += Duration::from_millis(10); qdebug!("---- client: cert verification"); - let pkt = client.process(pkt.dgram(), now); + let pkt = client.process(pkt.as_dgram_ref(), now); now += Duration::from_millis(10); - mem::drop(server.process(pkt.dgram(), now)); + mem::drop(server.process(pkt.as_dgram_ref(), now)); now += Duration::from_millis(10); client.authenticated(AuthenticationStatus::Ok, now); @@ -359,7 +359,7 @@ fn pto_handshake_frames() { now += Duration::from_millis(10); let crypto_before = server.stats().frame_rx.crypto; - server.process_input(pkt2.unwrap(), now); + server.process_input(&pkt2.unwrap(), now); assert_eq!(server.stats().frame_rx.crypto, crypto_before + 1); } @@ -381,7 +381,7 @@ fn handshake_ack_pto() { let c1 = client.process(None, now).dgram(); now 
+= RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); assert!(s1.is_some()); let s2 = server.process(None, now).dgram(); assert!(s1.is_some()); @@ -389,8 +389,8 @@ fn handshake_ack_pto() { // Now let the client have the Initial, but drop the first coalesced Handshake packet. now += RTT / 2; let (initial, _) = split_datagram(&s1.unwrap()); - client.process_input(initial, now); - let c2 = client.process(s2, now).dgram(); + client.process_input(&initial, now); + let c2 = client.process(s2.as_ref(), now).dgram(); assert!(c2.is_some()); // This is an ACK. Drop it. let delay = client.process(None, now).callback(); assert_eq!(delay, RTT * 3); @@ -405,7 +405,7 @@ fn handshake_ack_pto() { now += RTT / 2; let ping_before = server.stats().frame_rx.ping; - server.process_input(c3.unwrap(), now); + server.process_input(&c3.unwrap(), now); assert_eq!(server.stats().frame_rx.ping, ping_before + 1); pto_counts[0] = 1; @@ -413,13 +413,13 @@ fn handshake_ack_pto() { // Now complete the handshake as cheaply as possible. let dgram = server.process(None, now).dgram(); - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let dgram = client.process(None, now).dgram(); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); assert_eq!(*server.state(), State::Confirmed); - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(client.stats.borrow().pto_counts, pto_counts); @@ -440,7 +440,7 @@ fn loss_recovery_crash() { assert!(ack.is_some()); // Have the server process the ACK. - let cb = server.process(ack, now).callback(); + let cb = server.process(ack.as_ref(), now).callback(); assert!(cb > Duration::from_secs(0)); // Now we leap into the future. 
The server should regard the first @@ -484,13 +484,13 @@ fn ack_after_pto() { // The client is now after a PTO, but if it receives something // that demands acknowledgment, it will send just the ACK. - let ack = client.process(Some(dgram), now).dgram(); + let ack = client.process(Some(&dgram), now).dgram(); assert!(ack.is_some()); // Make sure that the packet only contained an ACK frame. let all_frames_before = server.stats().frame_rx.all; let ack_before = server.stats().frame_rx.ack; - server.process_input(ack.unwrap(), now); + server.process_input(&ack.unwrap(), now); assert_eq!(server.stats().frame_rx.all, all_frames_before + 1); assert_eq!(server.stats().frame_rx.ack, ack_before + 1); } @@ -511,7 +511,7 @@ fn lost_but_kept_and_lr_timer() { // At t=RTT/2 the server receives the packet and ACKs it. now += RTT / 2; - let ack = server.process(Some(p2), now).dgram(); + let ack = server.process(Some(&p2), now).dgram(); assert!(ack.is_some()); // The client also sends another two packets (p3, p4), again losing the first. let _p3 = send_something(&mut client, now); @@ -520,14 +520,14 @@ fn lost_but_kept_and_lr_timer() { // At t=RTT the client receives the ACK and goes into timed loss recovery. // The client doesn't call p1 lost at this stage, but it will soon. now += RTT / 2; - let res = client.process(ack, now); + let res = client.process(ack.as_ref(), now); // The client should be on a loss recovery timer as p1 is missing. let lr_timer = res.callback(); // Loss recovery timer should be RTT/8, but only check for 0 or >=RTT/2. assert_ne!(lr_timer, Duration::from_secs(0)); assert!(lr_timer < (RTT / 2)); // The server also receives and acknowledges p4, again sending an ACK. - let ack = server.process(Some(p4), now).dgram(); + let ack = server.process(Some(&p4), now).dgram(); assert!(ack.is_some()); // At t=RTT*3/2 the client should declare p1 to be lost. 
@@ -537,7 +537,7 @@ fn lost_but_kept_and_lr_timer() { assert!(res.dgram().is_some()); // When the client processes the ACK, it should engage the // loss recovery timer for p3, not p1 (even though it still tracks p1). - let res = client.process(ack, now); + let res = client.process(ack.as_ref(), now); let lr_timer2 = res.callback(); assert_eq!(lr_timer, lr_timer2); } @@ -560,7 +560,7 @@ fn loss_time_past_largest_acked() { // Start the handshake. let c_in = client.process(None, now).dgram(); now += RTT / 2; - let s_hs1 = server.process(c_in, now).dgram(); + let s_hs1 = server.process(c_in.as_ref(), now).dgram(); // Get some spare server handshake packets for the client to ACK. // This involves a time machine, so be a little cautious. @@ -583,7 +583,7 @@ fn loss_time_past_largest_acked() { // to generate an ack-eliciting packet. For that, we use the Finished message. // Reordering delivery ensures that the later packet is also acknowledged. now += RTT / 2; - let c_hs1 = client.process(s_hs1, now).dgram(); + let c_hs1 = client.process(s_hs1.as_ref(), now).dgram(); assert!(c_hs1.is_some()); // This comes first, so it's useless. maybe_authenticate(&mut client); let c_hs2 = client.process(None, now).dgram(); @@ -592,17 +592,17 @@ fn loss_time_past_largest_acked() { // The we need the outstanding packet to be sent after the // application data packet, so space these out a tiny bit. let _p1 = send_something(&mut client, now + INCR); - let c_hs3 = client.process(s_hs2, now + (INCR * 2)).dgram(); + let c_hs3 = client.process(s_hs2.as_ref(), now + (INCR * 2)).dgram(); assert!(c_hs3.is_some()); // This will be left outstanding. - let c_hs4 = client.process(s_hs3, now + (INCR * 3)).dgram(); + let c_hs4 = client.process(s_hs3.as_ref(), now + (INCR * 3)).dgram(); assert!(c_hs4.is_some()); // This will be acknowledged. // Process c_hs2 and c_hs4, but skip c_hs3. // Then get an ACK for the client. now += RTT / 2; // Deliver c_hs4 first, but don't generate a packet. 
- server.process_input(c_hs4.unwrap(), now); - let s_ack = server.process(c_hs2, now).dgram(); + server.process_input(&c_hs4.unwrap(), now); + let s_ack = server.process(c_hs2.as_ref(), now).dgram(); assert!(s_ack.is_some()); // This includes an ACK, but it also includes HANDSHAKE_DONE, // which we need to remove because that will cause the Handshake loss @@ -611,7 +611,7 @@ fn loss_time_past_largest_acked() { // Now the client should start its loss recovery timer based on the ACK. now += RTT / 2; - let c_ack = client.process(Some(s_hs_ack), now).dgram(); + let c_ack = client.process(Some(&s_hs_ack), now).dgram(); assert!(c_ack.is_none()); // The client should now have the loss recovery timer active. let lr_time = client.process(None, now).callback(); @@ -636,12 +636,12 @@ fn trickle(sender: &mut Connection, receiver: &mut Connection, mut count: usize, while count > 0 { qdebug!("trickle: remaining={}", count); assert_eq!(sender.stream_send(id, &[9]).unwrap(), 1); - let dgram = sender.process(maybe_ack, now).dgram(); + let dgram = sender.process(maybe_ack.as_ref(), now).dgram(); - maybe_ack = receiver.process(dgram, now).dgram(); + maybe_ack = receiver.process(dgram.as_ref(), now).dgram(); count -= usize::from(maybe_ack.is_some()); } - sender.process_input(maybe_ack.unwrap(), now); + sender.process_input(&maybe_ack.unwrap(), now); } /// Ensure that a PING frame is sent with ACK sometimes. @@ -757,7 +757,7 @@ fn fast_pto() { let dgram = client.process(None, now).dgram(); let stream_before = server.stats().frame_rx.stream; - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); assert_eq!(server.stats().frame_rx.stream, stream_before + 1); } @@ -797,8 +797,8 @@ fn fast_pto_persistent_congestion() { // Now acknowledge the tail packet and enter persistent congestion. 
now += DEFAULT_RTT / 2; - let ack = server.process(Some(dgram), now).dgram(); + let ack = server.process(Some(&dgram), now).dgram(); now += DEFAULT_RTT / 2; - client.process_input(ack.unwrap(), now); + client.process_input(&ack.unwrap(), now); assert_eq!(cwnd(&client), CWND_MIN); } diff --git a/neqo-transport/src/connection/tests/resumption.rs b/neqo-transport/src/connection/tests/resumption.rs index 0c34f3448d..fa56f6eae2 100644 --- a/neqo-transport/src/connection/tests/resumption.rs +++ b/neqo-transport/src/connection/tests/resumption.rs @@ -55,7 +55,7 @@ fn remember_smoothed_rtt() { let ticket = server.process_output(now).dgram(); assert!(ticket.is_some()); now += RTT1 / 2; - client.process_input(ticket.unwrap(), now); + client.process_input(&ticket.unwrap(), now); let token = get_tokens(&mut client).pop().unwrap(); let mut client = default_client(); @@ -122,7 +122,7 @@ fn two_tickets_on_timer() { let pkt = send_something(&mut server, now()); // process() will return an ack first - assert!(client.process(Some(pkt), now()).dgram().is_some()); + assert!(client.process(Some(&pkt), now()).dgram().is_some()); // We do not have a ResumptionToken event yet, because NEW_TOKEN was not sent. assert_eq!(get_tokens(&mut client).len(), 0); @@ -163,7 +163,7 @@ fn two_tickets_with_new_token() { server.send_ticket(now(), &[]).expect("send ticket2"); let pkt = send_something(&mut server, now()); - client.process_input(pkt, now()); + client.process_input(&pkt, now()); let mut all_tokens = get_tokens(&mut client); assert_eq!(all_tokens.len(), 2); let token1 = all_tokens.pop().unwrap(); @@ -184,7 +184,7 @@ fn take_token() { server.send_ticket(now(), &[]).unwrap(); let dgram = server.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); // There should be no ResumptionToken event here. 
let tokens = get_tokens(&mut client); diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index 5cc5e9594d..d83ca07b61 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -34,10 +34,10 @@ fn stream_create() { let out = client.process(None, now()); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.process(out.as_dgram_ref(), now())); assert!(maybe_authenticate(&mut client)); let out = client.process(None, now()); @@ -47,7 +47,7 @@ fn stream_create() { assert_eq!(client.stream_create(StreamType::BiDi).unwrap(), 0); assert_eq!(client.stream_create(StreamType::BiDi).unwrap(), 4); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); // server now in State::Connected assert_eq!(server.stream_create(StreamType::UniDi).unwrap(), 3); assert_eq!(server.stream_create(StreamType::UniDi).unwrap(), 7); @@ -86,7 +86,7 @@ fn transfer() { qdebug!("---- server receives"); for d in datagrams { - let out = server.process(Some(d), now()); + let out = server.process(Some(&d), now()); // With an RTT of zero, the server will acknowledge every packet immediately. 
assert!(out.as_dgram_ref().is_some()); qdebug!("Output={:0x?}", out.as_dgram_ref()); @@ -157,7 +157,7 @@ fn sendorder_test(order_of_sendorder: &[Option]) { qdebug!("---- server receives"); for d in datagrams { - let out = server.process(Some(d), now()); + let out = server.process(Some(&d), now()); qdebug!("Output={:0x?}", out.as_dgram_ref()); } assert_eq!(*server.state(), State::Confirmed); @@ -324,11 +324,11 @@ fn report_fin_when_stream_closed_wo_data() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); server.stream_close_send(stream_id).unwrap(); let out = server.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. }); assert!(client.events().any(stream_readable)); } @@ -336,9 +336,9 @@ fn report_fin_when_stream_closed_wo_data() { fn exchange_data(client: &mut Connection, server: &mut Connection) { let mut input = None; loop { - let out = client.process(input, now()).dgram(); + let out = client.process(input.as_ref(), now()).dgram(); let c_done = out.is_none(); - let out = server.process(out, now()).dgram(); + let out = server.process(out.as_ref(), now()).dgram(); if out.is_none() && c_done { break; } @@ -380,7 +380,7 @@ fn sending_max_data() { assert!(!fin); let out = server.process(None, now()).dgram(); - client.process_input(out.unwrap(), now()); + client.process_input(&out.unwrap(), now()); assert_eq!( client @@ -521,7 +521,7 @@ fn do_not_accept_data_after_stop_sending() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + 
mem::drop(server.process(out.as_dgram_ref(), now())); let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. }); assert!(server.events().any(stream_readable)); @@ -538,10 +538,10 @@ fn do_not_accept_data_after_stop_sending() { // Receive the second data frame. The frame should be ignored and // DataReadable events shouldn't be posted. - let out = server.process(out_second_data_frame.dgram(), now()); + let out = server.process(out_second_data_frame.as_dgram_ref(), now()); assert!(!server.events().any(stream_readable)); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); assert_eq!( Err(Error::FinalSizeError), client.stream_send(stream_id, &[0x00]) @@ -559,7 +559,7 @@ fn simultaneous_stop_sending_and_reset() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - let ack = server.process(out.dgram(), now()).dgram(); + let ack = server.process(out.as_dgram_ref(), now()).dgram(); let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { stream_id: id } if id == stream_id); @@ -568,23 +568,23 @@ fn simultaneous_stop_sending_and_reset() { // The client resets the stream. The packet with reset should arrive after the server // has already requested stop_sending. client.stream_reset_send(stream_id, 0).unwrap(); - let out_reset_frame = client.process(ack, now()).dgram(); + let out_reset_frame = client.process(ack.as_ref(), now()).dgram(); // Send something out of order to force the server to generate an // acknowledgment at the next opportunity. let force_ack = send_something(&mut client, now()); - server.process_input(force_ack, now()); + server.process_input(&force_ack, now()); // Call stop sending. server.stream_stop_sending(stream_id, 0).unwrap(); // Receive the second data frame. The frame should be ignored and // DataReadable events shouldn't be posted. 
- let ack = server.process(out_reset_frame, now()).dgram(); + let ack = server.process(out_reset_frame.as_ref(), now()).dgram(); assert!(ack.is_some()); assert!(!server.events().any(stream_readable)); // The client gets the STOP_SENDING frame. - client.process_input(ack.unwrap(), now()); + client.process_input(&ack.unwrap(), now()); assert_eq!( Err(Error::InvalidStreamId), client.stream_send(stream_id, &[0x00]) @@ -600,13 +600,13 @@ fn client_fin_reorder() { let client_hs = client.process(None, now()); assert!(client_hs.as_dgram_ref().is_some()); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // ServerHello, etc... - let client_ack = client.process(server_hs.dgram(), now()); + let client_ack = client.process(server_hs.as_dgram_ref(), now()); assert!(client_ack.as_dgram_ref().is_some()); - let server_out = server.process(client_ack.dgram(), now()); + let server_out = server.process(client_ack.as_dgram_ref(), now()); assert!(server_out.as_dgram_ref().is_none()); assert!(maybe_authenticate(&mut client)); @@ -621,11 +621,11 @@ fn client_fin_reorder() { assert!(client_stream_data.as_dgram_ref().is_some()); // Now stream data gets before client_fin - let server_out = server.process(client_stream_data.dgram(), now()); + let server_out = server.process(client_stream_data.as_dgram_ref(), now()); assert!(server_out.as_dgram_ref().is_none()); // the packet will be discarded assert_eq!(*server.state(), State::Handshaking); - let server_out = server.process(client_fin.dgram(), now()); + let server_out = server.process(client_fin.as_dgram_ref(), now()); assert!(server_out.as_dgram_ref().is_some()); } @@ -641,7 +641,7 @@ fn after_fin_is_read_conn_events_for_stream_should_be_removed() { let out = server.process(None, now()).dgram(); assert!(out.is_some()); - mem::drop(client.process(out, now())); + mem::drop(client.process(out.as_ref(), now())); // read from 
the stream before checking connection events. let mut buf = vec![0; 4000]; @@ -666,7 +666,7 @@ fn after_stream_stop_sending_is_called_conn_events_for_stream_should_be_removed( let out = server.process(None, now()).dgram(); assert!(out.is_some()); - mem::drop(client.process(out, now())); + mem::drop(client.process(out.as_ref(), now())); // send stop seending. client @@ -695,7 +695,7 @@ fn stream_data_blocked_generates_max_stream_data() { assert!(dgram.is_some()); // Consume the data. - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); let mut buf = [0; 10]; let (count, end) = client.stream_recv(stream_id, &mut buf[..]).unwrap(); assert_eq!(count, DEFAULT_STREAM_DATA.len()); @@ -712,14 +712,14 @@ fn stream_data_blocked_generates_max_stream_data() { assert!(dgram.is_some()); let sdb_before = client.stats().frame_rx.stream_data_blocked; - let dgram = client.process(dgram, now).dgram(); + let dgram = client.process(dgram.as_ref(), now).dgram(); assert_eq!(client.stats().frame_rx.stream_data_blocked, sdb_before + 1); assert!(dgram.is_some()); // Client should have sent a MAX_STREAM_DATA frame with just a small increase // on the default window size. let msd_before = server.stats().frame_rx.max_stream_data; - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); assert_eq!(server.stats().frame_rx.max_stream_data, msd_before + 1); // Test that the entirety of the receive buffer is available now. @@ -754,19 +754,19 @@ fn max_streams_after_bidi_closed() { let dgram = client.process(None, now()).dgram(); // Now handle the stream and send an incomplete response. - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); server.stream_send(stream_id, RESPONSE).unwrap(); let dgram = server.process_output(now()).dgram(); // The server shouldn't have released more stream credit. 
- client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let e = client.stream_create(StreamType::BiDi).unwrap_err(); assert!(matches!(e, Error::StreamLimitError)); // Closing the stream isn't enough. server.stream_close_send(stream_id).unwrap(); let dgram = server.process_output(now()).dgram(); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert!(client.stream_create(StreamType::BiDi).is_err()); // The server needs to see an acknowledgment from the client for its @@ -780,12 +780,12 @@ fn max_streams_after_bidi_closed() { // We need an ACK from the client now, but that isn't guaranteed, // so give the client one more packet just in case. let dgram = send_something(&mut server, now()); - client.process_input(dgram, now()); + client.process_input(&dgram, now()); // Now get the client to send the ACK and have the server handle that. let dgram = send_something(&mut client, now()); - let dgram = server.process(Some(dgram), now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(Some(&dgram), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); assert!(client.stream_create(StreamType::BiDi).is_ok()); assert!(client.stream_create(StreamType::BiDi).is_err()); } @@ -800,7 +800,7 @@ fn no_dupdata_readable_events() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); // We have a data_readable event. let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. }); @@ -810,7 +810,7 @@ fn no_dupdata_readable_events() { // therefore there should not be a new DataReadable event. 
client.stream_send(stream_id, &[0x00]).unwrap(); let out_second_data_frame = client.process(None, now()); - mem::drop(server.process(out_second_data_frame.dgram(), now())); + mem::drop(server.process(out_second_data_frame.as_dgram_ref(), now())); assert!(!server.events().any(stream_readable)); // One more frame with a fin will not produce a new DataReadable event, because the @@ -818,7 +818,7 @@ fn no_dupdata_readable_events() { client.stream_send(stream_id, &[0x00]).unwrap(); client.stream_close_send(stream_id).unwrap(); let out_third_data_frame = client.process(None, now()); - mem::drop(server.process(out_third_data_frame.dgram(), now())); + mem::drop(server.process(out_third_data_frame.as_dgram_ref(), now())); assert!(!server.events().any(stream_readable)); } @@ -832,7 +832,7 @@ fn no_dupdata_readable_events_empty_last_frame() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); // We have a data_readable event. let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. }); @@ -842,7 +842,7 @@ fn no_dupdata_readable_events_empty_last_frame() { // the previous stream data has not been read yet. client.stream_close_send(stream_id).unwrap(); let out_second_data_frame = client.process(None, now()); - mem::drop(server.process(out_second_data_frame.dgram(), now())); + mem::drop(server.process(out_second_data_frame.as_dgram_ref(), now())); assert!(!server.events().any(stream_readable)); } @@ -864,14 +864,14 @@ fn change_flow_control(stream_type: StreamType, new_fc: u64) { // Send the stream to the client. let out = server.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); // change max_stream_data for stream_id. 
client.set_stream_max_data(stream_id, new_fc).unwrap(); // server should receive a MAX_SREAM_DATA frame if the flow control window is updated. let out2 = client.process(None, now()); - let out3 = server.process(out2.dgram(), now()); + let out3 = server.process(out2.as_dgram_ref(), now()); let expected = usize::from(RECV_BUFFER_START < new_fc); assert_eq!(server.stats().frame_rx.max_stream_data, expected); @@ -884,9 +884,9 @@ fn change_flow_control(stream_type: StreamType, new_fc: u64) { } // Exchange packets so that client gets all data. - let out4 = client.process(out3.dgram(), now()); - let out5 = server.process(out4.dgram(), now()); - mem::drop(client.process(out5.dgram(), now())); + let out4 = client.process(out3.as_dgram_ref(), now()); + let out5 = server.process(out4.as_dgram_ref(), now()); + mem::drop(client.process(out5.as_dgram_ref(), now())); // read all data by client let mut buf = [0x0; 10000]; @@ -894,7 +894,7 @@ fn change_flow_control(stream_type: StreamType, new_fc: u64) { assert_eq!(u64::try_from(read).unwrap(), max(RECV_BUFFER_START, new_fc)); let out4 = client.process(None, now()); - mem::drop(server.process(out4.dgram(), now())); + mem::drop(server.process(out4.as_dgram_ref(), now())); let written3 = server.stream_send(stream_id, &[0x0; 10000]).unwrap(); assert_eq!(u64::try_from(written3).unwrap(), new_fc); @@ -949,12 +949,12 @@ fn session_flow_control_stop_sending_state_recv() { // The server sends STOP_SENDING -> the client sends RESET -> the server // sends MAX_DATA. let out = server.process(None, now()).dgram(); - let out = client.process(out, now()).dgram(); + let out = client.process(out.as_ref(), now()).dgram(); // the client is still limited. 
let stream_id2 = client.stream_create(StreamType::UniDi).unwrap(); assert_eq!(client.stream_avail_send_space(stream_id2).unwrap(), 0); - let out = server.process(out, now()).dgram(); - client.process_input(out.unwrap(), now()); + let out = server.process(out.as_ref(), now()).dgram(); + client.process_input(&out.unwrap(), now()); assert_eq!( client.stream_avail_send_space(stream_id2).unwrap(), SMALL_MAX_DATA @@ -991,7 +991,7 @@ fn session_flow_control_stop_sending_state_size_known() { client.stream_close_send(stream_id).unwrap(); let out2 = client.process(None, now()).dgram(); - server.process_input(out2.unwrap(), now()); + server.process_input(&out2.unwrap(), now()); server .stream_stop_sending(stream_id, Error::NoError.code()) @@ -1000,8 +1000,8 @@ fn session_flow_control_stop_sending_state_size_known() { // In this case the final size is known when stream_stop_sending is called // and the server releases flow control immediately and sends STOP_SENDING and // MAX_DATA in the same packet. - let out = server.process(out1, now()).dgram(); - client.process_input(out.unwrap(), now()); + let out = server.process(out1.as_ref(), now()).dgram(); + client.process_input(&out.unwrap(), now()); // The flow control should have been updated and the client can again send // SMALL_MAX_DATA. 
@@ -1123,10 +1123,10 @@ fn connect_w_different_limit(bidi_limit: u64, unidi_limit: u64) { .max_streams(StreamType::BiDi, bidi_limit) .max_streams(StreamType::UniDi, unidi_limit), ); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.process(out.as_dgram_ref(), now())); assert!(maybe_authenticate(&mut client)); diff --git a/neqo-transport/src/connection/tests/vn.rs b/neqo-transport/src/connection/tests/vn.rs index 6f8bd15614..4c00253642 100644 --- a/neqo-transport/src/connection/tests/vn.rs +++ b/neqo-transport/src/connection/tests/vn.rs @@ -30,7 +30,7 @@ fn unknown_version() { let mut unknown_version_packet = vec![0x80, 0x1a, 0x1a, 0x1a, 0x1a]; unknown_version_packet.resize(1200, 0x0); mem::drop(client.process( - Some(Datagram::new(addr(), addr(), unknown_version_packet)), + Some(&Datagram::new(addr(), addr(), unknown_version_packet)), now(), )); assert_eq!(1, client.stats().dropped_rx); @@ -45,7 +45,7 @@ fn server_receive_unknown_first_packet() { assert_eq!( server.process( - Some(Datagram::new(addr(), addr(), unknown_version_packet,)), + Some(&Datagram::new(addr(), addr(), unknown_version_packet,)), now(), ), Output::None @@ -87,7 +87,7 @@ fn version_negotiation_current_version() { ); let dgram = Datagram::new(addr(), addr(), vn); - let delay = client.process(Some(dgram), now()).callback(); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -106,7 +106,7 @@ fn version_negotiation_version0() { let vn = create_vn(&initial_pkt, &[0, 0x1a1a_1a1a]); let dgram = Datagram::new(addr(), addr(), vn); - let delay = client.process(Some(dgram), now()).callback(); + let delay = client.process(Some(&dgram), 
now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -125,7 +125,7 @@ fn version_negotiation_only_reserved() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a]); let dgram = Datagram::new(addr(), addr(), vn); - assert_eq!(client.process(Some(dgram), now()), Output::None); + assert_eq!(client.process(Some(&dgram), now()), Output::None); match client.state() { State::Closed(err) => { assert_eq!(*err, ConnectionError::Transport(Error::VersionNegotiation)); @@ -147,7 +147,7 @@ fn version_negotiation_corrupted() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a]); let dgram = Datagram::new(addr(), addr(), &vn[..vn.len() - 1]); - let delay = client.process(Some(dgram), now()).callback(); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -166,7 +166,7 @@ fn version_negotiation_empty() { let vn = create_vn(&initial_pkt, &[]); let dgram = Datagram::new(addr(), addr(), vn); - let delay = client.process(Some(dgram), now()).callback(); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -184,7 +184,7 @@ fn version_negotiation_not_supported() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a, 0xff00_0001]); let dgram = Datagram::new(addr(), addr(), vn); - assert_eq!(client.process(Some(dgram), now()), Output::None); + assert_eq!(client.process(Some(&dgram), now()), Output::None); match client.state() { State::Closed(err) => { assert_eq!(*err, ConnectionError::Transport(Error::VersionNegotiation)); @@ -207,7 +207,7 @@ fn version_negotiation_bad_cid() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a, 0xff00_0001]); let dgram = Datagram::new(addr(), addr(), vn); - let delay 
= client.process(Some(dgram), now()).callback(); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -244,11 +244,11 @@ fn compatible_upgrade_large_initial() { // Each should elicit a Version 1 ACK from the server. let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); // The following uses the Version from *outside* this crate. assertions::assert_version(dgram.as_ref().unwrap(), Version::Version1.wire_version()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); connect(&mut client, &mut server); assert_eq!(client.version(), Version::Version2); @@ -312,7 +312,7 @@ fn version_negotiation_downgrade() { let initial = client.process_output(now()).dgram().unwrap(); let vn = create_vn(&initial, &[DOWNGRADE.wire_version()]); let dgram = Datagram::new(addr(), addr(), vn); - client.process_input(dgram, now()); + client.process_input(&dgram, now()); connect_fail( &mut client, @@ -332,7 +332,7 @@ fn invalid_server_version() { new_server(ConnectionParameters::default().versions(Version::Version2, Version::all())); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); // One packet received. assert_eq!(server.stats().packets_rx, 1); @@ -463,7 +463,7 @@ fn compatible_upgrade_0rtt_rejected() { let initial = send_something(&mut client, now()); assertions::assert_version(&initial, Version::Version1.wire_version()); assertions::assert_coalesced_0rtt(&initial); - server.process_input(initial, now()); + server.process_input(&initial, now()); assert!(!server .events() .any(|e| matches!(e, ConnectionEvent::NewStream { .. 
}))); @@ -471,9 +471,9 @@ fn compatible_upgrade_0rtt_rejected() { // Finalize the connection. Don't use connect() because it uses // maybe_authenticate() too liberally and that eats the events we want to check. let dgram = server.process_output(now()).dgram(); // ServerHello flight - let dgram = client.process(dgram, now()).dgram(); // Client Finished (note: no authentication) - let dgram = server.process(dgram, now()).dgram(); // HANDSHAKE_DONE - client.process_input(dgram.unwrap(), now()); + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Client Finished (note: no authentication) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // HANDSHAKE_DONE + client.process_input(&dgram.unwrap(), now()); assert!(matches!(client.state(), State::Confirmed)); assert!(matches!(server.state(), State::Confirmed)); diff --git a/neqo-transport/src/connection/tests/zerortt.rs b/neqo-transport/src/connection/tests/zerortt.rs index 8c8a980c0c..f896b30730 100644 --- a/neqo-transport/src/connection/tests/zerortt.rs +++ b/neqo-transport/src/connection/tests/zerortt.rs @@ -62,12 +62,12 @@ fn zero_rtt_send_recv() { // 0-RTT packets on their own shouldn't be padded to 1200. assert!(client_0rtt.as_dgram_ref().unwrap().len() < 1200); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // ServerHello, etc... 
let all_frames = server.stats().frame_tx.all; let ack_frames = server.stats().frame_tx.ack; - let server_process_0rtt = server.process(client_0rtt.dgram(), now()); + let server_process_0rtt = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_process_0rtt.as_dgram_ref().is_some()); assert_eq!(server.stats().frame_tx.all, all_frames + 1); assert_eq!(server.stats().frame_tx.ack, ack_frames + 1); @@ -104,7 +104,7 @@ fn zero_rtt_send_coalesce() { assertions::assert_coalesced_0rtt(&client_0rtt.as_dgram_ref().unwrap()[..]); - let server_hs = server.process(client_0rtt.dgram(), now()); + let server_hs = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // Should produce ServerHello etc... let server_stream_id = server @@ -161,9 +161,9 @@ fn zero_rtt_send_reject() { let client_0rtt = client.process(None, now()); assert!(client_0rtt.as_dgram_ref().is_some()); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // Should produce ServerHello etc... - let server_ignored = server.process(client_0rtt.dgram(), now()); + let server_ignored = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_ignored.as_dgram_ref().is_none()); // The server shouldn't receive that 0-RTT data. @@ -171,14 +171,14 @@ fn zero_rtt_send_reject() { assert!(!server.events().any(recvd_stream_evt)); // Client should get a rejection. 
- let client_fin = client.process(server_hs.dgram(), now()); + let client_fin = client.process(server_hs.as_dgram_ref(), now()); let recvd_0rtt_reject = |e| e == ConnectionEvent::ZeroRttRejected; assert!(client.events().any(recvd_0rtt_reject)); // Server consume client_fin - let server_ack = server.process(client_fin.dgram(), now()); + let server_ack = server.process(client_fin.as_dgram_ref(), now()); assert!(server_ack.as_dgram_ref().is_some()); - let client_out = client.process(server_ack.dgram(), now()); + let client_out = client.process(server_ack.as_dgram_ref(), now()); assert!(client_out.as_dgram_ref().is_none()); // ...and the client stream should be gone. @@ -194,7 +194,7 @@ fn zero_rtt_send_reject() { assert!(client_after_reject.is_some()); // The server should receive new stream - server.process_input(client_after_reject.unwrap(), now()); + server.process_input(&client_after_reject.unwrap(), now()); assert!(server.events().any(recvd_stream_evt)); } @@ -233,8 +233,8 @@ fn zero_rtt_update_flow_control() { assert!(!client.stream_send_atomic(bidi_stream, MESSAGE).unwrap()); // Now get the server transport parameters. - let server_hs = server.process(client_hs, now()).dgram(); - client.process_input(server_hs.unwrap(), now()); + let server_hs = server.process(client_hs.as_ref(), now()).dgram(); + client.process_input(&server_hs.unwrap(), now()); // The streams should report a writeable event. 
let mut uni_stream_event = false; diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 506e90ad14..68334abfcd 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -259,7 +259,7 @@ impl Server { fn process_connection( &mut self, c: StateRef, - dgram: Option, + dgram: Option<&Datagram>, now: Instant, ) -> Option { qtrace!([self], "Process connection {:?}", c); @@ -310,7 +310,7 @@ impl Server { fn handle_initial( &mut self, initial: InitialDetails, - dgram: Datagram, + dgram: &Datagram, now: Instant, ) -> Option { qdebug!([self], "Handle initial"); @@ -364,7 +364,7 @@ impl Server { fn connection_attempt( &mut self, initial: InitialDetails, - dgram: Datagram, + dgram: &Datagram, orig_dcid: Option, now: Instant, ) -> Option { @@ -465,7 +465,7 @@ impl Server { &mut self, attempt_key: AttemptKey, initial: InitialDetails, - dgram: Datagram, + dgram: &Datagram, orig_dcid: Option, now: Instant, ) -> Option { @@ -511,7 +511,7 @@ impl Server { /// receives a connection ID from the server. fn handle_0rtt( &mut self, - dgram: Datagram, + dgram: &Datagram, dcid: ConnectionId, now: Instant, ) -> Option { @@ -533,7 +533,7 @@ impl Server { } } - fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option { + fn process_input(&mut self, dgram: &Datagram, now: Instant) -> Option { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. 
@@ -629,7 +629,7 @@ impl Server { } } - pub fn process(&mut self, dgram: Option, now: Instant) -> Output { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { let out = if let Some(d) = dgram { self.process_input(d, now) } else { diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index 3bc97a0528..1a414df5b0 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -63,28 +63,28 @@ pub fn connect(client: &mut Connection, server: &mut Server) -> ActiveConnection server.set_validation(ValidateAddress::Never); assert_eq!(*client.state(), State::Init); - let dgram = client.process(None, now()).dgram(); // ClientHello - assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // ServerHello... - assert!(dgram.is_some()); + let out = client.process(None, now()); // ClientHello + assert!(out.as_dgram_ref().is_some()); + let out = server.process(out.as_dgram_ref(), now()); // ServerHello... + assert!(out.as_dgram_ref().is_some()); // Ingest the server Certificate. - let dgram = client.process(dgram, now()).dgram(); - assert!(dgram.is_some()); // This should just be an ACK. - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_none()); // So the server should have nothing to say. + let out = client.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // This should just be an ACK. + let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_none()); // So the server should have nothing to say. // Now mark the server as authenticated. 
client.authenticated(AuthenticationStatus::Ok, now()); - let dgram = client.process(None, now()).dgram(); - assert!(dgram.is_some()); + let out = client.process(None, now()); + assert!(out.as_dgram_ref().is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_some()); // ACK + HANDSHAKE_DONE + NST + let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // ACK + HANDSHAKE_DONE + NST // Have the client process the HANDSHAKE_DONE. - let dgram = client.process(dgram, now()).dgram(); - assert!(dgram.is_none()); + let out = client.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_none()); assert_eq!(*client.state(), State::Confirmed); connected_server(server) @@ -225,14 +225,14 @@ pub fn generate_ticket(server: &mut Server) -> ResumptionToken { let mut server_conn = connect(&mut client, server); server_conn.borrow_mut().send_ticket(now(), &[]).unwrap(); - let dgram = server.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); // Consume ticket, ignore output. + let out = server.process(None, now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); // Consume ticket, ignore output. let ticket = find_ticket(&mut client); // Have the client close the connection and then let the server clean up. client.close(now(), 0, "got a ticket"); - let dgram = client.process_output(now()).dgram(); - mem::drop(server.process(dgram, now())); + let out = client.process_output(now()); + mem::drop(server.process(out.as_dgram_ref(), now())); // Calling active_connections clears the set of active connections. 
assert_eq!(server.active_connections().len(), 1); ticket diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index 3dcebde168..f088ebea3f 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -267,7 +267,7 @@ fn process_client_initial(v: Version, packet: &[u8]) { let dgram = Datagram::new(addr(), addr(), packet); assert_eq!(*server.state(), State::Init); - let out = server.process(Some(dgram), now()); + let out = server.process(Some(&dgram), now()); assert_eq!(*server.state(), State::Handshaking); assert!(out.dgram().is_some()); } diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 1c95a4ad2d..4de9575c2a 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -27,13 +27,13 @@ fn truncate_long_packet() { let mut client = default_client(); let mut server = default_server(); - let dgram = client.process(None, now()).dgram(); - assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_some()); + let out = client.process(None, now()); + assert!(out.as_dgram_ref().is_some()); + let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // This will truncate the Handshake packet from the server. - let dupe = dgram.as_ref().unwrap().clone(); + let dupe = out.as_dgram_ref().unwrap().clone(); // Count the padding in the packet, plus 1. let tail = dupe.iter().rev().take_while(|b| **b == 0).count() + 1; let truncated = Datagram::new( @@ -41,19 +41,19 @@ fn truncate_long_packet() { dupe.destination(), &dupe[..(dupe.len() - tail)], ); - let hs_probe = client.process(Some(truncated), now()).dgram(); + let hs_probe = client.process(Some(&truncated), now()).dgram(); assert!(hs_probe.is_some()); // Now feed in the untruncated packet. - let dgram = client.process(dgram, now()).dgram(); - assert!(dgram.is_some()); // Throw this ACK away. 
+ let out = client.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // Throw this ACK away. assert!(test_fixture::maybe_authenticate(&mut client)); - let dgram = client.process(None, now()).dgram(); - assert!(dgram.is_some()); + let out = client.process(None, now()); + assert!(out.as_dgram_ref().is_some()); assert!(client.state().connected()); - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_some()); + let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); assert!(server.state().connected()); } @@ -68,12 +68,12 @@ fn reorder_server_initial() { ); let mut server = default_server(); - let client_initial = client.process_output(now()).dgram(); + let client_initial = client.process_output(now()); let (_, client_dcid, _, _) = - decode_initial_header(client_initial.as_ref().unwrap(), Role::Client); + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); let client_dcid = client_dcid.to_owned(); - let server_packet = server.process(client_initial, now()).dgram(); + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); let (server_initial, server_hs) = split_datagram(server_packet.as_ref().unwrap()); let (protected_header, _, _, payload) = decode_initial_header(&server_initial, Role::Server); @@ -114,16 +114,16 @@ fn reorder_server_initial() { // Now a connection can be made successfully. // Though we modified the server's Initial packet, we get away with it. // TLS only authenticates the content of the CRYPTO frame, which was untouched. 
- client.process_input(reordered, now()); - client.process_input(server_hs.unwrap(), now()); + client.process_input(&reordered, now()); + client.process_input(&server_hs.unwrap(), now()); assert!(test_fixture::maybe_authenticate(&mut client)); - let finished = client.process_output(now()).dgram(); + let finished = client.process_output(now()); assert_eq!(*client.state(), State::Connected); - let done = server.process(finished, now()).dgram(); + let done = server.process(finished.as_dgram_ref(), now()); assert_eq!(*server.state(), State::Confirmed); - client.process_input(done.unwrap(), now()); + client.process_input(done.as_dgram_ref().unwrap(), now()); assert_eq!(*client.state(), State::Confirmed); } diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 51cc442ddd..0b51eacab1 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -31,21 +31,21 @@ fn retry_basic() { let dgram = client.process(None, now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now()).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Initial w/token assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Initial, HS + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Initial, HS assert!(dgram.is_some()); - mem::drop(client.process(dgram, now()).dgram()); // Ingest, drop any ACK. + mem::drop(client.process(dgram.as_ref(), now()).dgram()); // Ingest, drop any ACK. 
client.authenticated(AuthenticationStatus::Ok, now()); let dgram = client.process(None, now()).dgram(); // Send Finished assert!(dgram.is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // (done) assert!(dgram.is_some()); // Note that this packet will be dropped... connected_server(&mut server); } @@ -62,10 +62,10 @@ fn implicit_rtt_retry() { let dgram = client.process(None, now).dgram(); now += RTT / 2; - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); assertions::assert_retry(dgram.as_ref().unwrap()); now += RTT / 2; - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); assert_eq!(client.stats().rtt, RTT); } @@ -79,16 +79,16 @@ fn retry_expired() { let dgram = client.process(None, now).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now).dgram(); // Initial w/token assert!(dgram.is_some()); now += Duration::from_secs(60); // Too long for Retry. 
- let dgram = server.process(dgram, now).dgram(); // Initial, HS + let dgram = server.process(dgram.as_ref(), now).dgram(); // Initial, HS assert!(dgram.is_none()); } @@ -108,23 +108,23 @@ fn retry_0rtt() { let dgram = client.process(None, now()).dgram(); // Initial w/0-RTT assert!(dgram.is_some()); assertions::assert_coalesced_0rtt(dgram.as_ref().unwrap()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); // After retry, there should be a token and still coalesced 0-RTT. - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); assertions::assert_coalesced_0rtt(dgram.as_ref().unwrap()); - let dgram = server.process(dgram, now()).dgram(); // Initial, HS + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Initial, HS assert!(dgram.is_some()); - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Note: the client doesn't need to authenticate the server here // as there is no certificate; authentication is based on the ticket. 
assert!(dgram.is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // (done) assert!(dgram.is_some()); connected_server(&mut server); assert!(client.tls_info().unwrap().resumed()); @@ -136,14 +136,14 @@ fn retry_different_ip() { server.set_validation(ValidateAddress::Always); let mut client = default_client(); - let dgram = client.process(None, now()).dgram(); // Initial + let dgram = client.process(None.as_ref(), now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now()).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Initial w/token assert!(dgram.is_some()); // Change the source IP on the address from the client. 
@@ -151,7 +151,7 @@ fn retry_different_ip() { let other_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); let other_addr = SocketAddr::new(other_v4, 443); let from_other = Datagram::new(other_addr, dgram.destination(), &dgram[..]); - let dgram = server.process(Some(from_other), now()).dgram(); + let dgram = server.process(Some(&from_other), now()).dgram(); assert!(dgram.is_none()); } @@ -172,7 +172,7 @@ fn new_token_different_ip() { let d = dgram.unwrap(); let src = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), d.source().port()); let dgram = Some(Datagram::new(src, d.destination(), &d[..])); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); } @@ -197,7 +197,7 @@ fn new_token_expired() { let d = dgram.unwrap(); let src = SocketAddr::new(d.source().ip(), d.source().port() + 1); let dgram = Some(Datagram::new(src, d.destination(), &d[..])); - let dgram = server.process(dgram, the_future).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), the_future).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); } @@ -211,32 +211,32 @@ fn retry_after_initial() { let cinit = client.process(None, now()).dgram(); // Initial assert!(cinit.is_some()); - let server_flight = server.process(cinit.clone(), now()).dgram(); // Initial + let server_flight = server.process(cinit.as_ref(), now()).dgram(); // Initial assert!(server_flight.is_some()); // We need to have the client just process the Initial. let (server_initial, _other) = split_datagram(server_flight.as_ref().unwrap()); - let dgram = client.process(Some(server_initial), now()).dgram(); + let dgram = client.process(Some(&server_initial), now()).dgram(); assert!(dgram.is_some()); assert!(*client.state() != State::Connected); - let retry = retry_server.process(cinit, now()).dgram(); // Retry! 
+ let retry = retry_server.process(cinit.as_ref(), now()).dgram(); // Retry! assert!(retry.is_some()); assertions::assert_retry(retry.as_ref().unwrap()); // The client should ignore the retry. - let junk = client.process(retry, now()).dgram(); + let junk = client.process(retry.as_ref(), now()).dgram(); assert!(junk.is_none()); // Either way, the client should still be able to process the server flight and connect. - let dgram = client.process(server_flight, now()).dgram(); + let dgram = client.process(server_flight.as_ref(), now()).dgram(); assert!(dgram.is_some()); // Drop this one. assert!(test_fixture::maybe_authenticate(&mut client)); let dgram = client.process(None, now()).dgram(); assert!(dgram.is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // (done) assert!(dgram.is_some()); connected_server(&mut server); } @@ -249,7 +249,7 @@ fn retry_bad_integrity() { let dgram = client.process(None, now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); let retry = &dgram.as_ref().unwrap(); @@ -260,7 +260,7 @@ fn retry_bad_integrity() { let tweaked_packet = Datagram::new(retry.source(), retry.destination(), tweaked); // The client should ignore this packet. - let dgram = client.process(Some(tweaked_packet), now()).dgram(); + let dgram = client.process(Some(&tweaked_packet), now()).dgram(); assert!(dgram.is_none()); } @@ -274,12 +274,14 @@ fn retry_bad_token() { // Send a retry to one server, then replay it to the other. 
let client_initial1 = client.process(None, now()).dgram(); assert!(client_initial1.is_some()); - let retry = retry_server.process(client_initial1, now()).dgram(); + let retry = retry_server + .process(client_initial1.as_ref(), now()) + .dgram(); assert!(retry.is_some()); - let client_initial2 = client.process(retry, now()).dgram(); + let client_initial2 = client.process(retry.as_ref(), now()).dgram(); assert!(client_initial2.is_some()); - let dgram = server.process(client_initial2, now()).dgram(); + let dgram = server.process(client_initial2.as_ref(), now()).dgram(); assert!(dgram.is_none()); } @@ -307,10 +309,10 @@ fn retry_after_pto() { let cb = client.process(None, now).callback(); assert_ne!(cb, Duration::new(0, 0)); - let retry = server.process(ci, now).dgram(); + let retry = server.process(ci.as_ref(), now).dgram(); assertions::assert_retry(retry.as_ref().unwrap()); - let ci2 = client.process(retry, now).dgram(); + let ci2 = client.process(retry.as_ref(), now).dgram(); assert!(ci2.unwrap().len() >= 1200); } @@ -322,12 +324,12 @@ fn vn_after_retry() { let dgram = client.process(None, now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now()).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Initial w/token assert!(dgram.is_some()); let mut encoder = Encoder::default(); @@ -339,7 +341,7 @@ fn vn_after_retry() { let vn = Datagram::new(addr(), addr(), encoder); assert_ne!( - client.process(Some(vn), now()).callback(), + client.process(Some(&vn), now()).callback(), Duration::from_secs(0) ); } @@ -365,9 +367,11 @@ fn mitm_retry() { // Trigger initial and a second client Initial. 
let client_initial1 = client.process(None, now()).dgram(); assert!(client_initial1.is_some()); - let retry = retry_server.process(client_initial1, now()).dgram(); + let retry = retry_server + .process(client_initial1.as_ref(), now()) + .dgram(); assert!(retry.is_some()); - let client_initial2 = client.process(retry, now()).dgram(); + let client_initial2 = client.process(retry.as_ref(), now()).dgram(); assert!(client_initial2.is_some()); // Now to start the epic process of decrypting the packet, @@ -424,15 +428,15 @@ fn mitm_retry() { notoken_packet, ); qdebug!("passing modified Initial to the main server"); - let dgram = server.process(Some(new_datagram), now()).dgram(); + let dgram = server.process(Some(&new_datagram), now()).dgram(); assert!(dgram.is_some()); - let dgram = client.process(dgram, now()).dgram(); // Generate an ACK. + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Generate an ACK. assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_none()); assert!(test_fixture::maybe_authenticate(&mut client)); - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); // Client sending CLOSE_CONNECTIONs assert!(matches!( *client.state(), diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index fe03b2df1d..a4f07def87 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -47,8 +47,8 @@ pub fn complete_connection( }; while !is_done(client) { _ = test_fixture::maybe_authenticate(client); - let out = client.process(datagram, now()); - let out = server.process(out.dgram(), now()); + let out = client.process(datagram.as_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); datagram = out.dgram(); } @@ -109,11 +109,11 @@ fn connect_single_version_server() { if client.version() != version { // Run the 
version negotiation exchange if necessary. - let dgram = client.process_output(now()).dgram(); - assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let out = client.process_output(now()); + assert!(out.as_dgram_ref().is_some()); + let dgram = server.process(out.as_dgram_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); } let server_conn = connect(&mut client, &mut server); @@ -133,14 +133,14 @@ fn duplicate_initial() { let mut client = default_client(); assert_eq!(*client.state(), State::Init); - let initial = client.process(None, now()).dgram(); - assert!(initial.is_some()); + let initial = client.process(None, now()); + assert!(initial.as_dgram_ref().is_some()); // The server should ignore a packets with the same remote address and // destination connection ID as an existing connection attempt. - let server_initial = server.process(initial.clone(), now()).dgram(); + let server_initial = server.process(initial.as_dgram_ref(), now()).dgram(); assert!(server_initial.is_some()); - let dgram = server.process(initial, now()).dgram(); + let dgram = server.process(initial.as_dgram_ref(), now()).dgram(); assert!(dgram.is_none()); assert_eq!(server.active_connections().len(), 1); @@ -161,10 +161,10 @@ fn duplicate_initial_new_path() { ); // The server should respond to both as these came from different addresses. 
- let dgram = server.process(Some(other), now()).dgram(); + let dgram = server.process(Some(&other), now()).dgram(); assert!(dgram.is_some()); - let server_initial = server.process(Some(initial), now()).dgram(); + let server_initial = server.process(Some(&initial), now()).dgram(); assert!(server_initial.is_some()); assert_eq!(server.active_connections().len(), 2); @@ -177,16 +177,20 @@ fn different_initials_same_path() { let mut client1 = default_client(); let mut client2 = default_client(); - let client_initial1 = client1.process(None, now()).dgram(); - assert!(client_initial1.is_some()); - let client_initial2 = client2.process(None, now()).dgram(); - assert!(client_initial2.is_some()); + let client_initial1 = client1.process(None, now()); + assert!(client_initial1.as_dgram_ref().is_some()); + let client_initial2 = client2.process(None, now()); + assert!(client_initial2.as_dgram_ref().is_some()); // The server should respond to both as these came from different addresses. - let server_initial1 = server.process(client_initial1, now()).dgram(); + let server_initial1 = server + .process(client_initial1.as_dgram_ref(), now()) + .dgram(); assert!(server_initial1.is_some()); - let server_initial2 = server.process(client_initial2, now()).dgram(); + let server_initial2 = server + .process(client_initial2.as_dgram_ref(), now()) + .dgram(); assert!(server_initial2.is_some()); assert_eq!(server.active_connections().len(), 2); @@ -199,10 +203,10 @@ fn same_initial_after_connected() { let mut server = default_server(); let mut client = default_client(); - let client_initial = client.process(None, now()).dgram(); - assert!(client_initial.is_some()); + let client_initial = client.process(None, now()); + assert!(client_initial.as_dgram_ref().is_some()); - let server_initial = server.process(client_initial.clone(), now()).dgram(); + let server_initial = server.process(client_initial.as_dgram_ref(), now()).dgram(); assert!(server_initial.is_some()); complete_connection(&mut client, 
&mut server, server_initial); // This removes the connection from the active set until something happens to it. @@ -210,7 +214,7 @@ fn same_initial_after_connected() { // Now make a new connection using the exact same initial as before. // The server should respond to an attempt to connect with the same Initial. - let dgram = server.process(client_initial, now()).dgram(); + let dgram = server.process(client_initial.as_dgram_ref(), now()).dgram(); assert!(dgram.is_some()); // The server should make a new connection object. assert_eq!(server.active_connections().len(), 1); @@ -232,7 +236,7 @@ fn drop_non_initial() { bogus_data.resize(1200, 66); let bogus = Datagram::new(test_fixture::addr(), test_fixture::addr(), bogus_data); - assert!(server.process(Some(bogus), now()).dgram().is_none()); + assert!(server.process(Some(&bogus), now()).dgram().is_none()); } #[test] @@ -251,7 +255,7 @@ fn drop_short_initial() { bogus_data.resize(1199, 66); let bogus = Datagram::new(test_fixture::addr(), test_fixture::addr(), bogus_data); - assert!(server.process(Some(bogus), now()).dgram().is_none()); + assert!(server.process(Some(&bogus), now()).dgram().is_none()); } /// Verify that the server can read 0-RTT properly. A more robust server would buffer @@ -296,12 +300,12 @@ fn zero_rtt() { let c4 = client_send(); // 0-RTT packets that arrive before the handshake get dropped. - mem::drop(server.process(Some(c2), now)); + mem::drop(server.process(Some(&c2), now)); assert!(server.active_connections().is_empty()); // Now handshake and let another 0-RTT packet in. - let shs = server.process(Some(c1), now).dgram(); - mem::drop(server.process(Some(c3), now)); + let shs = server.process(Some(&c1), now); + mem::drop(server.process(Some(&c3), now)); // The server will have received two STREAM frames now if it processed both packets. let active = server.active_connections(); assert_eq!(active.len(), 1); @@ -310,11 +314,11 @@ fn zero_rtt() { // Complete the handshake. 
As the client was pacing 0-RTT packets, extend the time // a little so that the pacer doesn't prevent the Finished from being sent. now += now - start_time; - let cfin = client.process(shs, now).dgram(); - mem::drop(server.process(cfin, now)); + let cfin = client.process(shs.as_dgram_ref(), now); + mem::drop(server.process(cfin.as_dgram_ref(), now)); // The server will drop this last 0-RTT packet. - mem::drop(server.process(Some(c4), now)); + mem::drop(server.process(Some(&c4), now)); let active = server.active_connections(); assert_eq!(active.len(), 1); assert_eq!(active[0].borrow().stats().frame_rx.stream, 2); @@ -332,21 +336,21 @@ fn new_token_0rtt() { let client_stream = client.stream_create(StreamType::UniDi).unwrap(); client.stream_send(client_stream, &[1, 2, 3]).unwrap(); - let dgram = client.process(None, now()).dgram(); // Initial w/0-RTT - assert!(dgram.is_some()); - assertions::assert_initial(dgram.as_ref().unwrap(), true); - assertions::assert_coalesced_0rtt(dgram.as_ref().unwrap()); - let dgram = server.process(dgram, now()).dgram(); // Initial - assert!(dgram.is_some()); - assertions::assert_initial(dgram.as_ref().unwrap(), false); + let out = client.process(None, now()); // Initial w/0-RTT + assert!(out.as_dgram_ref().is_some()); + assertions::assert_initial(out.as_dgram_ref().unwrap(), true); + assertions::assert_coalesced_0rtt(out.as_dgram_ref().unwrap()); + let out = server.process(out.as_dgram_ref(), now()); // Initial + assert!(out.as_dgram_ref().is_some()); + assertions::assert_initial(out.as_dgram_ref().unwrap(), false); - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(out.as_dgram_ref(), now()); // Note: the client doesn't need to authenticate the server here // as there is no certificate; authentication is based on the ticket. 
- assert!(dgram.is_some()); + assert!(out.as_dgram_ref().is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) - assert!(dgram.is_some()); + let dgram = server.process(dgram.as_dgram_ref(), now()); // (done) + assert!(dgram.as_dgram_ref().is_some()); connected_server(&mut server); assert!(client.tls_info().unwrap().resumed()); } @@ -368,7 +372,7 @@ fn new_token_different_port() { let d = dgram.unwrap(); let src = SocketAddr::new(d.source().ip(), d.source().port() + 1); let dgram = Some(Datagram::new(src, d.destination(), &d[..])); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_initial(dgram.as_ref().unwrap(), false); } @@ -425,7 +429,7 @@ fn bad_client_initial() { let bad_dgram = Datagram::new(dgram.source(), dgram.destination(), ciphertext); // The server should reject this. - let response = server.process(Some(bad_dgram), now()); + let response = server.process(Some(&bad_dgram), now()); let close_dgram = response.dgram().unwrap(); // The resulting datagram might contain multiple packets, but each is small. let (initial_close, rest) = split_datagram(&close_dgram); @@ -439,7 +443,7 @@ fn bad_client_initial() { // The client should accept this new and stop trying to connect. // It will generate a CONNECTION_CLOSE first though. - let response = client.process(Some(close_dgram), now()).dgram(); + let response = client.process(Some(&close_dgram), now()).dgram(); assert!(response.is_some()); // The client will now wait out its closing period. 
let delay = client.process(None, now()).callback(); @@ -471,7 +475,7 @@ fn version_negotiation_ignored() { let mut input = dgram.to_vec(); input[1] ^= 0x12; let damaged = Datagram::new(dgram.source(), dgram.destination(), input.clone()); - let vn = server.process(Some(damaged), now()).dgram(); + let vn = server.process(Some(&damaged), now()).dgram(); let mut dec = Decoder::from(&input[5..]); // Skip past version. let d_cid = dec.decode_vec(1).expect("client DCID").to_vec(); @@ -492,7 +496,7 @@ fn version_negotiation_ignored() { assert!(found, "valid version not found"); // Client ignores VN packet that contain negotiated version. - let res = client.process(Some(vn), now()); + let res = client.process(Some(&vn), now()); assert!(res.callback() > Duration::new(0, 120)); assert_eq!(client.state(), &State::WaitInitial); } @@ -512,9 +516,9 @@ fn version_negotiation() { // `connect()` runs a fixed exchange, so manually run the Version Negotiation. let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let sconn = connect(&mut client, &mut server); assert_eq!(client.version(), VN_VERSION); @@ -548,22 +552,22 @@ fn version_negotiation_and_compatible() { let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); assertions::assert_version(dgram.as_ref().unwrap(), ORIG_VERSION.wire_version()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let dgram = client.process(None, now()).dgram(); // ClientHello assertions::assert_version(dgram.as_ref().unwrap(), VN_VERSION.wire_version()); - 
let dgram = server.process(dgram, now()).dgram(); // ServerHello... + let dgram = server.process(dgram.as_ref(), now()).dgram(); // ServerHello... assertions::assert_version(dgram.as_ref().unwrap(), COMPAT_VERSION.wire_version()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); client.authenticated(AuthenticationStatus::Ok, now()); let dgram = client.process_output(now()).dgram(); assertions::assert_version(dgram.as_ref().unwrap(), COMPAT_VERSION.wire_version()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // ACK + HANDSHAKE_DONE + NST - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); // ACK + HANDSHAKE_DONE + NST + client.process_input(&dgram.unwrap(), now()); assert_eq!(*client.state(), State::Confirmed); let sconn = connected_server(&mut server); @@ -596,7 +600,7 @@ fn compatible_upgrade_resumption_and_vn() { server_conn.borrow_mut().send_ticket(now(), &[]).unwrap(); let dgram = server.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); // Consume ticket, ignore output. + client.process_input(&dgram.unwrap(), now()); // Consume ticket, ignore output. let ticket = find_ticket(&mut client); // This new server will reject the ticket, but it will also generate a VN packet. 
@@ -610,9 +614,9 @@ fn compatible_upgrade_resumption_and_vn() { let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); assertions::assert_version(dgram.as_ref().unwrap(), COMPAT_VERSION.wire_version()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let server_conn = connect(&mut client, &mut server); assert_eq!(client.version(), RESUMPTION_VERSION); @@ -722,8 +726,8 @@ fn max_streams_after_0rtt_rejection() { client.enable_resumption(now(), &token).unwrap(); _ = client.stream_create(StreamType::BiDi).unwrap(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - let dgram = client.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); // We're far enough along to complete the test now. // Make sure that we can create MAX_STREAMS uni- and bidirectional streams. 
diff --git a/neqo-transport/tests/sim/connection.rs b/neqo-transport/tests/sim/connection.rs index 5768941e4a..b624c119bd 100644 --- a/neqo-transport/tests/sim/connection.rs +++ b/neqo-transport/tests/sim/connection.rs @@ -120,7 +120,7 @@ impl Node for ConnectionNode { fn process(&mut self, mut d: Option, now: Instant) -> Output { _ = self.process_goals(|goal, c| goal.process(c, now)); loop { - let res = self.c.process(d.take(), now); + let res = self.c.process(d.take().as_ref(), now); let mut active = false; while let Some(e) = self.c.next_event() { diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index f0830415b4..5ddba24814 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -203,7 +203,7 @@ pub fn handshake(client: &mut Connection, server: &mut Connection) { }; while !is_done(a) { _ = maybe_authenticate(a); - let d = a.process(datagram, now()); + let d = a.process(datagram.as_ref(), now()); datagram = d.dgram(); mem::swap(&mut a, &mut b); } From c82a569d078ef43473fac0da51ed35df06d019a1 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 11 Jan 2024 22:57:05 +0100 Subject: [PATCH 052/321] refactor: use let-else (#1539) * refactor: use let-else Use Rust's (>= v1.65) new let-else feature whereever possible. 
https://doc.rust-lang.org/rust-by-example/flow_control/let_else.html https://rust-lang.github.io/rust-clippy/master/index.html#/manual_let_else * Fix by reference --- neqo-client/src/main.rs | 9 +++------ neqo-server/src/old_https.rs | 4 +--- neqo-transport/src/connection/mod.rs | 24 ++++++++---------------- neqo-transport/src/crypto.rs | 4 +--- neqo-transport/src/dump.rs | 9 +++------ neqo-transport/src/packet/mod.rs | 4 +--- neqo-transport/src/server.rs | 13 ++++--------- neqo-transport/src/tparams.rs | 8 ++------ neqo-transport/tests/connection.rs | 4 ++-- 9 files changed, 25 insertions(+), 54 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index d422f51b7f..0ec1c0962f 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -1031,12 +1031,9 @@ fn main() -> Res<()> { (SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false) ) }); - let remote_addr = match remote_addr { - Some(a) => a, - None => { - eprintln!("No compatible address found for: {host}"); - exit(1); - } + let Some(remote_addr) = remote_addr else { + eprintln!("No compatible address found for: {host}"); + exit(1); }; let local_addr = match remote_addr { diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index 01a097a914..e259c27f54 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -138,9 +138,7 @@ impl Http09Server { data }; - let msg = if let Ok(s) = std::str::from_utf8(&buf[..]) { - s - } else { + let Ok(msg) = std::str::from_utf8(&buf[..]) else { self.save_partial(stream_id, buf, conn); return; }; diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 4e50176831..069b37ab44 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1864,12 +1864,9 @@ impl Connection { let grease_quic_bit = self.can_grease_quic_bit(); let version = self.version(); for space in PacketNumberSpace::iter() { - let (cspace, tx) = - if let 
Some(crypto) = self.crypto.states.select_tx_mut(self.version, *space) { - crypto - } else { - continue; - }; + let Some((cspace, tx)) = self.crypto.states.select_tx_mut(self.version, *space) else { + continue; + }; let path = close.path().borrow(); let (_, mut builder) = Self::build_packet_header( @@ -2152,12 +2149,9 @@ impl Connection { let mut encoder = Encoder::with_capacity(profile.limit()); for space in PacketNumberSpace::iter() { // Ensure we have tx crypto state for this epoch, or skip it. - let (cspace, tx) = - if let Some(crypto) = self.crypto.states.select_tx_mut(self.version, *space) { - crypto - } else { - continue; - }; + let Some((cspace, tx)) = self.crypto.states.select_tx_mut(self.version, *space) else { + continue; + }; let header_start = encoder.len(); let (pt, mut builder) = Self::build_packet_header( @@ -3113,13 +3107,11 @@ impl Connection { return Err(Error::NotAvailable); } let version = self.version(); - let (cspace, tx) = if let Some(crypto) = self + let Some((cspace, tx)) = self .crypto .states .select_tx(self.version, PacketNumberSpace::ApplicationData) - { - crypto - } else { + else { return Err(Error::NotAvailable); }; let path = self.paths.primary_fallible().ok_or(Error::NotAvailable)?; diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 363ed097ef..803c049de5 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -245,9 +245,7 @@ impl Crypto { fn install_handshake_keys(&mut self) -> Res { qtrace!([self], "Attempt to install handshake keys"); - let write_secret = if let Some(secret) = self.tls.write_secret(TLS_EPOCH_HANDSHAKE) { - secret - } else { + let Some(write_secret) = self.tls.write_secret(TLS_EPOCH_HANDSHAKE) else { // No keys is fine. 
return Ok(false); }; diff --git a/neqo-transport/src/dump.rs b/neqo-transport/src/dump.rs index fceb6b6f5d..7dac137340 100644 --- a/neqo-transport/src/dump.rs +++ b/neqo-transport/src/dump.rs @@ -31,12 +31,9 @@ pub fn dump_packet( let mut s = String::from(""); let mut d = Decoder::from(payload); while d.remaining() > 0 { - let f = match Frame::decode(&mut d) { - Ok(f) => f, - Err(_) => { - s.push_str(" [broken]..."); - break; - } + let Ok(f) = Frame::decode(&mut d) else { + s.push_str(" [broken]..."); + break; }; if let Some(x) = f.dump() { write!(&mut s, "\n {} {}", dir, &x).unwrap(); diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index ac4765f75d..69447948b3 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -603,9 +603,7 @@ impl<'a> PublicPacket<'a> { } // Check that this is a long header from a supported version. - let version = if let Ok(v) = Version::try_from(version) { - v - } else { + let Ok(version) = Version::try_from(version) else { return Ok(( Self { packet_type: PacketType::OtherVersion, diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 68334abfcd..9c617a7a39 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -332,9 +332,7 @@ impl Server { dgram.source(), now, ); - let token = if let Ok(t) = res { - t - } else { + let Ok(token) = res else { qerror!([self], "unable to generate token, dropping packet"); return None; }; @@ -539,12 +537,9 @@ impl Server { // This is only looking at the first packet header in the datagram. // All packets in the datagram are routed to the same connection. 
let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); - let (packet, _remainder) = match res { - Ok(res) => res, - _ => { - qtrace!([self], "Discarding {:?}", dgram); - return None; - } + let Ok((packet, _remainder)) = res else { + qtrace!([self], "Discarding {:?}", dgram); + return None; }; // Finding an existing connection. Should be the most common case. diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index fca54d8208..28f20f7bcf 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -726,16 +726,12 @@ where return ZeroRttCheckResult::Reject; } let mut dec = Decoder::from(token); - let tpslice = if let Some(v) = dec.decode_vvec() { - v - } else { + let Some(tpslice) = dec.decode_vvec() else { qinfo!("0-RTT: token code error"); return ZeroRttCheckResult::Fail; }; let mut dec_tp = Decoder::from(tpslice); - let remembered = if let Ok(v) = TransportParameters::decode(&mut dec_tp) { - v - } else { + let Ok(remembered) = TransportParameters::decode(&mut dec_tp) else { qinfo!("0-RTT: transport parameter decode error"); return ZeroRttCheckResult::Fail; }; diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 4de9575c2a..13c70590fa 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -140,7 +140,7 @@ fn overflow_crypto() { decode_initial_header(client_initial.as_ref().unwrap(), Role::Client); let client_dcid = client_dcid.to_owned(); - let server_packet = server.process(client_initial, now()).dgram(); + let server_packet = server.process(client_initial.as_ref(), now()).dgram(); let (server_initial, _) = split_datagram(server_packet.as_ref().unwrap()); // Now decrypt the server packet to get AEAD and HP instances. @@ -184,7 +184,7 @@ fn overflow_crypto() { server_initial.destination(), packet, ); - client.process_input(dgram, now()); + client.process_input(&dgram, now()); if let State::Closing { error, .. 
} = client.state() { assert!( matches!( From 5c11d2650ad011a8a8282de3faa4ea96d0ffb895 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 11 Jan 2024 22:58:01 +0100 Subject: [PATCH 053/321] refactor: add semicolon if nothing is returned (#1542) * refactor: add semicolon if nothing is returned https://rust-lang.github.io/rust-clippy/master/index.html#/semicolon_if_nothing_returned * Fix as ref --- neqo-client/src/main.rs | 4 ++-- neqo-http3/tests/httpconn.rs | 2 +- neqo-interop/src/main.rs | 4 ++-- neqo-server/src/main.rs | 2 +- neqo-server/src/old_https.rs | 2 +- neqo-transport/src/connection/mod.rs | 4 ++-- neqo-transport/src/connection/state.rs | 2 +- neqo-transport/src/events.rs | 2 +- neqo-transport/src/frame.rs | 2 +- neqo-transport/src/packet/mod.rs | 2 +- neqo-transport/src/qlog.rs | 22 +++++++++++----------- neqo-transport/src/recv_stream.rs | 6 +++--- neqo-transport/src/send_stream.rs | 20 ++++++++++---------- neqo-transport/src/server.rs | 2 +- neqo-transport/src/streams.rs | 6 +++--- neqo-transport/src/tparams.rs | 2 +- 16 files changed, 42 insertions(+), 42 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 0ec1c0962f..143d0cfcd0 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -1225,7 +1225,7 @@ mod old { "READ[{}]: {}", stream_id, String::from_utf8(data.clone()).unwrap() - ) + ); } if fin { return Ok(true); @@ -1293,7 +1293,7 @@ mod old { }; } ConnectionEvent::SendStreamWritable { stream_id } => { - println!("stream {stream_id} writable") + println!("stream {stream_id} writable"); } ConnectionEvent::SendStreamComplete { stream_id } => { println!("stream {stream_id} complete"); diff --git a/neqo-http3/tests/httpconn.rs b/neqo-http3/tests/httpconn.rs index facc1c00fe..fc49851e5b 100644 --- a/neqo-http3/tests/httpconn.rs +++ b/neqo-http3/tests/httpconn.rs @@ -240,7 +240,7 @@ fn test_103_response() { set_response(&mut request); let out = hconn_s.process(None, now()); 
mem::drop(hconn_c.process(out.as_dgram_ref(), now())); - process_client_events(&mut hconn_c) + process_client_events(&mut hconn_c); } #[test] diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index 0f6e665cf2..ef5646ea73 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -200,7 +200,7 @@ impl Handler for H9Handler { } } ConnectionEvent::SendStreamWritable { stream_id } => { - eprintln!("stream {stream_id} writable") + eprintln!("stream {stream_id} writable"); } _ => { eprintln!("Unexpected event {event:?}"); @@ -777,7 +777,7 @@ fn run_peer(args: &Args, peer: &'static Peer) -> Vec<(&'static Test, String)> { match child.1.join() { Ok(e) => { eprintln!("Test complete {:?}, {:?}", child.0, e); - results.push(e) + results.push(e); } Err(_) => { eprintln!("Thread crashed {:?}", child.0); diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 3e91b5cc28..ac4c952837 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -564,7 +564,7 @@ impl HttpServer for SimpleServer { } fn set_qlog_dir(&mut self, dir: Option) { - self.server.set_qlog_dir(dir) + self.server.set_qlog_dir(dir); } fn validate_address(&mut self, v: ValidateAddress) { diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index e259c27f54..61ebd53258 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -236,7 +236,7 @@ impl HttpServer for Http09Server { } fn set_qlog_dir(&mut self, dir: Option) { - self.server.set_qlog_dir(dir) + self.server.set_qlog_dir(dir); } fn validate_address(&mut self, v: ValidateAddress) { diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 069b37ab44..3fbcb7f9a8 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1250,7 +1250,7 @@ impl Connection { self.tps.borrow_mut().local.set_bytes( tparams::ORIGINAL_DESTINATION_CONNECTION_ID, packet.dcid().to_vec(), - ) + ); } } 
(PacketType::VersionNegotiation, State::WaitInitial, Role::Client) => { @@ -2919,7 +2919,7 @@ impl Connection { self.streams.clear_streams(); } self.events.connection_state_change(state); - qlog::connection_state_updated(&mut self.qlog, &self.state) + qlog::connection_state_updated(&mut self.qlog, &self.state); } else if mem::discriminant(&state) != mem::discriminant(&self.state) { // Only tolerate a regression in state if the new state is closing // and the connection is already closed. diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index a34c91865e..ffd9f16b51 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -206,7 +206,7 @@ impl StateSignaling { debug_assert!(false, "StateSignaling must be in Idle state."); return; } - *self = Self::HandshakeDone + *self = Self::HandshakeDone; } pub fn write_done(&mut self, builder: &mut PacketBuilder) -> Res> { diff --git a/neqo-transport/src/events.rs b/neqo-transport/src/events.rs index 65b376eb0b..93cb63a86f 100644 --- a/neqo-transport/src/events.rs +++ b/neqo-transport/src/events.rs @@ -235,7 +235,7 @@ impl ConnectionEvents { where F: Fn(&ConnectionEvent) -> bool, { - self.events.borrow_mut().retain(|evt| !f(evt)) + self.events.borrow_mut().retain(|evt| !f(evt)); } } diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 18222f8c62..7eeba507bc 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -658,7 +658,7 @@ mod tests { application_error_code: 0x77, }; - just_dec(&f, "053F4077") + just_dec(&f, "053F4077"); } #[test] diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 69447948b3..b8a2d96790 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -245,7 +245,7 @@ impl PacketBuilder { /// Adjust the limit to ensure that no more data is added. 
pub fn mark_full(&mut self) { - self.limit = self.encoder.len() + self.limit = self.encoder.len(); } /// Mark the packet as needing padding (or not). diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 35944d5694..42c01cab28 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -74,15 +74,15 @@ pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHand // This event occurs very early, so just mark the time as 0.0. Some(Event::with_time(0.0, ev_data)) - }) + }); } pub fn server_connection_started(qlog: &mut NeqoQlog, path: &PathRef) { - connection_started(qlog, path) + connection_started(qlog, path); } pub fn client_connection_started(qlog: &mut NeqoQlog, path: &PathRef) { - connection_started(qlog, path) + connection_started(qlog, path); } fn connection_started(qlog: &mut NeqoQlog, path: &PathRef) { @@ -104,7 +104,7 @@ fn connection_started(qlog: &mut NeqoQlog, path: &PathRef) { }); Some(ev_data) - }) + }); } pub fn connection_state_updated(qlog: &mut NeqoQlog, new: &State) { @@ -124,7 +124,7 @@ pub fn connection_state_updated(qlog: &mut NeqoQlog, new: &State) { }); Some(ev_data) - }) + }); } pub fn packet_sent( @@ -170,7 +170,7 @@ pub fn packet_sent( }); stream.add_event_data_now(ev_data) - }) + }); } pub fn packet_dropped(qlog: &mut NeqoQlog, payload: &PublicPacket) { @@ -197,7 +197,7 @@ pub fn packet_dropped(qlog: &mut NeqoQlog, payload: &PublicPacket) { }); Some(ev_data) - }) + }); } pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { @@ -215,7 +215,7 @@ pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { stream.add_event_data_now(ev_data)?; } Ok(()) - }) + }); } pub fn packet_received( @@ -264,7 +264,7 @@ pub fn packet_received( }); stream.add_event_data_now(ev_data) - }) + }); } #[allow(dead_code)] @@ -306,7 +306,7 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { QlogMetric::RttVariance(v) => rtt_variance = Some(*v as f32), 
QlogMetric::PtoCount(v) => pto_count = Some(u16::try_from(*v).unwrap()), QlogMetric::CongestionWindow(v) => { - congestion_window = Some(u64::try_from(*v).unwrap()) + congestion_window = Some(u64::try_from(*v).unwrap()); } QlogMetric::BytesInFlight(v) => bytes_in_flight = Some(u64::try_from(*v).unwrap()), QlogMetric::SsThresh(v) => ssthresh = Some(u64::try_from(*v).unwrap()), @@ -330,7 +330,7 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { }); Some(ev_data) - }) + }); } // Helper functions diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index 659627f7cf..04db42d36b 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -656,12 +656,12 @@ impl RecvStream { | RecvStreamState::AbortReading { .. } | RecvStreamState::WaitForReset { .. } | RecvStreamState::ResetRecvd { .. } => { - qtrace!("data received when we are in state {}", self.state.name()) + qtrace!("data received when we are in state {}", self.state.name()); } } if !already_data_ready && (self.data_ready() || self.needs_to_inform_app_about_fin()) { - self.conn_events.recv_stream_readable(self.stream_id) + self.conn_events.recv_stream_readable(self.stream_id); } Ok(()) @@ -841,7 +841,7 @@ impl RecvStream { err, final_received: received, final_read: read, - }) + }); } RecvStreamState::DataRecvd { fc, diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 8ade4b4c7f..6ff173302e 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -260,7 +260,7 @@ impl RangeTracker { // Create final chunk if anything remains of the new range if tmp_len > 0 { - v.push((tmp_off, tmp_len, new_state)) + v.push((tmp_off, tmp_len, new_state)); } v @@ -312,7 +312,7 @@ impl RangeTracker { self.used.insert(sub_off, (sub_len, sub_state)); } - self.coalesce_acked_from_zero() + self.coalesce_acked_from_zero(); } fn unmark_range(&mut self, off: u64, len: usize) { @@ -443,7 
+443,7 @@ impl TxBuffer { } pub fn mark_as_sent(&mut self, offset: u64, len: usize) { - self.ranges.mark_range(offset, len, RangeState::Sent) + self.ranges.mark_range(offset, len, RangeState::Sent); } pub fn mark_as_acked(&mut self, offset: u64, len: usize) { @@ -463,7 +463,7 @@ impl TxBuffer { } pub fn mark_as_lost(&mut self, offset: u64, len: usize) { - self.ranges.unmark_range(offset, len) + self.ranges.unmark_range(offset, len); } /// Forget about anything that was marked as sent. @@ -622,7 +622,7 @@ pub struct SendStream { impl Hash for SendStream { fn hash(&self, state: &mut H) { - self.stream_id.hash(state) + self.stream_id.hash(state); } } @@ -909,7 +909,7 @@ impl SendStream { | SendStreamState::Send { .. } | SendStreamState::DataSent { .. } | SendStreamState::DataRecvd { .. } => { - qtrace!([self], "Reset acked while in {} state?", self.state.name()) + qtrace!([self], "Reset acked while in {} state?", self.state.name()); } SendStreamState::ResetSent { final_retired, @@ -1023,7 +1023,7 @@ impl SendStream { } => { send_buf.mark_as_acked(offset, len); if self.avail() > 0 { - self.conn_events.send_stream_writable(self.stream_id) + self.conn_events.send_stream_writable(self.stream_id); } } SendStreamState::DataSent { @@ -1101,7 +1101,7 @@ impl SendStream { let stream_was_blocked = fc.available() == 0; fc.update(limit); if stream_was_blocked && self.avail() > 0 { - self.conn_events.send_stream_writable(self.stream_id) + self.conn_events.send_stream_writable(self.stream_id); } } } @@ -1484,7 +1484,7 @@ impl SendStreams { pub fn reset_acked(&mut self, id: StreamId) { if let Some(ss) = self.map.get_mut(&id) { - ss.reset_acked() + ss.reset_acked(); } } @@ -1526,7 +1526,7 @@ impl SendStreams { match stream.sendorder() { None => regular.remove(*stream_id), Some(sendorder) => { - sendordered.get_mut(&sendorder).unwrap().remove(*stream_id) + sendordered.get_mut(&sendorder).unwrap().remove(*stream_id); } }; } diff --git a/neqo-transport/src/server.rs 
b/neqo-transport/src/server.rs index 9c617a7a39..859a5e4759 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -682,7 +682,7 @@ impl ActiveConnectionRef { impl std::hash::Hash for ActiveConnectionRef { fn hash(&self, state: &mut H) { let ptr: *const _ = self.c.as_ref(); - ptr.hash(state) + ptr.hash(state); } } diff --git a/neqo-transport/src/streams.rs b/neqo-transport/src/streams.rs index 735e602feb..507cfbc214 100644 --- a/neqo-transport/src/streams.rs +++ b/neqo-transport/src/streams.rs @@ -269,7 +269,7 @@ impl Streams { StreamRecoveryToken::Stream(st) => self.send.lost(st), StreamRecoveryToken::ResetStream { stream_id } => self.send.reset_lost(*stream_id), StreamRecoveryToken::StreamDataBlocked { stream_id, limit } => { - self.send.blocked_lost(*stream_id, *limit) + self.send.blocked_lost(*stream_id, *limit); } StreamRecoveryToken::MaxStreamData { stream_id, @@ -294,10 +294,10 @@ impl Streams { self.remote_stream_limits[*stream_type].frame_lost(*max_streams); } StreamRecoveryToken::DataBlocked(limit) => { - self.sender_fc.borrow_mut().frame_lost(*limit) + self.sender_fc.borrow_mut().frame_lost(*limit); } StreamRecoveryToken::MaxData(maximum_data) => { - self.receiver_fc.borrow_mut().frame_lost(*maximum_data) + self.receiver_fc.borrow_mut().frame_lost(*maximum_data); } } } diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 28f20f7bcf..59790c47f9 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -584,7 +584,7 @@ impl TransportParametersHandler { pub fn set_version(&mut self, version: Version) { debug_assert_eq!(self.role, Role::Client); self.versions.set_initial(version); - self.local.set_versions(self.role, &self.versions) + self.local.set_versions(self.role, &self.versions); } pub fn remote(&self) -> &TransportParameters { From abf2636153d5c88feee5faf5663facc79defc858 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 11 Jan 2024 22:59:12 +0100 Subject: [PATCH 054/321] 
feat(qlog): log version_information on client (#1505) * deps(qlog): update to qlog v0.10.0 This commit upgrades all `neqo-*` crates to use `qlog` `v0.10.0`. See also `qlog` `v0.10.0` release pull request https://github.com/cloudflare/quiche/pull/1647 * feat(qlog): log version_information on client This commit adds support for the qlog [`version_information` QUIC event](https://quicwg.org/qlog/draft-ietf-quic-qlog-quic-events.html#name-version_information) on the client. Depends on https://github.com/mozilla/neqo/pull/1504 Depends on https://github.com/cloudflare/quiche/pull/1684 Meta issue: https://github.com/mozilla/neqo/issues/528 * Use replace github.com/cloudflare with github.com/mxinden With https://github.com/cloudflare/quiche/pull/1684 merged, one can use cloudflare's repo. * Inline ev_data * Remove crates.io patch Patch no longer needed since upgrade to neqo v0.11.0 https://github.com/mozilla/neqo/pull/1547. --- neqo-transport/src/connection/mod.rs | 7 +++++ neqo-transport/src/qlog.rs | 39 +++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 3fbcb7f9a8..5ba1620427 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1183,6 +1183,12 @@ impl Connection { .get_versions_mut() .set_initial(self.conn_params.get_versions().initial()); mem::swap(self, &mut c); + qlog::client_version_information_negotiated( + &mut self.qlog, + self.conn_params.get_versions().all(), + supported, + version, + ); Ok(()) } else { qinfo!([self], "Version negotiation: failed with {:?}", supported); @@ -2296,6 +2302,7 @@ impl Connection { qinfo!([self], "client_start"); debug_assert_eq!(self.role, Role::Client); qlog::client_connection_started(&mut self.qlog, &self.paths.primary()); + qlog::client_version_information_initiated(&mut self.qlog, self.conn_params.get_versions()); self.handshake(now, self.version, 
PacketNumberSpace::Initial, None)?; self.set_state(State::WaitInitial); diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 42c01cab28..42a0e6d586 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -17,7 +17,7 @@ use qlog::events::{ connectivity::{ConnectionStarted, ConnectionState, ConnectionStateUpdated}, quic::{ AckedRanges, ErrorSpace, MetricsUpdated, PacketDropped, PacketHeader, PacketLost, - PacketReceived, PacketSent, QuicFrame, StreamType, + PacketReceived, PacketSent, QuicFrame, StreamType, VersionInformation, }, Event, EventData, RawInfo, }; @@ -33,6 +33,7 @@ use crate::{ stream_id::StreamType as NeqoStreamType, tparams::{self, TransportParametersHandler}, tracking::SentPacket, + version::{Version, VersionConfig, WireVersion}, }; pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHandler) { @@ -127,6 +128,42 @@ pub fn connection_state_updated(qlog: &mut NeqoQlog, new: &State) { }); } +pub fn client_version_information_initiated(qlog: &mut NeqoQlog, version_config: &VersionConfig) { + qlog.add_event_data(|| { + Some(EventData::VersionInformation(VersionInformation { + client_versions: Some( + version_config + .all() + .iter() + .map(|v| format!("{:02x}", v.wire_version())) + .collect(), + ), + server_versions: None, + chosen_version: Some(format!("{:02x}", version_config.initial().wire_version())), + })) + }); +} + +pub fn client_version_information_negotiated( + qlog: &mut NeqoQlog, + client: &[Version], + server: &[WireVersion], + chosen: Version, +) { + qlog.add_event_data(|| { + Some(EventData::VersionInformation(VersionInformation { + client_versions: Some( + client + .iter() + .map(|v| format!("{:02x}", v.wire_version())) + .collect(), + ), + server_versions: Some(server.iter().map(|v| format!("{v:02x}")).collect()), + chosen_version: Some(format!("{:02x}", chosen.wire_version())), + })) + }); +} + pub fn packet_sent( qlog: &mut NeqoQlog, pt: PacketType, From 
faa0f247a5a206ad9e921ee47da8e27a947a0e16 Mon Sep 17 00:00:00 2001 From: Marten Seemann Date: Mon, 15 Jan 2024 07:17:03 +0700 Subject: [PATCH 055/321] README: fix install instructions on macOS (#1551) There's no `libssl3.so`, so the existing command sets the environment variable to an empty string. --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1e44f8534a..31d6ab9e94 100644 --- a/README.md +++ b/README.md @@ -9,10 +9,14 @@ To run test HTTP/3 programs (neqo-client and neqo-server): * `./target/debug/neqo-client http://127.0.0.1:12345/` If a "Failure to load dynamic library" error happens at runtime, do -``` +```shell export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" ``` -On a mac, use `DYLD_LIBRARY_PATH` instead. + +On a macOS, do +```shell +export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +``` ## Faster Builds with Separate NSS/NSPR From 6fb7a6cf1ffcc1af67f72313b59c05e1370b99d3 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 15 Jan 2024 18:18:16 +1100 Subject: [PATCH 056/321] Supply the token more directly (#1553) --- .github/workflows/check.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index bc4a5dc9c8..140d29bccf 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -126,5 +126,4 @@ jobs: with: file: lcov.info fail_ci_if_error: false - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + token: ${{ secrets.CODECOV_TOKEN }} From c1529485bb90683c8e3ae27b9877d0b2a4d11d62 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 15 Jan 2024 15:13:49 +0200 Subject: [PATCH 057/321] chore: Enable dependabot updates for docker images (#1555) --- .github/dependabot.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 6d7f78e7b3..dbbb80c154 100644 --- 
a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -11,3 +11,7 @@ updates: directory: "/" schedule: interval: "weekly" + - package-ecosystem: "docker" + directory: "/qns" + schedule: + interval: "weekly" From 9e6cd932b140fde141e6414d53f539b6d296d4f9 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Tue, 16 Jan 2024 10:46:54 +0100 Subject: [PATCH 058/321] Make pacing configurable (#1550) * make pacing configurable * format * address comment --- neqo-client/src/main.rs | 7 +++- neqo-transport/src/connection/mod.rs | 12 +++++-- neqo-transport/src/connection/params.rs | 11 ++++++ neqo-transport/src/pace.rs | 45 +++++++++++++++++++------ neqo-transport/src/path.rs | 8 +++-- neqo-transport/src/recovery.rs | 2 +- neqo-transport/src/sender.rs | 9 +++-- 7 files changed, 74 insertions(+), 20 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 143d0cfcd0..de7da48a27 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -309,6 +309,10 @@ struct QuicParameters { #[structopt(long = "cc", default_value = "newreno")] /// The congestion controller to use. congestion_control: CongestionControlAlgorithm, + + #[structopt(long = "pacing")] + /// Whether pacing is enabled. 
+ pacing: bool, } impl QuicParameters { @@ -317,7 +321,8 @@ impl QuicParameters { .max_streams(StreamType::BiDi, self.max_streams_bidi) .max_streams(StreamType::UniDi, self.max_streams_uni) .idle_timeout(Duration::from_secs(self.idle_timeout)) - .cc_algorithm(self.congestion_control); + .cc_algorithm(self.congestion_control) + .pacing(self.pacing); if let Some(&first) = self.quic_version.first() { let all = if self.quic_version[1..].contains(&first) { diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 5ba1620427..3d7bc0a88c 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -328,6 +328,7 @@ impl Connection { local_addr, remote_addr, c.conn_params.get_cc_algorithm(), + c.conn_params.pacing_enabled(), NeqoQlog::default(), now, ); @@ -1388,6 +1389,7 @@ impl Connection { d.destination(), d.source(), self.conn_params.get_cc_algorithm(), + self.conn_params.pacing_enabled(), now, ); path.borrow_mut().add_received(d.len()); @@ -1693,9 +1695,13 @@ impl Connection { return Err(Error::InvalidMigration); } - let path = self - .paths - .find_path(local, remote, self.conn_params.get_cc_algorithm(), now); + let path = self.paths.find_path( + local, + remote, + self.conn_params.get_cc_algorithm(), + self.conn_params.pacing_enabled(), + now, + ); self.ensure_permanent(&path)?; qinfo!( [self], diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index 9d0db0f45d..3d8dff67a6 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -75,6 +75,7 @@ pub struct ConnectionParameters { fast_pto: u8, fuzzing: bool, grease: bool, + pacing: bool, } impl Default for ConnectionParameters { @@ -97,6 +98,7 @@ impl Default for ConnectionParameters { fast_pto: FAST_PTO_SCALE, fuzzing: false, grease: true, + pacing: true, } } } @@ -304,6 +306,15 @@ impl ConnectionParameters { self } + pub fn pacing_enabled(&self) -> bool { + 
self.pacing + } + + pub fn pacing(mut self, pacing: bool) -> Self { + self.pacing = pacing; + self + } + pub fn create_transport_parameter( &self, role: Role, diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index 84a60bcd3e..6b86575eb5 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -26,6 +26,8 @@ const PACER_SPEEDUP: usize = 2; /// A pacer that uses a leaky bucket. pub struct Pacer { + /// Whether pacing is enabled. + enabled: bool, /// The last update time. t: Instant, /// The maximum capacity, or burst size, in bytes. @@ -47,9 +49,15 @@ impl Pacer { /// The value of `p` is the packet size in bytes, which determines the minimum /// credit needed before a packet is sent. This should be a substantial /// fraction of the maximum packet size, if not the packet size. - pub fn new(now: Instant, m: usize, p: usize) -> Self { + pub fn new(enabled: bool, now: Instant, m: usize, p: usize) -> Self { assert!(m >= p, "maximum capacity has to be at least one packet"); - Self { t: now, m, c: m, p } + Self { + enabled, + t: now, + m, + c: m, + p, + } } /// Determine when the next packet will be available based on the provided RTT @@ -78,6 +86,11 @@ impl Pacer { /// an estimate of the round trip time (`rtt`), the estimated congestion /// window (`cwnd`), and the number of bytes that were sent (`count`). 
pub fn spend(&mut self, now: Instant, rtt: Duration, cwnd: usize, count: usize) { + if !self.enabled { + self.t = now; + return; + } + qtrace!([self], "spend {} over {}, {:?}", count, cwnd, rtt); // Increase the capacity by: // `(now - self.t) * PACER_SPEEDUP * cwnd / rtt` @@ -108,9 +121,10 @@ impl Debug for Pacer { } } -#[cfg(tests)] +#[cfg(test)] mod tests { use super::Pacer; + use std::time::Duration; use test_fixture::now; const RTT: Duration = Duration::from_millis(1000); @@ -119,20 +133,29 @@ mod tests { #[test] fn even() { - let mut n = now(); - let p = Pacer::new(n, PACKET, PACKET); - assert_eq!(p.next(RTT, CWND), None); + let n = now(); + let mut p = Pacer::new(true, n, PACKET, PACKET); + assert_eq!(p.next(RTT, CWND), n); p.spend(n, RTT, CWND, PACKET); - assert_eq!(p.next(RTT, CWND), Some(n + (RTT / 10))); + assert_eq!(p.next(RTT, CWND), n + (RTT / 20)); } #[test] fn backwards_in_time() { - let mut n = now(); - let p = Pacer::new(n + RTT, PACKET, PACKET); - assert_eq!(p.next(RTT, CWND), None); + let n = now(); + let mut p = Pacer::new(true, n + RTT, PACKET, PACKET); + assert_eq!(p.next(RTT, CWND), n + RTT); // Now spend some credit in the past using a time machine. 
p.spend(n, RTT, CWND, PACKET); - assert_eq!(p.next(RTT, CWND), Some(n + (RTT / 10))); + assert_eq!(p.next(RTT, CWND), n + (RTT / 20)); + } + + #[test] + fn pacing_disabled() { + let n = now(); + let mut p = Pacer::new(false, n, PACKET, PACKET); + assert_eq!(p.next(RTT, CWND), n); + p.spend(n, RTT, CWND, PACKET); + assert_eq!(p.next(RTT, CWND), n); } } diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 9be3d4c966..54849eee56 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -82,6 +82,7 @@ impl Paths { local: SocketAddr, remote: SocketAddr, cc: CongestionControlAlgorithm, + pacing: bool, now: Instant, ) -> PathRef { self.paths @@ -94,7 +95,7 @@ impl Paths { } }) .unwrap_or_else(|| { - let mut p = Path::temporary(local, remote, cc, self.qlog.clone(), now); + let mut p = Path::temporary(local, remote, cc, pacing, self.qlog.clone(), now); if let Some(primary) = self.primary.as_ref() { p.prime_rtt(primary.borrow().rtt()); } @@ -111,6 +112,7 @@ impl Paths { local: SocketAddr, remote: SocketAddr, cc: CongestionControlAlgorithm, + pacing: bool, now: Instant, ) -> PathRef { self.paths @@ -136,6 +138,7 @@ impl Paths { local, remote, cc, + pacing, self.qlog.clone(), now, ))) @@ -553,10 +556,11 @@ impl Path { local: SocketAddr, remote: SocketAddr, cc: CongestionControlAlgorithm, + pacing: bool, qlog: NeqoQlog, now: Instant, ) -> Self { - let mut sender = PacketSender::new(cc, Self::mtu_by_addr(remote.ip()), now); + let mut sender = PacketSender::new(cc, pacing, Self::mtu_by_addr(remote.ip()), now); sender.set_qlog(qlog.clone()); Self { local, diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index c625f18fdf..23c296949d 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -1083,7 +1083,7 @@ mod tests { impl Default for Fixture { fn default() -> Self { const CC: CongestionControlAlgorithm = CongestionControlAlgorithm::NewReno; - let mut path = Path::temporary(addr(), 
addr(), CC, NeqoQlog::default(), now()); + let mut path = Path::temporary(addr(), addr(), CC, true, NeqoQlog::default(), now()); path.make_permanent( None, ConnectionIdEntry::new(0, ConnectionId::from(&[1, 2, 3]), [0; 16]), diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 05cf9740bb..3d8302369c 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -35,7 +35,12 @@ impl Display for PacketSender { impl PacketSender { #[must_use] - pub fn new(alg: CongestionControlAlgorithm, mtu: usize, now: Instant) -> Self { + pub fn new( + alg: CongestionControlAlgorithm, + pacing_enabled: bool, + mtu: usize, + now: Instant, + ) -> Self { Self { cc: match alg { CongestionControlAlgorithm::NewReno => { @@ -45,7 +50,7 @@ impl PacketSender { Box::new(ClassicCongestionControl::new(Cubic::default())) } }, - pacer: Pacer::new(now, mtu * PACING_BURST_SIZE, mtu), + pacer: Pacer::new(pacing_enabled, now, mtu * PACING_BURST_SIZE, mtu), } } From eab772140f44c248701d1770083d977806ec46f3 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 16 Jan 2024 22:28:34 +0100 Subject: [PATCH 059/321] refactor: address clippy::unreadable-literal (#1559) --- neqo-transport/src/tparams.rs | 2 +- neqo-transport/src/version.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 59790c47f9..ea5f78fc36 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -440,7 +440,7 @@ impl TransportParameters { let rbuf = random(4); let mut other = Vec::with_capacity(versions.all().len() + 1); let mut dec = Decoder::new(&rbuf); - let grease = (dec.decode_uint(4).unwrap() as u32) & 0xf0f0_f0f0 | 0x0a0a0a0a; + let grease = (dec.decode_uint(4).unwrap() as u32) & 0xf0f0_f0f0 | 0x0a0a_0a0a; other.push(grease); for &v in versions.all() { if role == Role::Client && !versions.initial().is_compatible(v) { diff --git a/neqo-transport/src/version.rs 
b/neqo-transport/src/version.rs index b628ba2769..4cb9b964ce 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -23,7 +23,7 @@ pub enum Version { impl Version { pub const fn wire_version(self) -> WireVersion { match self { - Self::Version2 => 0x6b3343cf, + Self::Version2 => 0x6b33_43cf, Self::Version1 => 1, Self::Draft29 => 0xff00_0000 + 29, Self::Draft30 => 0xff00_0000 + 30, @@ -131,7 +131,7 @@ impl TryFrom for Version { fn try_from(wire: WireVersion) -> Res { if wire == 1 { Ok(Self::Version1) - } else if wire == 0x6b3343cf { + } else if wire == 0x6b33_43cf { Ok(Self::Version2) } else if wire == 0xff00_0000 + 29 { Ok(Self::Draft29) From 0d6f3eaa1194d1ed1e87f45b159a88bdf1f5e830 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 16 Jan 2024 22:55:37 +0100 Subject: [PATCH 060/321] feat(qlog): log version_information on server (#1531) * feat(qlog): log version_information on server This commit adds support for the qlog [`version_information` QUIC event](https://quicwg.org/qlog/draft-ietf-quic-qlog-quic-events.html#name-version_information) on the server. 
Depends on https://github.com/cloudflare/quiche/pull/1684 Meta issue: https://github.com/mozilla/neqo/issues/528 * Use WireVersion for client version * Have create_qlog_trace take &ConnectionIdRef --- neqo-transport/src/qlog.rs | 19 ++++++++++++++ neqo-transport/src/server.rs | 51 ++++++++++++++++++++++++------------ 2 files changed, 53 insertions(+), 17 deletions(-) diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 42a0e6d586..f5ca21ca40 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -164,6 +164,25 @@ pub fn client_version_information_negotiated( }); } +pub fn server_version_information_failed( + qlog: &mut NeqoQlog, + server: &[Version], + client: WireVersion, +) { + qlog.add_event_data(|| { + Some(EventData::VersionInformation(VersionInformation { + client_versions: Some(vec![format!("{client:02x}")]), + server_versions: Some( + server + .iter() + .map(|v| format!("{:02x}", v.wire_version())) + .collect(), + ), + chosen_version: None, + })) + }); +} + pub fn packet_sent( qlog: &mut NeqoQlog, pt: PacketType, diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 859a5e4759..277f10e876 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -383,11 +383,11 @@ impl Server { } } - fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { + fn create_qlog_trace(&self, odcid: &ConnectionIdRef<'_>) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); - qlog_path.push(format!("{}.qlog", attempt_key.odcid)); + qlog_path.push(format!("{}.qlog", odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. 
@@ -449,7 +449,7 @@ impl Server { c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); - c.set_qlog(self.create_qlog_trace(attempt_key)); + c.set_qlog(self.create_qlog_trace(&attempt_key.odcid.as_cid_ref())); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() @@ -487,20 +487,30 @@ impl Server { params, ); - if let Ok(mut c) = sconn { - self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); - let c = Rc::new(RefCell::new(ServerConnectionState { - c, - last_timer: now, - active_attempt: Some(attempt_key.clone()), - })); - cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); - let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); - debug_assert!(previous_attempt.is_none()); - self.process_connection(c, Some(dgram), now) - } else { - qwarn!([self], "Unable to create connection"); - None + match sconn { + Ok(mut c) => { + self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); + let c = Rc::new(RefCell::new(ServerConnectionState { + c, + last_timer: now, + active_attempt: Some(attempt_key.clone()), + })); + cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); + let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); + debug_assert!(previous_attempt.is_none()); + self.process_connection(c, Some(dgram), now) + } + Err(e) => { + qwarn!([self], "Unable to create connection"); + if e == crate::Error::VersionNegotiation { + crate::qlog::server_version_information_failed( + &mut self.create_qlog_trace(&attempt_key.odcid.as_cid_ref()), + self.conn_params.get_versions().all(), + initial.version.wire_version(), + ) + } + None + } } } @@ -573,6 +583,13 @@ impl Server { packet.wire_version(), self.conn_params.get_versions().all(), ); + + crate::qlog::server_version_information_failed( + &mut self.create_qlog_trace(packet.dcid()), + self.conn_params.get_versions().all(), + 
packet.wire_version(), + ); + return Some(Datagram::new(dgram.destination(), dgram.source(), vn)); } From ba59d54d2aafd3bf944ef0eb4f9abcd0812fec72 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 16 Jan 2024 23:58:35 +0100 Subject: [PATCH 061/321] ci: automate quic-network-simulator docker image build & push (#1554) * ci: automate quic-network-simulator docker image build & push * Merge workflow files and extract version from Cargo.toml * Disable needs for debugging * Fix Dockerfile syntax * Don't push on pull request * Only run after check * Move to separate file qns.yml * Run daily and on related pull requests * Always run on stable Rust * Don't ignore build errors * Depend on build-push-action git context * Update rustup-init * Typo * Reduce diff * Newline * Fix typo in path * Fix name --------- Co-authored-by: Martin Thomson --- .dockerignore | 3 +++ .github/workflows/qns.yml | 48 +++++++++++++++++++++++++++++++++++++++ qns/Dockerfile | 30 ++++++++---------------- qns/update.sh | 38 ------------------------------- 4 files changed, 60 insertions(+), 59 deletions(-) create mode 100644 .dockerignore create mode 100644 .github/workflows/qns.yml delete mode 100755 qns/update.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..8012c0d3ae --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +nss +nspr +target diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml new file mode 100644 index 0000000000..1d83d31547 --- /dev/null +++ b/.github/workflows/qns.yml @@ -0,0 +1,48 @@ +name: QUIC Network Simulator + +on: + schedule: + - cron: '42 3 * * *' # Runs at 03:42 UTC (m and h chosen arbitrarily) every day. 
+ pull_request: + branch: ["main"] + paths: + - 'qns/**' + - '.github/workflows/qns.yml' +jobs: + docker-image: + runs-on: ubuntu-latest + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository }}-qns + tags: | + # default + type=schedule + type=ref,event=branch + type=ref,event=tag + type=ref,event=pr + # set latest tag for default branch + type=raw,value=latest,enable={{is_default_branch}} + + - name: Build and push + uses: docker/build-push-action@v5 + with: + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + file: qns/Dockerfile + build-args: | + RUST_VERSION=stable + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/qns/Dockerfile b/qns/Dockerfile index dd18af0e25..d8da71b6b5 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -10,14 +10,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ && apt-get autoremove -y && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* +ARG RUST_VERSION + ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH \ - RUST_VERSION=1.45.2 + PATH=/usr/local/cargo/bin:$PATH RUN set -eux; \ - curl -sSLf "https://static.rust-lang.org/rustup/archive/1.22.1/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ - echo '49c96f3f74be82f4752b8bffcf81961dea5e6e94ce1ccba94435f12e871c3bdb *rustup-init' | sha256sum -c -; \ + curl -sSLf "https://static.rust-lang.org/rustup/archive/1.26.0/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ + echo '0b2f6c8f85a3d02fde2efc0ced4657869d73fccfce59defb4e8d29233116e6db *rustup-init' | sha256sum -c -; \ chmod +x rustup-init; \ ./rustup-init -y -q --no-modify-path --profile minimal 
--default-toolchain "$RUST_VERSION"; \ rm -f rustup-init; \ @@ -33,35 +34,22 @@ RUN set -eux; \ RUN "$NSS_DIR"/build.sh --static -Ddisable_tests=1 -o -# Copy the .git directory from the local clone so that it is possible to create -# an image that includes local updates. -RUN mkdir -p /neqo-reference -ADD . /neqo-reference -RUN if [ -d /neqo-reference/.git ]; then \ - source=/neqo-reference; \ - else \ - source=https://github.com/mozilla/neqo; \ - fi; \ - git clone --depth 1 --branch "$NEQO_BRANCH" "$source" /neqo; \ - rm -rf /neqo-reference +ADD . /neqo RUN set -eux; \ cd /neqo; \ RUSTFLAGS="-g -C link-arg=-fuse-ld=lld" cargo build --release \ - --bin neqo-client --bin neqo-server; \ - cp target/release/neqo-client target; \ - cp target/release/neqo-server target; \ - rm -rf target/release + --bin neqo-client --bin neqo-server # Copy only binaries to the final image to keep it small. FROM martenseemann/quic-network-simulator-endpoint:latest ENV LD_LIBRARY_PATH=/neqo/lib -COPY --from=buildimage /neqo/target/neqo-client /neqo/target/neqo-server /neqo/bin/ +COPY --from=buildimage /neqo/target/release/neqo-client /neqo/target/release/neqo-server /neqo/bin/ COPY --from=buildimage /dist/Release/lib/*.so /neqo/lib/ COPY --from=buildimage /dist/Release/bin/certutil /dist/Release/bin/pk12util /neqo/bin/ -COPY interop.sh /neqo/ +COPY qns/interop.sh /neqo/ RUN chmod +x /neqo/interop.sh ENTRYPOINT [ "/neqo/interop.sh" ] diff --git a/qns/update.sh b/qns/update.sh deleted file mode 100755 index 2243ee23a0..0000000000 --- a/qns/update.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -set -e - -if [[ "$1" == "-p" ]]; then - shift - push=1 -else - push=0 -fi - -branch="${1:-$(git rev-parse --abbrev-ref HEAD)}" -if [[ "$branch" == "main" ]]; then - tag="neqoquic/neqo-qns:latest" -else - tag="neqoquic/neqo-qns:${branch}" -fi - -cd "$(dirname "$0")" - -rev=$(git log -n 1 --format='format:%H') -if [[ "$rev" == "$(cat ".last-update-$branch")" ]]; then - echo "No change since $rev." 
- exit 0 -fi - -# This makes the local .git directory the source, allowing for the current -# build to be build and pushed. -[[ ! -e .git ]] || ! echo "Found .git directory. Script still active. Exiting." -trap 'rm -rf .git' EXIT -cp -R ../.git .git - -docker build -t "$tag" --build-arg NEQO_BRANCH="$branch" . -if [[ "$push" == "1" ]]; then - docker login - docker push "$tag" -fi - -echo "$rev" > ".last-update-$branch" From 2fcf7a77719e5510052f0d6ef13052c1e7a35091 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 17 Jan 2024 21:15:44 +1100 Subject: [PATCH 062/321] Add workflow_dispatch for QNS, reduce rate (#1562) * Add workflow_dispatch for QNS, reduce rate We don't need this to run so often. * Remove NEQO_BRANCH arg and add a default for RUST_VERSION --- .github/workflows/qns.yml | 3 ++- qns/.dockerignore | 1 + qns/Dockerfile | 5 +---- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 1d83d31547..353d0ae696 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -2,7 +2,8 @@ name: QUIC Network Simulator on: schedule: - - cron: '42 3 * * *' # Runs at 03:42 UTC (m and h chosen arbitrarily) every day. + - cron: '42 3 * * 2,5' # Runs at 03:42 UTC (m and h chosen arbitrarily) twice a week. + workflow_dispatch: pull_request: branch: ["main"] paths: diff --git a/qns/.dockerignore b/qns/.dockerignore index acdb180198..2f10ed7b44 100644 --- a/qns/.dockerignore +++ b/qns/.dockerignore @@ -1 +1,2 @@ .last-update-* +/target/ diff --git a/qns/Dockerfile b/qns/Dockerfile index d8da71b6b5..051cf5b8a5 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -1,8 +1,5 @@ FROM martenseemann/quic-network-simulator-endpoint:latest AS buildimage -# Which branch to build from. 
-ARG NEQO_BRANCH=main - RUN apt-get update && apt-get install -y --no-install-recommends \ ca-certificates coreutils curl git make mercurial ssh \ build-essential clang llvm libclang-dev lld \ @@ -10,7 +7,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ && apt-get autoremove -y && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* -ARG RUST_VERSION +ARG RUST_VERSION=stable ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ From 9d58e647b82b7ac938d14815c2b4ddd654183dac Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 17 Jan 2024 21:35:15 +1100 Subject: [PATCH 063/321] Make ConnectionIdRef Copy (#1561) This is just a reference, so Copy is cheap. This reduces the number of unnecessary references that are taken. --- neqo-transport/src/cid.rs | 37 ++++++++------- neqo-transport/src/connection/mod.rs | 6 +-- .../src/connection/tests/migration.rs | 12 ++--- neqo-transport/src/packet/mod.rs | 7 ++- neqo-transport/src/path.rs | 44 +++++++++--------- neqo-transport/src/server.rs | 46 ++++++++++--------- 6 files changed, 78 insertions(+), 74 deletions(-) diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 38157419de..70e327a296 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -6,24 +6,23 @@ // Representation and management of connection IDs. 
-use crate::frame::FRAME_TYPE_NEW_CONNECTION_ID; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::{Error, Res}; +use crate::{ + frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken, + stats::FrameStats, Error, Res, +}; use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder}; use neqo_crypto::random; use smallvec::SmallVec; -use std::borrow::Borrow; -use std::cell::{Ref, RefCell}; -use std::cmp::max; -use std::cmp::min; -use std::convert::AsRef; -use std::convert::TryFrom; -use std::ops::Deref; -use std::rc::Rc; +use std::{ + borrow::Borrow, + cell::{Ref, RefCell}, + cmp::{max, min}, + convert::{AsRef, TryFrom}, + ops::Deref, + rc::Rc, +}; pub const MAX_CONNECTION_ID_LEN: usize = 20; pub const LOCAL_ACTIVE_CID_LIMIT: usize = 8; @@ -88,8 +87,8 @@ impl + ?Sized> From<&T> for ConnectionId { } } -impl<'a> From<&ConnectionIdRef<'a>> for ConnectionId { - fn from(cidref: &ConnectionIdRef<'a>) -> Self { +impl<'a> From> for ConnectionId { + fn from(cidref: ConnectionIdRef<'a>) -> Self { Self::from(SmallVec::from(cidref.cid)) } } @@ -120,7 +119,7 @@ impl<'a> PartialEq> for ConnectionId { } } -#[derive(Hash, Eq, PartialEq)] +#[derive(Hash, Eq, PartialEq, Clone, Copy)] pub struct ConnectionIdRef<'a> { cid: &'a [u8], } @@ -340,8 +339,8 @@ impl ConnectionIdStore { self.cids.retain(|c| c.seqno != seqno); } - pub fn contains(&self, cid: &ConnectionIdRef) -> bool { - self.cids.iter().any(|c| &c.cid == cid) + pub fn contains(&self, cid: ConnectionIdRef) -> bool { + self.cids.iter().any(|c| c.cid == cid) } pub fn next(&mut self) -> Option> { @@ -479,7 +478,7 @@ impl ConnectionIdManager { } } - pub fn is_valid(&self, cid: &ConnectionIdRef) -> bool { + pub fn is_valid(&self, cid: ConnectionIdRef) -> bool { self.connection_ids.contains(cid) } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 3d7bc0a88c..270b96f08f 100644 --- 
a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1210,7 +1210,7 @@ impl Connection { dcid: Option<&ConnectionId>, now: Instant, ) -> Res { - if dcid.map_or(false, |d| d != packet.dcid()) { + if dcid.map_or(false, |d| d != &packet.dcid()) { self.stats .borrow_mut() .pkt_dropped("Coalesced packet has different DCID"); @@ -1266,7 +1266,7 @@ impl Connection { if versions.is_empty() || versions.contains(&self.version().wire_version()) || versions.contains(&0) - || packet.scid() != self.odcid().unwrap() + || &packet.scid() != self.odcid().unwrap() || matches!( self.address_validation, AddressValidationInfo::Retry { .. } @@ -1373,7 +1373,7 @@ impl Connection { self.handle_migration(path, d, migrate, now); } else if self.role != Role::Client && (packet.packet_type() == PacketType::Handshake - || (packet.dcid().len() >= 8 && packet.dcid() == &self.local_initial_source_cid)) + || (packet.dcid().len() >= 8 && packet.dcid() == self.local_initial_source_cid)) { // We only allow one path during setup, so apply handshake // path validation to this path. diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 7dd5e50d13..056aa0247f 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -374,7 +374,7 @@ fn migration(mut client: Connection) { let probe = client.process_output(now).dgram().unwrap(); assert_v4_path(&probe, true); // Contains PATH_CHALLENGE. 
assert_eq!(client.stats().frame_tx.path_challenge, 1); - let probe_cid = ConnectionId::from(&get_cid(&probe)); + let probe_cid = ConnectionId::from(get_cid(&probe)); let resp = server.process(Some(&probe), now).dgram().unwrap(); assert_v4_path(&resp, true); @@ -814,7 +814,7 @@ fn retire_all() { .unwrap(); connect_force_idle(&mut client, &mut server); - let original_cid = ConnectionId::from(&get_cid(&send_something(&mut client, now()))); + let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); server.test_frame_writer = Some(Box::new(RetireAll { cid_gen })); let ncid = send_something(&mut server, now()); @@ -852,7 +852,7 @@ fn retire_prior_to_migration_failure() { .unwrap(); connect_force_idle(&mut client, &mut server); - let original_cid = ConnectionId::from(&get_cid(&send_something(&mut client, now()))); + let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); client .migrate(Some(addr_v4()), Some(addr_v4()), false, now()) @@ -862,7 +862,7 @@ fn retire_prior_to_migration_failure() { let probe = client.process_output(now()).dgram().unwrap(); assert_v4_path(&probe, true); assert_eq!(client.stats().frame_tx.path_challenge, 1); - let probe_cid = ConnectionId::from(&get_cid(&probe)); + let probe_cid = ConnectionId::from(get_cid(&probe)); assert_ne!(original_cid, probe_cid); // Have the server receive the probe, but separately have it decide to @@ -907,7 +907,7 @@ fn retire_prior_to_migration_success() { .unwrap(); connect_force_idle(&mut client, &mut server); - let original_cid = ConnectionId::from(&get_cid(&send_something(&mut client, now()))); + let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); client .migrate(Some(addr_v4()), Some(addr_v4()), false, now()) @@ -917,7 +917,7 @@ fn retire_prior_to_migration_success() { let probe = client.process_output(now()).dgram().unwrap(); assert_v4_path(&probe, true); assert_eq!(client.stats().frame_tx.path_challenge, 1); - let probe_cid 
= ConnectionId::from(&get_cid(&probe)); + let probe_cid = ConnectionId::from(get_cid(&probe)); assert_ne!(original_cid, probe_cid); // Have the server receive the probe, but separately have it decide to diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index b8a2d96790..acd3b5b2be 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -673,13 +673,12 @@ impl<'a> PublicPacket<'a> { self.packet_type } - pub fn dcid(&self) -> &ConnectionIdRef<'a> { - &self.dcid + pub fn dcid(&self) -> ConnectionIdRef<'a> { + self.dcid } - pub fn scid(&self) -> &ConnectionIdRef<'a> { + pub fn scid(&self) -> ConnectionIdRef<'a> { self.scid - .as_ref() .expect("should only be called for long header packets") } diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 54849eee56..40014c73a1 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -7,27 +7,29 @@ #![deny(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] -use std::cell::RefCell; -use std::convert::TryFrom; -use std::fmt::{self, Display}; -use std::mem; -use std::net::{IpAddr, SocketAddr}; -use std::rc::Rc; -use std::time::{Duration, Instant}; - -use crate::ackrate::{AckRate, PeerAckDelay}; -use crate::cc::CongestionControlAlgorithm; -use crate::cid::{ConnectionId, ConnectionIdRef, ConnectionIdStore, RemoteConnectionIdEntry}; -use crate::frame::{ - FRAME_TYPE_PATH_CHALLENGE, FRAME_TYPE_PATH_RESPONSE, FRAME_TYPE_RETIRE_CONNECTION_ID, +use std::{ + cell::RefCell, + convert::TryFrom, + fmt::{self, Display}, + mem, + net::{IpAddr, SocketAddr}, + rc::Rc, + time::{Duration, Instant}, +}; + +use crate::{ + ackrate::{AckRate, PeerAckDelay}, + cc::CongestionControlAlgorithm, + cid::{ConnectionId, ConnectionIdRef, ConnectionIdStore, RemoteConnectionIdEntry}, + frame::{FRAME_TYPE_PATH_CHALLENGE, FRAME_TYPE_PATH_RESPONSE, FRAME_TYPE_RETIRE_CONNECTION_ID}, + packet::PacketBuilder, + recovery::RecoveryToken, + rtt::RttEstimate, + 
sender::PacketSender, + stats::FrameStats, + tracking::{PacketNumberSpace, SentPacket}, + Error, Res, }; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::rtt::RttEstimate; -use crate::sender::PacketSender; -use crate::stats::FrameStats; -use crate::tracking::{PacketNumberSpace, SentPacket}; -use crate::{Error, Res}; use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder}; use neqo_crypto::random; @@ -664,7 +666,7 @@ impl Path { /// Set the remote connection ID based on the peer's choice. /// This is only valid during the handshake. - pub fn set_remote_cid(&mut self, cid: &ConnectionIdRef) { + pub fn set_remote_cid(&mut self, cid: ConnectionIdRef) { self.remote_cid .as_mut() .unwrap() diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 277f10e876..a15e1fc1f8 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -17,21 +17,25 @@ use neqo_crypto::{ use qlog::streamer::QlogStreamer; pub use crate::addr_valid::ValidateAddress; -use crate::addr_valid::{AddressValidation, AddressValidationResult}; -use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}; -use crate::connection::{Connection, Output, State}; -use crate::packet::{PacketBuilder, PacketType, PublicPacket}; -use crate::{ConnectionParameters, Res, Version}; - -use std::cell::RefCell; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::fs::OpenOptions; -use std::mem; -use std::net::SocketAddr; -use std::ops::{Deref, DerefMut}; -use std::path::PathBuf; -use std::rc::{Rc, Weak}; -use std::time::{Duration, Instant}; +use crate::{ + addr_valid::{AddressValidation, AddressValidationResult}, + cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}, + connection::{Connection, Output, State}, + packet::{PacketBuilder, PacketType, PublicPacket}, + ConnectionParameters, Res, Version, +}; + +use std::{ + cell::RefCell, + 
collections::{HashMap, HashSet, VecDeque}, + fs::OpenOptions, + mem, + net::SocketAddr, + ops::{Deref, DerefMut}, + path::PathBuf, + rc::{Rc, Weak}, + time::{Duration, Instant}, +}; pub enum InitialResult { Accept, @@ -303,7 +307,7 @@ impl Server { out.dgram() } - fn connection(&self, cid: &ConnectionIdRef) -> Option { + fn connection(&self, cid: ConnectionIdRef) -> Option { self.connections.borrow().get(&cid[..]).map(Rc::clone) } @@ -383,7 +387,7 @@ impl Server { } } - fn create_qlog_trace(&self, odcid: &ConnectionIdRef<'_>) -> NeqoQlog { + fn create_qlog_trace(&self, odcid: ConnectionIdRef<'_>) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); @@ -449,7 +453,7 @@ impl Server { c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); - c.set_qlog(self.create_qlog_trace(&attempt_key.odcid.as_cid_ref())); + c.set_qlog(self.create_qlog_trace(attempt_key.odcid.as_cid_ref())); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() @@ -504,7 +508,7 @@ impl Server { qwarn!([self], "Unable to create connection"); if e == crate::Error::VersionNegotiation { crate::qlog::server_version_information_failed( - &mut self.create_qlog_trace(&attempt_key.odcid.as_cid_ref()), + &mut self.create_qlog_trace(attempt_key.odcid.as_cid_ref()), self.conn_params.get_versions().all(), initial.version.wire_version(), ) @@ -578,8 +582,8 @@ impl Server { qdebug!([self], "Unsupported version: {:x}", packet.wire_version()); let vn = PacketBuilder::version_negotiation( - packet.scid(), - packet.dcid(), + &packet.scid()[..], + &packet.dcid()[..], packet.wire_version(), self.conn_params.get_versions().all(), ); From 70e3ac44014af65eb7792013ceb5829693b50992 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 17 Jan 2024 12:40:39 +0200 Subject: [PATCH 064/321] Fix qlog issues to enable correct qvis rendering (#1544) * Fix qlog 
issues that affect correct qvis rendering * Add tests * Log tparams with valid timestamp * Log ACTIVE_CONNECTION_ID_LIMIT and PREFERRED_ADDRESS to qlog. * Fix imports * More tests * Add clippy ignores * Restore fix to logging tparams with valid timestamp No idea how that got reverted * Interim commit * Progress * Enable qlog generation for some more tests * Update test-fixture/src/lib.rs Co-authored-by: Max Inden * Update test-fixture/src/lib.rs Co-authored-by: Max Inden * Update neqo-transport/src/qlog.rs Co-authored-by: Max Inden * Update neqo-common/src/qlog.rs Co-authored-by: Max Inden * Incorporate suggestions from @mxinden - thanks! * More readable JSON literals * Undo one suggestion per @martinthomson * Update neqo-common/src/qlog.rs Co-authored-by: Martin Thomson * Update neqo-common/src/qlog.rs Co-authored-by: Martin Thomson * Update neqo-common/src/qlog.rs Co-authored-by: Martin Thomson * Suggestions from @martinthomson * Fix clippy. --------- Co-authored-by: Max Inden Co-authored-by: Martin Thomson --- neqo-common/Cargo.toml | 9 ++- neqo-common/src/qlog.rs | 41 ++++++++++ neqo-transport/src/cid.rs | 4 + .../src/connection/tests/migration.rs | 4 +- neqo-transport/src/connection/tests/mod.rs | 13 +++- neqo-transport/src/qlog.rs | 38 ++++++---- test-fixture/Cargo.toml | 5 +- test-fixture/src/lib.rs | 74 ++++++++++++++++++- 8 files changed, 158 insertions(+), 30 deletions(-) diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 25d72980ca..ca209328e5 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -8,11 +8,14 @@ license = "MIT OR Apache-2.0" build = "build.rs" [dependencies] -log = {version = "0.4.0", default-features = false} -env_logger = {version = "0.10", default-features = false} +log = { version = "0.4.0", default-features = false } +env_logger = { version = "0.10", default-features = false } lazy_static = "1.3.0" qlog = "0.11.0" -time = {version = "0.3", features = ["formatting"]} +time = { version = "0.3", features = 
["formatting"] } + +[dev-dependencies] +test-fixture = { path = "../test-fixture" } [features] deny-warnings = [] diff --git a/neqo-common/src/qlog.rs b/neqo-common/src/qlog.rs index ac03ecfcb0..5ff74750b0 100644 --- a/neqo-common/src/qlog.rs +++ b/neqo-common/src/qlog.rs @@ -48,6 +48,11 @@ impl NeqoQlog { }) } + #[must_use] + pub fn inner(&self) -> Rc>> { + Rc::clone(&self.inner) + } + /// Create a disabled `NeqoQlog` configuration. #[must_use] pub fn disabled() -> Self { @@ -144,3 +149,39 @@ pub fn new_trace(role: Role) -> qlog::TraceSeq { }), } } + +#[cfg(test)] +mod test { + use qlog::events::Event; + use test_fixture::EXPECTED_LOG_HEADER; + + const EV_DATA: qlog::events::EventData = + qlog::events::EventData::SpinBitUpdated(qlog::events::connectivity::SpinBitUpdated { + state: true, + }); + + const EXPECTED_LOG_EVENT: &str = concat!( + "\u{1e}", + r#"{"time":0.0,"name":"connectivity:spin_bit_updated","data":{"state":true}}"#, + "\n" + ); + + #[test] + fn new_neqo_qlog() { + let (_log, contents) = test_fixture::new_neqo_qlog(); + assert_eq!(contents.to_string(), EXPECTED_LOG_HEADER); + } + + #[test] + fn add_event() { + let (mut log, contents) = test_fixture::new_neqo_qlog(); + log.add_event(|| Some(Event::with_time(1.1, EV_DATA))); + assert_eq!( + contents.to_string(), + format!( + "{EXPECTED_LOG_HEADER}{e}", + e = EXPECTED_LOG_EVENT.replace("\"time\":0.0,", "\"time\":1.1,") + ) + ); + } +} diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 70e327a296..eefc3104a9 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -323,6 +323,10 @@ impl ConnectionIdEntry { pub fn connection_id(&self) -> &ConnectionId { &self.cid } + + pub fn reset_token(&self) -> &SRT { + &self.srt + } } pub type RemoteConnectionIdEntry = ConnectionIdEntry<[u8; 16]>; diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 056aa0247f..b7e5392903 100644 --- 
a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -30,7 +30,7 @@ use std::{ use test_fixture::{ self, addr, addr_v4, assertions::{assert_v4_path, assert_v6_path}, - fixture_init, now, + fixture_init, new_neqo_qlog, now, }; /// This should be a valid-seeming transport parameter. @@ -498,6 +498,7 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So }; fixture_init(); + let (log, _contents) = new_neqo_qlog(); let mut client = Connection::new_client( test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, @@ -508,6 +509,7 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So now(), ) .unwrap(); + client.set_qlog(log); let spa = match preferred { SocketAddr::V6(v6) => PreferredAddress::new(None, Some(v6)), SocketAddr::V4(v4) => PreferredAddress::new(Some(v4), None), diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index a244efca53..b722feff78 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -30,7 +30,7 @@ use std::{ use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; -use test_fixture::{self, addr, fixture_init, now}; +use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; // All the tests. mod ackrate; @@ -99,7 +99,8 @@ impl ConnectionIdGenerator for CountingConnectionIdGenerator { // These are a direct copy of those functions. 
pub fn new_client(params: ConnectionParameters) -> Connection { fixture_init(); - Connection::new_client( + let (log, _contents) = new_neqo_qlog(); + let mut client = Connection::new_client( test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), @@ -108,15 +109,18 @@ pub fn new_client(params: ConnectionParameters) -> Connection { params, now(), ) - .expect("create a default client") + .expect("create a default client"); + client.set_qlog(log); + client } + pub fn default_client() -> Connection { new_client(ConnectionParameters::default()) } pub fn new_server(params: ConnectionParameters) -> Connection { fixture_init(); - + let (log, _contents) = new_neqo_qlog(); let mut c = Connection::new_server( test_fixture::DEFAULT_KEYS, test_fixture::DEFAULT_ALPN, @@ -124,6 +128,7 @@ pub fn new_server(params: ConnectionParameters) -> Connection { params, ) .expect("create a default server"); + c.set_qlog(log); c.server_enable_0rtt(&test_fixture::anti_replay(), AllowZeroRtt {}) .expect("enable 0-RTT"); c diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index f5ca21ca40..021367d8da 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -19,7 +19,7 @@ use qlog::events::{ AckedRanges, ErrorSpace, MetricsUpdated, PacketDropped, PacketHeader, PacketLost, PacketReceived, PacketSent, QuicFrame, StreamType, VersionInformation, }, - Event, EventData, RawInfo, + EventData, RawInfo, }; use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder}; @@ -37,7 +37,7 @@ use crate::{ }; pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHandler) { - qlog.add_event(|| { + qlog.add_event_data(|| { let remote = tph.remote(); let ev_data = EventData::TransportParametersSet( qlog::events::quic::TransportParametersSet { @@ -61,20 +61,26 @@ pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHand max_udp_payload_size: 
Some(remote.get_integer(tparams::MAX_UDP_PAYLOAD_SIZE) as u32), ack_delay_exponent: Some(remote.get_integer(tparams::ACK_DELAY_EXPONENT) as u16), max_ack_delay: Some(remote.get_integer(tparams::MAX_ACK_DELAY) as u16), - // TODO(hawkinsw@obs.cr): We do not yet handle ACTIVE_CONNECTION_ID_LIMIT in tparams yet. - active_connection_id_limit: None, + active_connection_id_limit: Some(remote.get_integer(tparams::ACTIVE_CONNECTION_ID_LIMIT) as u32), initial_max_data: Some(remote.get_integer(tparams::INITIAL_MAX_DATA)), initial_max_stream_data_bidi_local: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_LOCAL)), initial_max_stream_data_bidi_remote: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE)), initial_max_stream_data_uni: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_UNI)), initial_max_streams_bidi: Some(remote.get_integer(tparams::INITIAL_MAX_STREAMS_BIDI)), initial_max_streams_uni: Some(remote.get_integer(tparams::INITIAL_MAX_STREAMS_UNI)), - // TODO(hawkinsw@obs.cr): We do not yet handle PREFERRED_ADDRESS in tparams yet. - preferred_address: None, + preferred_address: remote.get_preferred_address().and_then(|(paddr, cid)| { + Some(qlog::events::quic::PreferredAddress { + ip_v4: paddr.ipv4()?.ip().to_string(), + ip_v6: paddr.ipv6()?.ip().to_string(), + port_v4: paddr.ipv4()?.port(), + port_v6: paddr.ipv6()?.port(), + connection_id: cid.connection_id().to_string(), + stateless_reset_token: hex(cid.reset_token()), + }) + }), }); - // This event occurs very early, so just mark the time as 0.0. 
- Some(Event::with_time(0.0, ev_data)) + Some(ev_data) }); } @@ -194,8 +200,8 @@ pub fn packet_sent( let mut d = Decoder::from(body); let header = PacketHeader::with_type(to_qlog_pkt_type(pt), Some(pn), None, None, None); let raw = RawInfo { - length: None, - payload_length: Some(plen as u64), + length: Some(plen as u64), + payload_length: None, data: None, }; @@ -229,18 +235,18 @@ pub fn packet_sent( }); } -pub fn packet_dropped(qlog: &mut NeqoQlog, payload: &PublicPacket) { +pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { qlog.add_event_data(|| { let header = PacketHeader::with_type( - to_qlog_pkt_type(payload.packet_type()), + to_qlog_pkt_type(public_packet.packet_type()), None, None, None, None, ); let raw = RawInfo { - length: None, - payload_length: Some(payload.len() as u64), + length: Some(public_packet.len() as u64), + payload_length: None, data: None, }; @@ -290,8 +296,8 @@ pub fn packet_received( None, ); let raw = RawInfo { - length: None, - payload_length: Some(public_packet.len() as u64), + length: Some(public_packet.len() as u64), + payload_length: None, data: None, }; diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 99bdd41cb5..dddabfbc2d 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -7,13 +7,14 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] +lazy_static = "1.3.0" +log = {version = "0.4.0", default-features = false} neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -log = {version = "0.4.0", default-features = false} -lazy_static = "1.3.0" +qlog = "0.11.0" [features] deny-warnings = [] diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 5ddba24814..fa9276bf5c 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -7,7 +7,12 @@ #![cfg_attr(feature = "deny-warnings", 
deny(warnings))] #![warn(clippy::pedantic)] -use neqo_common::{event::Provider, hex, qtrace, Datagram, Decoder}; +use neqo_common::{ + event::Provider, + hex, + qlog::{new_trace, NeqoQlog}, + qtrace, Datagram, Decoder, Role, +}; use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; use neqo_http3::{Http3Client, Http3Parameters, Http3Server}; @@ -16,13 +21,17 @@ use neqo_transport::{ ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, State, Version, }; +use qlog::{events::EventImportance, streamer::QlogStreamer}; + use std::{ cell::RefCell, cmp::max, convert::TryFrom, + io::{Cursor, Result, Write}, mem, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, rc::Rc, + sync::{Arc, Mutex}, time::{Duration, Instant}, }; @@ -130,7 +139,8 @@ impl ConnectionIdGenerator for CountingConnectionIdGenerator { #[must_use] pub fn new_client(params: ConnectionParameters) -> Connection { fixture_init(); - Connection::new_client( + let (log, _contents) = new_neqo_qlog(); + let mut client = Connection::new_client( DEFAULT_SERVER_NAME, DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), @@ -139,7 +149,9 @@ pub fn new_client(params: ConnectionParameters) -> Connection { params.ack_ratio(255), // Tests work better with this set this way. now(), ) - .expect("create a client") + .expect("create a client"); + client.set_qlog(log); + client } /// Create a transport client with default configuration. 
@@ -166,7 +178,7 @@ pub fn default_server_h3() -> Connection { #[must_use] pub fn new_server(alpn: &[impl AsRef], params: ConnectionParameters) -> Connection { fixture_init(); - + let (log, _contents) = new_neqo_qlog(); let mut c = Connection::new_server( DEFAULT_KEYS, alpn, @@ -174,6 +186,7 @@ pub fn new_server(alpn: &[impl AsRef], params: ConnectionParameters) -> Con params.ack_ratio(255), ) .expect("create a server"); + c.set_qlog(log); c.server_enable_0rtt(&anti_replay(), AllowZeroRtt {}) .expect("enable 0-RTT"); c @@ -323,3 +336,56 @@ pub fn split_datagram(d: &Datagram) -> (Datagram, Option) { b.map(|b| Datagram::new(d.source(), d.destination(), b)), ) } + +#[derive(Clone)] +pub struct SharedVec { + buf: Arc>>>, +} + +impl Write for SharedVec { + fn write(&mut self, buf: &[u8]) -> Result { + self.buf.lock().unwrap().write(buf) + } + fn flush(&mut self) -> Result<()> { + self.buf.lock().unwrap().flush() + } +} + +impl ToString for SharedVec { + fn to_string(&self) -> String { + String::from_utf8(self.buf.lock().unwrap().clone().into_inner()).unwrap() + } +} + +/// Returns a pair of new enabled `NeqoQlog` that is backed by a Vec together with a +/// `Cursor>` that can be used to read the contents of the log. +/// # Panics +/// Panics if the log cannot be created. +#[must_use] +pub fn new_neqo_qlog() -> (NeqoQlog, SharedVec) { + let mut trace = new_trace(Role::Client); + // Set reference time to 0.0 for testing. 
+ trace.common_fields.as_mut().unwrap().reference_time = Some(0.0); + let buf = SharedVec { + buf: Arc::default(), + }; + let contents = buf.clone(); + let streamer = QlogStreamer::new( + qlog::QLOG_VERSION.to_string(), + None, + None, + None, + std::time::Instant::now(), + trace, + EventImportance::Base, + Box::new(buf), + ); + let log = NeqoQlog::enabled(streamer, ""); + (log.expect("to be able to write to new log"), contents) +} + +pub const EXPECTED_LOG_HEADER: &str = concat!( + "\u{1e}", + r#"{"qlog_version":"0.3","qlog_format":"JSON-SEQ","trace":{"vantage_point":{"name":"neqo-Client","type":"client"},"title":"neqo-Client trace","description":"Example qlog trace description","configuration":{"time_offset":0.0},"common_fields":{"reference_time":0.0,"time_format":"relative"}}}"#, + "\n" +); From 895f15d9559021eabe713590b5ab79cdbe1acc0c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 17 Jan 2024 14:54:11 +0100 Subject: [PATCH 065/321] refactor: clippy `match-same-arms` & `unnested-or-patterns` (#1558) * refactor: address clippy::match-same-arms Addresses clippy lint match-same-arms. https://rust-lang.github.io/rust-clippy/master/index.html#match_same_arms * refactor: address clippy::unnested-or-patterns * Use v1 instead. 
--------- Co-authored-by: Martin Thomson --- neqo-client/src/main.rs | 2 +- neqo-interop/src/main.rs | 6 ++---- neqo-transport/src/connection/mod.rs | 9 +++------ neqo-transport/src/qlog.rs | 3 +-- 4 files changed, 7 insertions(+), 13 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index de7da48a27..e204b179cc 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -333,7 +333,7 @@ impl QuicParameters { params.versions(first.0, all.iter().map(|&x| x.0).collect()) } else { let version = match alpn { - "h3" | "hq-interop" => Version::default(), + "h3" | "hq-interop" => Version::Version1, "h3-29" | "hq-29" => Version::Draft29, "h3-30" | "hq-30" => Version::Draft30, "h3-31" | "hq-31" => Version::Draft31, diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index ef5646ea73..c274c57d0f 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -736,11 +736,9 @@ fn run_test<'t>(peer: &Peer, test: &'t Test) -> (&'t Test, String) { return (test, String::from("OK")); } Test::H9 => test_h9(&nctx, &mut client), - Test::H3 => test_h3(&nctx, peer, client, test), + Test::H3 | Test::D => test_h3(&nctx, peer, client, test), Test::VN => unimplemented!(), - Test::R => test_h3_rz(&nctx, peer, client, test), - Test::Z => test_h3_rz(&nctx, peer, client, test), - Test::D => test_h3(&nctx, peer, client, test), + Test::R | Test::Z => test_h3_rz(&nctx, peer, client, test), }; if let Err(e) = res { diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 270b96f08f..e8d10387e3 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1019,7 +1019,7 @@ impl Connection { let res = self.client_start(now); self.absorb_error(now, res); } - (State::Init, Role::Server) | (State::WaitInitial, Role::Server) => { + (State::Init | State::WaitInitial, Role::Server) => { return Output::None; } _ => { @@ -1292,8 +1292,7 @@ impl Connection { 
self.handle_retry(packet, now); return Ok(PreprocessResult::Next); } - (PacketType::Handshake, State::WaitInitial, Role::Client) - | (PacketType::Short, State::WaitInitial, Role::Client) => { + (PacketType::Handshake | PacketType::Short, State::WaitInitial, Role::Client) => { // This packet can't be processed now, but it could be a sign // that Initial packets were lost. // Resend Initial CRYPTO frames immediately a few times just @@ -1306,9 +1305,7 @@ impl Connection { self.crypto.resend_unacked(PacketNumberSpace::Initial); } } - (PacketType::VersionNegotiation, ..) - | (PacketType::Retry, ..) - | (PacketType::OtherVersion, ..) => { + (PacketType::VersionNegotiation | PacketType::Retry | PacketType::OtherVersion, ..) => { self.stats .borrow_mut() .pkt_dropped(format!("{:?}", packet.packet_type())); diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 021367d8da..59168147d2 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -119,8 +119,7 @@ pub fn connection_state_updated(qlog: &mut NeqoQlog, new: &State) { let ev_data = EventData::ConnectionStateUpdated(ConnectionStateUpdated { old: None, new: match new { - State::Init => ConnectionState::Attempted, - State::WaitInitial => ConnectionState::Attempted, + State::Init | State::WaitInitial => ConnectionState::Attempted, State::WaitVersion | State::Handshaking => ConnectionState::HandshakeStarted, State::Connected => ConnectionState::HandshakeCompleted, State::Confirmed => ConnectionState::HandshakeConfirmed, From abe45dfb7edeb74506132373b3c73676ee161133 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Wed, 17 Jan 2024 14:55:26 +0100 Subject: [PATCH 066/321] Add upload_test.sh script (#1529) --- test/README.md | 33 ++++++++++++ test/upload_test.sh | 124 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 157 insertions(+) create mode 100644 test/README.md create mode 100755 test/upload_test.sh diff --git a/test/README.md b/test/README.md new file mode 100644 index 
0000000000..ca1bc0684f --- /dev/null +++ b/test/README.md @@ -0,0 +1,33 @@ +## Steps to run an upload test with neqo-client and neqo-server: + +1. Build the release version of neqo-client and neqo-server by running + `cargo build --release` +1. Start neqo-server. `./target/release/neqo-server --db ./test-fixture/db` +1. Start neqo-client and specify parameters to start the upload test. + ` ./target/release/neqo-client http://127.0.0.1:4433/ --test upload --upload-size ${size_in_bytes}` + +## To enable log messages for analyzing upload performance + +This can be done by setting the `RUST_LOG` environment variable to `neqo_transport=info`. +For example, the command below starts neqo-client and uploads 8MB of content to the server. +``` +RUST_LOG=neqo_transport=info ./target/release/neqo-client http://127.0.0.1:4433/ --test upload --upload-size 8388608 &>upload.log +``` + +## To run the upload test with `upload_test.sh` script + +### Overview +The `upload_test.sh` script automates testing network conditions for `neqo-client` and `neqo-server`. It runs the upload test under various network parameters like bandwidth, RTT (Round-Trip Time), and PLR (Packet Loss Rate). + +### Configuration +- **Server Address and Port**: Defaults to `127.0.0.1` and `4433`. +- **Upload Size**: Set to 8MB by default. +- **Network Conditions**: Modify `network_conditions`, `network_bandwidths`, `network_rtts`, and `plrs` arrays for different conditions. +- **Runs**: Number of test iterations, default is `1`. + +### Usage +1. **Start the Script**: Execute with `./upload_test.sh`. +2. **Root Password Prompt**: Enter the root password when prompted for executing network configuration commands. +3. **Automated Test Execution**: The script sets up network conditions and runs `neqo-client` and `neqo-server` tests. +4. **Cleanup**: At the end, it resets network conditions and stops the server. 
+ diff --git a/test/upload_test.sh b/test/upload_test.sh new file mode 100755 index 0000000000..d75bcda1bd --- /dev/null +++ b/test/upload_test.sh @@ -0,0 +1,124 @@ +#!/bin/bash + +set -e + +server_address=127.0.0.1 +server_port=4433 +upload_size=8388608 +client="cargo run --release --bin neqo-client -- http://$server_address:$server_port/ --test upload --upload-size $upload_size" +server="cargo run --release --bin neqo-server -- --db ../test-fixture/db $server_address:$server_port" +server_pid=0 + +# Define two indexed arrays to store network conditions +network_conditions=("cable" "3g_slow" "DSL" "LTE" "fast wifi") +network_bandwidths=("5Mbit/s" "400Kbit/s" "2Mbit/s" "12Mbit/s" "100Mbit/s") +network_rtts=("14" "200" "25" "35" "10") +plrs=("0.0001" "0.0005" "0.001" "0.002" "0.005") + +runs=1 + +echo -n "Enter root password: " +read -s root_password +echo + +setup_network_conditions() { + bw="$1" + delay_ms="$2" + plr="$3" + delay_s=$(echo "scale=5; $delay_ms / 1000" | bc -l) + if [[ $bw == *"Mbit/s"* ]]; then + bw_value=$(echo "$bw" | sed 's/Mbit\/s//') # Remove 'Mbit/s' + bw_bits_per_second=$(echo "$bw_value * 1000000" | bc) # Convert from Mbits to bits + elif [[ $bw == *"Kbit/s"* ]]; then + bw_value=$(echo "$bw" | sed 's/Kbit\/s//') # Remove 'Kbit/s' + bw_bits_per_second=$(echo "$bw_value * 1000" | bc) # Convert from Kbits to bits + fi + + bdp_bits=$(echo "$bw_bits_per_second * $delay_s" | bc) + + # Convert BDP to kilobytes + bdp_kb=$(echo "scale=2; $bdp_bits / 8 / 1024" | bc) + bdp_kb_rounded_up=$(printf "%.0f" "$bdp_kb") + + # if we are on MacOS X, configure the firewall to add delay and queue traffic + if [ -x /usr/sbin/dnctl ]; then + set_condition_commands=( + "sudo dnctl pipe 1 config bw $bw delay $delay_ms plr $plr queue ${bdp_kb_rounded_up}Kbytes noerror" + "sudo dnctl pipe 2 config bw $bw delay $delay_ms plr $plr queue ${bdp_kb_rounded_up}Kbytes noerror" + "sudo echo 'dummynet in proto {udp} from any to localhost pipe 1' | sudo pfctl -f -" + "sudo echo 
'dummynet in proto {udp} from localhost to any pipe 2' | sudo pfctl -f -" + "sudo pfctl -e || true" + ) + else + # TODO implement commands for linux + return 0 + fi + + for command in "${set_condition_commands[@]}"; do + echo $command + echo $root_password | sudo -S bash -c "$command" + done +} + +stop_network_conditions() { + if [ -x /usr/sbin/dnctl ]; then + stop_condition_commands=( + "sudo pfctl -f /etc/pf.conf" + "sudo dnctl -q flush" + ) + else + # TODO implement commands for linux + return 0 + fi + + for command in "${set_condition_commands[@]}"; do + echo $root_password | sudo -S bash -c "$command" + done +} + +stop_server() { + echo "stop server" + server_pid=$(pgrep -f "neqo-server") + # Kill the server + kill $server_pid +} + +start_test() { + echo "start_test" + eval "$server" > /dev/null 2>&1 & sleep 1 + + # Run the client command and capture its output + echo "Running client..." + client_output=$(eval "$client") + echo "Client output: $client_output" +} + +cleanup() { + echo "clean up" + stop_server + stop_network_conditions +} + +trap cleanup SIGINT + +for i in "${!network_conditions[@]}"; do + condition=${network_conditions[$i]} + bandwidth=${network_bandwidths[$i]} + rtt=${network_rtts[$i]} + + for plr in "${plrs[@]}"; do + echo "Setting up tests for condition: $condition, Bandwidth: $bandwidth, RTT: $rtt, Packet Loss Rate: $plr" + + for r in $(seq 1 $runs); do + echo "Test Run: $r | Condition: $condition | Bandwidth: $bandwidth | RTT: $rtt | PLR: $plr | Start" + setup_network_conditions "$bandwidth" "$rtt" "$plr" + start_test + cleanup + echo "Test Run: $r | Condition: $condition | Bandwidth: $bandwidth | RTT: $rtt | PLR: $plr | End" + done + done + + echo "Completed tests for condition: $condition." +done + +echo "All test runs completed." 
From 64fb41f47cbbbb1101484dcd37533c7b295fa659 Mon Sep 17 00:00:00 2001 From: Dragana Damjanovic Date: Wed, 17 Jan 2024 16:33:41 +0100 Subject: [PATCH 067/321] Script to update neqo version (#1028) * Script to update neqo version * Address various shellcheck nits, and resolve the "sed -i" issue --------- Co-authored-by: Lars Eggert --- update_version | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 update_version diff --git a/update_version b/update_version new file mode 100755 index 0000000000..2be137665e --- /dev/null +++ b/update_version @@ -0,0 +1,16 @@ +#! /usr/bin/env bash + +pushd "$(dirname "$0")" > /dev/null +set -e + +[[ -n "$1" ]] || { echo "Usage: $0 "; exit 1; } + +while IFS= read -r -d '' entry +do + echo "$entry" + line=$(grep -n -m1 "version" "$entry" | cut -d: -f2) + current=$(echo "${line}" | awk -F'"' '{print $2}') + sed -i.bak "s/$current/$1/g" "$entry" && rm "$entry.bak" +done < <(find . -mindepth 2 -name Cargo.toml -print0) + +popd > /dev/null From c567bf4965847f72aa9495f9ac4f35988ddb4831 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 19 Jan 2024 00:02:37 +0100 Subject: [PATCH 068/321] fix(client): call stream_close_send after GET (#1564) Previously `neqo-client` would not close the sending side of a stream after sending a GET request. Corresponding section in RFC 9114: > After sending a request, a client MUST close the stream for sending. https://www.rfc-editor.org/rfc/rfc9114.html#name-http-message-framing This surfaces in the Quic Interop Runner `http3` test. Here the client download 3 files from the server. 1. The client opens stream `0`, sends a GET for the first file. 2. The client opens stream `4`, sends a GET for the second file. 3. The client opens stream `8`, sends a GET for the third file. 4. ... 5. 
Eventually the client has read the whole response on stream `0`, [it removes the corresponding `StreamHandler` from `self.url_handler.stream_handlers`](https://github.com/mozilla/neqo/blob/64fb41f47cbbbb1101484dcd37533c7b295fa659/neqo-client/src/main.rs#L775-L777) and continues with the remaining requests. 6. Given that the client did not close the sending side of stream `0` after sending the GET request, it still handles `Http3ClientEvent::DataWritable` events for stream `0`. Given that it previously removed stream `0` from `self.url_handler.stream_handlers`, [it errors](https://github.com/mozilla/neqo/blob/64fb41f47cbbbb1101484dcd37533c7b295fa659/neqo-client/src/main.rs#L780-L784) and discontinues the [process_loop](https://github.com/mozilla/neqo/blob/64fb41f47cbbbb1101484dcd37533c7b295fa659/neqo-client/src/main.rs#L472-L474). 7. The second and third request don't finish and the Quic Interop Runner fails the test given that the second and third file are not fully downloaded. > File size of /tmp/download_sivy_1mt/cjwxjpvzjr doesn't match. Original: 10240 bytes, downloaded: 4056 bytes. 
--- neqo-client/src/main.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index e204b179cc..1ea52f8fb4 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -499,10 +499,13 @@ impl StreamHandlerType { url: &Url, args: &Args, all_paths: &mut Vec, + client: &mut Http3Client, + client_stream_id: StreamId, ) -> Box { match handler_type { Self::Download => { let out_file = get_output_file(url, &args.output_dir, all_paths); + client.stream_close_send(client_stream_id).unwrap(); Box::new(DownloadStreamHandler { out_file }) } Self::Upload => Box::new(UploadStreamHandler { @@ -657,6 +660,8 @@ impl<'a> URLHandler<'a> { &url, self.args, &mut self.all_paths, + client, + client_stream_id, ); self.stream_handlers.insert(client_stream_id, handler); true From 4de3279b8ac881ef4683a8331fc33c2917019562 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 19 Jan 2024 10:29:43 +0100 Subject: [PATCH 069/321] fix(qns): always use Version1 on server (#1563) * fix(qns): always use Version1 on server The Quic Interop Runner expects the server to use `Version::Version1` in all testcases but `versionnegotiation` and `v2`. See discussion in [Quic Interop Runner issue](https://github.com/quic-interop/quic-interop-runner/pull/344#issuecomment-1895771528) and corresponding code in the [Quic Interop Runner](https://github.com/quic-interop/quic-interop-runner/blob/ca27dcb5272a82d994337ae3d14533c318d81b76/testcases.py#L188-L195). While Neqo's default `Version` is `Version1`, the default `VersionConfig` includes all versions (`Version::all()`) and thus the server will currently upgrade the connection to the compatible `Version::Version2`. With this commit, the server will always choose `Version1` when running a qns testcase. Given that Neqo's qns implementation does not support the `versionnegotiation` and `v2` testcases, this fix is applicable for all supported testcases. 
* Only set if unset --- neqo-server/src/main.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index ac4c952837..ca6b6800fb 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -836,6 +836,15 @@ fn main() -> Result<(), io::Error> { init_db(args.db.clone()); if let Some(testcase) = args.qns_test.as_ref() { + if args.quic_parameters.quic_version.is_empty() { + // Quic Interop Runner expects the server to support `Version1` only. + // Exceptions are testcases `versionnegotiation` and `v2`. Neither are + // supported by Neqo. Thus always set `Version1`. + args.quic_parameters.quic_version = vec![VersionArg(Version::Version1)]; + } else { + qwarn!("Both -V and --qns-test were set. Ignoring testcase specific versions."); + } + match testcase.as_str() { "http3" => (), "zerortt" => { From dc61acdc8dbe4493c983c26eb8b2b6433e04a788 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 22 Jan 2024 00:20:30 +0100 Subject: [PATCH 070/321] refactor: address clippy::single-match-else (#1560) * refactor: address clippy::single-match-else Fixes clippy lint `single-match-else`. https://rust-lang.github.io/rust-clippy/master/index.html#single_match_else * Return PreprocessResult::End once --- neqo-client/src/main.rs | 50 ++++++++++++---------------- neqo-interop/src/main.rs | 15 ++++----- neqo-transport/src/connection/mod.rs | 40 +++++++++------------- neqo-transport/src/qlog.rs | 24 ++++++------- neqo-transport/src/send_stream.rs | 15 ++++----- neqo-transport/src/server.rs | 31 ++++++++--------- 6 files changed, 73 insertions(+), 102 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 1ea52f8fb4..d099d99331 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -732,14 +732,11 @@ impl<'a> Handler<'a> { fin, .. 
} => { - match self.url_handler.stream_handler(&stream_id) { - Some(handler) => { - handler.process_header_ready(stream_id, fin, headers); - } - None => { - println!("Data on unexpected stream: {stream_id}"); - return Ok(false); - } + if let Some(handler) = self.url_handler.stream_handler(&stream_id) { + handler.process_header_ready(stream_id, fin, headers); + } else { + println!("Data on unexpected stream: {stream_id}"); + return Ok(false); } if fin { return Ok(self.url_handler.on_stream_fin(client, stream_id)); @@ -836,26 +833,23 @@ fn handle_test( resumption_token: Option, ) -> Res> { let key_update = KeyUpdateState(args.key_update); - match testcase.as_str() { - "upload" => { - let mut client = - create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) - .expect("failed to create client"); - args.method = String::from("POST"); - let url_handler = URLHandler { - url_queue: VecDeque::from(urls.to_vec()), - stream_handlers: HashMap::new(), - all_paths: Vec::new(), - handler_type: StreamHandlerType::Upload, - args, - }; - let mut h = Handler::new(url_handler, key_update, args.output_read_data); - process_loop(&local_addr, socket, poll, &mut client, &mut h)?; - } - _ => { - eprintln!("Unsupported test case: {testcase}"); - exit(127) - } + if testcase.as_str() == "upload" { + let mut client = + create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) + .expect("failed to create client"); + args.method = String::from("POST"); + let url_handler = URLHandler { + url_queue: VecDeque::from(urls.to_vec()), + stream_handlers: HashMap::new(), + all_paths: Vec::new(), + handler_type: StreamHandlerType::Upload, + args, + }; + let mut h = Handler::new(url_handler, key_update, args.output_read_data); + process_loop(&local_addr, socket, poll, &mut client, &mut h)?; + } else { + eprintln!("Unsupported test case: {testcase}"); + exit(127) } Ok(None) diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index 
c274c57d0f..3c634c11e0 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -772,15 +772,12 @@ fn run_peer(args: &Args, peer: &'static Peer) -> Vec<(&'static Test, String)> { } for child in children { - match child.1.join() { - Ok(e) => { - eprintln!("Test complete {:?}, {:?}", child.0, e); - results.push(e); - } - Err(_) => { - eprintln!("Thread crashed {:?}", child.0); - results.push((child.0, String::from("CRASHED"))); - } + if let Ok(e) = child.1.join() { + eprintln!("Test complete {:?}, {:?}", child.0, e); + results.push(e); + } else { + eprintln!("Thread crashed {:?}", child.0); + results.push((child.0, String::from("CRASHED"))); } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index e8d10387e3..832ea0d67b 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1261,32 +1261,24 @@ impl Connection { } } (PacketType::VersionNegotiation, State::WaitInitial, Role::Client) => { - match packet.supported_versions() { - Ok(versions) => { - if versions.is_empty() - || versions.contains(&self.version().wire_version()) - || versions.contains(&0) - || &packet.scid() != self.odcid().unwrap() - || matches!( - self.address_validation, - AddressValidationInfo::Retry { .. } - ) - { - // Ignore VersionNegotiation packets that contain the current version. - // Or don't have the right connection ID. - // Or are received after a Retry. - self.stats.borrow_mut().pkt_dropped("Invalid VN"); - return Ok(PreprocessResult::End); - } - + if let Ok(versions) = packet.supported_versions() { + if versions.is_empty() + || versions.contains(&self.version().wire_version()) + || versions.contains(&0) + || &packet.scid() != self.odcid().unwrap() + || matches!(self.address_validation, AddressValidationInfo::Retry { .. }) + { + // Ignore VersionNegotiation packets that contain the current version. + // Or don't have the right connection ID. + // Or are received after a Retry. 
+ self.stats.borrow_mut().pkt_dropped("Invalid VN"); + } else { self.version_negotiation(&versions, now)?; - return Ok(PreprocessResult::End); - } - Err(_) => { - self.stats.borrow_mut().pkt_dropped("VN with no versions"); - return Ok(PreprocessResult::End); } - } + } else { + self.stats.borrow_mut().pkt_dropped("VN with no versions"); + }; + return Ok(PreprocessResult::End); } (PacketType::Retry, State::WaitInitial, Role::Client) => { self.handle_retry(packet, now); diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 59168147d2..1639da6e74 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -206,14 +206,11 @@ pub fn packet_sent( let mut frames = SmallVec::new(); while d.remaining() > 0 { - match Frame::decode(&mut d) { - Ok(f) => { - frames.push(frame_to_qlogframe(&f)); - } - Err(_) => { - qinfo!("qlog: invalid frame"); - break; - } + if let Ok(f) = Frame::decode(&mut d) { + frames.push(frame_to_qlogframe(&f)) + } else { + qinfo!("qlog: invalid frame"); + break; } } @@ -303,12 +300,11 @@ pub fn packet_received( let mut frames = Vec::new(); while d.remaining() > 0 { - match Frame::decode(&mut d) { - Ok(f) => frames.push(frame_to_qlogframe(&f)), - Err(_) => { - qinfo!("qlog: invalid frame"); - break; - } + if let Ok(f) = Frame::decode(&mut d) { + frames.push(frame_to_qlogframe(&f)) + } else { + qinfo!("qlog: invalid frame"); + break; } } diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 6ff173302e..ed227b2a31 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -1595,16 +1595,13 @@ impl SendStreams { .flat_map(|group| group.iter()), ); for stream_id in stream_ids { - match self.map.get_mut(&stream_id).unwrap().sendorder() { - Some(order) => qdebug!(" {} ({})", stream_id, order), - None => qdebug!(" None"), + let stream = self.map.get_mut(&stream_id).unwrap(); + if let Some(order) = stream.sendorder() { + qdebug!(" {} ({})", stream_id, order) + } 
else { + qdebug!(" None") } - if !self - .map - .get_mut(&stream_id) - .unwrap() - .write_frames_with_early_return(priority, builder, tokens, stats) - { + if !stream.write_frames_with_early_return(priority, builder, tokens, stats) { break; } } diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index a15e1fc1f8..08d21915c6 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -646,28 +646,23 @@ impl Server { } pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { - let out = if let Some(d) = dgram { - self.process_input(d, now) - } else { - None - }; - let out = out.or_else(|| self.process_next_output(now)); - match out { - Some(d) => { + dgram + .and_then(|d| self.process_input(d, now)) + .or_else(|| self.process_next_output(now)) + .map(|d| { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) - } - _ => match self.next_time(now) { - Some(delay) => { + }) + .or_else(|| { + self.next_time(now).map(|delay| { qtrace!([self], "Wait: {:?}", delay); Output::Callback(delay) - } - _ => { - qtrace!([self], "Go dormant"); - Output::None - } - }, - } + }) + }) + .unwrap_or_else(|| { + qtrace!([self], "Go dormant"); + Output::None + }) } /// This lists the connections that have received new events From a334be299383aad93727961f0aa60a1c8ab37fe2 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 22 Jan 2024 00:40:47 +0100 Subject: [PATCH 071/321] feat(qns): implement v2 testcase (#1570) * feat(qns): implement v2 testcase Add support for Quic Network Simulator `v2` testcase. In `v2` testcase, don't restrict the server to `Version::Version1`, thus allowing the server to upgrade incoming `Version::Version1` connection to compatible `Version::Version2` connection. See also [Quic Network Simulator `v2` testcase implementation]( https://github.com/quic-interop/quic-interop-runner/blob/ca27dcb5272a82d994337ae3d14533c318d81b76/testcases.py#L1460-L1545). 
* Set use_old_http --- neqo-client/src/main.rs | 3 +++ neqo-server/src/main.rs | 14 ++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index d099d99331..83dd2c5ac7 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -1012,6 +1012,9 @@ fn main() -> Res<()> { args.use_old_http = true; args.key_update = true; } + "v2" => { + args.use_old_http = true; + } _ => exit(127), } } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index ca6b6800fb..a9e4d37e63 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -837,10 +837,12 @@ fn main() -> Result<(), io::Error> { if let Some(testcase) = args.qns_test.as_ref() { if args.quic_parameters.quic_version.is_empty() { - // Quic Interop Runner expects the server to support `Version1` only. - // Exceptions are testcases `versionnegotiation` and `v2`. Neither are - // supported by Neqo. Thus always set `Version1`. - args.quic_parameters.quic_version = vec![VersionArg(Version::Version1)]; + // Quic Interop Runner expects the server to support `Version1` + // only. Exceptions are testcases `versionnegotiation` (not yet + // implemented) and `v2`. + if testcase != "v2" { + args.quic_parameters.quic_version = vec![VersionArg(Version::Version1)]; + } } else { qwarn!("Both -V and --qns-test were set. 
Ignoring testcase specific versions."); } @@ -868,6 +870,10 @@ fn main() -> Result<(), io::Error> { args.alpn = String::from(HQ_INTEROP); args.retry = true; } + "v2" => { + args.use_old_http = true; + args.alpn = String::from(HQ_INTEROP); + } _ => exit(127), } } From 3945bf0e46e63b6c2bd192d72a5f65ab80d69082 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Mon, 22 Jan 2024 13:19:39 +0100 Subject: [PATCH 072/321] Don't crash when timeBeginPeriod/timeEndPeriod returns an error (#1565) * Don't crash when timeBeginPeriod/timeEndPeriod returns an error * format * address comment --- neqo-common/src/hrtime.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index 682c097035..1187e39a5a 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -301,7 +301,7 @@ impl Time { #[cfg(windows)] { if let Some(p) = self.active { - assert_eq!(0, unsafe { timeBeginPeriod(p.as_uint()) }); + _ = unsafe { timeBeginPeriod(p.as_uint()) }; } } } @@ -311,7 +311,7 @@ impl Time { #[cfg(windows)] { if let Some(p) = self.active { - assert_eq!(0, unsafe { timeEndPeriod(p.as_uint()) }); + _ = unsafe { timeEndPeriod(p.as_uint()) }; } } } From fb32a04fea56b46e67fad321205b68f67ffe4508 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 22 Jan 2024 17:13:01 +0200 Subject: [PATCH 073/321] Add TOS and TTL information to Datagrams (#1568) * Add TOS and TTL information to Datagrams This part of the refactor of #1495. This doesn't include any ECN I/O or other logic, just adds the required fields to the datagram header. * cargo fmt * Add test * Fix clippy * Add an IpTos type as suggested by @martinthomson. I might back this back out if it is not the direction we want to go in, pending further discussion. 
* Move IpTos things into their own source file * Make IpTos use a single byte --- neqo-client/src/main.rs | 14 +- neqo-common/Cargo.toml | 1 + neqo-common/src/datagram.rs | 43 ++- neqo-common/src/lib.rs | 2 + neqo-common/src/tos.rs | 290 ++++++++++++++++++ neqo-interop/src/main.rs | 26 +- neqo-server/src/main.rs | 10 +- neqo-transport/src/connection/mod.rs | 8 +- neqo-transport/src/connection/tests/close.rs | 5 +- .../src/connection/tests/handshake.rs | 12 +- .../src/connection/tests/migration.rs | 10 +- neqo-transport/src/connection/tests/vn.rs | 30 +- neqo-transport/src/path.rs | 10 +- neqo-transport/src/server.rs | 16 +- neqo-transport/tests/conn_vectors.rs | 5 +- neqo-transport/tests/connection.rs | 6 + neqo-transport/tests/retry.rs | 38 ++- neqo-transport/tests/server.rs | 32 +- test-fixture/src/lib.rs | 12 +- 19 files changed, 507 insertions(+), 63 deletions(-) create mode 100644 neqo-common/src/tos.rs diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 83dd2c5ac7..3bf7585e9f 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -7,6 +7,7 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] +use common::IpTos; use qlog::{events::EventImportance, streamer::QlogStreamer}; use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; @@ -430,7 +431,8 @@ fn process_loop( break 'read; } if sz > 0 { - let d = Datagram::new(remote, *local_addr, &buf[..sz]); + let d = + Datagram::new(remote, *local_addr, IpTos::default(), None, &buf[..sz]); datagrams.push(d); } } @@ -1142,7 +1144,7 @@ mod old { use super::{qlog_new, KeyUpdateState, Res}; use mio::{Events, Poll}; - use neqo_common::{event::Provider, Datagram}; + use neqo_common::{event::Provider, Datagram, IpTos}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, @@ -1367,7 +1369,13 @@ mod old { break 'read; } if sz > 0 { - let d = 
Datagram::new(remote, *local_addr, &buf[..sz]); + let d = Datagram::new( + remote, + *local_addr, + IpTos::default(), + None, + &buf[..sz], + ); client.process_input(&d, Instant::now()); handler.maybe_key_update(client)?; } diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index ca209328e5..584f3100d4 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -9,6 +9,7 @@ build = "build.rs" [dependencies] log = { version = "0.4.0", default-features = false } +enum-map = "~2.7.3" env_logger = { version = "0.10", default-features = false } lazy_static = "1.3.0" qlog = "0.11.0" diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index 0316dd2309..cdd61753a3 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -7,20 +7,30 @@ use std::net::SocketAddr; use std::ops::Deref; -use crate::hex_with_len; +use crate::{hex_with_len, IpTos}; -#[derive(PartialEq, Eq, Clone)] +#[derive(Clone, PartialEq, Eq)] pub struct Datagram { src: SocketAddr, dst: SocketAddr, + tos: IpTos, + ttl: Option, d: Vec, } impl Datagram { - pub fn new>>(src: SocketAddr, dst: SocketAddr, d: V) -> Self { + pub fn new>>( + src: SocketAddr, + dst: SocketAddr, + tos: IpTos, + ttl: Option, + d: V, + ) -> Self { Self { src, dst, + tos, + ttl, d: d.into(), } } @@ -34,6 +44,16 @@ impl Datagram { pub fn destination(&self) -> SocketAddr { self.dst } + + #[must_use] + pub fn tos(&self) -> IpTos { + self.tos + } + + #[must_use] + pub fn ttl(&self) -> Option { + self.ttl + } } impl Deref for Datagram { @@ -48,10 +68,25 @@ impl std::fmt::Debug for Datagram { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, - "Datagram {:?}->{:?}: {}", + "Datagram {:?} TTL {:?} {:?}->{:?}: {}", + self.tos, + self.ttl, self.src, self.dst, hex_with_len(&self.d) ) } } + +#[cfg(test)] +use test_fixture::datagram; + +#[test] +fn fmt_datagram() { + let d = datagram([0; 1].to_vec()); + assert_eq!( + format!("{d:?}"), + "Datagram IpTos(Cs0, NotEct) TTL 
Some(128) [fe80::1]:443->[fe80::1]:443: [1]: 00" + .to_string() + ); +} diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index 3fb0fd27ec..202f39e0fb 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -16,6 +16,7 @@ mod incrdecoder; pub mod log; pub mod qlog; pub mod timer; +pub mod tos; pub use self::codec::{Decoder, Encoder}; pub use self::datagram::Datagram; @@ -23,6 +24,7 @@ pub use self::header::Header; pub use self::incrdecoder::{ IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint, }; +pub use self::tos::{IpTos, IpTosDscp, IpTosEcn}; use std::fmt::Write; diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs new file mode 100644 index 0000000000..7693ef1808 --- /dev/null +++ b/neqo-common/src/tos.rs @@ -0,0 +1,290 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt::Debug; + +use enum_map::Enum; + +/// ECN (Explicit Congestion Notification) codepoints mapped to the +/// lower 2 bits of the TOS field. +/// +#[derive(Copy, Clone, PartialEq, Eq, Enum, Default, Debug)] +#[repr(u8)] +pub enum IpTosEcn { + #[default] + /// Not-ECT, Not ECN-Capable Transport, [RFC3168] + NotEct = 0b00, + + /// ECT(1), ECN-Capable Transport(1), [RFC8311][RFC9331] + Ect1 = 0b01, + + /// ECT(0), ECN-Capable Transport(0), [RFC3168] + Ect0 = 0b10, + + /// CE, Congestion Experienced, [RFC3168] + Ce = 0b11, +} + +impl From for u8 { + fn from(v: IpTosEcn) -> Self { + v as u8 + } +} + +impl From for IpTosEcn { + fn from(v: u8) -> Self { + match v & 0b11 { + 0b00 => IpTosEcn::NotEct, + 0b01 => IpTosEcn::Ect1, + 0b10 => IpTosEcn::Ect0, + 0b11 => IpTosEcn::Ce, + _ => unreachable!(), + } + } +} + +/// Diffserv Codepoints, mapped to the upper six bits of the TOS field. 
+/// +#[derive(Copy, Clone, PartialEq, Eq, Enum, Default, Debug)] +#[repr(u8)] +pub enum IpTosDscp { + #[default] + /// Class Selector 0, [RFC2474] + Cs0 = 0b0000_0000, + + /// Class Selector 1, [RFC2474] + Cs1 = 0b0010_0000, + + /// Class Selector 2, [RFC2474] + Cs2 = 0b0100_0000, + + /// Class Selector 3, [RFC2474] + Cs3 = 0b0110_0000, + + /// Class Selector 4, [RFC2474] + Cs4 = 0b1000_0000, + + /// Class Selector 5, [RFC2474] + Cs5 = 0b1010_0000, + + /// Class Selector 6, [RFC2474] + Cs6 = 0b1100_0000, + + /// Class Selector 7, [RFC2474] + Cs7 = 0b1110_0000, + + /// Assured Forwarding 11, [RFC2597] + Af11 = 0b0010_1000, + + /// Assured Forwarding 12, [RFC2597] + Af12 = 0b0011_0000, + + /// Assured Forwarding 13, [RFC2597] + Af13 = 0b0011_1000, + + /// Assured Forwarding 21, [RFC2597] + Af21 = 0b0100_1000, + + /// Assured Forwarding 22, [RFC2597] + Af22 = 0b0101_0000, + + /// Assured Forwarding 23, [RFC2597] + Af23 = 0b0101_1000, + + /// Assured Forwarding 31, [RFC2597] + Af31 = 0b0110_1000, + + /// Assured Forwarding 32, [RFC2597] + Af32 = 0b0111_0000, + + /// Assured Forwarding 33, [RFC2597] + Af33 = 0b0111_1000, + + /// Assured Forwarding 41, [RFC2597] + Af41 = 0b1000_1000, + + /// Assured Forwarding 42, [RFC2597] + Af42 = 0b1001_0000, + + /// Assured Forwarding 43, [RFC2597] + Af43 = 0b1001_1000, + + /// Expedited Forwarding, [RFC3246] + Ef = 0b1011_1000, + + /// Capacity-Admitted Traffic, [RFC5865] + VoiceAdmit = 0b1011_0000, + + /// Lower-Effort, [RFC8622] + Le = 0b0000_0100, +} + +impl From for u8 { + fn from(v: IpTosDscp) -> Self { + v as u8 + } +} + +impl From for IpTosDscp { + fn from(v: u8) -> Self { + match v & 0b1111_1100 { + 0b0000_0000 => IpTosDscp::Cs0, + 0b0010_0000 => IpTosDscp::Cs1, + 0b0100_0000 => IpTosDscp::Cs2, + 0b0110_0000 => IpTosDscp::Cs3, + 0b1000_0000 => IpTosDscp::Cs4, + 0b1010_0000 => IpTosDscp::Cs5, + 0b1100_0000 => IpTosDscp::Cs6, + 0b1110_0000 => IpTosDscp::Cs7, + 0b0010_1000 => IpTosDscp::Af11, + 0b0011_0000 => IpTosDscp::Af12, 
+ 0b0011_1000 => IpTosDscp::Af13, + 0b0100_1000 => IpTosDscp::Af21, + 0b0101_0000 => IpTosDscp::Af22, + 0b0101_1000 => IpTosDscp::Af23, + 0b0110_1000 => IpTosDscp::Af31, + 0b0111_0000 => IpTosDscp::Af32, + 0b0111_1000 => IpTosDscp::Af33, + 0b1000_1000 => IpTosDscp::Af41, + 0b1001_0000 => IpTosDscp::Af42, + 0b1001_1000 => IpTosDscp::Af43, + 0b1011_1000 => IpTosDscp::Ef, + 0b1011_0000 => IpTosDscp::VoiceAdmit, + 0b0000_0100 => IpTosDscp::Le, + _ => unreachable!(), + } + } +} + +/// The type-of-service field in an IP packet. +#[allow(clippy::module_name_repetitions)] +#[derive(Copy, Clone, PartialEq, Eq)] +pub struct IpTos(u8); + +impl From for IpTos { + fn from(v: IpTosEcn) -> Self { + Self(u8::from(v)) + } +} +impl From for IpTos { + fn from(v: IpTosDscp) -> Self { + Self(u8::from(v)) + } +} +impl From<(IpTosDscp, IpTosEcn)> for IpTos { + fn from(v: (IpTosDscp, IpTosEcn)) -> Self { + Self(u8::from(v.0) | u8::from(v.1)) + } +} +impl From for u8 { + fn from(v: IpTos) -> Self { + v.0 + } +} + +impl Debug for IpTos { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("IpTos") + .field(&IpTosDscp::from(self.0 & 0xfc)) + .field(&IpTosEcn::from(self.0 & 0x3)) + .finish() + } +} + +impl Default for IpTos { + fn default() -> Self { + (IpTosDscp::default(), IpTosEcn::default()).into() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn iptosecn_into_u8() { + assert_eq!(u8::from(IpTosEcn::NotEct), 0b00); + assert_eq!(u8::from(IpTosEcn::Ect1), 0b01); + assert_eq!(u8::from(IpTosEcn::Ect0), 0b10); + assert_eq!(u8::from(IpTosEcn::Ce), 0b11); + } + + #[test] + fn u8_into_iptosecn() { + assert_eq!(IpTosEcn::from(0b00), IpTosEcn::NotEct); + assert_eq!(IpTosEcn::from(0b01), IpTosEcn::Ect1); + assert_eq!(IpTosEcn::from(0b10), IpTosEcn::Ect0); + assert_eq!(IpTosEcn::from(0b11), IpTosEcn::Ce); + } + + #[test] + fn iptosdscp_into_u8() { + assert_eq!(u8::from(IpTosDscp::Cs0), 0b0000_0000); + assert_eq!(u8::from(IpTosDscp::Cs1), 
0b0010_0000); + assert_eq!(u8::from(IpTosDscp::Cs2), 0b0100_0000); + assert_eq!(u8::from(IpTosDscp::Cs3), 0b0110_0000); + assert_eq!(u8::from(IpTosDscp::Cs4), 0b1000_0000); + assert_eq!(u8::from(IpTosDscp::Cs5), 0b1010_0000); + assert_eq!(u8::from(IpTosDscp::Cs6), 0b1100_0000); + assert_eq!(u8::from(IpTosDscp::Cs7), 0b1110_0000); + assert_eq!(u8::from(IpTosDscp::Af11), 0b0010_1000); + assert_eq!(u8::from(IpTosDscp::Af12), 0b0011_0000); + assert_eq!(u8::from(IpTosDscp::Af13), 0b0011_1000); + assert_eq!(u8::from(IpTosDscp::Af21), 0b0100_1000); + assert_eq!(u8::from(IpTosDscp::Af22), 0b0101_0000); + assert_eq!(u8::from(IpTosDscp::Af23), 0b0101_1000); + assert_eq!(u8::from(IpTosDscp::Af31), 0b0110_1000); + assert_eq!(u8::from(IpTosDscp::Af32), 0b0111_0000); + assert_eq!(u8::from(IpTosDscp::Af33), 0b0111_1000); + assert_eq!(u8::from(IpTosDscp::Af41), 0b1000_1000); + assert_eq!(u8::from(IpTosDscp::Af42), 0b1001_0000); + assert_eq!(u8::from(IpTosDscp::Af43), 0b1001_1000); + assert_eq!(u8::from(IpTosDscp::Ef), 0b1011_1000); + assert_eq!(u8::from(IpTosDscp::VoiceAdmit), 0b1011_0000); + assert_eq!(u8::from(IpTosDscp::Le), 0b0000_0100); + } + + #[test] + fn u8_into_iptosdscp() { + assert_eq!(IpTosDscp::from(0b0000_0000), IpTosDscp::Cs0); + assert_eq!(IpTosDscp::from(0b0010_0000), IpTosDscp::Cs1); + assert_eq!(IpTosDscp::from(0b0100_0000), IpTosDscp::Cs2); + assert_eq!(IpTosDscp::from(0b0110_0000), IpTosDscp::Cs3); + assert_eq!(IpTosDscp::from(0b1000_0000), IpTosDscp::Cs4); + assert_eq!(IpTosDscp::from(0b1010_0000), IpTosDscp::Cs5); + assert_eq!(IpTosDscp::from(0b1100_0000), IpTosDscp::Cs6); + assert_eq!(IpTosDscp::from(0b1110_0000), IpTosDscp::Cs7); + assert_eq!(IpTosDscp::from(0b0010_1000), IpTosDscp::Af11); + assert_eq!(IpTosDscp::from(0b0011_0000), IpTosDscp::Af12); + assert_eq!(IpTosDscp::from(0b0011_1000), IpTosDscp::Af13); + assert_eq!(IpTosDscp::from(0b0100_1000), IpTosDscp::Af21); + assert_eq!(IpTosDscp::from(0b0101_0000), IpTosDscp::Af22); + 
assert_eq!(IpTosDscp::from(0b0101_1000), IpTosDscp::Af23); + assert_eq!(IpTosDscp::from(0b0110_1000), IpTosDscp::Af31); + assert_eq!(IpTosDscp::from(0b0111_0000), IpTosDscp::Af32); + assert_eq!(IpTosDscp::from(0b0111_1000), IpTosDscp::Af33); + assert_eq!(IpTosDscp::from(0b1000_1000), IpTosDscp::Af41); + assert_eq!(IpTosDscp::from(0b1001_0000), IpTosDscp::Af42); + assert_eq!(IpTosDscp::from(0b1001_1000), IpTosDscp::Af43); + assert_eq!(IpTosDscp::from(0b1011_1000), IpTosDscp::Ef); + assert_eq!(IpTosDscp::from(0b1011_0000), IpTosDscp::VoiceAdmit); + assert_eq!(IpTosDscp::from(0b0000_0100), IpTosDscp::Le); + } + + #[test] + fn iptosecn_into_iptos() { + let ecn = IpTosEcn::default(); + let iptos_ecn: IpTos = ecn.into(); + assert_eq!(u8::from(iptos_ecn), ecn as u8); + } + + #[test] + fn iptosdscp_into_iptos() { + let dscp = IpTosDscp::default(); + let iptos_dscp: IpTos = dscp.into(); + assert_eq!(u8::from(iptos_dscp), dscp as u8); + } +} diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index 3c634c11e0..254b953f22 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -7,7 +7,7 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use neqo_common::{event::Provider, hex, Datagram}; +use neqo_common::{event::Provider, hex, Datagram, IpTos}; use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; use neqo_transport::{ @@ -148,7 +148,13 @@ fn process_loop( continue; } if sz > 0 { - let received = Datagram::new(nctx.remote_addr, nctx.local_addr, &buf[..sz]); + let received = Datagram::new( + nctx.remote_addr, + nctx.local_addr, + IpTos::default(), + None, + &buf[..sz], + ); client.process_input(&received, Instant::now()); } } @@ -309,7 +315,13 @@ fn process_loop_h3( continue; } if sz > 0 { - let received = Datagram::new(nctx.remote_addr, nctx.local_addr, &buf[..sz]); + let received = Datagram::new( + 
nctx.remote_addr, + nctx.local_addr, + IpTos::default(), + None, + &buf[..sz], + ); handler.h3.process_input(&received, Instant::now()); } } @@ -682,7 +694,13 @@ impl Handler for VnHandler { fn rewrite_out(&mut self, d: &Datagram) -> Option { let mut payload = d[..].to_vec(); payload[1] = 0x1a; - Some(Datagram::new(d.source(), d.destination(), payload)) + Some(Datagram::new( + d.source(), + d.destination(), + d.tos(), + d.ttl(), + payload, + )) } } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index a9e4d37e63..eb6a00b8cc 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -30,7 +30,7 @@ use mio_extras::timer::{Builder, Timeout, Timer}; use neqo_transport::ConnectionIdGenerator; use structopt::StructOpt; -use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header}; +use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, @@ -606,7 +606,13 @@ fn read_dgram( eprintln!("zero length datagram received?"); Ok(None) } else { - Ok(Some(Datagram::new(remote_addr, *local_address, &buf[..sz]))) + Ok(Some(Datagram::new( + remote_addr, + *local_address, + IpTos::default(), + None, + &buf[..sz], + ))) } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 832ea0d67b..f3519f7daa 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1149,7 +1149,13 @@ impl Connection { /// part that we don't have keys for. 
fn save_datagram(&mut self, cspace: CryptoSpace, d: &Datagram, remaining: usize, now: Instant) { let d = if remaining < d.len() { - Datagram::new(d.source(), d.destination(), &d[d.len() - remaining..]) + Datagram::new( + d.source(), + d.destination(), + d.tos(), + d.ttl(), + &d[d.len() - remaining..], + ) } else { d.clone() }; diff --git a/neqo-transport/src/connection/tests/close.rs b/neqo-transport/src/connection/tests/close.rs index 6efbb6e24f..39b1106ce0 100644 --- a/neqo-transport/src/connection/tests/close.rs +++ b/neqo-transport/src/connection/tests/close.rs @@ -9,9 +9,8 @@ use super::{connect, connect_force_idle, default_client, default_server, send_so use crate::tparams::{self, TransportParameter}; use crate::{AppError, ConnectionError, Error, ERROR_APPLICATION_CLOSE}; -use neqo_common::Datagram; use std::time::Duration; -use test_fixture::{self, addr, now}; +use test_fixture::{self, datagram, now}; fn assert_draining(c: &Connection, expected: &Error) { assert!(c.state().closed()); @@ -201,6 +200,6 @@ fn stateless_reset_client() { .unwrap(); connect_force_idle(&mut client, &mut server); - client.process_input(&Datagram::new(addr(), addr(), vec![77; 21]), now()); + client.process_input(&datagram(vec![77; 21]), now()); assert_draining(&client, &Error::StatelessReset); } diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 602611d34f..55cd10b667 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -30,7 +30,7 @@ use std::mem; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::rc::Rc; use std::time::Duration; -use test_fixture::{self, addr, assertions, fixture_init, now, split_datagram}; +use test_fixture::{self, addr, assertions, datagram, fixture_init, now, split_datagram}; const ECH_CONFIG_ID: u8 = 7; const ECH_PUBLIC_NAME: &str = "public.example"; @@ -615,7 +615,7 @@ fn corrupted_initial() { .find(|(_, &v)| v != 0) 
.unwrap(); corrupted[idx] ^= 0x76; - let dgram = Datagram::new(d.source(), d.destination(), corrupted); + let dgram = Datagram::new(d.source(), d.destination(), d.tos(), d.ttl(), corrupted); server.process_input(&dgram, now()); // The server should have received two packets, // the first should be dropped, the second saved. @@ -711,7 +711,7 @@ fn extra_initial_invalid_cid() { let mut copy = hs.to_vec(); assert_ne!(copy[5], 0); // The DCID should be non-zero length. copy[6] ^= 0xc4; - let dgram_copy = Datagram::new(hs.destination(), hs.source(), copy); + let dgram_copy = Datagram::new(hs.destination(), hs.source(), hs.tos(), hs.ttl(), copy); let nothing = client.process(Some(&dgram_copy), now).dgram(); assert!(nothing.is_none()); } @@ -814,7 +814,7 @@ fn garbage_initial() { let mut corrupted = Vec::from(&initial[..initial.len() - 1]); corrupted.push(initial[initial.len() - 1] ^ 0xb7); corrupted.extend_from_slice(rest.as_ref().map_or(&[], |r| &r[..])); - let garbage = Datagram::new(addr(), addr(), corrupted); + let garbage = datagram(corrupted); assert_eq!(Output::None, server.process(Some(&garbage), now())); } @@ -832,6 +832,8 @@ fn drop_initial_packet_from_wrong_address() { let dgram = Datagram::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2)), 443), p.destination(), + p.tos(), + p.ttl(), &p[..], ); @@ -858,6 +860,8 @@ fn drop_handshake_packet_from_wrong_address() { let dgram = Datagram::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2)), 443), p.destination(), + p.tos(), + p.ttl(), &p[..], ); diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index b7e5392903..79c13faa77 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -52,7 +52,7 @@ fn loopback() -> SocketAddr { } fn change_path(d: &Datagram, a: SocketAddr) -> Datagram { - Datagram::new(a, a, &d[..]) + Datagram::new(a, a, d.tos(), 
d.ttl(), &d[..]) } fn new_port(a: SocketAddr) -> SocketAddr { @@ -61,7 +61,13 @@ fn new_port(a: SocketAddr) -> SocketAddr { } fn change_source_port(d: &Datagram) -> Datagram { - Datagram::new(new_port(d.source()), d.destination(), &d[..]) + Datagram::new( + new_port(d.source()), + d.destination(), + d.tos(), + d.ttl(), + &d[..], + ) } /// As these tests use a new path, that path often has a non-zero RTT. diff --git a/neqo-transport/src/connection/tests/vn.rs b/neqo-transport/src/connection/tests/vn.rs index 4c00253642..e289bc654c 100644 --- a/neqo-transport/src/connection/tests/vn.rs +++ b/neqo-transport/src/connection/tests/vn.rs @@ -13,10 +13,10 @@ use crate::packet::PACKET_BIT_LONG; use crate::tparams::{self, TransportParameter}; use crate::{ConnectionParameters, Error, Version}; -use neqo_common::{event::Provider, Datagram, Decoder, Encoder}; +use neqo_common::{event::Provider, Decoder, Encoder}; use std::mem; use std::time::Duration; -use test_fixture::{self, addr, assertions, now}; +use test_fixture::{self, assertions, datagram, now}; // The expected PTO duration after the first Initial is sent. 
const INITIAL_PTO: Duration = Duration::from_millis(300); @@ -29,10 +29,7 @@ fn unknown_version() { let mut unknown_version_packet = vec![0x80, 0x1a, 0x1a, 0x1a, 0x1a]; unknown_version_packet.resize(1200, 0x0); - mem::drop(client.process( - Some(&Datagram::new(addr(), addr(), unknown_version_packet)), - now(), - )); + mem::drop(client.process(Some(&datagram(unknown_version_packet)), now())); assert_eq!(1, client.stats().dropped_rx); } @@ -44,10 +41,7 @@ fn server_receive_unknown_first_packet() { unknown_version_packet.resize(1200, 0x0); assert_eq!( - server.process( - Some(&Datagram::new(addr(), addr(), unknown_version_packet,)), - now(), - ), + server.process(Some(&datagram(unknown_version_packet,)), now(),), Output::None ); @@ -86,7 +80,7 @@ fn version_negotiation_current_version() { &[0x1a1a_1a1a, Version::default().wire_version()], ); - let dgram = Datagram::new(addr(), addr(), vn); + let dgram = datagram(vn); let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); @@ -105,7 +99,7 @@ fn version_negotiation_version0() { let vn = create_vn(&initial_pkt, &[0, 0x1a1a_1a1a]); - let dgram = Datagram::new(addr(), addr(), vn); + let dgram = datagram(vn); let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); @@ -124,7 +118,7 @@ fn version_negotiation_only_reserved() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a]); - let dgram = Datagram::new(addr(), addr(), vn); + let dgram = datagram(vn); assert_eq!(client.process(Some(&dgram), now()), Output::None); match client.state() { State::Closed(err) => { @@ -146,7 +140,7 @@ fn version_negotiation_corrupted() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a]); - let dgram = Datagram::new(addr(), addr(), &vn[..vn.len() - 1]); + let dgram = datagram(vn[..vn.len() - 1].to_vec()); let delay = client.process(Some(&dgram), 
now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); @@ -165,7 +159,7 @@ fn version_negotiation_empty() { let vn = create_vn(&initial_pkt, &[]); - let dgram = Datagram::new(addr(), addr(), vn); + let dgram = datagram(vn); let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); @@ -183,7 +177,7 @@ fn version_negotiation_not_supported() { .to_vec(); let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a, 0xff00_0001]); - let dgram = Datagram::new(addr(), addr(), vn); + let dgram = datagram(vn); assert_eq!(client.process(Some(&dgram), now()), Output::None); match client.state() { State::Closed(err) => { @@ -206,7 +200,7 @@ fn version_negotiation_bad_cid() { initial_pkt[6] ^= 0xc4; let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a, 0xff00_0001]); - let dgram = Datagram::new(addr(), addr(), vn); + let dgram = datagram(vn); let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); @@ -311,7 +305,7 @@ fn version_negotiation_downgrade() { // Start the handshake and spoof a VN packet. let initial = client.process_output(now()).dgram().unwrap(); let vn = create_vn(&initial, &[DOWNGRADE.wire_version()]); - let dgram = Datagram::new(addr(), addr(), vn); + let dgram = datagram(vn); client.process_input(&dgram, now()); connect_fail( diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 40014c73a1..2b357e0bb1 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -31,7 +31,7 @@ use crate::{ Error, Res, }; -use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder}; +use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; use neqo_crypto::random; /// This is the MTU that we assume when using IPv6. 
@@ -539,6 +539,10 @@ pub struct Path { rtt: RttEstimate, /// A packet sender for the path, which includes congestion control and a pacer. sender: PacketSender, + /// The DSCP/ECN marking to use for outgoing packets on this path. + tos: IpTos, + /// The IP TTL to use for outgoing packets on this path. + ttl: u8, /// The number of bytes received on this path. /// Note that this value might saturate on a long-lived connection, @@ -575,6 +579,8 @@ impl Path { challenge: None, rtt: RttEstimate::default(), sender, + tos: IpTos::default(), // TODO: Default to Ect0 when ECN is supported. + ttl: 64, // This is the default TTL on many OSes. received_bytes: 0, sent_bytes: 0, qlog, @@ -697,7 +703,7 @@ impl Path { /// Make a datagram. pub fn datagram>>(&self, payload: V) -> Datagram { - Datagram::new(self.local, self.remote, payload) + Datagram::new(self.local, self.remote, self.tos, Some(self.ttl), payload) } /// Get local address as `SocketAddr` diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 08d21915c6..288ec1a605 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -349,7 +349,13 @@ impl Server { &initial.dst_cid, ); if let Ok(p) = packet { - let retry = Datagram::new(dgram.destination(), dgram.source(), p); + let retry = Datagram::new( + dgram.destination(), + dgram.source(), + dgram.tos(), + dgram.ttl(), + p, + ); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); @@ -594,7 +600,13 @@ impl Server { packet.wire_version(), ); - return Some(Datagram::new(dgram.destination(), dgram.source(), vn)); + return Some(Datagram::new( + dgram.destination(), + dgram.source(), + dgram.tos(), + dgram.ttl(), + vn, + )); } match packet.packet_type() { diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index f088ebea3f..7597c81621 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -8,11 +8,10 @@ #![deny(clippy::pedantic)] 
#![cfg(not(feature = "fuzzing"))] -use neqo_common::Datagram; use neqo_transport::{ Connection, ConnectionParameters, RandomConnectionIdGenerator, State, Version, }; -use test_fixture::{self, addr, now}; +use test_fixture::{self, datagram, now}; use std::cell::RefCell; use std::rc::Rc; @@ -265,7 +264,7 @@ fn make_server(v: Version) -> Connection { fn process_client_initial(v: Version, packet: &[u8]) { let mut server = make_server(v); - let dgram = Datagram::new(addr(), addr(), packet); + let dgram = datagram(packet.to_vec()); assert_eq!(*server.state(), State::Init); let out = server.process(Some(&dgram), now()); assert_eq!(*server.state(), State::Handshaking); diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 13c70590fa..661909fd22 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -39,6 +39,8 @@ fn truncate_long_packet() { let truncated = Datagram::new( dupe.source(), dupe.destination(), + dupe.tos(), + dupe.ttl(), &dupe[..(dupe.len() - tail)], ); let hs_probe = client.process(Some(&truncated), now()).dgram(); @@ -108,6 +110,8 @@ fn reorder_server_initial() { let reordered = Datagram::new( server_initial.source(), server_initial.destination(), + server_initial.tos(), + server_initial.ttl(), packet, ); @@ -182,6 +186,8 @@ fn overflow_crypto() { let dgram = Datagram::new( server_initial.source(), server_initial.destination(), + server_initial.tos(), + server_initial.ttl(), packet, ); client.process_input(&dgram, now()); diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 0b51eacab1..3fffcba3da 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -21,7 +21,7 @@ use std::convert::TryFrom; use std::mem; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; -use test_fixture::{self, addr, assertions, default_client, now, split_datagram}; +use test_fixture::{self, assertions, datagram, default_client, now, 
split_datagram}; #[test] fn retry_basic() { @@ -150,7 +150,13 @@ fn retry_different_ip() { let dgram = dgram.unwrap(); let other_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); let other_addr = SocketAddr::new(other_v4, 443); - let from_other = Datagram::new(other_addr, dgram.destination(), &dgram[..]); + let from_other = Datagram::new( + other_addr, + dgram.destination(), + dgram.tos(), + dgram.ttl(), + &dgram[..], + ); let dgram = server.process(Some(&from_other), now()).dgram(); assert!(dgram.is_none()); } @@ -171,7 +177,13 @@ fn new_token_different_ip() { // Now rewrite the source address. let d = dgram.unwrap(); let src = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), d.source().port()); - let dgram = Some(Datagram::new(src, d.destination(), &d[..])); + let dgram = Some(Datagram::new( + src, + d.destination(), + d.tos(), + d.ttl(), + &d[..], + )); let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); @@ -196,7 +208,13 @@ fn new_token_expired() { let the_future = now() + Duration::from_secs(60 * 60 * 24 * 30); let d = dgram.unwrap(); let src = SocketAddr::new(d.source().ip(), d.source().port() + 1); - let dgram = Some(Datagram::new(src, d.destination(), &d[..])); + let dgram = Some(Datagram::new( + src, + d.destination(), + d.tos(), + d.ttl(), + &d[..], + )); let dgram = server.process(dgram.as_ref(), the_future).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); @@ -257,7 +275,13 @@ fn retry_bad_integrity() { let mut tweaked = retry.to_vec(); tweaked[retry.len() - 1] ^= 0x45; // damage the auth tag - let tweaked_packet = Datagram::new(retry.source(), retry.destination(), tweaked); + let tweaked_packet = Datagram::new( + retry.source(), + retry.destination(), + retry.tos(), + retry.ttl(), + tweaked, + ); // The client should ignore this packet. 
let dgram = client.process(Some(&tweaked_packet), now()).dgram(); @@ -338,7 +362,7 @@ fn vn_after_retry() { encoder.encode_vec(1, &client.odcid().unwrap()[..]); encoder.encode_vec(1, &[]); encoder.encode_uint(4, 0x5a5a_6a6a_u64); - let vn = Datagram::new(addr(), addr(), encoder); + let vn = datagram(encoder.into()); assert_ne!( client.process(Some(&vn), now()).callback(), @@ -425,6 +449,8 @@ fn mitm_retry() { let new_datagram = Datagram::new( client_initial2.source(), client_initial2.destination(), + client_initial2.tos(), + client_initial2.ttl(), notoken_packet, ); qdebug!("passing modified Initial to the main server"); diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index a4f07def87..2f1ee3b493 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -23,7 +23,7 @@ use neqo_transport::{ Connection, ConnectionError, ConnectionParameters, Error, Output, State, StreamType, Version, }; use test_fixture::{ - self, assertions, default_client, new_client, now, split_datagram, + self, assertions, datagram, default_client, new_client, now, split_datagram, CountingConnectionIdGenerator, }; @@ -157,6 +157,8 @@ fn duplicate_initial_new_path() { let other = Datagram::new( SocketAddr::new(initial.source().ip(), initial.source().port() ^ 23), initial.destination(), + initial.tos(), + initial.ttl(), &initial[..], ); @@ -235,7 +237,7 @@ fn drop_non_initial() { let mut bogus_data: Vec = header.into(); bogus_data.resize(1200, 66); - let bogus = Datagram::new(test_fixture::addr(), test_fixture::addr(), bogus_data); + let bogus = datagram(bogus_data); assert!(server.process(Some(&bogus), now()).dgram().is_none()); } @@ -254,7 +256,7 @@ fn drop_short_initial() { let mut bogus_data: Vec = header.into(); bogus_data.resize(1199, 66); - let bogus = Datagram::new(test_fixture::addr(), test_fixture::addr(), bogus_data); + let bogus = datagram(bogus_data); assert!(server.process(Some(&bogus), now()).dgram().is_none()); } @@ -371,7 
+373,13 @@ fn new_token_different_port() { // Now rewrite the source port, which should not change that the token is OK. let d = dgram.unwrap(); let src = SocketAddr::new(d.source().ip(), d.source().port() + 1); - let dgram = Some(Datagram::new(src, d.destination(), &d[..])); + let dgram = Some(Datagram::new( + src, + d.destination(), + d.tos(), + d.ttl(), + &d[..], + )); let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_initial(dgram.as_ref().unwrap(), false); @@ -426,7 +434,13 @@ fn bad_client_initial() { &mut ciphertext, (header_enc.len() - 1)..header_enc.len(), ); - let bad_dgram = Datagram::new(dgram.source(), dgram.destination(), ciphertext); + let bad_dgram = Datagram::new( + dgram.source(), + dgram.destination(), + dgram.tos(), + dgram.ttl(), + ciphertext, + ); // The server should reject this. let response = server.process(Some(&bad_dgram), now()); @@ -474,7 +488,13 @@ fn version_negotiation_ignored() { let dgram = client.process(None, now()).dgram().expect("a datagram"); let mut input = dgram.to_vec(); input[1] ^= 0x12; - let damaged = Datagram::new(dgram.source(), dgram.destination(), input.clone()); + let damaged = Datagram::new( + dgram.source(), + dgram.destination(), + dgram.tos(), + dgram.ttl(), + input.clone(), + ); let vn = server.process(Some(&damaged), now()).dgram(); let mut dec = Decoder::from(&input[5..]); // Skip past version. 
diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index fa9276bf5c..e4089c43c2 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -11,7 +11,7 @@ use neqo_common::{ event::Provider, hex, qlog::{new_trace, NeqoQlog}, - qtrace, Datagram, Decoder, Role, + qtrace, Datagram, Decoder, IpTos, Role, }; use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; @@ -85,6 +85,12 @@ pub const LONG_CERT_KEYS: &[&str] = &["A long cert"]; pub const DEFAULT_ALPN: &[&str] = &["alpn"]; pub const DEFAULT_ALPN_H3: &[&str] = &["h3"]; +// Create a default datagram with the given data. +#[must_use] +pub fn datagram(data: Vec) -> Datagram { + Datagram::new(addr(), addr(), IpTos::default(), Some(128), data) +} + /// Create a default socket address. #[must_use] pub fn addr() -> SocketAddr { @@ -332,8 +338,8 @@ fn split_packet(buf: &[u8]) -> (&[u8], Option<&[u8]>) { pub fn split_datagram(d: &Datagram) -> (Datagram, Option) { let (a, b) = split_packet(&d[..]); ( - Datagram::new(d.source(), d.destination(), a), - b.map(|b| Datagram::new(d.source(), d.destination(), b)), + Datagram::new(d.source(), d.destination(), d.tos(), d.ttl(), a), + b.map(|b| Datagram::new(d.source(), d.destination(), d.tos(), d.ttl(), b)), ) } From ed13307545bbbb95c59fa6288a6958bd631d8f50 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 23 Jan 2024 04:10:34 +0100 Subject: [PATCH 074/321] fix(client): prevent out-of-range slice access (#1569) * fix(client): prevent out-of-range slice access The Quic Network Simulator `handshakeloss` testcase instructs the `neqo-client` to download from multiple URLs in series. The client takes one URL after the next and tries to request it: ``` rust to_request = &remaining[..1]; remaining = &remaining[1..]; ``` This will panic on the last URL, trying to access the first element in an empty slice. This commit removes the out-of-range access. 
Instead of using slice operations which hide potential panics, it uses owned collections (`VecDeque` and `Vec`) and their safe methods (e.g. `pop_front`, `drain`). * Pass VecDeque * Fold two loops into iterator chain * Move first check outside of loop and refactor into while --- neqo-client/src/main.rs | 71 +++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 38 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 3bf7585e9f..00674c9717 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -831,7 +831,7 @@ fn handle_test( local_addr: SocketAddr, remote_addr: SocketAddr, hostname: &str, - urls: &[Url], + url_queue: VecDeque, resumption_token: Option, ) -> Res> { let key_update = KeyUpdateState(args.key_update); @@ -841,7 +841,7 @@ fn handle_test( .expect("failed to create client"); args.method = String::from("POST"); let url_handler = URLHandler { - url_queue: VecDeque::from(urls.to_vec()), + url_queue, stream_handlers: HashMap::new(), all_paths: Vec::new(), handler_type: StreamHandlerType::Upload, @@ -908,7 +908,7 @@ fn client( local_addr: SocketAddr, remote_addr: SocketAddr, hostname: &str, - urls: &[Url], + url_queue: VecDeque, resumption_token: Option, ) -> Res> { let testcase = args.test.clone(); @@ -921,7 +921,7 @@ fn client( local_addr, remote_addr, hostname, - urls, + url_queue, resumption_token, ); } @@ -930,7 +930,7 @@ fn client( .expect("failed to create client"); let key_update = KeyUpdateState(args.key_update); let url_handler = URLHandler { - url_queue: VecDeque::from(urls.to_vec()), + url_queue, stream_handlers: HashMap::new(), all_paths: Vec::new(), handler_type: StreamHandlerType::Download, @@ -1021,19 +1021,29 @@ fn main() -> Res<()> { } } - let mut urls_by_origin: HashMap> = HashMap::new(); - for url in &args.urls { - let entry = urls_by_origin.entry(url.origin()).or_default(); - entry.push(url.clone()); - } + let urls_by_origin = args + .urls + .clone() + .into_iter() + 
.fold(HashMap::>::new(), |mut urls, url| { + urls.entry(url.origin()).or_default().push_back(url); + urls + }) + .into_iter() + .filter_map(|(origin, urls)| match origin { + Origin::Tuple(_scheme, h, p) => Some(((h, p), urls)), + Origin::Opaque(x) => { + eprintln!("Opaque origin {x:?}"); + None + } + }); - for ((_scheme, host, port), urls) in urls_by_origin.into_iter().filter_map(|(k, v)| match k { - Origin::Tuple(s, h, p) => Some(((s, h, p), v)), - Origin::Opaque(x) => { - eprintln!("Opaque origin {x:?}"); - None + for ((host, port), mut urls) in urls_by_origin { + if args.resume && urls.len() < 2 { + eprintln!("Resumption to {host} cannot work without at least 2 URLs."); + exit(127); } - }) { + let remote_addr = format!("{host}:{port}").to_socket_addrs()?.find(|addr| { !matches!( (addr, args.ipv4_only, args.ipv6_only), @@ -1076,28 +1086,13 @@ fn main() -> Res<()> { let hostname = format!("{host}"); let mut token: Option = None; - let mut remaining = &urls[..]; - let mut first = true; - loop { - let to_request; - if (args.resume && first) || args.download_in_series { - to_request = &remaining[..1]; - remaining = &remaining[1..]; - if args.resume && first && remaining.is_empty() { - println!( - "Error: resumption to {hostname} cannot work without at least 2 URLs." 
- ); - exit(127); - } + while !urls.is_empty() { + let to_request = if args.resume || args.download_in_series { + urls.pop_front().into_iter().collect() } else { - to_request = remaining; - remaining = &[][..]; - } - if to_request.is_empty() { - break; - } + std::mem::take(&mut urls) + }; - first = false; token = if args.use_old_http { old::old_client( &args, @@ -1425,7 +1420,7 @@ mod old { local_addr: SocketAddr, remote_addr: SocketAddr, origin: &str, - urls: &[Url], + url_queue: VecDeque, token: Option, ) -> Res> { let alpn = match args.alpn.as_str() { @@ -1457,7 +1452,7 @@ mod old { let key_update = KeyUpdateState(args.key_update); let mut h = HandlerOld { streams: HashMap::new(), - url_queue: VecDeque::from(urls.to_vec()), + url_queue, all_paths: Vec::new(), args, token: None, From 1221cfdf981e8edaa3c9aa40e5880a3de0e9fc5e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 23 Jan 2024 09:26:23 +0200 Subject: [PATCH 075/321] chore: Bump various dependencies to what FF ships with (#1556) * chore: Bump various dependencies to what FF ships with * Allow patch-level variations in deps * Update neqo-common/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-crypto/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-http3/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-client/Cargo.toml Co-authored-by: Max Inden * Update neqo-http3/Cargo.toml Co-authored-by: Martin Thomson * Update test-fixture/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-interop/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-transport/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-transport/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-server/Cargo.toml Co-authored-by: Martin Thomson * Update neqo-server/Cargo.toml Co-authored-by: Martin Thomson --------- Co-authored-by: Martin Thomson Co-authored-by: Max Inden --- neqo-client/Cargo.toml | 10 +++++----- neqo-common/Cargo.toml | 8 ++++---- neqo-crypto/Cargo.toml | 10 +++++----- 
neqo-http3/Cargo.toml | 14 +++++++------- neqo-interop/Cargo.toml | 9 ++++----- neqo-qpack/Cargo.toml | 8 ++++---- neqo-server/Cargo.toml | 14 +++++++------- neqo-transport/Cargo.toml | 10 +++++----- test-fixture/Cargo.toml | 4 ++-- 9 files changed, 43 insertions(+), 44 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 43ff45d9e5..ca11186f95 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -9,15 +9,15 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] -neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } +mio = "~0.6.23" neqo-common = { path="./../neqo-common" } +neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } -structopt = "0.3.7" -url = "2.0" +neqo-transport = { path = "./../neqo-transport" } qlog = "0.11.0" -mio = "0.6.17" +structopt = "0.3" +url = "~2.5.0" [features] deny-warnings = [] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 584f3100d4..f6fd952a18 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -8,12 +8,12 @@ license = "MIT OR Apache-2.0" build = "build.rs" [dependencies] -log = { version = "0.4.0", default-features = false } -enum-map = "~2.7.3" +enum-map = "2.7" env_logger = { version = "0.10", default-features = false } -lazy_static = "1.3.0" +lazy_static = "1.4" +log = { version = "0.4", default-features = false } qlog = "0.11.0" -time = { version = "0.3", features = ["formatting"] } +time = {version = "0.3.23", features = ["formatting"]} [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 79d6dc21d9..c7cad21c87 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -8,15 +8,15 @@ build = "build.rs" license = "MIT OR Apache-2.0" [dependencies] +log = {version = "~0.4.17", default-features = false} neqo-common = { path = 
"../neqo-common" } -log = {version = "0.4.0", default-features = false} [build-dependencies] -bindgen = {version = "0.69", default-features = false, features= ["runtime"]} -serde = "1.0" -serde_derive = "1.0" -toml = "0.5" +bindgen = {version = "0.69.1", default-features = false, features= ["runtime"]} mozbuild = {version = "0.1", optional = true} +serde = "1.0.195" +serde_derive = "1.0.195" +toml = "0.5.11" [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index bea90f159c..8dafbe8b40 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -7,17 +7,17 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] +enumset = "1.1.2" +lazy_static = "1.4" +log = {version = "0.4.17", default-features = false} neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } neqo-qpack = { path = "./../neqo-qpack" } -log = {version = "0.4.0", default-features = false} -smallvec = "1.0.0" +neqo-transport = { path = "./../neqo-transport" } qlog = "0.11.0" -sfv = "0.9.1" -url = "2.0" -lazy_static = "1.3.0" -enumset = "1.1.2" +sfv = "0.9.3" +smallvec = "1.11.1" +url = "2.5" [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 7660b0f1d0..a197aa2203 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -7,14 +7,13 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] -neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } +lazy_static = "1.4" neqo-common = { path="./../neqo-common" } +neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } - -structopt = "0.3.7" -lazy_static = "1.3.0" +neqo-transport = { path = "./../neqo-transport" } +structopt = "~0.3" [features] deny-warnings = [] diff 
--git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 0cc6cb8c2e..c07a7fcec0 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -7,13 +7,13 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] +lazy_static = "~1.4.0" +log = {version = "~0.4.17", default-features = false} neqo-common = { path = "./../neqo-common" } -neqo-transport = { path = "./../neqo-transport" } neqo-crypto = { path = "./../neqo-crypto" } -log = {version = "0.4.0", default-features = false} -static_assertions = "1.1.0" +neqo-transport = { path = "./../neqo-transport" } qlog = "0.11.0" -lazy_static = "1.3.0" +static_assertions = "~1.1.0" [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 09ac930d50..888df43163 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -7,17 +7,17 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] -neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } +log = {version = "0.4.17", default-features = false} +mio = "0.6.23" +mio-extras = "2.0.6" neqo-common = { path="./../neqo-common" } +neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } -structopt = "0.3.7" -regex = "1" -mio = "0.6.17" -mio-extras = "2.0.5" -log = {version = "0.4.0", default-features = false} +neqo-transport = { path = "./../neqo-transport" } qlog = "0.11.0" +regex = "1.9" +structopt = "0.3" [features] deny-warnings = [] diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index a4da735a8a..3263991be9 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -7,13 +7,13 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] -neqo-crypto = { path = "../neqo-crypto" } +indexmap = "1.9.3" +lazy_static = "1.4" +log = {version = "0.4.17", default-features = false} neqo-common = { path = 
"../neqo-common" } -lazy_static = "1.3.0" -log = {version = "0.4.0", default-features = false} -smallvec = "1.0.0" +neqo-crypto = { path = "../neqo-crypto" } qlog = "0.11.0" -indexmap = "1.0" +smallvec = "1.11.1" [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index dddabfbc2d..f142c9a2f4 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -7,8 +7,8 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] -lazy_static = "1.3.0" -log = {version = "0.4.0", default-features = false} +lazy_static = "1.4" +log = {version = "0.4.17", default-features = false} neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } From e334cf0508c5af4b7de0bb926599a07cec3f3a7c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 23 Jan 2024 14:40:20 +0100 Subject: [PATCH 076/321] fix: Rust intra doc links (#1571) - Fix minor typos in Rust intra doc links. - Add CI check. 
--- .github/workflows/check.yml | 3 ++ neqo-http3/src/connection_client.rs | 62 ++++++++++++++--------------- neqo-http3/src/lib.rs | 2 +- neqo-transport/src/packet/mod.rs | 2 +- test-fixture/src/lib.rs | 5 ++- 5 files changed, 39 insertions(+), 35 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 140d29bccf..14886845ad 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -121,6 +121,9 @@ jobs: if: success() || failure() continue-on-error: ${{ matrix.rust-toolchain == 'beta' }} + - name: Check rustdoc links + run: RUSTDOCFLAGS="--deny rustdoc::broken_intra_doc_links --deny warnings" cargo doc --verbose --workspace --no-deps --document-private-items + - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 with: diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 13a3c4a47c..0be8acaa04 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -71,43 +71,43 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// The API is used for: /// - create and close an endpoint: -/// - [`new`](struct.Http3Client.html#method.new) -/// - [`new_with_conn`](struct.Http3Client.html#method.new_with_conn) -/// - [`close`](struct.Http3Client.html#method.close) +/// - [`Http3Client::new`] +/// - [`Http3Client::new_with_conn`] +/// - [`Http3Client::close`] /// - configuring an endpoint: -/// - [`authenticated`](struct.Http3Client.html#method.authenticated) -/// - [`enable_ech`](struct.Http3Client.html#method.enable_ech) -/// - [`enable_resumption`](struct.Http3Client.html#method.enable_resumption) -/// - [`initiate_key_update`](struct.Http3Client.html#method.initiate_key_update) -/// - [`set_qlog`](struct.Http3Client.html#method.set_qlog) +/// - [`Http3Client::authenticated`] +/// - [`Http3Client::enable_ech`] +/// - [`Http3Client::enable_resumption`] +/// - [`Http3Client::initiate_key_update`] +/// - 
[`Http3Client::set_qlog`] /// - retrieving information about a connection: -/// - [`peer_certificate`](struct.Http3Client.html#method.peer_certificate) -/// - [`qpack_decoder_stats`](struct.Http3Client.html#method.qpack_decoder_stats) -/// - [`qpack_encoder_stats`](struct.Http3Client.html#method.qpack_encoder_stats) -/// - [`transport_stats`](struct.Http3Client.html#method.transport_stats) -/// - [`state`](struct.Http3Client.html#method.state) -/// - [`take_resumption_token`](struct.Http3Client.html#method.take_resumption_token) -/// - [`tls_inf`](struct.Http3Client.html#method.tls_info) +/// - [`Http3Client::peer_certificate`] +/// - [`Http3Client::qpack_decoder_stats`] +/// - [`Http3Client::qpack_encoder_stats`] +/// - [`Http3Client::transport_stats`] +/// - [`Http3Client::state`] +/// - [`Http3Client::take_resumption_token`] +/// - [`Http3Client::tls_info`] /// - driving HTTP/3 session: -/// - [`process_output`](struct.Http3Client.html#method.process_output) -/// - [`process_input`](struct.Http3Client.html#method.process_input) -/// - [`process`](struct.Http3Client.html#method.process) +/// - [`Http3Client::process_output`] +/// - [`Http3Client::process_input`] +/// - [`Http3Client::process`] /// - create requests, send/receive data, and cancel requests: -/// - [`fetch`](struct.Http3Client.html#method.fetch) -/// - [`send_data`](struct.Http3Client.html#method.send_data) -/// - [`read_dara`](struct.Http3Client.html#method.read_data) -/// - [`stream_close_send`](struct.Http3Client.html#method.stream_close_send) -/// - [`cancel_fetch`](struct.Http3Client.html#method.cancel_fetch) -/// - [`stream_reset_send`](struct.Http3Client.html#method.stream_reset_send) -/// - [`stream_stop_sending`](struct.Http3Client.html#method.stream_stop_sending) -/// - [`set_stream_max_data`](struct.Http3Client.html#method.set_stream_max_data) +/// - [`Http3Client::fetch`] +/// - [`Http3Client::send_data`] +/// - [`Http3Client::read_data`] +/// - [`Http3Client::stream_close_send`] +/// - 
[`Http3Client::cancel_fetch`] +/// - [`Http3Client::stream_reset_send`] +/// - [`Http3Client::stream_stop_sending`] +/// - [`Http3Client::set_stream_max_data`] /// - priority feature: -/// - [`priority_update`](struct.Http3Client.html#method.priority_update) +/// - [`Http3Client::priority_update`] /// - `WebTransport` feature: -/// - [`webtransport_create_session`](struct.Http3Client.html#method.webtransport_create_session) -/// - [`webtransport_close_session`](struct.Http3Client.html#method.webtransport_close_session) -/// - [`webtransport_create_stream`](struct.Http3Client.html#method.webtransport_create_sstream) -/// - [`webtransport_enabled`](struct.Http3Client.html#method.webtransport_enabled) +/// - [`Http3Client::webtransport_create_session`] +/// - [`Http3Client::webtransport_close_session`] +/// - [`Http3Client::webtransport_create_stream`] +/// - [`Http3Client::webtransport_enabled`] /// /// ## Examples /// diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index 76be301a8e..e0dc4c3c93 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -35,7 +35,7 @@ supported and can be enabled using [`Http3Parameters`](struct.Http3Parameters.ht The crate does not create an OS level UDP socket, it produces, i.e. encodes, data that should be sent as a payload in a UDP packet and consumes data received on the UDP socket. For example, -[`std::net::UdpSocket`](std::net::UdpSocket) or [`mio::net::UdpSocket`](https://crates.io/crates/mio) +[`std::net::UdpSocket`] or [`mio::net::UdpSocket`](https://crates.io/crates/mio) could be used for creating UDP sockets. The application is responsible for creating a socket, polling the socket, and sending and receiving diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index acd3b5b2be..080cf6649a 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -409,7 +409,7 @@ impl PacketBuilder { /// Make a retry packet. 
/// As this is a simple packet, this is just an associated function. /// As Retry is odd (it has to be constructed with leading bytes), - /// this returns a Vec rather than building on an encoder. + /// this returns a [`Vec`] rather than building on an encoder. pub fn retry( version: Version, dcid: &[u8], diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index e4089c43c2..e431ace9a1 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -363,8 +363,9 @@ impl ToString for SharedVec { } } -/// Returns a pair of new enabled `NeqoQlog` that is backed by a Vec together with a -/// `Cursor>` that can be used to read the contents of the log. +/// Returns a pair of new enabled `NeqoQlog` that is backed by a [`Vec`] +/// together with a [`Cursor>`] that can be used to read the contents of +/// the log. /// # Panics /// Panics if the log cannot be created. #[must_use] From 1f0694e8fe72d8d6cbd9950e64e5ccb06eeda767 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 23 Jan 2024 16:17:44 +0200 Subject: [PATCH 077/321] Fix unresolved link errors in rustdoc CI (#1575) --- neqo-common/src/tos.rs | 54 +++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs index 7693ef1808..aa360d1d53 100644 --- a/neqo-common/src/tos.rs +++ b/neqo-common/src/tos.rs @@ -15,16 +15,16 @@ use enum_map::Enum; #[repr(u8)] pub enum IpTosEcn { #[default] - /// Not-ECT, Not ECN-Capable Transport, [RFC3168] + /// Not-ECT, Not ECN-Capable Transport, RFC3168 NotEct = 0b00, - /// ECT(1), ECN-Capable Transport(1), [RFC8311][RFC9331] + /// ECT(1), ECN-Capable Transport(1), RFC8311 and RFC9331 Ect1 = 0b01, - /// ECT(0), ECN-Capable Transport(0), [RFC3168] + /// ECT(0), ECN-Capable Transport(0), RFC3168 Ect0 = 0b10, - /// CE, Congestion Experienced, [RFC3168] + /// CE, Congestion Experienced, RFC3168 Ce = 0b11, } @@ -52,73 +52,73 @@ impl From for IpTosEcn { #[repr(u8)] pub enum IpTosDscp { 
#[default] - /// Class Selector 0, [RFC2474] + /// Class Selector 0, RFC2474 Cs0 = 0b0000_0000, - /// Class Selector 1, [RFC2474] + /// Class Selector 1, RFC2474 Cs1 = 0b0010_0000, - /// Class Selector 2, [RFC2474] + /// Class Selector 2, RFC2474 Cs2 = 0b0100_0000, - /// Class Selector 3, [RFC2474] + /// Class Selector 3, RFC2474 Cs3 = 0b0110_0000, - /// Class Selector 4, [RFC2474] + /// Class Selector 4, RFC2474 Cs4 = 0b1000_0000, - /// Class Selector 5, [RFC2474] + /// Class Selector 5, RFC2474 Cs5 = 0b1010_0000, - /// Class Selector 6, [RFC2474] + /// Class Selector 6, RFC2474 Cs6 = 0b1100_0000, - /// Class Selector 7, [RFC2474] + /// Class Selector 7, RFC2474 Cs7 = 0b1110_0000, - /// Assured Forwarding 11, [RFC2597] + /// Assured Forwarding 11, RFC2597 Af11 = 0b0010_1000, - /// Assured Forwarding 12, [RFC2597] + /// Assured Forwarding 12, RFC2597 Af12 = 0b0011_0000, - /// Assured Forwarding 13, [RFC2597] + /// Assured Forwarding 13, RFC2597 Af13 = 0b0011_1000, - /// Assured Forwarding 21, [RFC2597] + /// Assured Forwarding 21, RFC2597 Af21 = 0b0100_1000, - /// Assured Forwarding 22, [RFC2597] + /// Assured Forwarding 22, RFC2597 Af22 = 0b0101_0000, - /// Assured Forwarding 23, [RFC2597] + /// Assured Forwarding 23, RFC2597 Af23 = 0b0101_1000, - /// Assured Forwarding 31, [RFC2597] + /// Assured Forwarding 31, RFC2597 Af31 = 0b0110_1000, - /// Assured Forwarding 32, [RFC2597] + /// Assured Forwarding 32, RFC2597 Af32 = 0b0111_0000, - /// Assured Forwarding 33, [RFC2597] + /// Assured Forwarding 33, RFC2597 Af33 = 0b0111_1000, - /// Assured Forwarding 41, [RFC2597] + /// Assured Forwarding 41, RFC2597 Af41 = 0b1000_1000, - /// Assured Forwarding 42, [RFC2597] + /// Assured Forwarding 42, RFC2597 Af42 = 0b1001_0000, - /// Assured Forwarding 43, [RFC2597] + /// Assured Forwarding 43, RFC2597 Af43 = 0b1001_1000, - /// Expedited Forwarding, [RFC3246] + /// Expedited Forwarding, RFC3246 Ef = 0b1011_1000, - /// Capacity-Admitted Traffic, [RFC5865] + /// 
Capacity-Admitted Traffic, RFC5865 VoiceAdmit = 0b1011_0000, - /// Lower-Effort, [RFC8622] + /// Lower-Effort, RFC8622 Le = 0b0000_0100, } From e447eef4d1f7fb2d654a2bac800233e141bc317a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 23 Jan 2024 17:27:15 +0200 Subject: [PATCH 078/321] Use matrix toolchain when checking rustdoc (#1576) --- .github/workflows/check.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 14886845ad..77dfac74ea 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -122,7 +122,10 @@ jobs: continue-on-error: ${{ matrix.rust-toolchain == 'beta' }} - name: Check rustdoc links - run: RUSTDOCFLAGS="--deny rustdoc::broken_intra_doc_links --deny warnings" cargo doc --verbose --workspace --no-deps --document-private-items + run: cargo +${{ matrix.rust-toolchain }} doc --verbose --workspace --no-deps --document-private-items + env: + RUSTDOCFLAGS: "--deny rustdoc::broken_intra_doc_links --deny warnings" + if: success() || failure() - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v3 From 6acf9ef4ab5fb92405ffa536f8693bcac63cc0fa Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 24 Jan 2024 00:01:02 +0100 Subject: [PATCH 079/321] fix(client): only take single url on (args.resume && first) (#1578) QUIC Interop Runner testcases `resumption` and `0-rtt` expect the client to download the first file, close the connection, and then download the remaining files on a second connection. https://github.com/quic-interop/quic-interop-runner/tree/master#test-cases Thus `neqo-client`, when `args.resume` is `true` must only take a single URL on the **first** loop iteration. On the second iteration it must take all remaining URLs. Regression introduced in https://github.com/mozilla/neqo/pull/1569. 
--- neqo-client/src/main.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 00674c9717..677829ad05 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -1086,13 +1086,16 @@ fn main() -> Res<()> { let hostname = format!("{host}"); let mut token: Option = None; + let mut first = true; while !urls.is_empty() { - let to_request = if args.resume || args.download_in_series { + let to_request = if (args.resume && first) || args.download_in_series { urls.pop_front().into_iter().collect() } else { std::mem::take(&mut urls) }; + first = false; + token = if args.use_old_http { old::old_client( &args, From c64b3c23eae8faca3be8ef8eaf357fe630bf79c7 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 24 Jan 2024 10:41:57 +0200 Subject: [PATCH 080/321] ci: Add a data transfer test (#1574) * ci: Add a data transfer test * Use +${{ matrix.rust-toolchain }} * Fix URL * Try and avoid Windows crash * Simplify * Build first * Deal with EWOULDBLOCK * Another EWOULDBLOCK * Address code review comments and align variable names * When will I learn to run tests locally before commit? --- .github/workflows/check.yml | 13 +++++++++++++ neqo-client/src/main.rs | 11 ++++++++--- neqo-server/src/main.rs | 30 +++++++++++++++++++++--------- 3 files changed, 42 insertions(+), 12 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 77dfac74ea..685d36360d 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -112,6 +112,19 @@ jobs: - name: Run tests and determine coverage run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info + - name: Run client/server transfer + run: | + cargo +${{ matrix.rust-toolchain }} build --bin neqo-client --bin neqo-server + cargo +${{ matrix.rust-toolchain }} run --bin neqo-server -- $HOST:4433 & + PID=$! 
+ cargo +${{ matrix.rust-toolchain }} run --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + kill $PID + [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 + env: + HOST: localhost + SIZE: 54321 + RUST_LOG: info + - name: Check formatting run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check if: success() || failure() diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 677829ad05..a61a3ced88 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -452,9 +452,14 @@ fn process_loop( 'write: loop { match client.process_output(Instant::now()) { Output::Datagram(dgram) => { - if let Err(e) = emit_datagram(socket, dgram) { - eprintln!("UDP write error: {e}"); - client.close(Instant::now(), 0, e.to_string()); + if let Err(err) = emit_datagram(socket, dgram) { + if err.kind() == ErrorKind::WouldBlock + || err.kind() == ErrorKind::Interrupted + { + break 'write; + } + eprintln!("UDP write error: {err}"); + client.close(Instant::now(), 0, err.to_string()); exiting = true; break 'write; } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index eb6a00b8cc..cabae35232 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -318,9 +318,15 @@ impl QuicParameters { } fn emit_packet(socket: &mut UdpSocket, out_dgram: Datagram) { - let sent = socket - .send_to(&out_dgram, &out_dgram.destination()) - .expect("Error sending datagram"); + let sent = match socket.send_to(&out_dgram, &out_dgram.destination()) { + Err(ref err) => { + if err.kind() != io::ErrorKind::WouldBlock || err.kind() == io::ErrorKind::Interrupted { + eprintln!("UDP send error: {err:?}"); + } + 0 + } + Ok(res) => res, + }; if sent != out_dgram.len() { eprintln!("Unable to send all {} bytes of datagram", out_dgram.len()); } @@ -590,7 +596,12 @@ fn read_dgram( ) -> Result, io::Error> { let buf = &mut [0u8; 2048]; let (sz, remote_addr) = match socket.recv_from(&mut buf[..]) { - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return 
Ok(None), + Err(ref err) + if err.kind() == io::ErrorKind::WouldBlock + || err.kind() == io::ErrorKind::Interrupted => + { + return Ok(None) + } Err(err) => { eprintln!("UDP recv error: {err:?}"); return Err(err); @@ -672,12 +683,13 @@ impl ServersRunner { Ok(s) => s, }; - let also_v4 = if socket.only_v6().unwrap_or(true) { - "" - } else { - " as well as V4" + print!("Server waiting for connection on: {local_addr:?}"); + // On Windows, this is not supported. + #[cfg(not(target_os = "windows"))] + if !socket.only_v6().unwrap_or(true) { + print!(" as well as V4"); }; - println!("Server waiting for connection on: {local_addr:?}{also_v4}"); + println!(); self.poll.register( &socket, From 8169002f3d1217690d67e3d1ecaea9215856d09c Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 25 Jan 2024 09:43:49 +1100 Subject: [PATCH 081/321] Create a security policy (#1573) * Create a security policy A simple document describing how we handle advisories is probably long overdue. * Review feedback Co-authored-by: Lars Eggert * spacing --------- Co-authored-by: Lars Eggert --- SECURITY.md | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..5b70d7ba3b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,31 @@ +# Security Policy + +This document describes how security vulnerabilities in this project should be reported. + +## Supported Versions + +Support for neqo is based on the Firefox version in which it has landed. +Versions of neqo in [current versions of Firefox](https://whattrainisitnow.com/calendar/) are actively supported. 
+ +The version of neqo that is active can be found in the Firefox repositories: + +- [release](https://hg.mozilla.org/mozilla-unified/file/release/third_party/rust/neqo-transport/Cargo.toml), +- [beta](https://hg.mozilla.org/mozilla-unified/file/beta/third_party/rust/neqo-transport/Cargo.toml), and +- [trunk/central](https://hg.mozilla.org/mozilla-unified/file/central/third_party/rust/neqo-transport/Cargo.toml), +- [ESR 115](https://hg.mozilla.org/mozilla-unified/file/esr115/third_party/rust/neqo-transport/Cargo.toml). + +The listed version in these files corresponds to [tags](https://github.com/mozilla/neqo/tags) on this repository. +Releases do not always correspond to a branch. + +We welcome reports of security vulnerabilities in any of these released versions or the latest code on the `main` branch. + +## Reporting a Vulnerability + +To report a security problem with neqo, create a bug in Mozilla's Bugzilla instance in the [Core :: Networking](https://bugzilla.mozilla.org/enter_bug.cgi?product=Core&component=Networking) component. + +**IMPORTANT: For security issues, please make sure that you check the box labelled "Many users could be harmed by this security problem".** +We advise that you check this option for anything that involves anything security-relevant, including memory safety, crashes, race conditions, and handling of confidential information. + +Review Mozilla's [guides on bug reporting](https://bugzilla.mozilla.org/page.cgi?id=bug-writing.html) before you open a bug. + +Mozilla operates a [bug bounty program](https://www.mozilla.org/en-US/security/bug-bounty/), for which this project is eligible. 
From 4e7d9031435f428e500e7217beb137bc6475bd91 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 25 Jan 2024 00:46:25 +0200 Subject: [PATCH 082/321] Remove docker/Dockerfile, which is unused (#1579) We don't use a docker image for CI anymore --- docker/Dockerfile | 64 ----------------------------------------------- 1 file changed, 64 deletions(-) delete mode 100644 docker/Dockerfile diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index e3a7dfbadf..0000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,64 +0,0 @@ -# This image is used for running CI tests. -# The image is not built unless the `docker` branch is updated. -# Push to `docker` to trigger a build: -# $ git push origin main:docker - -FROM ubuntu:20.04 -LABEL maintainer="Martin Thomson " - -RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates \ - coreutils \ - curl \ - git \ - make \ - mercurial \ - ssh \ - build-essential \ - clang \ - llvm \ - libclang-dev \ - lld \ - gyp \ - ninja-build \ - pkg-config \ - python-is-python3 \ - python3 \ - python3-pip \ - sudo \ - zlib1g-dev \ - && apt-get autoremove -y && apt-get clean -y \ - && rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH \ - RUST_VERSION=1.51.0 - -RUN set -eux; \ - curl -sSLf "https://static.rust-lang.org/rustup/archive/1.20.2/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ - echo 'e68f193542c68ce83c449809d2cad262cc2bbb99640eb47c58fc1dc58cc30add *rustup-init' | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y -q --no-modify-path --profile minimal --component rustfmt --component clippy --default-toolchain "$RUST_VERSION"; \ - rm -f rustup-init; \ - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" - -ENV USER neqo -ENV LOGNAME $USER -ENV HOSTNAME $USER -ENV HOME /home/$USER -ENV SHELL /bin/bash - -RUN useradd -d "$HOME" -s "$SHELL" -m "$USER" -RUN echo "$USER ALL=(ALL) NOPASSWD: ALL" >> 
/etc/sudoers -WORKDIR $HOME -USER $USER - -ENV NSS_DIR=$HOME/nss \ - NSPR_DIR=$HOME/nspr \ - LD_LIBRARY_PATH=$HOME/dist/Debug/lib \ - RUSTFLAGS="-C link-arg=-fuse-ld=lld" - -RUN set -eux; \ - hg clone -u c7a1c91cd9be https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ - hg clone -u NSPR_4_25_RTM https://hg.mozilla.org/projects/nspr "$NSPR_DIR" From b164ab01aa7a20b957f62f28485b1cd33959289f Mon Sep 17 00:00:00 2001 From: Kershaw Date: Thu, 25 Jan 2024 10:04:39 +0100 Subject: [PATCH 083/321] add support to log rtt (#1522) * Log RttEstimate * address comment * Update neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/sender.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/sender.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/cc/mod.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert --------- Co-authored-by: Lars Eggert --- neqo-transport/src/cc/classic_cc.rs | 22 +++++++++++++--------- neqo-transport/src/cc/mod.rs | 4 ++-- neqo-transport/src/cc/tests/cubic.rs | 4 +++- neqo-transport/src/cc/tests/new_reno.rs | 21 ++++++++++++++------- neqo-transport/src/path.rs | 3 +-- neqo-transport/src/rtt.rs | 12 ++++++++++++ neqo-transport/src/sender.rs | 10 ++++++++-- 7 files changed, 53 insertions(+), 23 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 000d9bf4d5..c1d8fd08a6 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -19,6 +19,7 @@ use crate::{ cc::MAX_DATAGRAM_SIZE, packet::PacketNumber, qlog::{self, QlogMetric}, + rtt::RttEstimate, sender::PACING_BURST_SIZE, tracking::SentPacket, }; @@ -161,17 +162,18 @@ impl CongestionControl for ClassicCongestionControl { } // Multi-packet version of OnPacketAckedCC - fn 
on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant) { + fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], rtt_est: &RttEstimate, now: Instant) { let mut is_app_limited = true; let mut new_acked = 0; for pkt in acked_pkts { qinfo!( - "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}", + "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}", self, pkt.pn, pkt.size, i32::from(!pkt.cc_outstanding()), - i32::from(pkt.lost()) + i32::from(pkt.lost()), + rtt_est, ); if !pkt.cc_outstanding() { continue; @@ -222,7 +224,7 @@ impl CongestionControl for ClassicCongestionControl { let bytes_for_increase = self.cc_algorithm.bytes_for_cwnd_increase( self.congestion_window, new_acked, - min_rtt, + rtt_est.minimum(), now, ); debug_assert!(bytes_for_increase > 0); @@ -546,6 +548,7 @@ mod tests { CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, }, packet::{PacketNumber, PacketType}, + rtt::RttEstimate, tracking::SentPacket, }; use neqo_common::qinfo; @@ -557,6 +560,7 @@ mod tests { const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); + const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(98)); const ZERO: Duration = Duration::from_secs(0); const EPSILON: Duration = Duration::from_nanos(1); const GAP: Duration = Duration::from_secs(1); @@ -1025,7 +1029,7 @@ mod tests { } assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); now += RTT; - cc.on_packets_acked(&pkts, RTT, now); + cc.on_packets_acked(&pkts, &RTT_ESTIMATE, now); assert_eq!(cc.bytes_in_flight(), 0); assert_eq!(cc.acked_bytes, 0); assert_eq!(cwnd, cc.congestion_window); // CWND doesn't grow because we're app limited @@ -1054,7 +1058,7 @@ mod tests { now += RTT; // Check if congestion window gets increased for all packets currently in flight for (i, pkt) in pkts.into_iter().enumerate() { - cc.on_packets_acked(&[pkt], RTT, 
now); + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), @@ -1101,7 +1105,7 @@ mod tests { ); cc.on_packet_sent(&p_not_lost); now += RTT; - cc.on_packets_acked(&[p_not_lost], RTT, now); + cc.on_packets_acked(&[p_not_lost], &RTT_ESTIMATE, now); cwnd_is_halved(&cc); // cc is app limited therefore cwnd in not increased. assert_eq!(cc.acked_bytes, 0); @@ -1129,7 +1133,7 @@ mod tests { assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); now += RTT; for (i, pkt) in pkts.into_iter().enumerate() { - cc.on_packets_acked(&[pkt], RTT, now); + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), @@ -1164,7 +1168,7 @@ mod tests { let mut last_acked_bytes = 0; // Check if congestion window gets increased for all packets currently in flight for (i, pkt) in pkts.into_iter().enumerate() { - cc.on_packets_acked(&[pkt], RTT, now); + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index 675168367a..0321ab1de5 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -7,7 +7,7 @@ // Congestion control #![deny(clippy::pedantic)] -use crate::{path::PATH_MTU_V6, tracking::SentPacket, Error}; +use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; use neqo_common::qlog::NeqoQlog; use std::{ @@ -42,7 +42,7 @@ pub trait CongestionControl: Display + Debug { #[must_use] fn cwnd_avail(&self) -> usize; - fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant); + fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], rtt_est: &RttEstimate, now: Instant); /// Returns true if the congestion window was reduced. 
fn on_packets_lost( diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index 1229e6307f..b24f1fc118 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -17,6 +17,7 @@ use crate::{ CongestionControl, MAX_DATAGRAM_SIZE, MAX_DATAGRAM_SIZE_F64, }, packet::PacketType, + rtt::RttEstimate, tracking::SentPacket, }; use std::{ @@ -27,6 +28,7 @@ use std::{ use test_fixture::now; const RTT: Duration = Duration::from_millis(100); +const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(100)); const CWND_INITIAL_F64: f64 = 10.0 * MAX_DATAGRAM_SIZE_F64; const CWND_INITIAL_10_F64: f64 = 10.0 * CWND_INITIAL_F64; const CWND_INITIAL_10: usize = 10 * CWND_INITIAL; @@ -59,7 +61,7 @@ fn ack_packet(cc: &mut ClassicCongestionControl, pn: u64, now: Instant) { Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); - cc.on_packets_acked(&[acked], RTT, now); + cc.on_packets_acked(&[acked], &RTT_ESTIMATE, now); } fn packet_lost(cc: &mut ClassicCongestionControl, pn: u64) { diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index 0e4322c08c..f86e87b953 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -7,15 +7,22 @@ // Congestion control #![deny(clippy::pedantic)] -use crate::cc::new_reno::NewReno; -use crate::cc::{ClassicCongestionControl, CongestionControl, CWND_INITIAL, MAX_DATAGRAM_SIZE}; -use crate::packet::PacketType; -use crate::tracking::SentPacket; +use crate::{ + cc::{ + new_reno::NewReno, ClassicCongestionControl, CongestionControl, CWND_INITIAL, + MAX_DATAGRAM_SIZE, + }, + packet::PacketType, + rtt::RttEstimate, + tracking::SentPacket, +}; + use std::time::Duration; use test_fixture::now; const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); +const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(98)); fn 
cwnd_is_default(cc: &ClassicCongestionControl) { assert_eq!(cc.cwnd(), CWND_INITIAL); @@ -117,7 +124,7 @@ fn issue_876() { assert_eq!(cc.bytes_in_flight(), 6 * MAX_DATAGRAM_SIZE - 5); // and ack it. cwnd increases slightly - cc.on_packets_acked(&sent_packets[6..], RTT, time_now); + cc.on_packets_acked(&sent_packets[6..], &RTT_ESTIMATE, time_now); assert_eq!(cc.acked_bytes(), sent_packets[6].size); cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 5 * MAX_DATAGRAM_SIZE - 2); @@ -181,7 +188,7 @@ fn issue_1465() { // the acked packets before on_packet_sent were the cause of // https://github.com/mozilla/neqo/pull/1465 - cc.on_packets_acked(&[p2], RTT, now); + cc.on_packets_acked(&[p2], &RTT_ESTIMATE, now); assert_eq!(cc.bytes_in_flight(), 0); @@ -189,7 +196,7 @@ fn issue_1465() { let p4 = send_next(&mut cc, now); cc.on_packet_sent(&p4); now += RTT; - cc.on_packets_acked(&[p4], RTT, now); + cc.on_packets_acked(&[p4], &RTT_ESTIMATE, now); // do the same as in the first rtt but now the bug appears let p5 = send_next(&mut cc, now); diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 2b357e0bb1..4430bb2bdb 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -973,8 +973,7 @@ impl Path { /// Record packets as acknowledged with the sender. pub fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], now: Instant) { debug_assert!(self.is_primary()); - self.sender - .on_packets_acked(acked_pkts, self.rtt.minimum(), now); + self.sender.on_packets_acked(acked_pkts, &self.rtt, now); } /// Record packets as lost with the sender. 
diff --git a/neqo-transport/src/rtt.rs b/neqo-transport/src/rtt.rs index 3d6d0e70f8..a5ceb37da2 100644 --- a/neqo-transport/src/rtt.rs +++ b/neqo-transport/src/rtt.rs @@ -47,6 +47,18 @@ impl RttEstimate { self.rttvar = rtt / 2; } + #[cfg(test)] + pub const fn from_duration(rtt: Duration) -> Self { + Self { + first_sample_time: None, + latest_rtt: rtt, + smoothed_rtt: rtt, + rttvar: Duration::from_millis(0), + min_rtt: rtt, + ack_delay: PeerAckDelay::Fixed(Duration::from_millis(25)), + } + } + pub fn set_initial(&mut self, rtt: Duration) { qtrace!("initial RTT={:?}", rtt); if rtt >= GRANULARITY { diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 3d8302369c..0c1e66ff9a 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -12,6 +12,7 @@ use crate::cc::{ ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno, }; use crate::pace::Pacer; +use crate::rtt::RttEstimate; use crate::tracking::SentPacket; use neqo_common::qlog::NeqoQlog; @@ -68,8 +69,13 @@ impl PacketSender { self.cc.cwnd_avail() } - pub fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant) { - self.cc.on_packets_acked(acked_pkts, min_rtt, now); + pub fn on_packets_acked( + &mut self, + acked_pkts: &[SentPacket], + rtt_est: &RttEstimate, + now: Instant, + ) { + self.cc.on_packets_acked(acked_pkts, rtt_est, now); } /// Called when packets are lost. Returns true if the congestion window was reduced. From b51cad763f319698ae4c0a8210e2f2bc3b1f2698 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 25 Jan 2024 20:32:56 +1100 Subject: [PATCH 084/321] Simplify timeouts (#1583) * Simplify timeouts We use `Option` but then always use this with `.or_else(|| Some(Duration::new(0, 0)))`. We don't need the option. 
* Simplify timeouts No need to wrap the timeout in Option when we always use it with `.or_else(|| Some(...))` --- neqo-client/src/main.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index a61a3ced88..1a038ddba3 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -405,12 +405,9 @@ fn process_loop( ) -> Res { let buf = &mut [0u8; 2048]; let mut events = Events::with_capacity(1024); - let mut timeout: Option = None; + let mut timeout = Duration::new(0, 0); loop { - poll.poll( - &mut events, - timeout.or_else(|| Some(Duration::from_millis(0))), - )?; + poll.poll(&mut events, Some(timeout))?; let mut datagrams: Vec = Vec::new(); 'read: loop { @@ -465,7 +462,7 @@ fn process_loop( } } Output::Callback(new_timeout) => { - timeout = Some(new_timeout); + timeout = new_timeout; break 'write; } Output::None => { @@ -1347,12 +1344,9 @@ mod old { ) -> Res { let buf = &mut [0u8; 2048]; let mut events = Events::with_capacity(1024); - let mut timeout: Option = None; + let mut timeout = Duration::new(0, 0); loop { - poll.poll( - &mut events, - timeout.or_else(|| Some(Duration::from_millis(0))), - )?; + poll.poll(&mut events, Some(timeout))?; 'read: loop { match socket.recv_from(&mut buf[..]) { @@ -1403,7 +1397,7 @@ mod old { } } Output::Callback(new_timeout) => { - timeout = Some(new_timeout); + timeout = new_timeout; break 'write; } Output::None => { From 29926165d593715b6caa6abc727d82f9fcb4e36c Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 25 Jan 2024 20:37:48 +1100 Subject: [PATCH 085/321] Lars owns this now (#1584) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bf1ad1efba..42caa3deee 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,4 @@ -* @KershawChang @martinthomson +* @KershawChang @martinthomson @larseggert /docker/ @martinthomson /hooks/ 
@martinthomson /neqo-crypto/ @martinthomson From f4f6cc3c002c650af4d6d572063901fe0071872d Mon Sep 17 00:00:00 2001 From: jesup Date: Thu, 25 Jan 2024 11:30:54 -0500 Subject: [PATCH 086/321] Improve coalesce_acked_from_zero perf by avoiding an extra vector (#1585) --- neqo-transport/src/send_stream.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index ed227b2a31..e171dfab83 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -276,8 +276,6 @@ impl RangeTracker { .map(|(len, _)| *len); if let Some(len_from_zero) = acked_range_from_zero { - let mut to_remove = SmallVec::<[_; 8]>::new(); - let mut new_len_from_zero = len_from_zero; // See if there's another Acked range entry contiguous to this one @@ -286,17 +284,14 @@ impl RangeTracker { .get(&new_len_from_zero) .filter(|(_, state)| *state == RangeState::Acked) { - to_remove.push(new_len_from_zero); + let to_remove = new_len_from_zero; new_len_from_zero += *next_len; + self.used.remove(&to_remove); } if len_from_zero != new_len_from_zero { self.used.get_mut(&0).expect("must be there").0 = new_len_from_zero; } - - for val in to_remove { - self.used.remove(&val); - } } } From 04def1eaceec5c3227edcd59c73cea83b607e906 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Fri, 26 Jan 2024 00:51:40 +0100 Subject: [PATCH 087/321] log SendProfile when nothing to send (#1586) --- neqo-transport/src/connection/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index f3519f7daa..49e4ec43aa 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -2260,6 +2260,7 @@ impl Connection { } if encoder.is_empty() { + qinfo!("TX blocked, profile={:?} ", profile); Ok(SendOption::No(profile.paced())) } else { // Perform additional padding for Initial packets as necessary. 
From 34c062b3fed13356eb57d316bdac019debf7af8d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 26 Jan 2024 18:02:36 +0200 Subject: [PATCH 088/321] chore: Fix a beta toolchain clippy warning that pops up in CI (#1591) --- neqo-transport/src/pace.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index 6b86575eb5..f1cec80ac6 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -82,7 +82,7 @@ impl Pacer { } /// Spend credit. This cannot fail; users of this API are expected to call - /// next() to determine when to spend. This takes the current time (`now`), + /// `next()` to determine when to spend. This takes the current time (`now`), /// an estimate of the round trip time (`rtt`), the estimated congestion /// window (`cwnd`), and the number of bytes that were sent (`count`). pub fn spend(&mut self, now: Instant, rtt: Duration, cwnd: usize, count: usize) { From 5496be25e7c4a651bf77a2cf77d1b61f213cbba0 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 26 Jan 2024 18:03:12 +0200 Subject: [PATCH 089/321] ci: Use lld to link (#1590) * ci: Use lld to link To make things a bit faster * Add the various lld packages * Add lld to PATH on MacOS * Also log level during transfer to `warn`, to make it less noisy --- .github/workflows/check.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 685d36360d..4df5f3c2a1 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -9,6 +9,7 @@ on: env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 + RUSTFLAGS: -C link-arg=-fuse-ld=lld jobs: check: @@ -35,7 +36,7 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run: | - sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld curl -L --proto '=https' --tlsv1.2 -sSf 
https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash # In addition to installing dependencies, first make sure System Integrity Protection (SIP) @@ -47,7 +48,8 @@ jobs: if: runner.os == 'MacOS' run: | csrutil status | grep disabled - brew install ninja mercurial cargo-binstall + brew install ninja mercurial cargo-binstall llvm + echo "/usr/local/opt/llvm/bin" >> "$GITHUB_PATH" # python3 -m pip install gyp-next # Above does not work, since pypi only has gyp 0.15.0, which is too old # for the homebrew python3. Install from source instead. @@ -65,7 +67,7 @@ jobs: run: | echo "C:\\msys64\\usr\\bin" >> "$GITHUB_PATH" echo "C:\\msys64\\mingw64\\bin" >> "$GITHUB_PATH" - /c/msys64/usr/bin/pacman -S --noconfirm nsinstall + /c/msys64/usr/bin/pacman -S --noconfirm nsinstall lld python3 -m pip install git+https://github.com/nodejs/gyp-next echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" @@ -123,7 +125,7 @@ jobs: env: HOST: localhost SIZE: 54321 - RUST_LOG: info + RUST_LOG: warn - name: Check formatting run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check From 37f121de3d21f50a84f2061bf1489fc3171c5261 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 29 Jan 2024 00:49:18 +0200 Subject: [PATCH 090/321] chore: Remove some outdated files (#1588) The .gitignore files serve no purpose, and if there is anything in the TODO files left to be done, those things should become GitHub issues. 
--- neqo-crypto/.gitignore | 6 ------ neqo-crypto/TODO | 4 ---- neqo-transport/.gitignore | 3 --- neqo-transport/TODO | 9 --------- 4 files changed, 22 deletions(-) delete mode 100644 neqo-crypto/.gitignore delete mode 100644 neqo-crypto/TODO delete mode 100644 neqo-transport/.gitignore delete mode 100755 neqo-transport/TODO diff --git a/neqo-crypto/.gitignore b/neqo-crypto/.gitignore deleted file mode 100644 index 0136220822..0000000000 --- a/neqo-crypto/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -Cargo.lock -/target -**/*.rs.bk -/nss -/nspr -/dist diff --git a/neqo-crypto/TODO b/neqo-crypto/TODO deleted file mode 100644 index b0552ea10f..0000000000 --- a/neqo-crypto/TODO +++ /dev/null @@ -1,4 +0,0 @@ -early data - API in place for inspection, but depends on resumption -handle panics more gracefully for extension handlers -client certificates -read/write - probably never \ No newline at end of file diff --git a/neqo-transport/.gitignore b/neqo-transport/.gitignore deleted file mode 100644 index aa085cd807..0000000000 --- a/neqo-transport/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -Cargo.lock -/target -**/*.rs.bk diff --git a/neqo-transport/TODO b/neqo-transport/TODO deleted file mode 100755 index 151dbd1753..0000000000 --- a/neqo-transport/TODO +++ /dev/null @@ -1,9 +0,0 @@ -Use stream events in h3 // grover or dragana? 
-harmonize our rust usage: - - use foo::* or use foo::{bar, baz} and ordering/grouping - - remove extern crate - - sort #[derive()] args -cleanup public API -write docs for public API -write docs for everything else -CI From fca21791a2a0f32671959b08aec9b50c532c9980 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 29 Jan 2024 10:29:51 +0200 Subject: [PATCH 091/321] chore: Make the qns image multiarch & shrink the build context further (#1589) * chore: Make the qns image multiarch, and shrink the build context further * Remove some more things * Update .dockerignore Co-authored-by: Max Inden * Update qns/Dockerfile Co-authored-by: Martin Thomson * Update qns/Dockerfile * Update .dockerignore --------- Co-authored-by: Max Inden Co-authored-by: Martin Thomson --- .dockerignore | 12 +++++++++--- qns/Dockerfile | 17 ++++++----------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/.dockerignore b/.dockerignore index 8012c0d3ae..cc95fda49e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,3 +1,9 @@ -nss -nspr -target +# Ignore everything: +* +# Except for the following: +!**/*.toml +!**/*.rs +!**/*.h +!**/*.hpp +!qns +!Cargo.lock diff --git a/qns/Dockerfile b/qns/Dockerfile index 051cf5b8a5..f3a45c23a5 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -1,9 +1,9 @@ FROM martenseemann/quic-network-simulator-endpoint:latest AS buildimage RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates coreutils curl git make mercurial ssh \ - build-essential clang llvm libclang-dev lld \ - gyp ninja-build pkg-config zlib1g-dev python \ + curl git mercurial \ + build-essential libclang-dev lld \ + gyp ninja-build zlib1g-dev python \ && apt-get autoremove -y && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* @@ -13,20 +13,15 @@ ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH -RUN set -eux; \ - curl -sSLf 
"https://static.rust-lang.org/rustup/archive/1.26.0/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ - echo '0b2f6c8f85a3d02fde2efc0ced4657869d73fccfce59defb4e8d29233116e6db *rustup-init' | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y -q --no-modify-path --profile minimal --default-toolchain "$RUST_VERSION"; \ - rm -f rustup-init; \ - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" +RUN curl https://sh.rustup.rs -sSf | \ + sh -s -- -y -q --no-modify-path --profile minimal --default-toolchain $RUST_VERSION ENV NSS_DIR=/nss \ NSPR_DIR=/nspr \ LD_LIBRARY_PATH=/dist/Release/lib RUN set -eux; \ - hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ + git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR"; \ hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" RUN "$NSS_DIR"/build.sh --static -Ddisable_tests=1 -o From 2bbbfed6546f53693590432492bb8953fea95898 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 30 Jan 2024 09:38:50 +0200 Subject: [PATCH 092/321] fix: Don't send superfluous PING-only Initial packets during handshake (#1598) * fix: Don't send superfluous PING-only Initial packets during handshake This limits the Initial packet number space to sending one packet when a PTO fires (other packet number spaces will continue to send two.) This stops PING-only Initial packets during handshake. Some tests based in the assumption that those PINGs would be sent. Fix those, too. I'd appreciate if someone could esp. double-check the test modifications, esp. to the `idle_caching` test, which is gnarly. Fixes #744 * Rework logic Only limits the PTO packet count to one if we have not received any packet from the peer yet. 
--- .../src/connection/tests/recovery.rs | 11 +++-- neqo-transport/src/recovery.rs | 41 ++++++++++++++----- neqo-transport/tests/retry.rs | 6 +-- 3 files changed, 38 insertions(+), 20 deletions(-) diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 073f1ca156..421dded3d4 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -13,7 +13,9 @@ use super::{ use crate::{ cc::CWND_MIN, path::PATH_MTU_V6, - recovery::{FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MIN_OUTSTANDING_UNACK, PTO_PACKET_COUNT}, + recovery::{ + FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MAX_PTO_PACKET_COUNT, MIN_OUTSTANDING_UNACK, + }, rtt::GRANULARITY, stats::MAX_PTO_COUNTS, tparams::TransportParameter, @@ -173,10 +175,6 @@ fn pto_initial() { assert!(pkt2.is_some()); assert_eq!(pkt2.unwrap().len(), PATH_MTU_V6); - let pkt3 = client.process(None, now).dgram(); - assert!(pkt3.is_some()); - assert_eq!(pkt3.unwrap().len(), PATH_MTU_V6); - let delay = client.process(None, now).callback(); // PTO has doubled. assert_eq!(delay, INITIAL_PTO * 2); @@ -468,7 +466,8 @@ fn ack_after_pto() { // Jump forward to the PTO and drain the PTO packets. now += AT_LEAST_PTO; - for _ in 0..PTO_PACKET_COUNT { + // We can use MAX_PTO_PACKET_COUNT, because we know the handshake is over. + for _ in 0..MAX_PTO_PACKET_COUNT { let dgram = client.process(None, now).dgram(); assert!(dgram.is_some()); } diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 23c296949d..8318c66c06 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -40,9 +40,9 @@ pub(crate) const PACKET_THRESHOLD: u64 = 3; /// `ACK_ONLY_SIZE_LIMIT` is the minimum size of the congestion window. /// If the congestion window is this small, we will only send ACK frames. pub(crate) const ACK_ONLY_SIZE_LIMIT: usize = 256; -/// The number of packets we send on a PTO. 
-/// And the number to declare lost when the PTO timer is hit. -pub const PTO_PACKET_COUNT: usize = 2; +/// The maximum number of packets we send on a PTO. +/// And the maximum number to declare lost when the PTO timer is hit. +pub const MAX_PTO_PACKET_COUNT: usize = 2; /// The preferred limit on the number of packets that are tracked. /// If we exceed this number, we start sending `PING` frames sooner to /// force the peer to acknowledge some of them. @@ -520,21 +520,34 @@ struct PtoState { } impl PtoState { - pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet) -> Self { + /// The number of packets we send on a PTO. + /// And the number to declare lost when the PTO timer is hit. + fn pto_packet_count(space: PacketNumberSpace, rx_count: usize) -> usize { + if space == PacketNumberSpace::Initial && rx_count == 0 { + // For the Initial space, we only send one packet on PTO if we have not received any packets + // from the peer yet. This avoids sending useless PING-only packets when the Client Initial + // is deemed lost. 
+ 1 + } else { + MAX_PTO_PACKET_COUNT + } + } + + pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) -> Self { debug_assert!(probe[space]); Self { space, count: 1, - packets: PTO_PACKET_COUNT, + packets: Self::pto_packet_count(space, rx_count), probe, } } - pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet) { + pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) { debug_assert!(probe[space]); self.space = space; self.count += 1; - self.packets = PTO_PACKET_COUNT; + self.packets = Self::pto_packet_count(space, rx_count); self.probe = probe; } @@ -877,10 +890,11 @@ impl LossRecovery { } fn fire_pto(&mut self, pn_space: PacketNumberSpace, allow_probes: PacketNumberSpaceSet) { + let rx_count = self.stats.borrow().packets_rx; if let Some(st) = &mut self.pto_state { - st.pto(pn_space, allow_probes); + st.pto(pn_space, allow_probes, rx_count); } else { - self.pto_state = Some(PtoState::new(pn_space, allow_probes)); + self.pto_state = Some(PtoState::new(pn_space, allow_probes, rx_count)); } self.pto_state @@ -910,7 +924,14 @@ impl LossRecovery { if t <= now { qdebug!([self], "PTO timer fired for {}", pn_space); let space = self.spaces.get_mut(*pn_space).unwrap(); - lost.extend(space.pto_packets(PTO_PACKET_COUNT).cloned()); + lost.extend( + space + .pto_packets(PtoState::pto_packet_count( + *pn_space, + self.stats.borrow().packets_rx, + )) + .cloned(), + ); pto_space = pto_space.or(Some(*pn_space)); } diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 3fffcba3da..eb20b8144a 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -326,10 +326,8 @@ fn retry_after_pto() { // Let PTO fire on the client and then let it exhaust its PTO packets. 
now += Duration::from_secs(1); - let pto1 = client.process(None, now).dgram(); - assert!(pto1.unwrap().len() >= 1200); - let pto2 = client.process(None, now).dgram(); - assert!(pto2.unwrap().len() >= 1200); + let pto = client.process(None, now).dgram(); + assert!(pto.unwrap().len() >= 1200); let cb = client.process(None, now).callback(); assert_ne!(cb, Duration::new(0, 0)); From 9192b69ba10d7deb69445b2b6f64efb9196c4aed Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 30 Jan 2024 19:05:59 +1100 Subject: [PATCH 093/321] Don't use lld on windows (#1600) * Don't use lld on windows * Remove merge markers * Maybe without the quoting * Install bench * Trial and error, mostly error * OK, try this now * Try lld.exe --------- Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 7 ++++++- neqo-transport/Cargo.toml | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 4df5f3c2a1..7ec34331e0 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -9,7 +9,6 @@ on: env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 - RUSTFLAGS: -C link-arg=-fuse-ld=lld jobs: check: @@ -38,6 +37,7 @@ jobs: run: | sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" # In addition to installing dependencies, first make sure System Integrity Protection (SIP) # is disabled on this MacOS runner. 
This is needed to allow the NSS libraries to be loaded @@ -56,6 +56,7 @@ jobs: python3 -m pip install git+https://github.com/nodejs/gyp-next python3 -m pip install packaging echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - name: Install dependencies (Windows) if: runner.os == 'Windows' @@ -70,6 +71,7 @@ jobs: /c/msys64/usr/bin/pacman -S --noconfirm nsinstall lld python3 -m pip install git+https://github.com/nodejs/gyp-next echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld.exe" >> "$GITHUB_ENV" - name: Set up MSVC build environment (Windows) if: runner.os == 'Windows' @@ -114,6 +116,9 @@ jobs: - name: Run tests and determine coverage run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info + - name: Benches should at least build + run: cargo +${{ matrix.rust-toolchain }} build --features bench --benches + - name: Run client/server transfer run: | cargo +${{ matrix.rust-toolchain }} build --bin neqo-client --bin neqo-server diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 3263991be9..8b6130197f 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -19,5 +19,6 @@ smallvec = "1.11.1" test-fixture = { path = "../test-fixture" } [features] +bench = [] deny-warnings = [] fuzzing = ["neqo-crypto/fuzzing"] From 8b507d17312da4bae72b69c0d8844f1d3de629c0 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 31 Jan 2024 19:45:31 +1100 Subject: [PATCH 094/321] Cap the PTO count when working out the period (#1605) The previous code would overflow and produce a 0ns timeout when the PTO count got large (well over 24). This is because the `fast_pto` value tends to have trailing zero bits and shift values between 32-(number of zeros) and 31 (inclusive) would still shift left. This code instead stops increasing the PTO once it hits 24. 
That should be enough for most cases. Closes #1602. --- neqo-transport/src/recovery.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 8318c66c06..3e4e9134bb 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -848,11 +848,7 @@ impl LossRecovery { // where F = fast_pto / FAST_PTO_SCALE (== 1 by default) let pto_count = pto_state.map_or(0, |p| u32::try_from(p.count).unwrap_or(0)); rtt.pto(pn_space) - .checked_mul( - u32::from(fast_pto) - .checked_shl(pto_count) - .unwrap_or(u32::MAX), - ) + .checked_mul(u32::from(fast_pto) << min(pto_count, u32::BITS - u8::BITS)) .map_or(Duration::from_secs(3600), |p| p / u32::from(FAST_PTO_SCALE)) } From e43b9a72bde926649e2e4377b186ce5653bb2069 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 31 Jan 2024 12:12:19 +0200 Subject: [PATCH 095/321] ci: Clone NSS from hg and not the GitHub mirror (#1606) * ci: Clone NSS from hg and not the GitHub mirror Because the latter can be outdated. This is why #1081 is failing CI. * Also make this change in the qns Dockerfile --- .github/workflows/check.yml | 3 ++- qns/Dockerfile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 7ec34331e0..ec541066bb 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -87,10 +87,11 @@ jobs: # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. # (neqo-crypto/build.rs would also need to query pkg-config to get the # right build flags rather than building NSS.) + # Clone from the main hg repo, because the GitHub mirror can be out of date. 
- name: Fetch NSS and NSPR run: | hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" - git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" + hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR" echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" env: diff --git a/qns/Dockerfile b/qns/Dockerfile index f3a45c23a5..ff9cc7c8f9 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -21,7 +21,7 @@ ENV NSS_DIR=/nss \ LD_LIBRARY_PATH=/dist/Release/lib RUN set -eux; \ - git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR"; \ + hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" RUN "$NSS_DIR"/build.sh --static -Ddisable_tests=1 -o From fbc5d62697c968dec968941d4c495f4fa6869c9d Mon Sep 17 00:00:00 2001 From: John Schanck Date: Wed, 31 Jan 2024 02:52:11 -0800 Subject: [PATCH 096/321] Add key exchange group configuration knobs (#1599) * Add key exchange group configuration knobs * Change default key exchange group configuration * Add key exchange group config tests * Set minimum NSS version to 3.97 --------- Co-authored-by: Lars Eggert --- neqo-crypto/bindings/bindings.toml | 1 + neqo-crypto/src/agent.rs | 10 +++++ neqo-crypto/src/constants.rs | 1 + neqo-crypto/src/lib.rs | 2 +- neqo-crypto/tests/agent.rs | 65 +++++++++++++++++++++++++++- neqo-transport/src/connection/mod.rs | 22 +++++++++- neqo-transport/src/crypto.rs | 8 ++++ 7 files changed, 106 insertions(+), 3 deletions(-) diff --git a/neqo-crypto/bindings/bindings.toml b/neqo-crypto/bindings/bindings.toml index 7c35a0a224..3e5c1fdf7d 100644 --- a/neqo-crypto/bindings/bindings.toml +++ b/neqo-crypto/bindings/bindings.toml @@ -49,6 +49,7 @@ functions = [ "SSL_PeerSignedCertTimestamps", "SSL_PeerStapledOCSPResponses", "SSL_ResetHandshake", + "SSL_SendAdditionalKeyShares", "SSL_SetNextProtoNego", "SSL_SetURL", "SSL_VersionRangeSet", diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 
3612fec7e3..3868c525bc 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -461,6 +461,16 @@ impl SecretAgent { }) } + /// Set the number of additional key shares that will be sent in the client hello + /// + /// # Errors + /// If the underlying API fails (which shouldn't happen). + pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { + secstatus_to_res(unsafe { + ssl::SSL_SendAdditionalKeyShares(self.fd, c_uint::try_from(count)?) + }) + } + /// Set TLS options. /// /// # Errors diff --git a/neqo-crypto/src/constants.rs b/neqo-crypto/src/constants.rs index 21e1a5aceb..76db972290 100644 --- a/neqo-crypto/src/constants.rs +++ b/neqo-crypto/src/constants.rs @@ -62,6 +62,7 @@ remap_enum! { TLS_GRP_EC_SECP384R1 = ssl_grp_ec_secp384r1, TLS_GRP_EC_SECP521R1 = ssl_grp_ec_secp521r1, TLS_GRP_EC_X25519 = ssl_grp_ec_curve25519, + TLS_GRP_KEM_XYBER768D00 = ssl_grp_kem_xyber768d00, } } diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 332e58a033..2533c727e7 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -74,7 +74,7 @@ use std::{ ptr::null, }; -const MINIMUM_NSS_VERSION: &str = "3.74"; +const MINIMUM_NSS_VERSION: &str = "3.97"; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] #[allow(clippy::upper_case_acronyms)] diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index 82e105fd1a..27017f0a4e 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -4,7 +4,7 @@ use neqo_crypto::{ generate_ech_keys, AuthenticationStatus, Client, Error, HandshakeState, SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker, TLS_AES_128_GCM_SHA256, - TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_VERSION_1_3, + TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; use std::boxed::Box; @@ -155,6 +155,48 @@ fn chacha_client() { ); } +#[test] +fn server_prefers_first_client_share() { + fixture_init(); + let mut client 
= Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(1) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_X25519); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_X25519); +} + +#[test] +fn server_prefers_second_client_share() { + fixture_init(); + let mut client = Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(1) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); +} + #[test] fn p256_server() { fixture_init(); @@ -170,6 +212,27 @@ fn p256_server() { assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); } +#[test] +fn p256_server_hrr() { + fixture_init(); + let mut client = Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(0) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + 
assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); +} + #[test] fn alpn() { fixture_init(); diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 49e4ec43aa..7e8c1d4737 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -42,7 +42,7 @@ use neqo_common::{ qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, }; use neqo_crypto::{ - agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, + agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, Server, ZeroRttChecker, }; @@ -545,6 +545,26 @@ impl Connection { Ok(()) } + /// Enable a set of key exchange groups. + pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> { + if self.state != State::Init { + qerror!([self], "Cannot enable groups in state {:?}", self.state); + return Err(Error::ConnectionState); + } + self.crypto.tls.set_groups(groups)?; + Ok(()) + } + + /// Set the number of additional key shares to send in the client hello. 
+ pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { + if self.state != State::Init { + qerror!([self], "Cannot enable groups in state {:?}", self.state); + return Err(Error::ConnectionState); + } + self.crypto.tls.send_additional_key_shares(count)?; + Ok(()) + } + fn make_resumption_token(&mut self) -> ResumptionToken { debug_assert_eq!(self.role, Role::Client); debug_assert!(self.crypto.has_resumption_token()); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 803c049de5..898eb44372 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -22,6 +22,7 @@ use neqo_crypto::{ PrivateKey, PublicKey, Record, RecordList, ResumptionToken, SymKey, ZeroRttChecker, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_CT_HANDSHAKE, TLS_EPOCH_APPLICATION_DATA, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL, TLS_EPOCH_ZERO_RTT, + TLS_GRP_EC_SECP256R1, TLS_GRP_EC_SECP384R1, TLS_GRP_EC_SECP521R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; @@ -78,6 +79,13 @@ impl Crypto { TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, ])?; + agent.set_groups(&[ + TLS_GRP_EC_X25519, + TLS_GRP_EC_SECP256R1, + TLS_GRP_EC_SECP384R1, + TLS_GRP_EC_SECP521R1, + ])?; + agent.send_additional_key_shares(1)?; agent.set_alpn(&protocols)?; agent.disable_end_of_early_data()?; // Always enable 0-RTT on the client, but the server needs From 5d54da7e22f187e4d60daad486830f9d25bb4d83 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 31 Jan 2024 23:51:15 +0200 Subject: [PATCH 097/321] Do proper congestion control before the first ACK has come in (#1603) * Properly adjust CC state when PTOs fire before ACKs are received * Fix test * Fix test * Fix another test * Progress on the tests * Fix another test * Fix more tests * Simplify assert_idle * Try and fix sim. Doesn't work yet. 
* Undo * Fix * Update neqo-transport/src/recovery.rs Co-authored-by: Martin Thomson * Make RTT in `idle_timeout_crazy_rtt` less crazy, so #1605 works * Address code review comments * Don't need to look at `tls_info`. --------- Co-authored-by: Martin Thomson --- neqo-common/src/lib.rs | 4 +- neqo-transport/Cargo.toml | 1 + neqo-transport/src/connection/tests/cc.rs | 6 +- .../src/connection/tests/handshake.rs | 3 +- neqo-transport/src/connection/tests/mod.rs | 92 +++++++++++-------- .../src/connection/tests/recovery.rs | 38 ++++---- neqo-transport/src/path.rs | 5 +- neqo-transport/src/recovery.rs | 12 ++- neqo-transport/src/stats.rs | 2 + neqo-transport/tests/network.rs | 4 +- 10 files changed, 96 insertions(+), 71 deletions(-) diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index 202f39e0fb..d31f47c664 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -28,6 +28,8 @@ pub use self::tos::{IpTos, IpTosDscp, IpTosEcn}; use std::fmt::Write; +use enum_map::Enum; + #[must_use] pub fn hex(buf: impl AsRef<[u8]>) -> String { let mut ret = String::with_capacity(buf.as_ref().len() * 2); @@ -77,7 +79,7 @@ pub const fn const_min(a: usize, b: usize) -> usize { [a, b][(a >= b) as usize] } -#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Enum)] /// Client or Server. 
pub enum Role { Client, diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 8b6130197f..ac2bdaa85d 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -16,6 +16,7 @@ qlog = "0.11.0" smallvec = "1.11.1" [dev-dependencies] +enum-map = "2.7" test-fixture = { path = "../test-fixture" } [features] diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index f974fd94a0..6c70e424ea 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -7,8 +7,8 @@ use super::super::Output; use super::{ ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, default_client, - default_server, fill_cwnd, induce_persistent_congestion, send_something, DEFAULT_RTT, - FORCE_IDLE_CLIENT_1RTT_PACKETS, POST_HANDSHAKE_CWND, + default_server, fill_cwnd, induce_persistent_congestion, send_something, + CLIENT_HANDSHAKE_1RTT_PACKETS, DEFAULT_RTT, POST_HANDSHAKE_CWND, }; use crate::cc::MAX_DATAGRAM_SIZE; use crate::packet::PacketNumber; @@ -54,7 +54,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { // We have already sent packets in `connect_rtt_idle`, // so include a fudge factor. 
let flight1_largest = - PacketNumber::try_from(c_tx_dgrams.len() + FORCE_IDLE_CLIENT_1RTT_PACKETS).unwrap(); + PacketNumber::try_from(c_tx_dgrams.len() + CLIENT_HANDSHAKE_1RTT_PACKETS).unwrap(); // Server: Receive and generate ack now += DEFAULT_RTT / 2; diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 55cd10b667..33aff5d528 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -30,6 +30,7 @@ use std::mem; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::rc::Rc; use std::time::Duration; +use test_fixture::assertions::assert_coalesced_0rtt; use test_fixture::{self, addr, assertions, datagram, fixture_init, now, split_datagram}; const ECH_CONFIG_ID: u8 = 7; @@ -380,10 +381,10 @@ fn reorder_05rtt_with_0rtt() { // Now PTO at the client and cause the server to re-send handshake packets. now += AT_LEAST_PTO; let c3 = client.process(None, now).dgram(); + assert_coalesced_0rtt(c3.as_ref().unwrap()); now += RTT / 2; let s3 = server.process(c3.as_ref(), now).dgram().unwrap(); - assertions::assert_no_1rtt(&s3[..]); // The client should be able to process the 0.5 RTT now. // This should contain an ACK, so we are processing an ACK from the past. 
diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index b722feff78..ab520c3198 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -12,6 +12,8 @@ use crate::{ cc::{CWND_INITIAL_PKTS, CWND_MIN}, cid::ConnectionIdRef, events::ConnectionEvent, + frame::FRAME_TYPE_PING, + packet::PacketBuilder, path::PATH_MTU_V6, recovery::ACK_ONLY_SIZE_LIMIT, stats::{FrameStats, Stats, MAX_PTO_COUNTS}, @@ -32,6 +34,8 @@ use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; +use enum_map::enum_map; + // All the tests. mod ackrate; mod cc; @@ -53,7 +57,7 @@ const DEFAULT_RTT: Duration = Duration::from_millis(100); const AT_LEAST_PTO: Duration = Duration::from_secs(1); const DEFAULT_STREAM_DATA: &[u8] = b"message"; /// The number of 1-RTT packets sent in `force_idle` by a client. -const FORCE_IDLE_CLIENT_1RTT_PACKETS: usize = 3; +const CLIENT_HANDSHAKE_1RTT_PACKETS: usize = 1; /// WARNING! In this module, this version of the generator needs to be used. /// This copies the implementation from @@ -151,6 +155,25 @@ pub fn maybe_authenticate(conn: &mut Connection) -> bool { false } +/// Compute the RTT variance after `n` ACKs or other RTT updates. +pub fn rttvar_after_n_updates(n: usize, rtt: Duration) -> Duration { + assert!(n > 0); + let mut rttvar = rtt / 2; + for _ in 1..n { + rttvar = rttvar * 3 / 4; + } + rttvar +} + +/// This inserts a PING frame into packets. +struct PingWriter {} + +impl crate::connection::test_internal::FrameWriter for PingWriter { + fn write_frames(&mut self, builder: &mut PacketBuilder) { + builder.encode_varint(FRAME_TYPE_PING); + } +} + /// Drive the handshake between the client and server. 
fn handshake( client: &mut Connection, @@ -170,10 +193,28 @@ fn handshake( ) }; + let mut did_ping = enum_map! {_ => false}; while !is_done(a) { _ = maybe_authenticate(a); let had_input = input.is_some(); + // Insert a PING frame into the first application data packet an endpoint sends, + // in order to force the peer to ACK it. For the server, this is depending on the + // client's connection state, which is accessible during the tests. + // + // We're doing this to prevent packet loss from delaying ACKs, which would cause + // cwnd to shrink, and also to prevent the delayed ACK timer from being armed after + // the handshake, which is not something the tests are written to account for. + let should_ping = !did_ping[a.role()] + && (a.role() == Role::Client && *a.state() == State::Connected + || (a.role() == Role::Server && *b.state() == State::Connected)); + if should_ping { + a.test_frame_writer = Some(Box::new(PingWriter {})); + } let output = a.process(input.as_ref(), now).dgram(); + if should_ping { + a.test_frame_writer = None; + did_ping[a.role()] = true; + } assert!(had_input || output.is_some()); input = output; qtrace!("handshake: t += {:?}", rtt / 2); @@ -205,9 +246,9 @@ fn connect_with_rtt( ) -> Instant { fn check_rtt(stats: &Stats, rtt: Duration) { assert_eq!(stats.rtt, rtt); - // Confirmation takes 2 round trips, - // so rttvar is reduced by 1/4 (from rtt/2). - assert_eq!(stats.rttvar, rtt * 3 / 8); + // Validate that rttvar has been computed correctly based on the number of RTT updates. + let n = stats.frame_rx.ack + usize::from(stats.rtt_init_guess); + assert_eq!(stats.rttvar, rttvar_after_n_updates(n, rtt)); } let now = handshake(client, server, now, rtt); assert_eq!(*client.state(), State::Confirmed); @@ -247,51 +288,26 @@ fn exchange_ticket( get_tokens(client).pop().expect("should have token") } -/// Getting the client and server to reach an idle state is surprisingly hard. 
-/// The server sends `HANDSHAKE_DONE` at the end of the handshake, and the client -/// doesn't immediately acknowledge it. Reordering packets does the trick. -fn force_idle( - client: &mut Connection, - server: &mut Connection, - rtt: Duration, - mut now: Instant, -) -> Instant { - // The client has sent NEW_CONNECTION_ID, so ensure that the server generates - // an acknowledgment by sending some reordered packets. - qtrace!("force_idle: send reordered client packets"); - let c1 = send_something(client, now); - let c2 = send_something(client, now); - now += rtt / 2; - server.process_input(&c2, now); - server.process_input(&c1, now); - - // Now do the same for the server. (The ACK is in the first one.) - qtrace!("force_idle: send reordered server packets"); - let s1 = send_something(server, now); - let s2 = send_something(server, now); - now += rtt / 2; - // Delivering s2 first at the client causes it to want to ACK. - client.process_input(&s2, now); - // Delivering s1 should not have the client change its mind about the ACK. - let ack = client.process(Some(&s1), now); - assert!(ack.as_dgram_ref().is_some()); +/// The `handshake` method inserts PING frames into the first application data packets, +/// which forces each peer to ACK them. As a side effect, that causes both sides of the +/// connection to be idle aftwerwards. This method simply verifies that this is the case. +fn assert_idle(client: &mut Connection, server: &mut Connection, rtt: Duration, now: Instant) { let idle_timeout = min( client.conn_params.get_idle_timeout(), server.conn_params.get_idle_timeout(), ); - assert_eq!(client.process_output(now), Output::Callback(idle_timeout)); - now += rtt / 2; + // Client started its idle period half an RTT before now. 
assert_eq!( - server.process(ack.as_dgram_ref(), now), - Output::Callback(idle_timeout) + client.process_output(now), + Output::Callback(idle_timeout - rtt / 2) ); - now + assert_eq!(server.process_output(now), Output::Callback(idle_timeout)); } /// Connect with an RTT and then force both peers to be idle. fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { let now = connect_with_rtt(client, server, now(), rtt); - let now = force_idle(client, server, rtt, now); + assert_idle(client, server, rtt, now); // Drain events from both as well. _ = client.events().count(); _ = server.events().count(); diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 421dded3d4..87b2b37839 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -29,7 +29,10 @@ use std::{ mem, time::{Duration, Instant}, }; -use test_fixture::{self, now, split_datagram}; +use test_fixture::{ + assertions::{assert_handshake, assert_initial}, + now, split_datagram, +}; #[test] fn pto_works_basic() { @@ -210,14 +213,17 @@ fn pto_handshake_complete() { let mut server = default_server(); let pkt = client.process(None, now).dgram(); + assert_initial(pkt.as_ref().unwrap(), false); let cb = client.process(None, now).callback(); assert_eq!(cb, Duration::from_millis(300)); now += HALF_RTT; let pkt = server.process(pkt.as_ref(), now).dgram(); + assert_initial(pkt.as_ref().unwrap(), false); now += HALF_RTT; let pkt = client.process(pkt.as_ref(), now).dgram(); + assert_handshake(pkt.as_ref().unwrap()); let cb = client.process(None, now).callback(); // The client now has a single RTT estimate (20ms), so @@ -233,7 +239,7 @@ fn pto_handshake_complete() { qdebug!("---- client: SH..FIN -> FIN"); let pkt1 = client.process(None, now).dgram(); - assert!(pkt1.is_some()); + assert_handshake(pkt1.as_ref().unwrap()); assert_eq!(*client.state(), 
State::Connected); let cb = client.process(None, now).callback(); @@ -247,6 +253,7 @@ fn pto_handshake_complete() { qdebug!("---- client: PTO"); now += HALF_RTT * 6; let pkt2 = client.process(None, now).dgram(); + assert_handshake(pkt2.as_ref().unwrap()); pto_counts[0] = 1; assert_eq!(client.stats.borrow().pto_counts, pto_counts); @@ -257,7 +264,10 @@ fn pto_handshake_complete() { let stream_id = client.stream_create(StreamType::UniDi).unwrap(); client.stream_close_send(stream_id).unwrap(); let pkt3 = client.process(None, now).dgram(); + assert_handshake(pkt3.as_ref().unwrap()); let (pkt3_hs, pkt3_1rtt) = split_datagram(&pkt3.unwrap()); + assert_handshake(&pkt3_hs); + assert!(pkt3_1rtt.is_some()); // PTO has been doubled. let cb = client.process(None, now).callback(); @@ -283,16 +293,21 @@ fn pto_handshake_complete() { // Check that the other packets (pkt2, pkt3) are Handshake packets. // The server discarded the Handshake keys already, therefore they are dropped. // Note that these don't include 1-RTT packets, because 1-RTT isn't send on PTO. 
+ let (pkt2_hs, pkt2_1rtt) = split_datagram(&pkt2.unwrap()); + assert_handshake(&pkt2_hs); + assert!(pkt2_1rtt.is_some()); let dropped_before1 = server.stats().dropped_rx; let server_frames = server.stats().frame_rx.all; - server.process_input(&pkt2.unwrap(), now); + server.process_input(&pkt2_hs, now); assert_eq!(1, server.stats().dropped_rx - dropped_before1); assert_eq!(server.stats().frame_rx.all, server_frames); + server.process_input(&pkt2_1rtt.unwrap(), now); + let server_frames2 = server.stats().frame_rx.all; let dropped_before2 = server.stats().dropped_rx; server.process_input(&pkt3_hs, now); assert_eq!(1, server.stats().dropped_rx - dropped_before2); - assert_eq!(server.stats().frame_rx.all, server_frames); + assert_eq!(server.stats().frame_rx.all, server_frames2); now += HALF_RTT; @@ -307,13 +322,6 @@ fn pto_handshake_complete() { now += cb; let out = client.process(None, now).dgram(); assert!(out.is_some()); - let cb = client.process(None, now).callback(); - // The handshake keys are discarded, but now we're back to the idle timeout. - // We don't send another PING because the handshake space is done and there - // is nothing to probe for. - - let idle_timeout = ConnectionParameters::default().get_idle_timeout(); - assert_eq!(cb, idle_timeout - expected_ack_delay); } /// Test that PTO in the Handshake space contains the right frames. @@ -616,14 +624,6 @@ fn loss_time_past_largest_acked() { let lr_time = client.process(None, now).callback(); assert_ne!(lr_time, Duration::from_secs(0)); assert!(lr_time < (RTT / 2)); - - // Skipping forward by the loss recovery timer should cause the client to - // mark packets as lost and retransmit, after which we should be on the PTO - // timer. - now += lr_time; - let delay = client.process(None, now).callback(); - assert_ne!(delay, Duration::from_secs(0)); - assert!(delay > lr_time); } /// `sender` sends a little, `receiver` acknowledges it. 
diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 4430bb2bdb..2ab90c169c 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -28,7 +28,7 @@ use crate::{ sender::PacketSender, stats::FrameStats, tracking::{PacketNumberSpace, SentPacket}, - Error, Res, + Error, Res, Stats, }; use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; @@ -946,7 +946,7 @@ impl Path { } /// Discard a packet that previously might have been in-flight. - pub fn discard_packet(&mut self, sent: &SentPacket, now: Instant) { + pub fn discard_packet(&mut self, sent: &SentPacket, now: Instant, stats: &mut Stats) { if self.rtt.first_sample_time().is_none() { // When discarding a packet there might not be a good RTT estimate. // But discards only occur after receiving something, so that means @@ -958,6 +958,7 @@ impl Path { "discarding a packet without an RTT estimate; guessing RTT={:?}", now - sent.time_sent ); + stats.rtt_init_guess = true; self.rtt.update( &mut self.qlog, now - sent.time_sent, diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 3e4e9134bb..a640b75371 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -412,7 +412,7 @@ impl LossRecoverySpace { .sent_packets .iter_mut() // BTreeMap iterates in order of ascending PN - .take_while(|(&k, _)| Some(k) < largest_acked) + .take_while(|(&k, _)| k < largest_acked.unwrap_or(PacketNumber::MAX)) { // Packets sent before now - loss_delay are deemed lost. if packet.time_sent + loss_delay <= now { @@ -430,7 +430,9 @@ impl LossRecoverySpace { largest_acked ); } else { - self.first_ooo_time = Some(packet.time_sent); + if largest_acked.is_some() { + self.first_ooo_time = Some(packet.time_sent); + } // No more packets can be declared lost after this one. 
break; }; @@ -622,7 +624,7 @@ impl LossRecovery { .collect::>(); let mut path = primary_path.borrow_mut(); for p in &mut dropped { - path.discard_packet(p, now); + path.discard_packet(p, now, &mut self.stats.borrow_mut()); } dropped } @@ -762,7 +764,7 @@ impl LossRecovery { .collect::>(); let mut path = primary_path.borrow_mut(); for p in &mut dropped { - path.discard_packet(p, now); + path.discard_packet(p, now, &mut self.stats.borrow_mut()); } dropped } @@ -795,7 +797,7 @@ impl LossRecovery { qdebug!([self], "Reset loss recovery state for {}", space); let mut path = primary_path.borrow_mut(); for p in self.spaces.drop_space(space) { - path.discard_packet(&p, now); + path.discard_packet(&p, now, &mut self.stats.borrow_mut()); } // We just made progress, so discard PTO count. diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index 9428b61949..9e956eb02e 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -141,6 +141,8 @@ pub struct Stats { pub rtt: Duration, /// The current, estimated round-trip time variation on the primary path. pub rttvar: Duration, + /// Whether the first RTT sample was guessed from a discarded packet. + pub rtt_init_guess: bool, /// Count PTOs. Single PTOs, 2 PTOs in a row, 3 PTOs in row, etc. are counted /// separately. 
diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index 3f9d2240a0..e2389090a7 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -67,7 +67,7 @@ simulate!( ))) ] ), - Delay::new(weeks(150)..weeks(150)), + Delay::new(weeks(15)..weeks(15)), Drop::percentage(10), ConnectionNode::new_server( ConnectionParameters::default().idle_timeout(weeks(1000)), @@ -78,7 +78,7 @@ simulate!( ))) ] ), - Delay::new(weeks(100)..weeks(100)), + Delay::new(weeks(10)..weeks(10)), Drop::percentage(10), ], ); From d0ae17fd20b82af60e12d79842edeaf088689e48 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 31 Jan 2024 23:19:30 +0100 Subject: [PATCH 098/321] Merge pull request from GHSA-5m9j-vr32-g7j5 * fix(transport): bound ACK range count in ACK frame * Update encode_varint comments Co-authored-by: Martin Thomson * Move constant adjacent to code * Mention Ethernet MTU * Fix spacing --------- Co-authored-by: Martin Thomson --- neqo-transport/src/frame.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 7eeba507bc..8081baef6c 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -387,6 +387,17 @@ impl<'a> Frame<'a> { } pub fn decode(dec: &mut Decoder<'a>) -> Res { + /// Maximum ACK Range Count in ACK Frame + /// + /// Given a max UDP datagram size of 64k bytes and a minimum ACK Range size of 2 + /// bytes (2 QUIC varints), a single datagram can at most contain 32k ACK + /// Ranges. + /// + /// Note that the maximum (jumbogram) Ethernet MTU of 9216 or on the + /// Internet the regular Ethernet MTU of 1518 are more realistically to + /// be the limiting factor. Though for simplicity the higher limit is chosen. 
+ const MAX_ACK_RANGE_COUNT: u64 = 32 * 1024; + fn d(v: Option) -> Res { v.ok_or(Error::NoMoreData) } @@ -410,7 +421,13 @@ impl<'a> Frame<'a> { FRAME_TYPE_ACK | FRAME_TYPE_ACK_ECN => { let la = dv(dec)?; let ad = dv(dec)?; - let nr = dv(dec)?; + let nr = dv(dec).and_then(|nr| { + if nr < MAX_ACK_RANGE_COUNT { + Ok(nr) + } else { + Err(Error::TooMuchData) + } + })?; let fa = dv(dec)?; let mut arr: Vec = Vec::with_capacity(nr as usize); for _ in 0..nr { @@ -943,4 +960,16 @@ mod tests { }; just_dec(&f, "403103010203"); } + + #[test] + fn frame_decode_enforces_bound_on_ack_range() { + let mut e = Encoder::new(); + + e.encode_varint(FRAME_TYPE_ACK); + e.encode_varint(0u64); // largest acknowledged + e.encode_varint(0u64); // ACK delay + e.encode_varint(u32::MAX); // ACK range count = huge, but maybe available for allocation + + assert_eq!(Err(Error::TooMuchData), Frame::decode(&mut e.as_decoder())); + } } From f85f10049a59ee45f7185a06f31ac2b9d8bc5763 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Thu, 1 Feb 2024 09:10:13 +0100 Subject: [PATCH 099/321] neqo v0.6.9 (#1608) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index ca11186f95..fba2110d6d 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.8" +version = "0.6.9" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index f6fd952a18..b7136aaa60 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.8" +version = "0.6.9" authors = ["Bobby Holley "] edition = 
"2018" rust-version = "1.70.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index c7cad21c87..c5909ac5e5 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.8" +version = "0.6.9" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 8dafbe8b40..9956cef05c 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.8" +version = "0.6.9" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index a197aa2203..8b298167f2 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.8" +version = "0.6.9" authors = ["EKR "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index c07a7fcec0..31a1bf28e6 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.8" +version = "0.6.9" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 888df43163..b3f8aae462 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.8" +version = "0.6.9" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index ac2bdaa85d..ae33822018 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.8" +version = "0.6.9" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.70.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 
f142c9a2f4..2c163fbb07 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.8" +version = "0.6.9" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" From ca7d2271773a572730451112ff2cef54c19abb45 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 1 Feb 2024 14:49:37 +0200 Subject: [PATCH 100/321] chore: Add a `.rustfmt.toml` and reformat (#1607) * chore: Add a `.rustfmt.toml` and reformat Some of the settings in `.rustfmt.toml` require the nightly toolchain, i.e, you need to `cargo +nightly fmt` to have them be used. For VS Code, you can make is use the nightly `rustfmt` by default by adding ``` "rust-analyzer.rustfmt.extraArgs": [ "+nightly" ], ``` to your `settings.json` file. Fixes #1417 * Normalize comment headings * Spacing fixes --- .rustfmt.toml | 7 + neqo-client/src/main.rs | 41 ++- neqo-common/src/codec.rs | 22 ++ neqo-common/src/datagram.rs | 3 +- neqo-common/src/event.rs | 3 +- neqo-common/src/hrtime.rs | 3 +- neqo-common/src/incrdecoder.rs | 6 + neqo-common/src/lib.rs | 16 +- neqo-common/src/log.rs | 5 +- neqo-common/src/qlog.rs | 1 + neqo-common/src/timer.rs | 23 +- neqo-crypto/build.rs | 12 +- neqo-crypto/src/aead.rs | 19 +- neqo-crypto/src/aead_fuzzing.rs | 15 +- neqo-crypto/src/agent.rs | 67 ++-- neqo-crypto/src/agentio.rs | 29 +- neqo-crypto/src/cert.rs | 22 +- neqo-crypto/src/ech.rs | 21 +- neqo-crypto/src/err.rs | 14 +- neqo-crypto/src/ext.rs | 17 +- neqo-crypto/src/hkdf.rs | 16 +- neqo-crypto/src/hp.rs | 23 +- neqo-crypto/src/lib.rs | 30 +- neqo-crypto/src/p11.rs | 20 +- neqo-crypto/src/replay.rs | 12 +- neqo-crypto/src/secrets.rs | 6 +- neqo-crypto/src/selfencrypt.rs | 20 +- neqo-crypto/src/ssl.rs | 8 +- neqo-crypto/src/time.rs | 19 +- neqo-crypto/tests/aead.rs | 7 +- neqo-crypto/tests/agent.rs | 7 +- neqo-crypto/tests/ext.rs | 12 +- neqo-crypto/tests/handshake.rs | 4 +- neqo-crypto/tests/hkdf.rs | 10 +- neqo-crypto/tests/hp.rs | 3 +- 
neqo-crypto/tests/selfencrypt.rs | 8 +- neqo-http3/src/buffered_send_stream.rs | 7 +- neqo-http3/src/client_events.rs | 17 +- neqo-http3/src/conn_params.rs | 5 +- neqo-http3/src/connection.rs | 119 ++++--- neqo-http3/src/connection_client.rs | 304 ++++++++++++------ neqo-http3/src/connection_server.rs | 57 ++-- neqo-http3/src/control_stream_local.rs | 11 +- neqo-http3/src/control_stream_remote.rs | 10 +- .../src/features/extended_connect/mod.rs | 14 +- .../tests/webtransport/datagrams.rs | 14 +- .../tests/webtransport/mod.rs | 18 +- .../tests/webtransport/negotiation.rs | 12 +- .../tests/webtransport/sessions.rs | 24 +- .../tests/webtransport/streams.rs | 22 +- .../extended_connect/webtransport_session.rs | 26 +- .../extended_connect/webtransport_streams.rs | 9 +- neqo-http3/src/features/mod.rs | 15 +- neqo-http3/src/frames/hframe.rs | 7 +- neqo-http3/src/frames/reader.rs | 20 +- neqo-http3/src/frames/tests/hframe.rs | 7 +- neqo-http3/src/frames/tests/mod.rs | 10 +- neqo-http3/src/frames/tests/reader.rs | 11 +- neqo-http3/src/frames/wtframe.rs | 6 +- neqo-http3/src/headers_checks.rs | 15 +- neqo-http3/src/lib.rs | 48 ++- neqo-http3/src/priority.rs | 19 +- neqo-http3/src/push_controller.rs | 73 +++-- neqo-http3/src/qlog.rs | 5 +- neqo-http3/src/qpack_decoder_receiver.rs | 7 +- neqo-http3/src/qpack_encoder_receiver.rs | 7 +- neqo-http3/src/recv_message.rs | 25 +- neqo-http3/src/request_target.rs | 3 + neqo-http3/src/send_message.rs | 24 +- neqo-http3/src/server.rs | 46 +-- neqo-http3/src/server_connection_events.rs | 13 +- neqo-http3/src/server_events.rs | 56 +++- neqo-http3/src/settings.rs | 7 +- neqo-http3/src/stream_type_reader.rs | 63 ++-- neqo-http3/tests/httpconn.rs | 11 +- neqo-http3/tests/priority.rs | 5 +- neqo-http3/tests/webtransport.rs | 4 +- neqo-interop/src/main.rs | 19 +- neqo-qpack/src/decoder.rs | 48 ++- neqo-qpack/src/decoder_instructions.rs | 31 +- neqo-qpack/src/encoder.rs | 139 +++++--- neqo-qpack/src/encoder_instructions.rs | 32 +- 
neqo-qpack/src/header_block.rs | 12 +- neqo-qpack/src/huffman.rs | 17 +- neqo-qpack/src/huffman_decode_helper.rs | 6 +- neqo-qpack/src/lib.rs | 9 +- neqo-qpack/src/prefix.rs | 12 +- neqo-qpack/src/qlog.rs | 6 +- neqo-qpack/src/qpack_send_buf.rs | 8 +- neqo-qpack/src/reader.rs | 35 +- neqo-qpack/src/table.rs | 58 ++-- neqo-server/src/main.rs | 7 +- neqo-server/src/old_https.rs | 12 +- neqo-transport/src/ackrate.rs | 14 +- neqo-transport/src/addr_valid.rs | 24 +- neqo-transport/src/cc/classic_cc.rs | 15 +- neqo-transport/src/cc/cubic.rs | 22 +- neqo-transport/src/cc/mod.rs | 7 +- neqo-transport/src/cc/new_reno.rs | 6 +- neqo-transport/src/cc/tests/cubic.rs | 47 +-- neqo-transport/src/cc/tests/new_reno.rs | 11 +- neqo-transport/src/cid.rs | 26 +- neqo-transport/src/connection/idle.rs | 6 +- neqo-transport/src/connection/mod.rs | 91 ++++-- neqo-transport/src/connection/params.rs | 39 ++- neqo-transport/src/connection/saved.rs | 6 +- neqo-transport/src/connection/state.rs | 27 +- .../src/connection/tests/ackrate.rs | 7 +- neqo-transport/src/connection/tests/cc.rs | 28 +- neqo-transport/src/connection/tests/close.rs | 15 +- .../src/connection/tests/datagram.rs | 18 +- .../src/connection/tests/fuzzing.rs | 5 +- .../src/connection/tests/handshake.rs | 47 +-- neqo-transport/src/connection/tests/idle.rs | 26 +- neqo-transport/src/connection/tests/keys.rs | 23 +- .../src/connection/tests/migration.rs | 27 +- neqo-transport/src/connection/tests/mod.rs | 31 +- .../src/connection/tests/priority.rs | 9 +- .../src/connection/tests/recovery.rs | 23 +- .../src/connection/tests/resumption.rs | 16 +- neqo-transport/src/connection/tests/stream.rs | 10 +- neqo-transport/src/connection/tests/vn.rs | 24 +- .../src/connection/tests/zerortt.rs | 16 +- neqo-transport/src/crypto.rs | 5 +- neqo-transport/src/dump.rs | 13 +- neqo-transport/src/events.rs | 17 +- neqo-transport/src/fc.rs | 18 +- neqo-transport/src/frame.rs | 18 +- neqo-transport/src/lib.rs | 7 +- neqo-transport/src/pace.rs | 16 
+- neqo-transport/src/packet/mod.rs | 32 +- neqo-transport/src/packet/retry.rs | 5 +- neqo-transport/src/path.rs | 11 +- neqo-transport/src/qlog.rs | 3 +- neqo-transport/src/quic_datagrams.rs | 19 +- neqo-transport/src/recovery.rs | 31 +- neqo-transport/src/recv_stream.rs | 11 +- neqo-transport/src/rtt.rs | 20 +- neqo-transport/src/send_stream.rs | 22 +- neqo-transport/src/sender.rs | 17 +- neqo-transport/src/server.rs | 37 +-- neqo-transport/src/stats.rs | 7 +- neqo-transport/src/stream_id.rs | 3 +- neqo-transport/src/streams.rs | 14 +- neqo-transport/src/tparams.rs | 24 +- neqo-transport/src/tracking.rs | 11 +- neqo-transport/src/version.rs | 6 +- neqo-transport/tests/common/mod.rs | 8 +- neqo-transport/tests/conn_vectors.rs | 5 +- neqo-transport/tests/connection.rs | 3 +- neqo-transport/tests/network.rs | 4 +- neqo-transport/tests/retry.rs | 11 +- neqo-transport/tests/server.rs | 7 +- neqo-transport/tests/sim/connection.rs | 14 +- neqo-transport/tests/sim/delay.rs | 16 +- neqo-transport/tests/sim/drop.rs | 10 +- neqo-transport/tests/sim/mod.rs | 20 +- neqo-transport/tests/sim/rng.rs | 4 +- neqo-transport/tests/sim/taildrop.rs | 16 +- test-fixture/src/assertions.rs | 28 +- test-fixture/src/lib.rs | 48 ++- 161 files changed, 2060 insertions(+), 1345 deletions(-) create mode 100644 .rustfmt.toml diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000000..482732e6a5 --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1,7 @@ +comment_width=100 +wrap_comments=true + +imports_granularity="Crate" +group_imports="StdExternalCrate" + +format_code_in_doc_comments=true diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 1a038ddba3..3db90aac10 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -7,25 +7,6 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use common::IpTos; -use qlog::{events::EventImportance, streamer::QlogStreamer}; - -use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, 
Token}; - -use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; -use neqo_crypto::{ - constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, - init, AuthenticationStatus, Cipher, ResumptionToken, -}; -use neqo_http3::{ - self, Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, - Priority, -}; -use neqo_transport::{ - CongestionControlAlgorithm, Connection, ConnectionId, ConnectionParameters, - EmptyConnectionIdGenerator, Error as TransportError, StreamId, StreamType, Version, -}; - use std::{ cell::RefCell, collections::{HashMap, VecDeque}, @@ -41,6 +22,22 @@ use std::{ time::{Duration, Instant}, }; +use common::IpTos; +use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; +use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, + init, AuthenticationStatus, Cipher, ResumptionToken, +}; +use neqo_http3::{ + self, Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, + Priority, +}; +use neqo_transport::{ + CongestionControlAlgorithm, Connection, ConnectionId, ConnectionParameters, + EmptyConnectionIdGenerator, Error as TransportError, StreamId, StreamType, Version, +}; +use qlog::{events::EventImportance, streamer::QlogStreamer}; use structopt::StructOpt; use url::{Origin, Url}; @@ -1140,9 +1137,6 @@ mod old { time::{Duration, Instant}, }; - use url::Url; - - use super::{qlog_new, KeyUpdateState, Res}; use mio::{Events, Poll}; use neqo_common::{event::Provider, Datagram, IpTos}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; @@ -1150,8 +1144,9 @@ mod old { Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, StreamType, }; + use url::Url; - use super::{emit_datagram, get_output_file, Args}; + use super::{emit_datagram, get_output_file, 
qlog_new, Args, KeyUpdateState, Res}; struct HandlerOld<'b> { streams: HashMap>, diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 99ba9ec52a..57ff13f39f 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -34,7 +34,9 @@ impl<'a> Decoder<'a> { } /// Skip n bytes. + /// /// # Panics + /// /// If the remaining quantity is less than `n`. pub fn skip(&mut self, n: usize) { assert!(self.remaining() >= n, "insufficient data"); @@ -90,7 +92,9 @@ impl<'a> Decoder<'a> { } /// Decodes an unsigned integer of length 1..=8. + /// /// # Panics + /// /// This panics if `n` is not in the range `1..=8`. pub fn decode_uint(&mut self, n: usize) -> Option { assert!(n > 0 && n <= 8); @@ -198,7 +202,9 @@ pub struct Encoder { impl Encoder { /// Static helper function for previewing the results of encoding without doing it. + /// /// # Panics + /// /// When `v` is too large. #[must_use] pub const fn varint_len(v: u64) -> usize { @@ -212,7 +218,9 @@ impl Encoder { } /// Static helper to determine how long a varint-prefixed array encodes to. + /// /// # Panics + /// /// When `len` doesn't fit in a `u64`. #[must_use] pub fn vvec_len(len: usize) -> usize { @@ -261,7 +269,9 @@ impl Encoder { } /// Don't use this except in testing. + /// /// # Panics + /// /// When `s` contains non-hex values or an odd number of values. #[must_use] pub fn from_hex(s: impl AsRef) -> Self { @@ -291,7 +301,9 @@ impl Encoder { } /// Encode an integer of any size up to u64. + /// /// # Panics + /// /// When `n` is outside the range `1..=8`. #[allow(clippy::cast_possible_truncation)] pub fn encode_uint>(&mut self, n: usize, v: T) -> &mut Self { @@ -304,7 +316,9 @@ impl Encoder { } /// Encode a QUIC varint. + /// /// # Panics + /// /// When `v >= 1<<62`. pub fn encode_varint>(&mut self, v: T) -> &mut Self { let v = v.into(); @@ -319,7 +333,9 @@ impl Encoder { } /// Encode a vector in TLS style. + /// /// # Panics + /// /// When `v` is longer than 2^64. 
pub fn encode_vec(&mut self, n: usize, v: &[u8]) -> &mut Self { self.encode_uint(n, u64::try_from(v.as_ref().len()).unwrap()) @@ -327,7 +343,9 @@ impl Encoder { } /// Encode a vector in TLS style using a closure for the contents. + /// /// # Panics + /// /// When `f()` returns a length larger than `2^8n`. #[allow(clippy::cast_possible_truncation)] pub fn encode_vec_with(&mut self, n: usize, f: F) -> &mut Self { @@ -343,7 +361,9 @@ impl Encoder { } /// Encode a vector with a varint length. + /// /// # Panics + /// /// When `v` is longer than 2^64. pub fn encode_vvec(&mut self, v: &[u8]) -> &mut Self { self.encode_varint(u64::try_from(v.as_ref().len()).unwrap()) @@ -351,7 +371,9 @@ impl Encoder { } /// Encode a vector with a varint length using a closure. + /// /// # Panics + /// /// When `f()` writes more than 2^62 bytes. #[allow(clippy::cast_possible_truncation)] pub fn encode_vvec_with(&mut self, f: F) -> &mut Self { diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index cdd61753a3..1729c8ed8d 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::net::SocketAddr; -use std::ops::Deref; +use std::{net::SocketAddr, ops::Deref}; use crate::{hex_with_len, IpTos}; diff --git a/neqo-common/src/event.rs b/neqo-common/src/event.rs index 8598383e76..26052b7571 100644 --- a/neqo-common/src/event.rs +++ b/neqo-common/src/event.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::iter::Iterator; -use std::marker::PhantomData; +use std::{iter::Iterator, marker::PhantomData}; /// An event provider is able to generate a stream of events. 
pub trait Provider { diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index 1187e39a5a..62d2567d42 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -379,12 +379,13 @@ impl Drop for Time { not(all(any(target_os = "macos", target_os = "windows"), feature = "ci")) ))] mod test { - use super::Time; use std::{ thread::{sleep, spawn}, time::{Duration, Instant}, }; + use super::Time; + const ONE: Duration = Duration::from_millis(1); const ONE_AND_A_BIT: Duration = Duration::from_micros(1500); /// A limit for when high resolution timers are disabled. diff --git a/neqo-common/src/incrdecoder.rs b/neqo-common/src/incrdecoder.rs index e78a90f786..8468102cb6 100644 --- a/neqo-common/src/incrdecoder.rs +++ b/neqo-common/src/incrdecoder.rs @@ -21,7 +21,9 @@ impl IncrementalDecoderUint { } /// Consume some data. + /// /// # Panics + /// /// Never, but this is not something the compiler can tell. pub fn consume(&mut self, dv: &mut Decoder) -> Option { if let Some(r) = &mut self.remaining { @@ -87,7 +89,9 @@ impl IncrementalDecoderBuffer { } /// Consume some bytes from the decoder. + /// /// # Panics + /// /// Never; but rust doesn't know that. pub fn consume(&mut self, dv: &mut Decoder) -> Option> { let amount = min(self.remaining, dv.remaining()); @@ -109,7 +113,9 @@ pub struct IncrementalDecoderIgnore { impl IncrementalDecoderIgnore { /// Make a new ignoring decoder. + /// /// # Panics + /// /// If the amount to ignore is zero. 
#[must_use] pub fn new(n: usize) -> Self { diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index d31f47c664..853b05705b 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -18,18 +18,18 @@ pub mod qlog; pub mod timer; pub mod tos; -pub use self::codec::{Decoder, Encoder}; -pub use self::datagram::Datagram; -pub use self::header::Header; -pub use self::incrdecoder::{ - IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint, -}; -pub use self::tos::{IpTos, IpTosDscp, IpTosEcn}; - use std::fmt::Write; use enum_map::Enum; +pub use self::{ + codec::{Decoder, Encoder}, + datagram::Datagram, + header::Header, + incrdecoder::{IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint}, + tos::{IpTos, IpTosDscp, IpTosEcn}, +}; + #[must_use] pub fn hex(buf: impl AsRef<[u8]>) -> String { let mut ret = String::with_capacity(buf.as_ref().len() * 2); diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index e376765523..d9c30b98b1 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -6,11 +6,10 @@ #![allow(clippy::module_name_repetitions)] +use std::{io::Write, sync::Once, time::Instant}; + use env_logger::Builder; use lazy_static::lazy_static; -use std::io::Write; -use std::sync::Once; -use std::time::Instant; #[macro_export] macro_rules! do_log { diff --git a/neqo-common/src/qlog.rs b/neqo-common/src/qlog.rs index 5ff74750b0..3da8350990 100644 --- a/neqo-common/src/qlog.rs +++ b/neqo-common/src/qlog.rs @@ -31,6 +31,7 @@ pub struct NeqoQlogShared { impl NeqoQlog { /// Create an enabled `NeqoQlog` configuration. + /// /// # Errors /// /// Will return `qlog::Error` if cannot write to the new log. diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index 24cb0abdbc..e8532af442 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -4,9 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::convert::TryFrom; -use std::mem; -use std::time::{Duration, Instant}; +use std::{ + convert::TryFrom, + mem, + time::{Duration, Instant}, +}; /// Internal structure for a timer item. struct TimerItem { @@ -21,10 +23,10 @@ impl TimerItem { } /// A timer queue. -/// This uses a classic timer wheel arrangement, with some characteristics that might be considered peculiar. -/// Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut points). -/// Time is relative, the wheel has an origin time and it is unable to represent times that are more than -/// `granularity * capacity` past that time. +/// This uses a classic timer wheel arrangement, with some characteristics that might be considered +/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut +/// points). Time is relative, the wheel has an origin time and it is unable to represent times that +/// are more than `granularity * capacity` past that time. pub struct Timer { items: Vec>>, now: Instant, @@ -34,7 +36,9 @@ pub struct Timer { impl Timer { /// Construct a new wheel at the given granularity, starting at the given time. + /// /// # Panics + /// /// When `capacity` is too large to fit in `u32` or `granularity` is zero. pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self { assert!(u32::try_from(capacity).is_ok()); @@ -109,7 +113,9 @@ impl Timer { } /// Asserts if the time given is in the past or too far in the future. + /// /// # Panics + /// /// When `time` is in the past relative to previous calls. pub fn add(&mut self, time: Instant, item: T) { assert!(time >= self.now); @@ -241,9 +247,10 @@ impl Timer { #[cfg(test)] mod test { - use super::{Duration, Instant, Timer}; use lazy_static::lazy_static; + use super::{Duration, Instant, Timer}; + lazy_static! 
{ static ref NOW: Instant = Instant::now(); } diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index c462b3db19..a63c34dedb 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -7,13 +7,15 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +use std::{ + collections::HashMap, + env, fs, + path::{Path, PathBuf}, + process::Command, +}; + use bindgen::Builder; use serde_derive::Deserialize; -use std::collections::HashMap; -use std::env; -use std::fs; -use std::path::{Path, PathBuf}; -use std::process::Command; const BINDINGS_DIR: &str = "bindings"; const BINDINGS_CONFIG: &str = "bindings.toml"; diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index 41cdf66469..a2f009a403 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -4,6 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + convert::{TryFrom, TryInto}, + fmt, + ops::{Deref, DerefMut}, + os::raw::{c_char, c_uint}, + ptr::null_mut, +}; + use crate::{ constants::{Cipher, Version}, err::Res, @@ -13,14 +21,6 @@ use crate::{ ssl::{self, PRUint16, PRUint64, PRUint8, SSLAeadContext}, }; -use std::{ - convert::{TryFrom, TryInto}, - fmt, - ops::{Deref, DerefMut}, - os::raw::{c_char, c_uint}, - ptr::null_mut, -}; - experimental_api!(SSL_MakeAead( version: PRUint16, cipher: PRUint16, @@ -62,6 +62,7 @@ impl RealAead { /// Create a new AEAD based on the indicated TLS version and cipher suite. /// /// # Errors + /// /// Returns `Error` when the supporting NSS functions fail. pub fn new( _fuzzing: bool, @@ -107,6 +108,7 @@ impl RealAead { /// the value provided in `Aead::expansion`. /// /// # Errors + /// /// If the input can't be protected or any input is too large for NSS. pub fn encrypt<'a>( &self, @@ -139,6 +141,7 @@ impl RealAead { /// the final result will be shorter. /// /// # Errors + /// /// If the input isn't authenticated or any input is too large for NSS. 
pub fn decrypt<'a>( &self, diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_fuzzing.rs index 4293d2bc70..4e5a6de07f 100644 --- a/neqo-crypto/src/aead_fuzzing.rs +++ b/neqo-crypto/src/aead_fuzzing.rs @@ -4,12 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::constants::{Cipher, Version}; -use crate::err::{sec::SEC_ERROR_BAD_DATA, Error, Res}; -use crate::p11::SymKey; -use crate::RealAead; use std::fmt; +use crate::{ + constants::{Cipher, Version}, + err::{sec::SEC_ERROR_BAD_DATA, Error, Res}, + p11::SymKey, + RealAead, +}; + pub const FIXED_TAG_FUZZING: &[u8] = &[0x0a; 16]; pub struct FuzzingAead { @@ -76,8 +79,8 @@ impl FuzzingAead { let len_encrypted = input.len() - FIXED_TAG_FUZZING.len(); // Check that: // 1) expansion is all zeros and - // 2) if the encrypted data is also supplied that at least some values - // are no zero (otherwise padding will be interpreted as a valid packet) + // 2) if the encrypted data is also supplied that at least some values are no zero + // (otherwise padding will be interpreted as a valid packet) if &input[len_encrypted..] == FIXED_TAG_FUZZING && (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0)) { diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 3868c525bc..cd0bb4cb12 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -4,6 +4,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + cell::RefCell, + convert::TryFrom, + ffi::{CStr, CString}, + mem::{self, MaybeUninit}, + ops::{Deref, DerefMut}, + os::raw::{c_uint, c_void}, + pin::Pin, + ptr::{null, null_mut}, + rc::Rc, + time::Instant, +}; + +use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; + pub use crate::{ agentio::{as_c_void, Record, RecordList}, cert::CertificateInfo, @@ -25,19 +40,6 @@ use crate::{ ssl::{self, PRBool}, time::{Time, TimeHolder}, }; -use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; -use std::{ - cell::RefCell, - convert::TryFrom, - ffi::{CStr, CString}, - mem::{self, MaybeUninit}, - ops::{Deref, DerefMut}, - os::raw::{c_uint, c_void}, - pin::Pin, - ptr::{null, null_mut}, - rc::Rc, - time::Instant, -}; /// The maximum number of tickets to remember for a given connection. const MAX_TICKETS: usize = 4; @@ -157,6 +159,7 @@ impl SecretAgentPreInfo { } /// # Panics + /// /// If `usize` is less than 32 bits and the value is too large. #[must_use] pub fn max_early_data(&self) -> usize { @@ -183,6 +186,7 @@ impl SecretAgentPreInfo { /// which contains a valid ECH configuration. /// /// # Errors + /// /// When the public name is not valid UTF-8. (Note: names should be ASCII.) pub fn ech_public_name(&self) -> Res> { if self.info.valuesSet & ssl::ssl_preinfo_ech == 0 || self.info.echPublicName.is_null() { @@ -395,6 +399,7 @@ impl SecretAgent { /// Default configuration. /// /// # Errors + /// /// If `set_version_range` fails. fn configure(&mut self, grease: bool) -> Res<()> { self.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?; @@ -411,6 +416,7 @@ impl SecretAgent { /// Set the versions that are supported. /// /// # Errors + /// /// If the range of versions isn't supported. pub fn set_version_range(&mut self, min: Version, max: Version) -> Res<()> { let range = ssl::SSLVersionRange { min, max }; @@ -420,6 +426,7 @@ impl SecretAgent { /// Enable a set of ciphers. 
Note that the order of these is not respected. /// /// # Errors + /// /// If NSS can't enable or disable ciphers. pub fn set_ciphers(&mut self, ciphers: &[Cipher]) -> Res<()> { if self.state != HandshakeState::New { @@ -447,6 +454,7 @@ impl SecretAgent { /// Set key exchange groups. /// /// # Errors + /// /// If the underlying API fails (which shouldn't happen). pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> { // SSLNamedGroup is a different size to Group, so copy one by one. @@ -464,6 +472,7 @@ impl SecretAgent { /// Set the number of additional key shares that will be sent in the client hello /// /// # Errors + /// /// If the underlying API fails (which shouldn't happen). pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { secstatus_to_res(unsafe { @@ -474,6 +483,7 @@ impl SecretAgent { /// Set TLS options. /// /// # Errors + /// /// Returns an error if the option or option value is invalid; i.e., never. pub fn set_option(&mut self, opt: ssl::Opt, value: bool) -> Res<()> { opt.set(self.fd, value) @@ -482,6 +492,7 @@ impl SecretAgent { /// Enable 0-RTT. /// /// # Errors + /// /// See `set_option`. pub fn enable_0rtt(&mut self) -> Res<()> { self.set_option(ssl::Opt::EarlyData, true) @@ -490,6 +501,7 @@ impl SecretAgent { /// Disable the `EndOfEarlyData` message. /// /// # Errors + /// /// See `set_option`. pub fn disable_end_of_early_data(&mut self) -> Res<()> { self.set_option(ssl::Opt::SuppressEndOfEarlyData, true) @@ -503,8 +515,11 @@ impl SecretAgent { /// 255 octets in length. /// /// # Errors + /// /// This should always panic rather than return an error. + /// /// # Panics + /// /// If any of the provided `protocols` are more than 255 bytes long. /// /// [RFC7301]: https://datatracker.ietf.org/doc/html/rfc7301 @@ -549,11 +564,12 @@ impl SecretAgent { /// Install an extension handler. /// - /// This can be called multiple times with different values for `ext`. 
The handler is provided as - /// `Rc>` so that the caller is able to hold a reference to the handler and later - /// access any state that it accumulates. + /// This can be called multiple times with different values for `ext`. The handler is provided + /// as `Rc>` so that the caller is able to hold a reference to the handler + /// and later access any state that it accumulates. /// /// # Errors + /// /// When the extension handler can't be successfully installed. pub fn extension_handler( &mut self, @@ -597,6 +613,7 @@ impl SecretAgent { /// Calling this function collects all the relevant information. /// /// # Errors + /// /// When the underlying socket functions fail. pub fn preinfo(&self) -> Res { SecretAgentPreInfo::new(self.fd) @@ -615,7 +632,9 @@ impl SecretAgent { } /// Call this function to mark the peer as authenticated. + /// /// # Panics + /// /// If the handshake doesn't need to be authenticated. pub fn authenticated(&mut self, status: AuthenticationStatus) { assert!(self.state.authentication_needed()); @@ -664,6 +683,7 @@ impl SecretAgent { /// function if you want to proceed, because this will mark the certificate as OK. /// /// # Errors + /// /// When the handshake fails this returns an error. pub fn handshake(&mut self, now: Instant, input: &[u8]) -> Res> { self.now.set(now)?; @@ -700,6 +720,7 @@ impl SecretAgent { /// If you send data from multiple epochs, you might end up being sad. /// /// # Errors + /// /// When the handshake fails this returns an error. pub fn handshake_raw(&mut self, now: Instant, input: Option) -> Res { self.now.set(now)?; @@ -727,6 +748,7 @@ impl SecretAgent { } /// # Panics + /// /// If setup fails. #[allow(unknown_lints, clippy::branches_sharing_code)] pub fn close(&mut self) { @@ -832,6 +854,7 @@ impl Client { /// Create a new client agent. /// /// # Errors + /// /// Errors returned if the socket can't be created or configured. 
pub fn new(server_name: impl Into, grease: bool) -> Res { let server_name = server_name.into(); @@ -921,6 +944,7 @@ impl Client { /// Enable resumption, using a token previously provided. /// /// # Errors + /// /// Error returned when the resumption token is invalid or /// the socket is not able to use the value. pub fn enable_resumption(&mut self, token: impl AsRef<[u8]>) -> Res<()> { @@ -944,6 +968,7 @@ impl Client { /// ECH greasing. When that is done, there is no need to look for `EchRetry` /// /// # Errors + /// /// Error returned when the configuration is invalid. pub fn enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { let config = ech_config_list.as_ref(); @@ -996,7 +1021,8 @@ pub enum ZeroRttCheckResult { Fail, } -/// A `ZeroRttChecker` is used by the agent to validate the application token (as provided by `send_ticket`) +/// A `ZeroRttChecker` is used by the agent to validate the application token (as provided by +/// `send_ticket`) pub trait ZeroRttChecker: std::fmt::Debug + std::marker::Unpin { fn check(&self, token: &[u8]) -> ZeroRttCheckResult; } @@ -1037,6 +1063,7 @@ impl Server { /// Create a new server agent. /// /// # Errors + /// /// Errors returned when NSS fails. pub fn new(certificates: &[impl AsRef]) -> Res { let mut agent = SecretAgent::new()?; @@ -1090,7 +1117,8 @@ impl Server { ssl::SSLHelloRetryRequestAction::ssl_hello_retry_reject_0rtt } ZeroRttCheckResult::HelloRetryRequest(tok) => { - // Don't bother propagating errors from this, because it should be caught in testing. + // Don't bother propagating errors from this, because it should be caught in + // testing. assert!(tok.len() <= usize::try_from(retry_token_max).unwrap()); let slc = std::slice::from_raw_parts_mut(retry_token, tok.len()); slc.copy_from_slice(&tok); @@ -1104,6 +1132,7 @@ impl Server { /// via the Deref implementation on Server. /// /// # Errors + /// /// Returns an error if the underlying NSS functions fail. 
pub fn enable_0rtt( &mut self, @@ -1131,6 +1160,7 @@ impl Server { /// The records that are sent are captured and returned. /// /// # Errors + /// /// If NSS is unable to send a ticket, or if this agent is incorrectly configured. pub fn send_ticket(&mut self, now: Instant, extra: &[u8]) -> Res { self.agent.now.set(now)?; @@ -1146,6 +1176,7 @@ impl Server { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Fails when NSS cannot create a key pair. pub fn enable_ech( &mut self, diff --git a/neqo-crypto/src/agentio.rs b/neqo-crypto/src/agentio.rs index 1d39b2398a..2bcc540530 100644 --- a/neqo-crypto/src/agentio.rs +++ b/neqo-crypto/src/agentio.rs @@ -4,21 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::constants::{ContentType, Epoch}; -use crate::err::{nspr, Error, PR_SetError, Res}; -use crate::prio; -use crate::ssl; +use std::{ + cmp::min, + convert::{TryFrom, TryInto}, + fmt, mem, + ops::Deref, + os::raw::{c_uint, c_void}, + pin::Pin, + ptr::{null, null_mut}, + vec::Vec, +}; use neqo_common::{hex, hex_with_len, qtrace}; -use std::cmp::min; -use std::convert::{TryFrom, TryInto}; -use std::fmt; -use std::mem; -use std::ops::Deref; -use std::os::raw::{c_uint, c_void}; -use std::pin::Pin; -use std::ptr::{null, null_mut}; -use std::vec::Vec; + +use crate::{ + constants::{ContentType, Epoch}, + err::{nspr, Error, PR_SetError, Res}, + prio, ssl, +}; // Alias common types. type PrFd = *mut prio::PRFileDesc; diff --git a/neqo-crypto/src/cert.rs b/neqo-crypto/src/cert.rs index 14d91843d3..64e63ec71a 100644 --- a/neqo-crypto/src/cert.rs +++ b/neqo-crypto/src/cert.rs @@ -4,18 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::err::secstatus_to_res; -use crate::p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray}; -use crate::ssl::{ - PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps, - SSL_PeerStapledOCSPResponses, +use std::{ + convert::TryFrom, + ptr::{addr_of, NonNull}, + slice, }; -use neqo_common::qerror; -use std::convert::TryFrom; -use std::ptr::{addr_of, NonNull}; +use neqo_common::qerror; -use std::slice; +use crate::{ + err::secstatus_to_res, + p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray}, + ssl::{ + PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps, + SSL_PeerStapledOCSPResponses, + }, +}; pub struct CertificateInfo { certs: CertList, diff --git a/neqo-crypto/src/ech.rs b/neqo-crypto/src/ech.rs index c4b33b0bee..1f54c4592e 100644 --- a/neqo-crypto/src/ech.rs +++ b/neqo-crypto/src/ech.rs @@ -4,6 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + convert::TryFrom, + ffi::CString, + os::raw::{c_char, c_uint}, + ptr::{addr_of_mut, null_mut}, +}; + +use neqo_common::qtrace; + use crate::{ err::{ssl::SSL_ERROR_ECH_RETRY_WITH_ECH, Error, Res}, experimental_api, @@ -13,14 +22,6 @@ use crate::{ }, ssl::{PRBool, PRFileDesc}, }; -use neqo_common::qtrace; -use std::{ - convert::TryFrom, - ffi::CString, - os::raw::{c_char, c_uint}, - ptr::{addr_of_mut, null_mut}, -}; - pub use crate::{ p11::{HpkeAeadId as AeadId, HpkeKdfId as KdfId, HpkeKemId as KemId}, ssl::HpkeSymmetricSuite as SymmetricSuite, @@ -89,8 +90,11 @@ pub fn convert_ech_error(fd: *mut PRFileDesc, err: Error) -> Error { /// Generate a key pair for encrypted client hello (ECH). /// /// # Errors +/// /// When NSS fails to generate a key pair or when the KEM is not supported. +/// /// # Panics +/// /// When underlying types aren't large enough to hold keys. So never. 
pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { let slot = Slot::internal()?; @@ -153,6 +157,7 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { /// Encode a configuration for encrypted client hello (ECH). /// /// # Errors +/// /// When NSS fails to generate a valid configuration encoding (i.e., unlikely). pub fn encode_config(config: u8, public_name: &str, pk: &PublicKey) -> Res> { // A sensible fixed value for the maximum length of a name. diff --git a/neqo-crypto/src/err.rs b/neqo-crypto/src/err.rs index fae81f9cb9..187303d2a9 100644 --- a/neqo-crypto/src/err.rs +++ b/neqo-crypto/src/err.rs @@ -7,8 +7,7 @@ #![allow(dead_code)] #![allow(clippy::upper_case_acronyms)] -use std::os::raw::c_char; -use std::str::Utf8Error; +use std::{os::raw::c_char, str::Utf8Error}; use crate::ssl::{SECStatus, SECSuccess}; @@ -19,9 +18,7 @@ mod codes { include!(concat!(env!("OUT_DIR"), "/nss_sslerr.rs")); include!(concat!(env!("OUT_DIR"), "/mozpkix.rs")); } -pub use codes::mozilla_pkix_ErrorCode as mozpkix; -pub use codes::SECErrorCodes as sec; -pub use codes::SSLErrorCodes as ssl; +pub use codes::{mozilla_pkix_ErrorCode as mozpkix, SECErrorCodes as sec, SSLErrorCodes as ssl}; pub mod nspr { include!(concat!(env!("OUT_DIR"), "/nspr_err.rs")); } @@ -137,10 +134,13 @@ pub fn is_blocked(result: &Res<()>) -> bool { #[cfg(test)] mod tests { - use crate::err::{self, is_blocked, secstatus_to_res, Error, PRErrorCode, PR_SetError}; - use crate::ssl::{SECFailure, SECSuccess}; use test_fixture::fixture_init; + use crate::{ + err::{self, is_blocked, secstatus_to_res, Error, PRErrorCode, PR_SetError}, + ssl::{SECFailure, SECSuccess}, + }; + fn set_error_code(code: PRErrorCode) { // This code doesn't work without initializing NSS first. fixture_init(); diff --git a/neqo-crypto/src/ext.rs b/neqo-crypto/src/ext.rs index 010b9f120e..310e87a1b7 100644 --- a/neqo-crypto/src/ext.rs +++ b/neqo-crypto/src/ext.rs @@ -4,6 +4,14 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cell::RefCell, + convert::TryFrom, + os::raw::{c_uint, c_void}, + pin::Pin, + rc::Rc, +}; + use crate::{ agentio::as_c_void, constants::{Extension, HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}, @@ -13,13 +21,6 @@ use crate::{ SSLExtensionHandler, SSLExtensionWriter, SSLHandshakeType, }, }; -use std::{ - cell::RefCell, - convert::TryFrom, - os::raw::{c_uint, c_void}, - pin::Pin, - rc::Rc, -}; experimental_api!(SSL_InstallExtensionHooks( fd: *mut PRFileDesc, @@ -121,11 +122,13 @@ impl ExtensionTracker { /// Use the provided handler to manage an extension. This is quite unsafe. /// /// # Safety + /// /// The holder of this `ExtensionTracker` needs to ensure that it lives at /// least as long as the file descriptor, as NSS provides no way to remove /// an extension handler once it is configured. /// /// # Errors + /// /// If the underlying NSS API fails to register a handler. pub unsafe fn new( fd: *mut PRFileDesc, diff --git a/neqo-crypto/src/hkdf.rs b/neqo-crypto/src/hkdf.rs index 44df30ecfd..e3cf77418c 100644 --- a/neqo-crypto/src/hkdf.rs +++ b/neqo-crypto/src/hkdf.rs @@ -4,6 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + convert::TryFrom, + os::raw::{c_char, c_uint}, + ptr::null_mut, +}; + use crate::{ constants::{ Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, @@ -16,12 +22,6 @@ use crate::{ }, }; -use std::{ - convert::TryFrom, - os::raw::{c_char, c_uint}, - ptr::null_mut, -}; - experimental_api!(SSL_HkdfExtract( version: Version, cipher: Cipher, @@ -54,6 +54,7 @@ fn key_size(version: Version, cipher: Cipher) -> Res { /// Generate a random key of the right size for the given suite. /// /// # Errors +/// /// Only if NSS fails. 
pub fn generate_key(version: Version, cipher: Cipher) -> Res { import_key(version, &random(key_size(version, cipher)?)) @@ -62,6 +63,7 @@ pub fn generate_key(version: Version, cipher: Cipher) -> Res { /// Import a symmetric key for use with HKDF. /// /// # Errors +/// /// Errors returned if the key buffer is an incompatible size or the NSS functions fail. pub fn import_key(version: Version, buf: &[u8]) -> Res { if version != TLS_VERSION_1_3 { @@ -85,6 +87,7 @@ pub fn import_key(version: Version, buf: &[u8]) -> Res { /// Extract a PRK from the given salt and IKM using the algorithm defined in RFC 5869. /// /// # Errors +/// /// Errors returned if inputs are too large or the NSS functions fail. pub fn extract( version: Version, @@ -104,6 +107,7 @@ pub fn extract( /// Expand a PRK using the HKDF-Expand-Label function defined in RFC 8446. /// /// # Errors +/// /// Errors returned if inputs are too large or the NSS functions fail. pub fn expand_label( version: Version, diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index 2409521903..2479eff8f5 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -4,6 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cell::RefCell, + convert::TryFrom, + fmt::{self, Debug}, + os::raw::{c_char, c_int, c_uint}, + ptr::{addr_of_mut, null, null_mut}, + rc::Rc, +}; + use crate::{ constants::{ Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, @@ -16,14 +25,6 @@ use crate::{ CK_CHACHA20_PARAMS, CK_MECHANISM_TYPE, }, }; -use std::{ - cell::RefCell, - convert::TryFrom, - fmt::{self, Debug}, - os::raw::{c_char, c_int, c_uint}, - ptr::{addr_of_mut, null, null_mut}, - rc::Rc, -}; experimental_api!(SSL_HkdfExpandLabelWithMech( version: Version, @@ -62,8 +63,11 @@ impl HpKey { /// QUIC-specific API for extracting a header-protection key. /// /// # Errors + /// /// Errors if HKDF fails or if the label is too long to fit in a `c_uint`. 
+ /// /// # Panics + /// /// When `cipher` is not known to this code. #[allow(clippy::cast_sign_loss)] // Cast for PK11_GetBlockSize is safe. pub fn extract(version: Version, cipher: Cipher, prk: &SymKey, label: &str) -> Res { @@ -141,9 +145,12 @@ impl HpKey { /// Generate a header protection mask for QUIC. /// /// # Errors + /// /// An error is returned if the NSS functions fail; a sample of the /// wrong size is the obvious cause. + /// /// # Panics + /// /// When the mechanism for our key is not supported. pub fn mask(&self, sample: &[u8]) -> Res> { let mut output = vec![0_u8; self.block_size()]; diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 2533c727e7..05424ee1f3 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -37,15 +37,19 @@ pub mod selfencrypt; mod ssl; mod time; +use std::{ + ffi::CString, + path::{Path, PathBuf}, + ptr::null, +}; + #[cfg(not(feature = "fuzzing"))] pub use self::aead::RealAead as Aead; - -#[cfg(feature = "fuzzing")] -pub use self::aead_fuzzing::FuzzingAead as Aead; - #[cfg(feature = "fuzzing")] pub use self::aead::RealAead; - +#[cfg(feature = "fuzzing")] +pub use self::aead_fuzzing::FuzzingAead as Aead; +use self::once::OnceResult; pub use self::{ agent::{ Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken, @@ -66,14 +70,6 @@ pub use self::{ ssl::Opt, }; -use self::once::OnceResult; - -use std::{ - ffi::CString, - path::{Path, PathBuf}, - ptr::null, -}; - const MINIMUM_NSS_VERSION: &str = "3.97"; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] @@ -119,8 +115,11 @@ fn version_check() { ); } -/// Initialize NSS. This only executes the initialization routines once, so if there is any chance that +/// Initialize NSS. This only executes the initialization routines once, so if there is any chance +/// that +/// /// # Panics +/// /// When NSS initialization fails. pub fn init() { // Set time zero. 
@@ -153,7 +152,9 @@ fn enable_ssl_trace() { } /// Initialize with a database. +/// /// # Panics +/// /// If NSS cannot be initialized. pub fn init_db>(dir: P) { time::init(); @@ -196,6 +197,7 @@ pub fn init_db>(dir: P) { } /// # Panics +/// /// If NSS isn't initialized. pub fn assert_initialized() { unsafe { diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index ebd641c17e..508d240062 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -9,8 +9,6 @@ #![allow(non_camel_case_types)] #![allow(non_snake_case)] -use crate::err::{secstatus_to_res, Error, Res}; -use neqo_common::hex_with_len; use std::{ convert::TryFrom, mem, @@ -19,6 +17,10 @@ use std::{ ptr::null_mut, }; +use neqo_common::hex_with_len; + +use crate::err::{secstatus_to_res, Error, Res}; + #[allow(clippy::upper_case_acronyms)] #[allow(clippy::unreadable_literal)] #[allow(unknown_lints, clippy::borrow_as_ptr)] @@ -39,6 +41,7 @@ macro_rules! scoped_ptr { /// Create a new instance of `$scoped` from a pointer. /// /// # Errors + /// /// When passed a null pointer generates an error. pub fn from_ptr(ptr: *mut $target) -> Result { if ptr.is_null() { @@ -80,8 +83,11 @@ impl PublicKey { /// Get the HPKE serialization of the public key. /// /// # Errors + /// /// When the key cannot be exported, which can be because the type is not supported. + /// /// # Panics + /// /// When keys are too large to fit in `c_uint/usize`. So only on programming error. pub fn key_data(&self) -> Res> { let mut buf = vec![0; 100]; @@ -124,9 +130,12 @@ impl PrivateKey { /// Get the bits of the private key. /// /// # Errors + /// /// When the key cannot be exported, which can be because the type is not supported /// or because the key data cannot be extracted from the PKCS#11 module. + /// /// # Panics + /// /// When the values are too large to fit. So never. pub fn key_data(&self) -> Res> { let mut key_item = Item::make_empty(); @@ -188,6 +197,7 @@ impl SymKey { /// You really don't want to use this. 
/// /// # Errors + /// /// Internal errors in case of failures in NSS. pub fn as_bytes(&self) -> Res<&[u8]> { secstatus_to_res(unsafe { PK11_ExtractKeyValue(self.ptr) })?; @@ -269,6 +279,7 @@ impl Item { /// content that is referenced there. /// /// # Safety + /// /// This dereferences two pointers. It doesn't get much less safe. pub unsafe fn into_vec(self) -> Vec { let b = self.ptr.as_ref().unwrap(); @@ -280,7 +291,9 @@ impl Item { } /// Generate a randomized buffer. +/// /// # Panics +/// /// When `size` is too large or NSS fails. #[must_use] pub fn random(size: usize) -> Vec { @@ -294,9 +307,10 @@ pub fn random(size: usize) -> Vec { #[cfg(test)] mod test { - use super::random; use test_fixture::fixture_init; + use super::random; + #[test] fn randomness() { fixture_init(); diff --git a/neqo-crypto/src/replay.rs b/neqo-crypto/src/replay.rs index 8f35ed6401..d4d3677f5c 100644 --- a/neqo-crypto/src/replay.rs +++ b/neqo-crypto/src/replay.rs @@ -4,11 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{ - err::Res, - ssl::PRFileDesc, - time::{Interval, PRTime, Time}, -}; use std::{ convert::{TryFrom, TryInto}, ops::{Deref, DerefMut}, @@ -17,6 +12,12 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + err::Res, + ssl::PRFileDesc, + time::{Interval, PRTime, Time}, +}; + // This is an opaque struct in NSS. #[allow(clippy::upper_case_acronyms)] #[allow(clippy::empty_enum)] @@ -55,6 +56,7 @@ impl AntiReplay { /// See the documentation in NSS for advice on how to set these values. /// /// # Errors + /// /// Returns an error if `now` is in the past relative to our baseline or /// NSS is unable to generate an anti-replay context. pub fn new(now: Instant, window: Duration, k: usize, bits: usize) -> Res { diff --git a/neqo-crypto/src/secrets.rs b/neqo-crypto/src/secrets.rs index 7fff5d4f68..75677636b6 100644 --- a/neqo-crypto/src/secrets.rs +++ b/neqo-crypto/src/secrets.rs @@ -4,6 +4,10 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. +use std::{os::raw::c_void, pin::Pin}; + +use neqo_common::qdebug; + use crate::{ agentio::as_c_void, constants::Epoch, @@ -11,8 +15,6 @@ use crate::{ p11::{PK11SymKey, PK11_ReferenceSymKey, SymKey}, ssl::{PRFileDesc, SSLSecretCallback, SSLSecretDirection}, }; -use neqo_common::qdebug; -use std::{os::raw::c_void, pin::Pin}; experimental_api!(SSL_SecretCallback( fd: *mut PRFileDesc, diff --git a/neqo-crypto/src/selfencrypt.rs b/neqo-crypto/src/selfencrypt.rs index 62d7057435..b8a63153fd 100644 --- a/neqo-crypto/src/selfencrypt.rs +++ b/neqo-crypto/src/selfencrypt.rs @@ -4,14 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::constants::{Cipher, Version}; -use crate::err::{Error, Res}; -use crate::p11::{random, SymKey}; -use crate::{hkdf, Aead}; +use std::mem; use neqo_common::{hex, qinfo, qtrace, Encoder}; -use std::mem; +use crate::{ + constants::{Cipher, Version}, + err::{Error, Res}, + hkdf, + p11::{random, SymKey}, + Aead, +}; #[derive(Debug)] pub struct SelfEncrypt { @@ -27,6 +30,7 @@ impl SelfEncrypt { const SALT_LENGTH: usize = 16; /// # Errors + /// /// Failure to generate a new HKDF key using NSS results in an error. pub fn new(version: Version, cipher: Cipher) -> Res { let key = hkdf::generate_key(version, cipher)?; @@ -46,9 +50,11 @@ impl SelfEncrypt { Aead::new(false, self.version, self.cipher, &secret, "neqo self") } - /// Rotate keys. This causes any previous key that is being held to be replaced by the current key. + /// Rotate keys. This causes any previous key that is being held to be replaced by the current + /// key. /// /// # Errors + /// /// Failure to generate a new HKDF key using NSS results in an error. 
pub fn rotate(&mut self) -> Res<()> { let new_key = hkdf::generate_key(self.version, self.cipher)?; @@ -65,6 +71,7 @@ impl SelfEncrypt { /// caller is responsible for carrying the AAD as appropriate. /// /// # Errors + /// /// Failure to protect using NSS AEAD APIs produces an error. pub fn seal(&self, aad: &[u8], plaintext: &[u8]) -> Res> { // Format is: @@ -117,6 +124,7 @@ impl SelfEncrypt { /// Open the protected `ciphertext`. /// /// # Errors + /// /// Returns an error when the self-encrypted object is invalid; /// when the keys have been rotated; or when NSS fails. #[allow(clippy::similar_names)] // aad is similar to aead diff --git a/neqo-crypto/src/ssl.rs b/neqo-crypto/src/ssl.rs index 08776f34ba..8aaacffae6 100644 --- a/neqo-crypto/src/ssl.rs +++ b/neqo-crypto/src/ssl.rs @@ -15,11 +15,13 @@ clippy::borrow_as_ptr )] -use crate::constants::Epoch; -use crate::err::{secstatus_to_res, Res}; - use std::os::raw::{c_uint, c_void}; +use crate::{ + constants::Epoch, + err::{secstatus_to_res, Res}, +}; + include!(concat!(env!("OUT_DIR"), "/nss_ssl.rs")); mod SSLOption { include!(concat!(env!("OUT_DIR"), "/nss_sslopt.rs")); diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index 981ac6f420..84dbfdb4a5 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -6,13 +6,6 @@ #![allow(clippy::upper_case_acronyms)] -use crate::{ - agentio::as_c_void, - err::{Error, Res}, - once::OnceResult, - ssl::{PRFileDesc, SSLTimeFunc}, -}; - use std::{ boxed::Box, convert::{TryFrom, TryInto}, @@ -22,6 +15,13 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + agentio::as_c_void, + err::{Error, Res}, + once::OnceResult, + ssl::{PRFileDesc, SSLTimeFunc}, +}; + include!(concat!(env!("OUT_DIR"), "/nspr_time.rs")); experimental_api!(SSL_SetTimeFunc( @@ -207,13 +207,14 @@ impl Default for TimeHolder { #[cfg(test)] mod test { - use super::{get_base, init, Interval, PRTime, Time}; - use crate::err::Res; use std::{ convert::{TryFrom, TryInto}, time::{Duration, 
Instant}, }; + use super::{get_base, init, Interval, PRTime, Time}; + use crate::err::Res; + #[test] fn convert_stable() { init(); diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index b9721e3d64..0ee1e66c38 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -2,9 +2,10 @@ #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] -use neqo_crypto::constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use neqo_crypto::hkdf; -use neqo_crypto::Aead; +use neqo_crypto::{ + constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, + hkdf, Aead, +}; use test_fixture::fixture_init; const AAD: &[u8] = &[ diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index 27017f0a4e..c2c83c467c 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -1,20 +1,21 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +use std::boxed::Box; + use neqo_crypto::{ generate_ech_keys, AuthenticationStatus, Client, Error, HandshakeState, SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker, TLS_AES_128_GCM_SHA256, TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; -use std::boxed::Box; - mod handshake; +use test_fixture::{fixture_init, now}; + use crate::handshake::{ connect, connect_fail, forward_records, resumption_setup, PermissiveZeroRttChecker, Resumption, ZERO_RTT_TOKEN_DATA, }; -use test_fixture::{fixture_init, now}; #[test] fn make_client() { diff --git a/neqo-crypto/tests/ext.rs b/neqo-crypto/tests/ext.rs index 02d78603b6..9ae81133f5 100644 --- a/neqo-crypto/tests/ext.rs +++ b/neqo-crypto/tests/ext.rs @@ -1,11 +1,13 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -use neqo_crypto::constants::{HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}; -use neqo_crypto::ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}; -use neqo_crypto::{Client, 
Server}; -use std::cell::RefCell; -use std::rc::Rc; +use std::{cell::RefCell, rc::Rc}; + +use neqo_crypto::{ + constants::{HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}, + ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}, + Client, Server, +}; use test_fixture::fixture_init; mod handshake; diff --git a/neqo-crypto/tests/handshake.rs b/neqo-crypto/tests/handshake.rs index 779ec5ac22..b2d8b9cc34 100644 --- a/neqo-crypto/tests/handshake.rs +++ b/neqo-crypto/tests/handshake.rs @@ -1,12 +1,12 @@ #![allow(dead_code)] +use std::{mem, time::Instant}; + use neqo_common::qinfo; use neqo_crypto::{ AntiReplay, AuthenticationStatus, Client, HandshakeState, RecordList, Res, ResumptionToken, SecretAgent, Server, ZeroRttCheckResult, ZeroRttChecker, }; -use std::mem; -use std::time::Instant; use test_fixture::{anti_replay, fixture_init, now}; /// Consume records until the handshake state changes. diff --git a/neqo-crypto/tests/hkdf.rs b/neqo-crypto/tests/hkdf.rs index 10a66f10b7..b4dde482f8 100644 --- a/neqo-crypto/tests/hkdf.rs +++ b/neqo-crypto/tests/hkdf.rs @@ -1,11 +1,13 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -use neqo_crypto::constants::{ - Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, - TLS_VERSION_1_3, +use neqo_crypto::{ + constants::{ + Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, + TLS_VERSION_1_3, + }, + hkdf, SymKey, }; -use neqo_crypto::{hkdf, SymKey}; use test_fixture::fixture_init; const SALT: &[u8] = &[ diff --git a/neqo-crypto/tests/hp.rs b/neqo-crypto/tests/hp.rs index 2e0aea6b8a..43b96869d8 100644 --- a/neqo-crypto/tests/hp.rs +++ b/neqo-crypto/tests/hp.rs @@ -1,6 +1,8 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +use std::mem; + use neqo_crypto::{ constants::{ Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, @@ -9,7 
+11,6 @@ use neqo_crypto::{ hkdf, hp::HpKey, }; -use std::mem; use test_fixture::fixture_init; fn make_hp(cipher: Cipher) -> HpKey { diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 5828f09392..fd9d4ea1ea 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -2,8 +2,12 @@ #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] -use neqo_crypto::constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use neqo_crypto::{init, selfencrypt::SelfEncrypt, Error}; +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, + init, + selfencrypt::SelfEncrypt, + Error, +}; #[test] fn se_create() { diff --git a/neqo-http3/src/buffered_send_stream.rs b/neqo-http3/src/buffered_send_stream.rs index 2a7d01bb74..4f6761fa80 100644 --- a/neqo-http3/src/buffered_send_stream.rs +++ b/neqo-http3/src/buffered_send_stream.rs @@ -4,10 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::Res; use neqo_common::qtrace; use neqo_transport::{Connection, StreamId}; +use crate::Res; + #[derive(Debug, PartialEq, Eq)] pub enum BufferedStream { Uninitialized, @@ -36,6 +37,7 @@ impl BufferedStream { } /// # Panics + /// /// If the `BufferedStream` is initialized more than one it will panic. pub fn init(&mut self, stream_id: StreamId) { debug_assert!(&Self::Uninitialized == self); @@ -46,6 +48,7 @@ impl BufferedStream { } /// # Panics + /// /// This functon cannot be called before the `BufferedStream` is initialized. pub fn buffer(&mut self, to_buf: &[u8]) { if let Self::Initialized { buf, .. } = self { @@ -56,6 +59,7 @@ impl BufferedStream { } /// # Errors + /// /// Returns `neqo_transport` errors. pub fn send_buffer(&mut self, conn: &mut Connection) -> Res { let label = ::neqo_common::log_subject!(::log::Level::Debug, self); @@ -76,6 +80,7 @@ impl BufferedStream { } /// # Errors + /// /// Returns `neqo_transport` errors. 
pub fn send_atomic(&mut self, conn: &mut Connection, to_send: &[u8]) -> Res { // First try to send anything that is in the buffer. diff --git a/neqo-http3/src/client_events.rs b/neqo-http3/src/client_events.rs index f21ec5929e..4b2ebc6c30 100644 --- a/neqo-http3/src/client_events.rs +++ b/neqo-http3/src/client_events.rs @@ -6,19 +6,18 @@ #![allow(clippy::module_name_repetitions)] -use crate::connection::Http3State; -use crate::settings::HSettingType; -use crate::{ - features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, - CloseType, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents, -}; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + use neqo_common::{event::Provider as EventProvider, Header}; use neqo_crypto::ResumptionToken; use neqo_transport::{AppError, StreamId, StreamType}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; +use crate::{ + connection::Http3State, + features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, + settings::HSettingType, + CloseType, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents, +}; #[derive(Debug, PartialEq, Eq, Clone)] pub enum WebTransportEvent { diff --git a/neqo-http3/src/conn_params.rs b/neqo-http3/src/conn_params.rs index 1ba2a601ad..23a5d2cc67 100644 --- a/neqo-http3/src/conn_params.rs +++ b/neqo-http3/src/conn_params.rs @@ -4,9 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::cmp::min; + use neqo_qpack::QpackSettings; use neqo_transport::ConnectionParameters; -use std::cmp::min; const QPACK_MAX_TABLE_SIZE_DEFAULT: u64 = 65536; const QPACK_TABLE_SIZE_LIMIT: u64 = (1 << 30) - 1; @@ -53,6 +54,7 @@ impl Http3Parameters { } /// # Panics + /// /// The table size must be smaller than 1 << 30 by the spec. 
#[must_use] pub fn max_table_size_encoder(mut self, mut max_table: u64) -> Self { @@ -68,6 +70,7 @@ impl Http3Parameters { } /// # Panics + /// /// The table size must be smaller than 1 << 30 by the spec. #[must_use] pub fn max_table_size_decoder(mut self, mut max_table: u64) -> Self { diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index f2d0f28806..bb2b5a6ce0 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -6,41 +6,43 @@ #![allow(clippy::module_name_repetitions)] -use crate::control_stream_local::ControlStreamLocal; -use crate::control_stream_remote::ControlStreamRemote; -use crate::features::extended_connect::{ - webtransport_session::WebTransportSession, - webtransport_streams::{WebTransportRecvStream, WebTransportSendStream}, - ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectType, -}; -use crate::frames::HFrame; -use crate::push_controller::PushController; -use crate::qpack_decoder_receiver::DecoderRecvStream; -use crate::qpack_encoder_receiver::EncoderRecvStream; -use crate::recv_message::{RecvMessage, RecvMessageInfo}; -use crate::request_target::{AsRequestTarget, RequestTarget}; -use crate::send_message::SendMessage; -use crate::settings::{HSettingType, HSettings, HttpZeroRttChecker}; -use crate::stream_type_reader::NewStreamHeadReader; -use crate::{ - client_events::Http3ClientEvents, CloseType, Http3Parameters, Http3StreamType, - HttpRecvStreamEvents, NewStreamType, Priority, PriorityHandler, ReceiveOutput, RecvStream, - RecvStreamEvents, SendStream, SendStreamEvents, +use std::{ + cell::RefCell, + collections::{BTreeSet, HashMap}, + fmt::Debug, + mem, + rc::Rc, }; + use neqo_common::{qdebug, qerror, qinfo, qtrace, qwarn, Decoder, Header, MessageType, Role}; -use neqo_qpack::decoder::QPackDecoder; -use neqo_qpack::encoder::QPackEncoder; +use neqo_qpack::{decoder::QPackDecoder, encoder::QPackEncoder}; use neqo_transport::{ streams::SendOrder, AppError, Connection, ConnectionError, 
DatagramTracking, State, StreamId, StreamType, ZeroRttState, }; -use std::cell::RefCell; -use std::collections::{BTreeSet, HashMap}; -use std::fmt::Debug; -use std::mem; -use std::rc::Rc; -use crate::{Error, Res}; +use crate::{ + client_events::Http3ClientEvents, + control_stream_local::ControlStreamLocal, + control_stream_remote::ControlStreamRemote, + features::extended_connect::{ + webtransport_session::WebTransportSession, + webtransport_streams::{WebTransportRecvStream, WebTransportSendStream}, + ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectType, + }, + frames::HFrame, + push_controller::PushController, + qpack_decoder_receiver::DecoderRecvStream, + qpack_encoder_receiver::EncoderRecvStream, + recv_message::{RecvMessage, RecvMessageInfo}, + request_target::{AsRequestTarget, RequestTarget}, + send_message::SendMessage, + settings::{HSettingType, HSettings, HttpZeroRttChecker}, + stream_type_reader::NewStreamHeadReader, + CloseType, Error, Http3Parameters, Http3StreamType, HttpRecvStreamEvents, NewStreamType, + Priority, PriorityHandler, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, + SendStreamEvents, +}; pub(crate) struct RequestDescription<'b, 't, T> where @@ -79,8 +81,8 @@ enum Http3RemoteSettingsState { /// - `ZeroRtt`: 0-RTT has been enabled and is active /// - Connected /// - GoingAway(StreamId): The connection has received a `GOAWAY` frame -/// - Closing(ConnectionError): The connection is closed. The closing has been initiated by this -/// end of the connection, e.g., the `CONNECTION_CLOSE` frame has been sent. In this state, the +/// - Closing(ConnectionError): The connection is closed. The closing has been initiated by this end +/// of the connection, e.g., the `CONNECTION_CLOSE` frame has been sent. In this state, the /// connection waits a certain amount of time to retransmit the `CONNECTION_CLOSE` frame if /// needed. 
/// - Closed(ConnectionError): This is the final close state: closing has been initialized by the @@ -384,7 +386,8 @@ impl Http3Connection { Ok(()) } - /// Inform a `HttpConnection` that a stream has data to send and that `send` should be called for the stream. + /// Inform a `HttpConnection` that a stream has data to send and that `send` should be called + /// for the stream. pub fn stream_has_pending_data(&mut self, stream_id: StreamId) { self.streams_with_pending_data.insert(stream_id); } @@ -502,8 +505,8 @@ impl Http3Connection { /// stream and unidi stream that are still do not have a type. /// The function cannot handle: /// 1) a `Push(_)`, `Htttp` or `WebTransportStream(_)` stream - /// 2) frames `MaxPushId`, `PriorityUpdateRequest`, `PriorityUpdateRequestPush` or `Goaway` - /// must be handled by `Http3Client`/`Server`. + /// 2) frames `MaxPushId`, `PriorityUpdateRequest`, `PriorityUpdateRequestPush` or `Goaway` must + /// be handled by `Http3Client`/`Server`. /// The function returns `ReceiveOutput`. pub fn handle_stream_readable( &mut self, @@ -579,8 +582,8 @@ impl Http3Connection { Ok(()) } - /// This is called when `neqo_transport::Connection` state has been change to take proper actions in - /// the HTTP3 layer. + /// This is called when `neqo_transport::Connection` state has been change to take proper + /// actions in the HTTP3 layer. pub fn handle_state_change(&mut self, conn: &mut Connection, state: &State) -> Res { qdebug!([self], "Handle state change {:?}", state); match state { @@ -626,7 +629,8 @@ impl Http3Connection { } } - /// This is called when 0RTT has been reset to clear `send_streams`, `recv_streams` and settings. + /// This is called when 0RTT has been reset to clear `send_streams`, `recv_streams` and + /// settings. 
pub fn handle_zero_rtt_rejected(&mut self) -> Res<()> { if self.state == Http3State::ZeroRtt { self.state = Http3State::Initializing; @@ -774,16 +778,16 @@ impl Http3Connection { /// This function will not handle the output of the function completely, but only /// handle the indication that a stream is closed. There are 2 cases: /// - an error occurred or - /// - the stream is done, i.e. the second value in `output` tuple is true if - /// the stream is done and can be removed from the `recv_streams` + /// - the stream is done, i.e. the second value in `output` tuple is true if the stream is done + /// and can be removed from the `recv_streams` /// How it is handling `output`: /// - if the stream is done, it removes the stream from `recv_streams` /// - if the stream is not done and there is no error, return `output` and the caller will /// handle it. /// - in case of an error: - /// - if it is only a stream error and the stream is not critical, send `STOP_SENDING` - /// frame, remove the stream from `recv_streams` and inform the listener that the stream - /// has been reset. + /// - if it is only a stream error and the stream is not critical, send `STOP_SENDING` frame, + /// remove the stream from `recv_streams` and inform the listener that the stream has been + /// reset. /// - otherwise this is a connection error. In this case, propagate the error to the caller /// that will handle it properly. fn handle_stream_manipulation_output( @@ -861,7 +865,8 @@ impl Http3Connection { } fn create_bidi_transport_stream(&self, conn: &mut Connection) -> Res { - // Requests cannot be created when a connection is in states: Initializing, GoingAway, Closing and Closed. + // Requests cannot be created when a connection is in states: Initializing, GoingAway, + // Closing and Closed. match self.state() { Http3State::GoingAway(..) | Http3State::Closing(..) | Http3State::Closed(..) 
=> { return Err(Error::AlreadyClosed) @@ -927,8 +932,9 @@ impl Http3Connection { )), ); - // Call immediately send so that at least headers get sent. This will make Firefox faster, since - // it can send request body immediately in most cases and does not need to do a complete process loop. + // Call immediately send so that at least headers get sent. This will make Firefox faster, + // since it can send request body immediately in most cases and does not need to do + // a complete process loop. self.send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)? @@ -936,11 +942,13 @@ impl Http3Connection { Ok(()) } - /// Stream data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Stream data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happens while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happens while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, conn: &mut Connection, @@ -1004,7 +1012,9 @@ impl Http3Connection { } /// Set the stream `SendOrder`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if the stream id doesn't exist pub fn stream_set_sendorder( conn: &mut Connection, @@ -1018,7 +1028,9 @@ impl Http3Connection { /// Set the stream Fairness. Fair streams will share bandwidth with other /// streams of the same sendOrder group (or the unordered group). Unfair streams /// will give bandwidth preferentially to the lowest streamId with data to send. 
+ /// /// # Errors + /// /// Returns `InvalidStreamId` if the stream id doesn't exist pub fn stream_set_fairness( conn: &mut Connection, @@ -1088,8 +1100,8 @@ impl Http3Connection { .send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)?; - // The following function may return InvalidStreamId from the transport layer if the stream has been closed - // already. It is ok to ignore it here. + // The following function may return InvalidStreamId from the transport layer if the stream + // has been closed already. It is ok to ignore it here. mem::drop(send_stream.close(conn)); if send_stream.done() { self.remove_send_stream(stream_id, conn); @@ -1184,7 +1196,8 @@ impl Http3Connection { .is_ok() { mem::drop(self.stream_close_send(conn, stream_id)); - // TODO issue 1294: add a timer to clean up the recv_stream if the peer does not do that in a short time. + // TODO issue 1294: add a timer to clean up the recv_stream if the peer does not + // do that in a short time. self.streams_with_pending_data.insert(stream_id); } else { self.cancel_fetch(stream_id, Error::HttpRequestRejected.code(), conn)?; @@ -1571,8 +1584,8 @@ impl Http3Connection { for id in recv { qtrace!("Remove the extended connect sub receiver stream {}", id); - // Use CloseType::ResetRemote so that an event will be sent. CloseType::LocalError would have - // the same effect. + // Use CloseType::ResetRemote so that an event will be sent. CloseType::LocalError would + // have the same effect. if let Some(mut s) = self.recv_streams.remove(&id) { mem::drop(s.reset(CloseType::ResetRemote(Error::HttpRequestCancelled.code()))); } diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 0be8acaa04..5cc0541c0c 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -4,16 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{ - client_events::{Http3ClientEvent, Http3ClientEvents}, - connection::{Http3Connection, Http3State, RequestDescription}, - frames::HFrame, - push_controller::{PushController, RecvPushEvents}, - recv_message::{RecvMessage, RecvMessageInfo}, - request_target::AsRequestTarget, - settings::HSettings, - Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, ReceiveOutput, +use std::{ + cell::RefCell, + convert::TryFrom, + fmt::{Debug, Display}, + mem, + net::SocketAddr, + rc::Rc, + time::Instant, }; + use neqo_common::{ event::Provider as EventProvider, hex, hex_with_len, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Decoder, Encoder, Header, MessageType, Role, @@ -25,20 +25,21 @@ use neqo_transport::{ DatagramTracking, Output, RecvStreamStats, SendStreamStats, Stats as TransportStats, StreamId, StreamType, Version, ZeroRttState, }; -use std::{ - cell::RefCell, - convert::TryFrom, - fmt::{Debug, Display}, - mem, - net::SocketAddr, - rc::Rc, - time::Instant, -}; -use crate::{Error, Res}; +use crate::{ + client_events::{Http3ClientEvent, Http3ClientEvents}, + connection::{Http3Connection, Http3State, RequestDescription}, + frames::HFrame, + push_controller::{PushController, RecvPushEvents}, + recv_message::{RecvMessage, RecvMessageInfo}, + request_target::AsRequestTarget, + settings::HSettings, + Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, + ReceiveOutput, Res, +}; -// This is used for filtering send_streams and recv_Streams with a stream_ids greater than or equal a given id. -// Only the same type (bidirectional or unidirectionsl) streams are filtered. +// This is used for filtering send_streams and recv_Streams with a stream_ids greater than or equal +// a given id. Only the same type (bidirectional or unidirectionsl) streams are filtered. 
fn id_gte(base: StreamId) -> impl FnMut((&StreamId, &U)) -> Option + 'static where U: ?Sized, @@ -161,7 +162,7 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// } -///``` +/// ``` /// /// ### Creating a `WebTransport` session /// @@ -198,8 +199,7 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// } -/// -///``` +/// ``` /// /// ### `WebTransport`: create a stream, send and receive data on the stream /// @@ -287,7 +287,6 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// ``` -/// pub struct Http3Client { conn: Connection, base_handler: Http3Connection, @@ -303,8 +302,9 @@ impl Display for Http3Client { impl Http3Client { /// # Errors - /// Making a `neqo-transport::connection` may produce an error. This can only be a crypto error if - /// the crypto context can't be created or configured. + /// + /// Making a `neqo-transport::connection` may produce an error. This can only be a crypto error + /// if the crypto context can't be created or configured. pub fn new( server_name: impl Into, cid_manager: Rc>, @@ -391,6 +391,7 @@ impl Http3Client { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Fails when the configuration provided is bad. pub fn enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { self.conn.client_enable_ech(ech_config_list)?; @@ -399,7 +400,9 @@ impl Http3Client { /// Get the connection id, which is useful for disambiguating connections to /// the same origin. + /// /// # Panics + /// /// Never, because clients always have this field. #[must_use] pub fn connection_id(&self) -> &ConnectionId { @@ -433,14 +436,18 @@ impl Http3Client { .and_then(|t| self.encode_resumption_token(&t)) } - /// This may be call if an application has a resumption token. This must be called before connection starts. + /// This may be call if an application has a resumption token. This must be called before + /// connection starts. 
/// /// The resumption token also contains encoded HTTP/3 settings. The settings will be decoded /// and used until the setting are received from the server. /// /// # Errors + /// /// An error is return if token cannot be decoded or a connection is is a wrong state. + /// /// # Panics + /// /// On closing if the base handler can't handle it (debug only). pub fn enable_resumption(&mut self, now: Instant, token: impl AsRef<[u8]>) -> Res<()> { if self.base_handler.state != Http3State::Initializing { @@ -499,7 +506,9 @@ impl Http3Client { } /// Attempt to force a key update. + /// /// # Errors + /// /// If the connection isn't confirmed, or there is an outstanding key update, this /// returns `Err(Error::TransportError(neqo_transport::Error::KeyUpdateBlocked))`. pub fn initiate_key_update(&mut self) -> Res<()> { @@ -512,9 +521,13 @@ impl Http3Client { /// The function fetches a resource using `method`, `target` and `headers`. A response body /// may be added by calling `send_data`. `stream_close_send` must be sent to finish the request /// even if request data are not sent. + /// /// # Errors + /// /// If a new stream cannot be created an error will be return. + /// /// # Panics + /// /// `SendMessage` implements `http_stream` so it will not panic. pub fn fetch<'x, 't: 'x, T>( &mut self, @@ -550,7 +563,9 @@ impl Http3Client { /// Send an [`PRIORITY_UPDATE`-frame][1] on next `Http3Client::process_output()` call. /// Returns if the priority got changed. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist /// /// [1]: https://datatracker.ietf.org/doc/html/draft-kazuho-httpbis-priority-04#section-5.2 @@ -560,7 +575,9 @@ impl Http3Client { /// An application may cancel a stream(request). /// Both sides, the receiviing and sending side, sending and receiving side, will be closed. + /// /// # Errors + /// /// An error will be return if a stream does not exist. 
pub fn cancel_fetch(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "reset_stream {} error={}.", stream_id, error); @@ -569,7 +586,9 @@ impl Http3Client { } /// This is call when application is done sending a request. + /// /// # Errors + /// /// An error will be return if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> { qinfo!([self], "Close sending side stream={}.", stream_id); @@ -578,6 +597,7 @@ impl Http3Client { } /// # Errors + /// /// An error will be return if a stream does not exist. pub fn stream_reset_send(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "stream_reset_send {} error={}.", stream_id, error); @@ -586,6 +606,7 @@ impl Http3Client { } /// # Errors + /// /// An error will be return if a stream does not exist. pub fn stream_stop_sending(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "stream_stop_sending {} error={}.", stream_id, error); @@ -598,11 +619,13 @@ impl Http3Client { /// headers are supplied through the `fetch` function. /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. 
pub fn send_data(&mut self, stream_id: StreamId, buf: &[u8]) -> Res { qinfo!( [self], @@ -617,11 +640,13 @@ impl Http3Client { .send_data(&mut self.conn, buf) } - /// Response data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Response data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happen while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happen while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, now: Instant, @@ -641,7 +666,9 @@ impl Http3Client { // API: Push streams /// Cancel a push + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn cancel_push(&mut self, push_id: u64) -> Res<()> { self.push_handler @@ -651,9 +678,11 @@ impl Http3Client { /// Push response data are read directly into a buffer supplied as a parameter of this function /// to avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist(`InvalidStreamId`) or an error has happened while - /// reading a stream, e.g. early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist(`InvalidStreamId`) or an error has happened + /// while reading a stream, e.g. early close, protocol error, etc. pub fn push_read_data( &mut self, now: Instant, @@ -670,8 +699,9 @@ impl Http3Client { } // API WebTransport - + // /// # Errors + /// /// If `WebTransport` cannot be created, e.g. the `WebTransport` support is /// not negotiated or the HTTP/3 connection is closed. 
pub fn webtransport_create_session<'x, 't: 'x, T>( @@ -699,11 +729,14 @@ impl Http3Client { } /// Close `WebTransport` cleanly + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub fn webtransport_close_session( &mut self, session_id: StreamId, @@ -715,6 +748,7 @@ impl Http3Client { } /// # Errors + /// /// This may return an error if the particular session does not exist /// or the connection is not in the active state. pub fn webtransport_create_stream( @@ -732,7 +766,9 @@ impl Http3Client { } /// Send `WebTransport` datagram. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. @@ -749,10 +785,14 @@ impl Http3Client { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the - /// packet number, ack frames, etc. + /// packet number, ack frames, etc. + /// /// # Errors + /// /// The function returns `NotAvailable` if datagrams are not enabled. + /// /// # Panics + /// /// This cannot panic. The max varint length is 8. pub fn webtransport_max_datagram_size(&self, session_id: StreamId) -> Res { Ok(self.conn.max_datagram_size()? 
@@ -760,9 +800,13 @@ impl Http3Client { } /// Sets the `SendOrder` for a given stream + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. + /// /// # Panics + /// /// This cannot panic. pub fn webtransport_set_sendorder( &mut self, @@ -773,16 +817,22 @@ impl Http3Client { } /// Sets the `Fairness` for a given stream + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. + /// /// # Panics + /// /// This cannot panic. pub fn webtransport_set_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { Http3Connection::stream_set_fairness(&mut self.conn, stream_id, fairness) } /// Returns the current `SendStreamStats` of a `WebTransportSendStream`. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn webtransport_send_stream_stats(&mut self, stream_id: StreamId) -> Res { self.base_handler @@ -793,7 +843,9 @@ impl Http3Client { } /// Returns the current `RecvStreamStats` of a `WebTransportRecvStream`. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn webtransport_recv_stream_stats(&mut self, stream_id: StreamId) -> Res { self.base_handler @@ -883,7 +935,8 @@ impl Http3Client { /// /// `process_output` can return: /// - a [`Output::Datagram(Datagram)`][1]: data that should be sent as a UDP payload, - /// - a [`Output::Callback(Duration)`][1]: the duration of a timer. `process_output` should be called at least after the time expires, + /// - a [`Output::Callback(Duration)`][1]: the duration of a timer. `process_output` should be + /// called at least after the time expires, /// - [`Output::None`][1]: this is returned when `Nttp3Client` is done and can be destroyed. /// /// The application should call this function repeatedly until a timer value or None is @@ -938,14 +991,14 @@ impl Http3Client { } } - /// This function checks [`ConnectionEvent`][2]s emitted by the QUIC layer, e.g. 
connection change - /// state events, new incoming stream data is available, a stream is was reset, etc. The HTTP/3 - /// layer needs to handle these events. Most of the events are handled by + /// This function checks [`ConnectionEvent`][2]s emitted by the QUIC layer, e.g. connection + /// change state events, new incoming stream data is available, a stream is was reset, etc. + /// The HTTP/3 layer needs to handle these events. Most of the events are handled by /// [`Http3Connection`][1] by calling appropriate functions, e.g. `handle_state_change`, /// `handle_stream_reset`, etc. [`Http3Connection`][1] handle functionalities that are common /// for the client and server side. Some of the functionalities are specific to the client and - /// they are handled by `Http3Client`. For example, [`ConnectionEvent::RecvStreamReadable`][3] event - /// is handled by `Http3Client::handle_stream_readable`. The function calls + /// they are handled by `Http3Client`. For example, [`ConnectionEvent::RecvStreamReadable`][3] + /// event is handled by `Http3Client::handle_stream_readable`. The function calls /// `Http3Connection::handle_stream_readable` and then hands the return value as appropriate /// for the client-side. /// @@ -958,11 +1011,11 @@ impl Http3Client { qdebug!([self], "check_connection_events - event {:?}.", e); match e { ConnectionEvent::NewStream { stream_id } => { - // During this event we only add a new stream to the Http3Connection stream list, - // with NewStreamHeadReader stream handler. + // During this event we only add a new stream to the Http3Connection stream + // list, with NewStreamHeadReader stream handler. // This function will not read from the stream and try to decode the stream. - // RecvStreamReadable will be emitted after this event and reading, i.e. decoding - // of a stream will happen during that event. + // RecvStreamReadable will be emitted after this event and reading, i.e. + // decoding of a stream will happen during that event. 
self.base_handler.add_new_stream(stream_id); } ConnectionEvent::SendStreamWritable { stream_id } => { @@ -1036,12 +1089,12 @@ impl Http3Client { /// - `ReceiveOutput::NewStream(NewStreamType::WebTransportStream(_))` - because /// `Http3ClientEvents`is needed and events handler is specific to the client. /// - `ReceiveOutput::ControlFrames(control_frames)` - some control frame handling differs - /// between the client and the server: + /// between the client and the server: /// - `HFrame::CancelPush` - only the client-side may receive it, /// - `HFrame::MaxPushId { .. }`, `HFrame::PriorityUpdateRequest { .. } ` and - /// `HFrame::PriorityUpdatePush` can only be receive on the server side, + /// `HFrame::PriorityUpdatePush` can only be receive on the server side, /// - `HFrame::Goaway { stream_id }` needs specific handling by the client by the protocol - /// specification. + /// specification. /// /// [1]: https://github.com/mozilla/neqo/blob/main/neqo-http3/src/connection.rs fn handle_stream_readable(&mut self, stream_id: StreamId) -> Res<()> { @@ -1194,7 +1247,9 @@ impl Http3Client { } /// Increases `max_stream_data` for a `stream_id`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. 
pub fn set_stream_max_data(&mut self, stream_id: StreamId, max_data: u64) -> Res<()> { @@ -1241,16 +1296,8 @@ impl EventProvider for Http3Client { #[cfg(test)] mod tests { - use super::{ - AuthenticationStatus, Connection, Error, HSettings, Header, Http3Client, Http3ClientEvent, - Http3Parameters, Http3State, Rc, RefCell, - }; - use crate::{ - frames::{HFrame, H3_FRAME_TYPE_SETTINGS, H3_RESERVED_FRAME_TYPES}, - qpack_encoder_receiver::EncoderRecvStream, - settings::{HSetting, HSettingType, H3_RESERVED_SETTINGS}, - Http3Server, Priority, RecvStream, - }; + use std::{convert::TryFrom, mem, time::Duration}; + use neqo_common::{event::Provider, qtrace, Datagram, Decoder, Encoder}; use neqo_crypto::{AllowZeroRtt, AntiReplay, ResumptionToken}; use neqo_qpack::{encoder::QPackEncoder, QpackSettings}; @@ -1258,12 +1305,22 @@ mod tests { ConnectionError, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, Version, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE, }; - use std::{convert::TryFrom, mem, time::Duration}; use test_fixture::{ addr, anti_replay, default_server_h3, fixture_init, new_server, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, }; + use super::{ + AuthenticationStatus, Connection, Error, HSettings, Header, Http3Client, Http3ClientEvent, + Http3Parameters, Http3State, Rc, RefCell, + }; + use crate::{ + frames::{HFrame, H3_FRAME_TYPE_SETTINGS, H3_RESERVED_FRAME_TYPES}, + qpack_encoder_receiver::EncoderRecvStream, + settings::{HSetting, HSettingType, H3_RESERVED_SETTINGS}, + Http3Server, Priority, RecvStream, + }; + fn assert_closed(client: &Http3Client, expected: &Error) { match client.state() { Http3State::Closing(err) | Http3State::Closed(err) => { @@ -1710,8 +1767,8 @@ mod tests { 0x43, 0xd3, 0xc1, ]; - // For fetch request fetch("GET", "https", "something.com", "/", &[(String::from("myheaders", "myvalue"))]) - // the following request header frame will be sent: + // For fetch request fetch("GET", "https", 
"something.com", "/", &[(String::from("myheaders", + // "myvalue"))]) the following request header frame will be sent: const EXPECTED_REQUEST_HEADER_FRAME_VERSION2: &[u8] = &[ 0x01, 0x11, 0x02, 0x80, 0xd1, 0xd7, 0x50, 0x89, 0x41, 0xe9, 0x2a, 0x67, 0x35, 0x53, 0x2e, 0x43, 0xd3, 0xc1, 0x10, @@ -1719,8 +1776,8 @@ mod tests { const HTTP_HEADER_FRAME_0: &[u8] = &[0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30]; - // The response header from HTTP_HEADER_FRAME (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30) are - // decoded into: + // The response header from HTTP_HEADER_FRAME (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30) + // are decoded into: fn check_response_header_0(header: &[Header]) { let expected_response_header_0 = &[ Header::new(":status", "200"), @@ -2487,7 +2544,8 @@ mod tests { #[test] fn fetch_basic() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(true); // send response - 200 Content-Length: 7 @@ -2627,7 +2685,8 @@ mod tests { // Send a request with the request body. #[test] fn fetch_with_data() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2669,9 +2728,11 @@ mod tests { read_response(&mut client, &mut server.conn, request_stream_id); } - // send a request with request body containing request_body. We expect to receive expected_data_frame_header. + // send a request with request body containing request_body. We expect to receive + // expected_data_frame_header. 
fn fetch_with_data_length_xbytes(request_body: &[u8], expected_data_frame_header: &[u8]) { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2757,7 +2818,8 @@ mod tests { expected_second_data_frame_header: &[u8], expected_second_data_frame: &[u8], ) { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2783,7 +2845,7 @@ mod tests { out = client.process(out.as_dgram_ref(), now()); } - // check received frames and send a response. + // Check received frames and send a response. while let Some(e) = server.conn.next_event() { if let ConnectionEvent::RecvStreamReadable { stream_id } = e { if stream_id == request_stream_id { @@ -2872,7 +2934,8 @@ mod tests { } // Send 2 frames. For the second one we can only send 16383 bytes. - // After the first frame there is exactly 16383+4 bytes left in the send buffer, but we can only send 16383 bytes. + // After the first frame there is exactly 16383+4 bytes left in the send buffer, but we can only + // send 16383 bytes. #[test] fn fetch_two_data_frame_second_16383bytes_place_for_16387() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16410); @@ -2880,7 +2943,8 @@ mod tests { } // Send 2 frames. For the second one we can only send 16383 bytes. - // After the first frame there is exactly 16383+5 bytes left in the send buffer, but we can only send 16383 bytes. 
+ // After the first frame there is exactly 16383+5 bytes left in the send buffer, but we can only + // send 16383 bytes. #[test] fn fetch_two_data_frame_second_16383bytes_place_for_16388() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16411); @@ -2888,7 +2952,8 @@ mod tests { } // Send 2 frames. For the second one we can send 16384 bytes. - // After the first frame there is exactly 16384+5 bytes left in the send buffer, but we can send 16384 bytes. + // After the first frame there is exactly 16384+5 bytes left in the send buffer, but we can send + // 16384 bytes. #[test] fn fetch_two_data_frame_second_16384bytes_place_for_16389() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16412); @@ -2898,7 +2963,8 @@ mod tests { // Test receiving STOP_SENDING with the HttpNoError error code. #[test] fn test_stop_sending_early_response() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with early_response. @@ -2975,7 +3041,8 @@ mod tests { // Server sends stop sending and reset. #[test] fn test_stop_sending_other_error_with_reset() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with RequestRejected. @@ -3038,7 +3105,8 @@ mod tests { // Server sends stop sending with RequestRejected, but it does not send reset. #[test] fn test_stop_sending_other_error_wo_reset() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. 
Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with RequestRejected. @@ -3085,7 +3153,8 @@ mod tests { // in client.events. The events will be removed. #[test] fn test_stop_sending_and_reset_other_error_with_events() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // send response - 200 Content-Length: 3 @@ -3158,7 +3227,8 @@ mod tests { // The events will be removed. #[test] fn test_stop_sending_other_error_with_events() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // send response - 200 Content-Length: 3 @@ -3221,7 +3291,8 @@ mod tests { // Server sends a reset. We will close sending side as well. #[test] fn test_reset_wo_stop_sending() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Send a reset. @@ -3958,7 +4029,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. 
let encoder_inst_pkt = server.conn.process(None, now()); // Send response @@ -4026,7 +4098,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()); let mut d = Encoder::default(); @@ -4790,7 +4863,8 @@ mod tests { #[test] fn no_data_ready_events_after_fin() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(true); // send response - 200 Content-Length: 7 @@ -5003,7 +5077,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); } @@ -5094,7 +5169,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.cancel_push(1), Err(Error::InvalidStreamId)); } @@ -5444,7 +5520,7 @@ mod tests { assert!(!client.events().any(push_event)); } - // Test that max_push_id is enforced when a push promise frame is received. + // Test that max_push_id is enforced when a push promise frame is received. 
#[test] fn exceed_max_push_id_promise() { // Connect and send a request @@ -5608,7 +5684,8 @@ mod tests { ))); } - // Test CANCEL_PUSH frame: after cancel push any new PUSH_PROMISE or push stream will be ignored. + // Test CANCEL_PUSH frame: after cancel push any new PUSH_PROMISE or push stream will be + // ignored. #[test] fn cancel_push_ignore_promise() { // Connect and send a request @@ -5624,7 +5701,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5653,7 +5731,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5681,7 +5760,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5694,7 +5774,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); } - // Test a push stream reset after a new PUSH_PROMISE or/and push stream. The events will be ignored. + // Test a push stream reset after a new PUSH_PROMISE or/and push stream. 
The events will be + // ignored. #[test] fn cancel_push_stream_after_push_promise_and_push_stream() { // Connect and send a request @@ -5715,7 +5796,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); @@ -5743,7 +5825,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); @@ -5762,13 +5845,15 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); } - // Test that push_promise and push data events will be removed after application calls cancel_push. + // Test that push_promise and push data events will be removed after application calls + // cancel_push. #[test] fn app_cancel_push_after_push_promise_and_push_stream() { // Connect and send a request @@ -5785,7 +5870,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. 
+ // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5817,7 +5903,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5899,7 +5986,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()).dgram(); assert!(encoder_inst_pkt.is_some()); @@ -7106,11 +7194,12 @@ mod tests { let out = server.conn.process(out.as_dgram_ref(), now()); // the server increased the max_data during the second read if that isn't the case - // in the future and therefore this asserts fails, the request data on stream 0 could be read - // to cause a max_update frame + // in the future and therefore this asserts fails, the request data on stream 0 could be + // read to cause a max_update frame assert_eq!(md_before + 1, server.conn.stats().frame_tx.max_data); - // make sure that the server didn't receive a priority_update on client control stream (stream_id 2) yet + // make sure that the server didn't receive a priority_update on client control stream + // (stream_id 2) yet let mut buf = [0; 32]; assert_eq!( server.conn.stream_recv(StreamId::new(2), &mut buf), @@ -7149,7 +7238,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that 
the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()); // Send response diff --git a/neqo-http3/src/connection_server.rs b/neqo-http3/src/connection_server.rs index c8cab52dd0..097209a226 100644 --- a/neqo-http3/src/connection_server.rs +++ b/neqo-http3/src/connection_server.rs @@ -4,21 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::connection::{Http3Connection, Http3State, WebTransportSessionAcceptAction}; -use crate::frames::HFrame; -use crate::recv_message::{RecvMessage, RecvMessageInfo}; -use crate::send_message::SendMessage; -use crate::server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents}; -use crate::{ - Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, - ReceiveOutput, Res, -}; +use std::{rc::Rc, time::Instant}; + use neqo_common::{event::Provider, qdebug, qinfo, qtrace, Header, MessageType, Role}; use neqo_transport::{ AppError, Connection, ConnectionEvent, DatagramTracking, StreamId, StreamType, }; -use std::rc::Rc; -use std::time::Instant; + +use crate::{ + connection::{Http3Connection, Http3State, WebTransportSessionAcceptAction}, + frames::HFrame, + recv_message::{RecvMessage, RecvMessageInfo}, + send_message::SendMessage, + server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents}, + Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, + ReceiveOutput, Res, +}; #[derive(Debug)] pub struct Http3ServerHandler { @@ -48,12 +49,15 @@ impl Http3ServerHandler { } /// Supply a response for a request. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. 
- /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub(crate) fn send_data( &mut self, stream_id: StreamId, @@ -89,7 +93,9 @@ impl Http3ServerHandler { } /// This is called when application is done sending a request. + /// /// # Errors + /// /// An error will be returned if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId, conn: &mut Connection) -> Res<()> { qinfo!([self], "Close sending side stream={}.", stream_id); @@ -101,7 +107,9 @@ impl Http3ServerHandler { /// An application may reset a stream(request). /// Both sides, sending and receiving side, will be closed. + /// /// # Errors + /// /// An error will be return if a stream does not exist. pub fn cancel_fetch( &mut self, @@ -154,11 +162,14 @@ impl Http3ServerHandler { } /// Close `WebTransport` cleanly + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. 
pub fn webtransport_close_session( &mut self, conn: &mut Connection, @@ -354,7 +365,7 @@ impl Http3ServerHandler { } HFrame::PriorityUpdatePush { element_id, priority } => { // TODO: check if the element_id references a promised push stream or - // is greater than the maximum Push ID. + // is greater than the maximum Push ID. self.events.priority_update(StreamId::from(element_id), priority); Ok(()) } @@ -383,11 +394,13 @@ impl Http3ServerHandler { } } - /// Response data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Response data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happen while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happen while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, conn: &mut Connection, diff --git a/neqo-http3/src/control_stream_local.rs b/neqo-http3/src/control_stream_local.rs index e6d63c3502..62676ee391 100644 --- a/neqo-http3/src/control_stream_local.rs +++ b/neqo-http3/src/control_stream_local.rs @@ -4,12 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::frames::HFrame; -use crate::{BufferedStream, Http3StreamType, RecvStream, Res}; +use std::{ + collections::{HashMap, VecDeque}, + convert::TryFrom, +}; + use neqo_common::{qtrace, Encoder}; use neqo_transport::{Connection, StreamId, StreamType}; -use std::collections::{HashMap, VecDeque}; -use std::convert::TryFrom; + +use crate::{frames::HFrame, BufferedStream, Http3StreamType, RecvStream, Res}; pub const HTTP3_UNI_STREAM_TYPE_CONTROL: u64 = 0x0; diff --git a/neqo-http3/src/control_stream_remote.rs b/neqo-http3/src/control_stream_remote.rs index 7b42ed2b11..aef4b4c0a4 100644 --- a/neqo-http3/src/control_stream_remote.rs +++ b/neqo-http3/src/control_stream_remote.rs @@ -4,12 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper}; -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; use neqo_common::qdebug; use neqo_transport::{Connection, StreamId}; -/// The remote control stream is responsible only for reading frames. The frames are handled by `Http3Connection`. +use crate::{ + frames::{FrameReader, HFrame, StreamReaderConnectionWrapper}, + CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream, +}; + +/// The remote control stream is responsible only for reading frames. The frames are handled by +/// `Http3Connection`. 
#[derive(Debug)] pub(crate) struct ControlStreamRemote { stream_id: StreamId, diff --git a/neqo-http3/src/features/extended_connect/mod.rs b/neqo-http3/src/features/extended_connect/mod.rs index 6be92dabba..77655833f7 100644 --- a/neqo-http3/src/features/extended_connect/mod.rs +++ b/neqo-http3/src/features/extended_connect/mod.rs @@ -9,15 +9,19 @@ pub(crate) mod webtransport_session; pub(crate) mod webtransport_streams; -use crate::client_events::Http3ClientEvents; -use crate::features::NegotiationState; -use crate::settings::{HSettingType, HSettings}; -use crate::{CloseType, Http3StreamInfo, Http3StreamType}; +use std::fmt::Debug; + use neqo_common::Header; use neqo_transport::{AppError, StreamId}; -use std::fmt::Debug; pub(crate) use webtransport_session::WebTransportSession; +use crate::{ + client_events::Http3ClientEvents, + features::NegotiationState, + settings::{HSettingType, HSettings}, + CloseType, Http3StreamInfo, Http3StreamType, +}; + #[derive(Debug, PartialEq, Eq, Clone)] pub enum SessionCloseReason { Error(AppError), diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs index 1b9511b255..1c58596dd3 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs @@ -4,13 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::features::extended_connect::tests::webtransport::{ - wt_default_parameters, WtTest, DATAGRAM_SIZE, -}; -use crate::{Error, Http3Parameters, WebTransportRequest}; +use std::convert::TryFrom; + use neqo_common::Encoder; use neqo_transport::Error as TransportError; -use std::convert::TryFrom; + +use crate::{ + features::extended_connect::tests::webtransport::{ + wt_default_parameters, WtTest, DATAGRAM_SIZE, + }, + Error, Http3Parameters, WebTransportRequest, +}; const DGRAM: &[u8] = &[0, 100]; diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs index fcdcff0fe1..51dc47e4c1 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs @@ -8,7 +8,15 @@ mod datagrams; mod negotiation; mod sessions; mod streams; +use std::{cell::RefCell, rc::Rc, time::Duration}; + use neqo_common::event::Provider; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{ConnectionParameters, StreamId, StreamType}; +use test_fixture::{ + addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, + DEFAULT_KEYS, DEFAULT_SERVER_NAME, +}; use crate::{ features::extended_connect::SessionCloseReason, Error, Header, Http3Client, Http3ClientEvent, @@ -16,16 +24,6 @@ use crate::{ RecvStreamStats, SendStreamStats, WebTransportEvent, WebTransportRequest, WebTransportServerEvent, WebTransportSessionAcceptAction, }; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{ConnectionParameters, StreamId, StreamType}; -use std::cell::RefCell; -use std::rc::Rc; -use std::time::Duration; - -use test_fixture::{ - addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, - DEFAULT_KEYS, DEFAULT_SERVER_NAME, -}; const DATAGRAM_SIZE: u64 = 1200; diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs 
b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs index e838646ab2..27f669861d 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs @@ -4,17 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::time::Duration; + +use neqo_common::{event::Provider, Encoder}; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{Connection, ConnectionError, StreamType}; +use test_fixture::{default_server_h3, now}; + use super::{connect, default_http3_client, default_http3_server, exchange_packets}; use crate::{ settings::{HSetting, HSettingType, HSettings}, Error, HFrame, Http3Client, Http3ClientEvent, Http3Parameters, Http3Server, Http3State, WebTransportEvent, }; -use neqo_common::{event::Provider, Encoder}; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{Connection, ConnectionError, StreamType}; -use std::time::Duration; -use test_fixture::{default_server_h3, now}; fn check_wt_event(client: &mut Http3Client, wt_enable_client: bool, wt_enable_server: bool) { let wt_event = client.events().find_map(|e| { diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs index 06d9318b87..5f929d0e4b 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs @@ -4,19 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::features::extended_connect::tests::webtransport::{ - default_http3_client, default_http3_server, wt_default_parameters, WtTest, -}; -use crate::{ - features::extended_connect::SessionCloseReason, frames::WebTransportFrame, Error, Header, - Http3ClientEvent, Http3OrWebTransportStream, Http3Server, Http3ServerEvent, Http3State, - Priority, WebTransportEvent, WebTransportServerEvent, WebTransportSessionAcceptAction, -}; +use std::mem; + use neqo_common::{event::Provider, Encoder}; use neqo_transport::StreamType; -use std::mem; use test_fixture::now; +use crate::{ + features::extended_connect::{ + tests::webtransport::{ + default_http3_client, default_http3_server, wt_default_parameters, WtTest, + }, + SessionCloseReason, + }, + frames::WebTransportFrame, + Error, Header, Http3ClientEvent, Http3OrWebTransportStream, Http3Server, Http3ServerEvent, + Http3State, Priority, WebTransportEvent, WebTransportServerEvent, + WebTransportSessionAcceptAction, +}; + #[test] fn wt_session() { let mut wt = WtTest::new(); diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs index a50c45d518..b898dbb31e 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs @@ -4,11 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::features::extended_connect::tests::webtransport::WtTest; -use crate::{features::extended_connect::SessionCloseReason, Error}; -use neqo_transport::StreamType; use std::mem; +use neqo_transport::StreamType; + +use crate::{ + features::extended_connect::{tests::webtransport::WtTest, SessionCloseReason}, + Error, +}; + #[test] fn wt_client_stream_uni() { const BUF_CLIENT: &[u8] = &[0; 10]; @@ -287,13 +291,17 @@ fn wt_server_stream_bidi_stop_sending() { // 1) Both sides of a bidirectional client stream are opened. // 2) A client unidirectional stream is opened. // 3) A client unidirectional stream has been closed and both sides consumed the closing info. -// 4) A client unidirectional stream has been closed, but only the server has consumed the closing info. -// 5) A client unidirectional stream has been closed, but only the client has consum the closing info. +// 4) A client unidirectional stream has been closed, but only the server has consumed the closing +// info. +// 5) A client unidirectional stream has been closed, but only the client has consum the closing +// info. // 6) Both sides of a bidirectional server stream are opened. // 7) A server unidirectional stream is opened. // 8) A server unidirectional stream has been closed and both sides consumed the closing info. -// 9) A server unidirectional stream has been closed, but only the server has consumed the closing info. -// 10) A server unidirectional stream has been closed, but only the client has consumed the closing info. +// 9) A server unidirectional stream has been closed, but only the server has consumed the closing +// info. +// 10) A server unidirectional stream has been closed, but only the client has consumed the closing +// info. // 11) Both sides of a bidirectional stream have been closed and consumed by both sides. // 12) Both sides of a bidirectional stream have been closed, but not consumed by both sides. 
// 13) Multiples open streams diff --git a/neqo-http3/src/features/extended_connect/webtransport_session.rs b/neqo-http3/src/features/extended_connect/webtransport_session.rs index c446fd3843..adbdf07e11 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_session.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_session.rs @@ -6,6 +6,12 @@ #![allow(clippy::module_name_repetitions)] +use std::{any::Any, cell::RefCell, collections::BTreeSet, mem, rc::Rc}; + +use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; +use neqo_qpack::{QPackDecoder, QPackEncoder}; +use neqo_transport::{streams::SendOrder, Connection, DatagramTracking, StreamId}; + use super::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}; use crate::{ frames::{FrameReader, StreamReaderRecvStreamWrapper, WebTransportFrame}, @@ -15,14 +21,6 @@ use crate::{ HttpRecvStreamEvents, Priority, PriorityHandler, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; -use neqo_qpack::{QPackDecoder, QPackEncoder}; -use neqo_transport::{streams::SendOrder, Connection, DatagramTracking, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::collections::BTreeSet; -use std::mem; -use std::rc::Rc; #[derive(Debug, PartialEq)] enum SessionState { @@ -100,6 +98,7 @@ impl WebTransportSession { } /// # Panics + /// /// This function is only called with `RecvStream` and `SendStream` that also implement /// the http specific functions and `http_stream()` will never return `None`. #[must_use] @@ -134,8 +133,11 @@ impl WebTransportSession { } /// # Errors + /// /// The function can only fail if supplied headers are not valid http headers. + /// /// # Panics + /// /// `control_stream_send` implements the http specific functions and `http_stream()` /// will never return `None`. 
pub fn send_request(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()> { @@ -220,6 +222,7 @@ impl WebTransportSession { } /// # Panics + /// /// This cannot panic because headers are checked before this function called. pub fn maybe_check_headers(&mut self) { if SessionState::Negotiating != self.state { @@ -335,6 +338,7 @@ impl WebTransportSession { } /// # Errors + /// /// It may return an error if the frame is not correctly decoded. pub fn read_control_stream(&mut self, conn: &mut Connection) -> Res<()> { let (f, fin) = self @@ -373,8 +377,9 @@ impl WebTransportSession { } /// # Errors - /// Return an error if the stream was closed on the transport layer, but that information is not yet - /// consumed on the http/3 layer. + /// + /// Return an error if the stream was closed on the transport layer, but that information is not + /// yet consumed on the http/3 layer. pub fn close_session(&mut self, conn: &mut Connection, error: u32, message: &str) -> Res<()> { self.state = SessionState::Done; let close_frame = WebTransportFrame::CloseSession { @@ -399,6 +404,7 @@ impl WebTransportSession { } /// # Errors + /// /// Returns an error if the datagram exceeds the remote datagram size limit. pub fn send_datagram( &self, diff --git a/neqo-http3/src/features/extended_connect/webtransport_streams.rs b/neqo-http3/src/features/extended_connect/webtransport_streams.rs index ca918dce9e..84dcd20618 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_streams.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_streams.rs @@ -4,15 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, rc::Rc}; + +use neqo_common::Encoder; +use neqo_transport::{Connection, RecvStreamStats, SendStreamStats, StreamId}; + use super::WebTransportSession; use crate::{ CloseType, Http3StreamInfo, Http3StreamType, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::Encoder; -use neqo_transport::{Connection, RecvStreamStats, SendStreamStats, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; pub const WEBTRANSPORT_UNI_STREAM: u64 = 0x54; pub const WEBTRANSPORT_STREAM: u64 = 0x41; diff --git a/neqo-http3/src/features/mod.rs b/neqo-http3/src/features/mod.rs index 0e045ed80b..34e21f50ac 100644 --- a/neqo-http3/src/features/mod.rs +++ b/neqo-http3/src/features/mod.rs @@ -4,23 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{fmt::Debug, mem}; + +use neqo_common::qtrace; + use crate::{ client_events::Http3ClientEvents, settings::{HSettingType, HSettings}, }; -use neqo_common::qtrace; -use std::fmt::Debug; -use std::mem; pub mod extended_connect; /// States: /// - `Disable` - it is not turned on for this connection. -/// - `Negotiating` - the feature is enabled locally, but settings from the peer -/// have not been received yet. +/// - `Negotiating` - the feature is enabled locally, but settings from the peer have not been +/// received yet. /// - `Negotiated` - the settings have been received and both sides support the feature. -/// - `NegotiationFailed` - the settings have been received and the peer does not -/// support the feature. +/// - `NegotiationFailed` - the settings have been received and the peer does not support the +/// feature. #[derive(Debug)] pub enum NegotiationState { Disabled, diff --git a/neqo-http3/src/frames/hframe.rs b/neqo-http3/src/frames/hframe.rs index 28ce7608f9..83e69ba894 100644 --- a/neqo-http3/src/frames/hframe.rs +++ b/neqo-http3/src/frames/hframe.rs @@ -4,12 +4,13 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res}; +use std::{fmt::Debug, io::Write}; + use neqo_common::{Decoder, Encoder}; use neqo_crypto::random; use neqo_transport::StreamId; -use std::fmt::Debug; -use std::io::Write; + +use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res}; pub(crate) type HFrameType = u64; diff --git a/neqo-http3/src/frames/reader.rs b/neqo-http3/src/frames/reader.rs index 9d81f2c1c1..5017c666a4 100644 --- a/neqo-http3/src/frames/reader.rs +++ b/neqo-http3/src/frames/reader.rs @@ -6,34 +6,39 @@ #![allow(clippy::module_name_repetitions)] -use crate::{Error, RecvStream, Res}; +use std::{convert::TryFrom, fmt::Debug}; + use neqo_common::{ hex_with_len, qtrace, Decoder, IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint, }; use neqo_transport::{Connection, StreamId}; -use std::convert::TryFrom; -use std::fmt::Debug; + +use crate::{Error, RecvStream, Res}; const MAX_READ_SIZE: usize = 4096; pub(crate) trait FrameDecoder { fn is_known_type(frame_type: u64) -> bool; /// # Errors + /// /// Returns `HttpFrameUnexpected` if frames is not alowed, i.e. is a `H3_RESERVED_FRAME_TYPES`. fn frame_type_allowed(_frame_type: u64) -> Res<()> { Ok(()) } + /// # Errors + /// /// If a frame cannot be properly decoded. fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res>; } pub(crate) trait StreamReader { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. - /// Return an error if the stream was closed on the transport layer, but that information is not yet - /// consumed on the http/3 layer. + /// Return an error if the stream was closed on the transport layer, but that information is not + /// yet consumed on the http/3 layer. 
fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)>; } @@ -50,6 +55,7 @@ impl<'a> StreamReaderConnectionWrapper<'a> { impl<'a> StreamReader for StreamReaderConnectionWrapper<'a> { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { let res = self.conn.stream_recv(self.stream_id, buf)?; @@ -70,6 +76,7 @@ impl<'a> StreamReaderRecvStreamWrapper<'a> { impl<'a> StreamReader for StreamReaderRecvStreamWrapper<'a> { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { self.recv_stream.read_data(self.conn, buf) @@ -146,7 +153,9 @@ impl FrameReader { } /// returns true if quic stream was closed. + /// /// # Errors + /// /// May return `HttpFrame` if a frame cannot be decoded. /// and `TransportStreamDoesNotExist` if `stream_recv` fails. pub fn receive>( @@ -186,6 +195,7 @@ impl FrameReader { } /// # Errors + /// /// May return `HttpFrame` if a frame cannot be decoded. fn consume>(&mut self, mut input: Decoder) -> Res> { match &mut self.state { diff --git a/neqo-http3/src/frames/tests/hframe.rs b/neqo-http3/src/frames/tests/hframe.rs index 54b7c94c8e..3da7e7fc36 100644 --- a/neqo-http3/src/frames/tests/hframe.rs +++ b/neqo-http3/src/frames/tests/hframe.rs @@ -4,15 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use neqo_common::{Decoder, Encoder}; +use neqo_transport::StreamId; +use test_fixture::fixture_init; + use super::enc_dec_hframe; use crate::{ frames::HFrame, settings::{HSetting, HSettingType, HSettings}, Priority, }; -use neqo_common::{Decoder, Encoder}; -use neqo_transport::StreamId; -use test_fixture::fixture_init; #[test] fn test_data_frame() { diff --git a/neqo-http3/src/frames/tests/mod.rs b/neqo-http3/src/frames/tests/mod.rs index 086af90300..33eea5497a 100644 --- a/neqo-http3/src/frames/tests/mod.rs +++ b/neqo-http3/src/frames/tests/mod.rs @@ -4,15 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::{ - reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, -}; +use std::mem; + use neqo_common::Encoder; use neqo_crypto::AuthenticationStatus; use neqo_transport::StreamType; -use std::mem; use test_fixture::{default_client, default_server, now}; +use crate::frames::{ + reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, +}; + #[allow(clippy::many_single_char_names)] pub(crate) fn enc_dec>(d: &Encoder, st: &str, remaining: usize) -> T { // For data, headers and push_promise we do not read all bytes from the buffer diff --git a/neqo-http3/src/frames/tests/reader.rs b/neqo-http3/src/frames/tests/reader.rs index 8923a0994b..fed1477ba4 100644 --- a/neqo-http3/src/frames/tests/reader.rs +++ b/neqo-http3/src/frames/tests/reader.rs @@ -4,6 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{fmt::Debug, mem}; + +use neqo_common::Encoder; +use neqo_transport::{Connection, StreamId, StreamType}; +use test_fixture::{connect, now}; + use crate::{ frames::{ reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, @@ -11,11 +17,6 @@ use crate::{ settings::{HSetting, HSettingType, HSettings}, Error, }; -use neqo_common::Encoder; -use neqo_transport::{Connection, StreamId, StreamType}; -use std::fmt::Debug; -use std::mem; -use test_fixture::{connect, now}; struct FrameReaderTest { pub fr: FrameReader, diff --git a/neqo-http3/src/frames/wtframe.rs b/neqo-http3/src/frames/wtframe.rs index b5f76161c5..deb7a026a0 100644 --- a/neqo-http3/src/frames/wtframe.rs +++ b/neqo-http3/src/frames/wtframe.rs @@ -4,10 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{frames::reader::FrameDecoder, Error, Res}; -use neqo_common::{Decoder, Encoder}; use std::convert::TryFrom; +use neqo_common::{Decoder, Encoder}; + +use crate::{frames::reader::FrameDecoder, Error, Res}; + pub(crate) type WebTransportFrameType = u64; const WT_FRAME_CLOSE_SESSION: WebTransportFrameType = 0x2843; diff --git a/neqo-http3/src/headers_checks.rs b/neqo-http3/src/headers_checks.rs index 7d679409ad..9bf661c8fe 100644 --- a/neqo-http3/src/headers_checks.rs +++ b/neqo-http3/src/headers_checks.rs @@ -6,10 +6,12 @@ #![allow(clippy::unused_unit)] // see https://github.com/Lymia/enumset/issues/44 -use crate::{Error, MessageType, Res}; +use std::convert::TryFrom; + use enumset::{enum_set, EnumSet, EnumSetType}; use neqo_common::Header; -use std::convert::TryFrom; + +use crate::{Error, MessageType, Res}; #[derive(EnumSetType, Debug)] enum PseudoHeaderState { @@ -45,7 +47,9 @@ impl TryFrom<(MessageType, &str)> for PseudoHeaderState { } /// Check whether the response is informational(1xx). 
+/// /// # Errors +/// /// Returns an error if response headers do not contain /// a status header or if the value of the header is 101 or cannot be parsed. pub fn is_interim(headers: &[Header]) -> Res { @@ -89,7 +93,9 @@ fn track_pseudo( /// Checks if request/response headers are well formed, i.e. contain /// allowed pseudo headers and in a right order, etc. +/// /// # Errors +/// /// Returns an error if headers are not well formed. pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> { let mut method_value: Option<&str> = None; @@ -155,7 +161,9 @@ pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> { /// Checks if trailers are well formed, i.e. pseudo headers are not /// allowed in trailers. +/// /// # Errors +/// /// Returns an error if trailers are not well formed. pub fn trailers_valid(headers: &[Header]) -> Res<()> { for header in headers { @@ -168,9 +176,10 @@ pub fn trailers_valid(headers: &[Header]) -> Res<()> { #[cfg(test)] mod tests { + use neqo_common::Header; + use super::headers_valid; use crate::MessageType; - use neqo_common::Header; fn create_connect_headers() -> Vec
{ vec![ diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index e0dc4c3c93..635707ca7c 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -160,14 +160,8 @@ mod server_events; mod settings; mod stream_type_reader; -use neqo_qpack::Error as QpackError; -pub use neqo_transport::{streams::SendOrder, Output, StreamId}; -use neqo_transport::{ - AppError, Connection, Error as TransportError, RecvStreamStats, SendStreamStats, -}; -use std::fmt::Debug; +use std::{any::Any, cell::RefCell, fmt::Debug, rc::Rc}; -use crate::priority::PriorityHandler; use buffered_send_stream::BufferedStream; pub use client_events::{Http3ClientEvent, WebTransportEvent}; pub use conn_params::Http3Parameters; @@ -177,23 +171,28 @@ use features::extended_connect::WebTransportSession; use frames::HFrame; pub use neqo_common::Header; use neqo_common::MessageType; +use neqo_qpack::Error as QpackError; +pub use neqo_transport::{streams::SendOrder, Output, StreamId}; +use neqo_transport::{ + AppError, Connection, Error as TransportError, RecvStreamStats, SendStreamStats, +}; pub use priority::Priority; pub use server::Http3Server; pub use server_events::{ Http3OrWebTransportStream, Http3ServerEvent, WebTransportRequest, WebTransportServerEvent, }; -use std::any::Any; -use std::cell::RefCell; -use std::rc::Rc; use stream_type_reader::NewStreamType; +use crate::priority::PriorityHandler; + type Res = Result; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Error { HttpNoError, HttpGeneralProtocol, - HttpGeneralProtocolStream, //this is the same as the above but it should only close a stream not a connection. + HttpGeneralProtocolStream, /* this is the same as the above but it should only close a + * stream not a connection. */ // When using this error, you need to provide a value that is unique, which // will allow the specific error to be identified. This will be validated in CI. 
HttpInternal(u16), @@ -288,6 +287,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_send_errors(err: &Error) -> Self { @@ -304,6 +304,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_create_errors(err: &TransportError) -> Self { @@ -318,6 +319,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_recv_errors(err: &Error) -> Self { @@ -345,8 +347,11 @@ impl Error { } /// # Errors - /// Any error is mapped to the indicated type. + /// + /// Any error is mapped to the indicated type. + /// /// # Panics + /// /// On internal errors, in debug mode. fn map_error(r: Result>, err: Self) -> Result { r.map_err(|e| { @@ -450,16 +455,23 @@ trait RecvStream: Stream { /// The stream reads data from the corresponding quic stream and returns `ReceiveOutput`. /// The function also returns true as the second parameter if the stream is done and /// could be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn receive(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)>; + /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, etc. fn reset(&mut self, close_type: CloseType) -> Res<()>; + /// The function allows an app to read directly from the quic stream. The function /// returns the number of bytes written into `buf` and true/false if the stream is /// completely done and can be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. 
fn read_data(&mut self, _conn: &mut Connection, _buf: &mut [u8]) -> Res<(usize, bool)> { Err(Error::InvalidStreamId) @@ -483,7 +495,9 @@ trait HttpRecvStream: RecvStream { /// This function is similar to the receive function and has the same output, i.e. /// a `ReceiveOutput` enum and bool. The bool is true if the stream is completely done /// and can be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn header_unblocked(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)>; @@ -552,6 +566,7 @@ trait HttpRecvStreamEvents: RecvStreamEvents { trait SendStream: Stream { /// # Errors + /// /// Error my occur during sending data, e.g. protocol error, etc. fn send(&mut self, conn: &mut Connection) -> Res<()>; fn has_data_to_send(&self) -> bool; @@ -559,14 +574,19 @@ trait SendStream: Stream { fn done(&self) -> bool; fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option) -> Res<()>; fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()>; + /// # Errors + /// /// Error my occur during sending data, e.g. protocol error, etc. fn send_data(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res; /// # Errors + /// /// It may happen that the transport stream is already close. This is unlikely. fn close(&mut self, conn: &mut Connection) -> Res<()>; + /// # Errors + /// /// It may happen that the transport stream is already close. This is unlikely. fn close_with_message( &mut self, @@ -576,6 +596,7 @@ trait SendStream: Stream { ) -> Res<()> { Err(Error::InvalidStreamId) } + /// This function is called when sending side is closed abruptly by the peer or /// the application. fn handle_stop_sending(&mut self, close_type: CloseType); @@ -584,6 +605,7 @@ trait SendStream: Stream { } /// # Errors + /// /// It may happen that the transport stream is already close. This is unlikely. 
fn send_data_atomic(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<()> { Err(Error::InvalidStreamId) @@ -599,7 +621,9 @@ trait HttpSendStream: SendStream { /// This function is used to supply headers to a http message. The /// function is used for request headers, response headers, 1xx response and /// trailers. + /// /// # Errors + /// /// This can also return an error if the underlying stream is closed. fn send_headers(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()>; fn set_new_listener(&mut self, _conn_events: Box) {} diff --git a/neqo-http3/src/priority.rs b/neqo-http3/src/priority.rs index 6a391de578..f2651d3bb5 100644 --- a/neqo-http3/src/priority.rs +++ b/neqo-http3/src/priority.rs @@ -1,8 +1,9 @@ -use crate::{frames::HFrame, Error, Header, Res}; +use std::{convert::TryFrom, fmt}; + use neqo_transport::StreamId; use sfv::{BareItem, Item, ListEntry, Parser}; -use std::convert::TryFrom; -use std::fmt; + +use crate::{frames::HFrame, Error, Header, Res}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct Priority { @@ -21,6 +22,7 @@ impl Default for Priority { impl Priority { /// # Panics + /// /// If an invalid urgency (>7 is given) #[must_use] pub fn new(urgency: u8, incremental: bool) -> Priority { @@ -44,9 +46,13 @@ impl Priority { } /// Constructs a priority from raw bytes (either a field value of frame content). + /// /// # Errors + /// /// When the contained syntax is invalid. + /// /// # Panics + /// /// Never, but the compiler is not smart enough to work that out. 
pub fn from_bytes(bytes: &[u8]) -> Res { let dict = Parser::parse_dictionary(bytes).map_err(|_| Error::HttpFrame)?; @@ -149,10 +155,10 @@ impl PriorityHandler { #[cfg(test)] mod test { - use crate::priority::PriorityHandler; - use crate::{HFrame, Priority}; use neqo_transport::StreamId; + use crate::{priority::PriorityHandler, HFrame, Priority}; + #[test] fn priority_updates_ignore_same() { let mut p = PriorityHandler::new(false, Priority::new(5, false)); @@ -183,7 +189,8 @@ mod test { let mut p = PriorityHandler::new(false, Priority::new(5, false)); assert!(p.maybe_update_priority(Priority::new(6, false))); assert!(p.maybe_update_priority(Priority::new(7, false))); - // updating two times with a different priority -> the last priority update should be in the next frame + // updating two times with a different priority -> the last priority update should be in the + // next frame let expected = HFrame::PriorityUpdateRequest { element_id: 4, priority: Priority::new(7, false), diff --git a/neqo-http3/src/push_controller.rs b/neqo-http3/src/push_controller.rs index 62171039e3..c4591991ae 100644 --- a/neqo-http3/src/push_controller.rs +++ b/neqo-http3/src/push_controller.rs @@ -3,28 +3,33 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::client_events::{Http3ClientEvent, Http3ClientEvents}; -use crate::connection::Http3Connection; -use crate::frames::HFrame; -use crate::{CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, Res}; +use std::{ + cell::RefCell, + collections::VecDeque, + convert::TryFrom, + fmt::{Debug, Display}, + mem, + rc::Rc, + slice::SliceIndex, +}; + use neqo_common::{qerror, qinfo, qtrace, Header}; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::fmt::Display; -use std::mem; -use std::rc::Rc; -use std::slice::SliceIndex; + +use crate::{ + client_events::{Http3ClientEvent, Http3ClientEvents}, + connection::Http3Connection, + frames::HFrame, + CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, Res, +}; /// `PushStates`: -/// `Init`: there is no push stream nor a push promise. This state is only used to keep track of opened and closed -/// push streams. +/// `Init`: there is no push stream nor a push promise. This state is only used to keep track of +/// opened and closed push streams. /// `PushPromise`: the push has only ever receive a pushpromise frame -/// `OnlyPushStream`: there is only a push stream. All push stream events, i.e. `PushHeaderReady` and -/// `PushDataReadable` will be delayed until a push promise is received (they are kept in -/// `events`). +/// `OnlyPushStream`: there is only a push stream. All push stream events, i.e. `PushHeaderReady` +/// and `PushDataReadable` will be delayed until a push promise is received +/// (they are kept in `events`). /// `Active`: there is a push steam and at least one push promise frame. /// `Close`: the push stream has been closed or reset already. #[derive(Debug, PartialEq, Clone)] @@ -122,21 +127,22 @@ impl ActivePushStreams { /// `PushController` keeps information about push stream states. 
/// -/// A `PushStream` calls `add_new_push_stream` that may change the push state from Init to `OnlyPushStream` or from -/// `PushPromise` to `Active`. If a stream has already been closed `add_new_push_stream` returns false (the `PushStream` -/// will close the transport stream). +/// A `PushStream` calls `add_new_push_stream` that may change the push state from Init to +/// `OnlyPushStream` or from `PushPromise` to `Active`. If a stream has already been closed +/// `add_new_push_stream` returns false (the `PushStream` will close the transport stream). /// A `PushStream` calls `push_stream_reset` if the transport stream has been canceled. /// When a push stream is done it calls `close`. /// /// The `PushController` handles: -/// `PUSH_PROMISE` frame: frames may change the push state from Init to `PushPromise` and from `OnlyPushStream` to -/// `Active`. Frames for a closed steams are ignored. -/// `CANCEL_PUSH` frame: (`handle_cancel_push` will be called). If a push is in state `PushPromise` or `Active`, any -/// posted events will be removed and a `PushCanceled` event will be posted. If a push is in -/// state `OnlyPushStream` or `Active` the transport stream and the `PushStream` will be closed. -/// The frame will be ignored for already closed pushes. -/// Application calling cancel: the actions are similar to the `CANCEL_PUSH` frame. The difference is that -/// `PushCanceled` will not be posted and a `CANCEL_PUSH` frame may be sent. +/// `PUSH_PROMISE` frame: frames may change the push state from Init to `PushPromise` and from +/// `OnlyPushStream` to `Active`. Frames for a closed steams are ignored. +/// `CANCEL_PUSH` frame: (`handle_cancel_push` will be called). If a push is in state `PushPromise` +/// or `Active`, any posted events will be removed and a `PushCanceled` event +/// will be posted. If a push is in state `OnlyPushStream` or `Active` the +/// transport stream and the `PushStream` will be closed. 
The frame will be +/// ignored for already closed pushes. Application calling cancel: the actions are similar to the +/// `CANCEL_PUSH` frame. The difference is that `PushCanceled` will not +/// be posted and a `CANCEL_PUSH` frame may be sent. #[derive(Debug)] pub(crate) struct PushController { max_concurent_push: u64, @@ -145,8 +151,8 @@ pub(crate) struct PushController { // We keep a stream until the stream has been closed. push_streams: ActivePushStreams, // The keeps the next consecutive push_id that should be open. - // All push_id < next_push_id_to_open are in the push_stream lists. If they are not in the list they have - // been already closed. + // All push_id < next_push_id_to_open are in the push_stream lists. If they are not in the list + // they have been already closed. conn_events: Http3ClientEvents, } @@ -169,7 +175,9 @@ impl Display for PushController { impl PushController { /// A new `push_promise` has been received. + /// /// # Errors + /// /// `HttpId` if `push_id` greater than it is allowed has been received. pub fn new_push_promise( &mut self, @@ -338,8 +346,9 @@ impl PushController { match self.push_streams.get(push_id) { None => { qtrace!("Push has already been closed."); - // If we have some events for the push_id in the event queue, the caller still does not - // not know that the push has been closed. Otherwise return InvalidStreamId. + // If we have some events for the push_id in the event queue, the caller still does + // not not know that the push has been closed. Otherwise return + // InvalidStreamId. 
if self.conn_events.has_push(push_id) { self.conn_events.remove_events_for_push_id(push_id); Ok(()) diff --git a/neqo-http3/src/qlog.rs b/neqo-http3/src/qlog.rs index 84c13dad43..c3a13fd19f 100644 --- a/neqo-http3/src/qlog.rs +++ b/neqo-http3/src/qlog.rs @@ -8,14 +8,13 @@ use std::convert::TryFrom; +use neqo_common::qlog::NeqoQlog; +use neqo_transport::StreamId; use qlog::{ self, events::{DataRecipient, EventData}, }; -use neqo_common::qlog::NeqoQlog; -use neqo_transport::StreamId; - pub fn h3_data_moved_up(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) { qlog.add_event_data(|| { let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved { diff --git a/neqo-http3/src/qpack_decoder_receiver.rs b/neqo-http3/src/qpack_decoder_receiver.rs index 3cdfdf74cd..46b9ca590b 100644 --- a/neqo-http3/src/qpack_decoder_receiver.rs +++ b/neqo-http3/src/qpack_decoder_receiver.rs @@ -4,11 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; +use std::{cell::RefCell, rc::Rc}; + use neqo_qpack::QPackDecoder; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; + +use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; #[derive(Debug)] pub(crate) struct DecoderRecvStream { diff --git a/neqo-http3/src/qpack_encoder_receiver.rs b/neqo-http3/src/qpack_encoder_receiver.rs index efe234173f..76c779bcf2 100644 --- a/neqo-http3/src/qpack_encoder_receiver.rs +++ b/neqo-http3/src/qpack_encoder_receiver.rs @@ -4,11 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; +use std::{cell::RefCell, rc::Rc}; + use neqo_qpack::QPackEncoder; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; + +use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; #[derive(Debug)] pub(crate) struct EncoderRecvStream { diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index dd27c51337..36e8f65b19 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -4,24 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS}; -use crate::push_controller::PushController; +use std::{ + any::Any, cell::RefCell, cmp::min, collections::VecDeque, convert::TryFrom, fmt::Debug, rc::Rc, +}; + +use neqo_common::{qdebug, qinfo, qtrace, Header}; +use neqo_qpack::decoder::QPackDecoder; +use neqo_transport::{Connection, StreamId}; + use crate::{ + frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS}, headers_checks::{headers_valid, is_interim}, priority::PriorityHandler, + push_controller::PushController, qlog, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpRecvStream, HttpRecvStreamEvents, MessageType, Priority, ReceiveOutput, RecvStream, Res, Stream, }; -use neqo_common::{qdebug, qinfo, qtrace, Header}; -use neqo_qpack::decoder::QPackDecoder; -use neqo_transport::{Connection, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::cmp::min; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::rc::Rc; #[allow(clippy::module_name_repetitions)] pub(crate) struct RecvMessageInfo { @@ -348,7 +346,8 @@ impl RecvMessage { panic!("Stream readable after being closed!"); } RecvMessageState::ExtendedConnect => { - // Ignore read event, this 
request is waiting to be picked up by a new WebTransportSession + // Ignore read event, this request is waiting to be picked up by a new + // WebTransportSession break Ok(()); } }; diff --git a/neqo-http3/src/request_target.rs b/neqo-http3/src/request_target.rs index a58445b5d7..28bc22ac2d 100644 --- a/neqo-http3/src/request_target.rs +++ b/neqo-http3/src/request_target.rs @@ -7,6 +7,7 @@ #![allow(clippy::module_name_repetitions)] use std::fmt::{Debug, Formatter}; + use url::{ParseError, Url}; pub trait RequestTarget: Debug { @@ -58,7 +59,9 @@ pub trait AsRequestTarget<'x> { type Target: RequestTarget; type Error; /// Produce a `RequestTarget` that refers to `self`. + /// /// # Errors + /// /// This method can generate an error of type `Self::Error` /// if the conversion is unsuccessful. fn as_request_target(&'x self) -> Result; diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index 531f804937..96156938a0 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -4,22 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::frames::HFrame; +use std::{any::Any, cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; + +use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; +use neqo_qpack::encoder::QPackEncoder; +use neqo_transport::{streams::SendOrder, Connection, StreamId}; + use crate::{ + frames::HFrame, headers_checks::{headers_valid, is_interim, trailers_valid}, qlog, BufferedStream, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpSendStream, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; -use neqo_qpack::encoder::QPackEncoder; -use neqo_transport::{streams::SendOrder, Connection, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::cmp::min; -use std::fmt::Debug; -use std::rc::Rc; - const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2 const MAX_DATA_HEADER_SIZE_2_LIMIT: usize = MAX_DATA_HEADER_SIZE_2 + 3; // 63 + 3 (size of the next buffer data frame header) const MAX_DATA_HEADER_SIZE_3: usize = (1 << 14) - 1; // Maximal amount of data with DATA frame header size 3 @@ -134,6 +131,7 @@ impl SendMessage { } /// # Errors + /// /// `ClosedCriticalStream` if the encoder stream is closed. /// `InternalError` if an unexpected error occurred. fn encode( @@ -236,11 +234,13 @@ impl SendStream for SendMessage { } /// # Errors + /// /// `InternalError` if an unexpected error occurred. /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) 
+ /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) fn send(&mut self, conn: &mut Connection) -> Res<()> { let sent = Error::map_error(self.stream.send_buffer(conn), Error::HttpInternal(5))?; qlog::h3_data_moved_down(conn.qlog_mut(), self.stream_id(), sent); diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index c432039972..b29f715451 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -6,6 +6,21 @@ #![allow(clippy::module_name_repetitions)] +use std::{ + cell::{RefCell, RefMut}, + collections::HashMap, + path::PathBuf, + rc::Rc, + time::Instant, +}; + +use neqo_common::{qtrace, Datagram}; +use neqo_crypto::{AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttChecker}; +use neqo_transport::{ + server::{ActiveConnectionRef, Server, ValidateAddress}, + ConnectionIdGenerator, Output, +}; + use crate::{ connection::Http3State, connection_server::Http3ServerHandler, @@ -16,19 +31,6 @@ use crate::{ settings::HttpZeroRttChecker, Http3Parameters, Http3StreamInfo, Res, }; -use neqo_common::{qtrace, Datagram}; -use neqo_crypto::{AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttChecker}; -use neqo_transport::{ - server::{ActiveConnectionRef, Server, ValidateAddress}, - ConnectionIdGenerator, Output, -}; -use std::{ - cell::{RefCell, RefMut}, - collections::HashMap, - path::PathBuf, - rc::Rc, - time::Instant, -}; type HandlerRef = Rc>; @@ -49,6 +51,7 @@ impl ::std::fmt::Display for Http3Server { impl Http3Server { /// # Errors + /// /// Making a `neqo_transport::Server` may produce an error. This can only be a crypto error if /// the socket can't be created or configured. pub fn new( @@ -92,6 +95,7 @@ impl Http3Server { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Only when NSS can't serialize a configuration. 
pub fn enable_ech( &mut self, @@ -309,24 +313,26 @@ fn prepare_data( #[cfg(test)] mod tests { - use super::{Http3Server, Http3ServerEvent, Http3State, Rc, RefCell}; - use crate::{Error, HFrame, Header, Http3Parameters, Priority}; + use std::{ + collections::HashMap, + mem, + ops::{Deref, DerefMut}, + }; + use neqo_common::{event::Provider, Encoder}; use neqo_crypto::{AuthenticationStatus, ZeroRttCheckResult, ZeroRttChecker}; use neqo_qpack::{encoder::QPackEncoder, QpackSettings}; use neqo_transport::{ Connection, ConnectionError, ConnectionEvent, State, StreamId, StreamType, ZeroRttState, }; - use std::{ - collections::HashMap, - mem, - ops::{Deref, DerefMut}, - }; use test_fixture::{ anti_replay, default_client, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN, DEFAULT_KEYS, }; + use super::{Http3Server, Http3ServerEvent, Http3State, Rc, RefCell}; + use crate::{Error, HFrame, Header, Http3Parameters, Priority}; + const DEFAULT_SETTINGS: QpackSettings = QpackSettings { max_table_size_encoder: 100, max_table_size_decoder: 100, diff --git a/neqo-http3/src/server_connection_events.rs b/neqo-http3/src/server_connection_events.rs index f56288e204..cbc8e6d56e 100644 --- a/neqo-http3/src/server_connection_events.rs +++ b/neqo-http3/src/server_connection_events.rs @@ -4,17 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::connection::Http3State; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + +use neqo_common::Header; +use neqo_transport::{AppError, StreamId}; + use crate::{ + connection::Http3State, features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, CloseType, Http3StreamInfo, HttpRecvStreamEvents, Priority, RecvStreamEvents, SendStreamEvents, }; -use neqo_common::Header; -use neqo_transport::AppError; -use neqo_transport::StreamId; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; #[derive(Debug, PartialEq, Eq, Clone)] pub(crate) enum Http3ServerConnEvent { diff --git a/neqo-http3/src/server_events.rs b/neqo-http3/src/server_events.rs index e0cc84ed4c..4be48363df 100644 --- a/neqo-http3/src/server_events.rs +++ b/neqo-http3/src/server_events.rs @@ -6,20 +6,25 @@ #![allow(clippy::module_name_repetitions)] -use crate::connection::{Http3State, WebTransportSessionAcceptAction}; -use crate::connection_server::Http3ServerHandler; -use crate::{ - features::extended_connect::SessionCloseReason, Http3StreamInfo, Http3StreamType, Priority, Res, +use std::{ + cell::RefCell, + collections::VecDeque, + convert::TryFrom, + ops::{Deref, DerefMut}, + rc::Rc, }; + use neqo_common::{qdebug, qinfo, Encoder, Header}; -use neqo_transport::server::ActiveConnectionRef; -use neqo_transport::{AppError, Connection, DatagramTracking, StreamId, StreamType}; +use neqo_transport::{ + server::ActiveConnectionRef, AppError, Connection, DatagramTracking, StreamId, StreamType, +}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::ops::{Deref, DerefMut}; -use std::rc::Rc; +use crate::{ + connection::{Http3State, WebTransportSessionAcceptAction}, + connection_server::Http3ServerHandler, + features::extended_connect::SessionCloseReason, + Http3StreamInfo, Http3StreamType, Priority, Res, +}; #[derive(Debug, Clone)] pub struct StreamHandler { @@ -57,7 +62,9 @@ impl StreamHandler { } 
/// Supply a response header to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_headers(&mut self, headers: &[Header]) -> Res<()> { self.handler.borrow_mut().send_headers( @@ -68,7 +75,9 @@ impl StreamHandler { } /// Supply response data to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_data(&mut self, buf: &[u8]) -> Res { self.handler @@ -77,7 +86,9 @@ impl StreamHandler { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_close_send(&mut self) -> Res<()> { self.handler @@ -86,7 +97,9 @@ impl StreamHandler { } /// Request a peer to stop sending a stream. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_stop_sending(&mut self, app_error: AppError) -> Res<()> { qdebug!( @@ -103,7 +116,9 @@ impl StreamHandler { } /// Reset sending side of a stream. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_reset_send(&mut self, app_error: AppError) -> Res<()> { qdebug!( @@ -120,7 +135,9 @@ impl StreamHandler { } /// Reset a stream/request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore pub fn cancel_fetch(&mut self, app_error: AppError) -> Res<()> { qdebug!([self], "reset error:{}.", app_error); @@ -159,14 +176,18 @@ impl Http3OrWebTransportStream { } /// Supply a response header to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_headers(&mut self, headers: &[Header]) -> Res<()> { self.stream_handler.send_headers(headers) } /// Supply response data to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. 
pub fn send_data(&mut self, data: &[u8]) -> Res { qinfo!([self], "Set new response."); @@ -174,7 +195,9 @@ impl Http3OrWebTransportStream { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_close_send(&mut self) -> Res<()> { qinfo!([self], "Set new response."); @@ -243,7 +266,9 @@ impl WebTransportRequest { } /// Respond to a `WebTransport` session request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn response(&mut self, accept: &WebTransportSessionAcceptAction) -> Res<()> { qinfo!([self], "Set a response for a WebTransport session."); @@ -258,6 +283,7 @@ impl WebTransportRequest { } /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// Also return an error if the stream was closed on the transport layer, /// but that information is not yet consumed on the http/3 layer. @@ -279,7 +305,9 @@ impl WebTransportRequest { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn create_stream(&mut self, stream_type: StreamType) -> Res { let session_id = self.stream_handler.stream_id(); @@ -301,7 +329,9 @@ impl WebTransportRequest { } /// Send `WebTransport` datagram. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. @@ -326,9 +356,13 @@ impl WebTransportRequest { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the /// packet number, ack frames, etc. + /// /// # Errors + /// /// The function returns `NotAvailable` if datagrams are not enabled. + /// /// # Panics + /// /// This cannot panic. The max varint length is 8. 
pub fn max_datagram_size(&self) -> Res { let max_size = self.stream_handler.conn.borrow().max_datagram_size()?; diff --git a/neqo-http3/src/settings.rs b/neqo-http3/src/settings.rs index 1e952dae6d..9cd4b994b7 100644 --- a/neqo-http3/src/settings.rs +++ b/neqo-http3/src/settings.rs @@ -6,10 +6,12 @@ #![allow(clippy::module_name_repetitions)] -use crate::{Error, Http3Parameters, Res}; +use std::ops::Deref; + use neqo_common::{Decoder, Encoder}; use neqo_crypto::{ZeroRttCheckResult, ZeroRttChecker}; -use std::ops::Deref; + +use crate::{Error, Http3Parameters, Res}; type SettingsType = u64; @@ -120,6 +122,7 @@ impl HSettings { } /// # Errors + /// /// Returns an error if settings types are reserved of settings value are not permitted. pub fn decode_frame_contents(&mut self, dec: &mut Decoder) -> Res<()> { while dec.remaining() > 0 { diff --git a/neqo-http3/src/stream_type_reader.rs b/neqo-http3/src/stream_type_reader.rs index 775d8dc233..f36181d3b1 100644 --- a/neqo-http3/src/stream_type_reader.rs +++ b/neqo-http3/src/stream_type_reader.rs @@ -6,14 +6,15 @@ #![allow(clippy::module_name_repetitions)] -use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL; -use crate::frames::H3_FRAME_TYPE_HEADERS; -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; use neqo_common::{qtrace, Decoder, IncrementalDecoderUint, Role}; -use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER; -use neqo_qpack::encoder::QPACK_UNI_STREAM_TYPE_ENCODER; +use neqo_qpack::{decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STREAM_TYPE_ENCODER}; use neqo_transport::{Connection, StreamId, StreamType}; +use crate::{ + control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, CloseType, + Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream, +}; + pub(crate) const HTTP3_UNI_STREAM_TYPE_PUSH: u64 = 0x1; pub(crate) const WEBTRANSPORT_UNI_STREAM: u64 = 0x54; pub(crate) const WEBTRANSPORT_STREAM: u64 = 0x41; @@ -33,7 
+34,9 @@ impl NewStreamType { /// Get the final `NewStreamType` from a stream type. All streams, except Push stream, /// are identified by the type only. This function will return None for the Push stream /// because it needs the ID besides the type. - /// # Error + /// + /// # Errors + /// /// Push streams received by the server are not allowed and this function will return /// `HttpStreamCreation` error. fn final_stream_type( @@ -67,12 +70,11 @@ impl NewStreamType { /// `NewStreamHeadReader` reads the head of an unidirectional stream to identify the stream. /// There are 2 type of streams: -/// - streams identified by the single type (varint encoded). Most streams belong to -/// this category. The `NewStreamHeadReader` will switch from `ReadType`to `Done` state. -/// - streams identified by the type and the ID (both varint encoded). For example, a -/// push stream is identified by the type and `PushId`. After reading the type in -/// the `ReadType` state, `NewStreamHeadReader` changes to `ReadId` state and from there -/// to `Done` state +/// - streams identified by the single type (varint encoded). Most streams belong to this category. +/// The `NewStreamHeadReader` will switch from `ReadType`to `Done` state. +/// - streams identified by the type and the ID (both varint encoded). For example, a push stream +/// is identified by the type and `PushId`. After reading the type in the `ReadType` state, +/// `NewStreamHeadReader` changes to `ReadId` state and from there to `Done` state #[derive(Debug)] pub(crate) enum NewStreamHeadReader { ReadType { @@ -140,12 +142,12 @@ impl NewStreamHeadReader { role, stream_id, .. } => { // final_stream_type may return: - // - an error if a stream type is not allowed for the role, e.g. Push - // stream received at the server. + // - an error if a stream type is not allowed for the role, e.g. Push stream + // received at the server. 
// - a final type if a stream is only identify by the type // - None - if a stream is not identified by the type only, but it needs - // additional data from the header to produce the final type, e.g. - // a push stream needs pushId as well. + // additional data from the header to produce the final type, e.g. a push + // stream needs pushId as well. let final_type = NewStreamType::final_stream_type(output, stream_id.stream_type(), *role); match (&final_type, fin) { @@ -234,20 +236,23 @@ impl RecvStream for NewStreamHeadReader { #[cfg(test)] mod tests { - use super::{ - NewStreamHeadReader, HTTP3_UNI_STREAM_TYPE_PUSH, WEBTRANSPORT_STREAM, - WEBTRANSPORT_UNI_STREAM, + use std::mem; + + use neqo_common::{Encoder, Role}; + use neqo_qpack::{ + decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STREAM_TYPE_ENCODER, }; use neqo_transport::{Connection, StreamId, StreamType}; - use std::mem; use test_fixture::{connect, now}; - use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL; - use crate::frames::H3_FRAME_TYPE_HEADERS; - use crate::{CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res}; - use neqo_common::{Encoder, Role}; - use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER; - use neqo_qpack::encoder::QPACK_UNI_STREAM_TYPE_ENCODER; + use super::{ + NewStreamHeadReader, HTTP3_UNI_STREAM_TYPE_PUSH, WEBTRANSPORT_STREAM, + WEBTRANSPORT_UNI_STREAM, + }; + use crate::{ + control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, + CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res, + }; struct Test { conn_c: Connection, @@ -397,7 +402,8 @@ mod tests { let mut t = Test::new(StreamType::UniDi, Role::Server); t.decode( - &[H3_FRAME_TYPE_HEADERS], // this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which is not aallowed on the server side. + &[H3_FRAME_TYPE_HEADERS], /* this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which + * is not aallowed on the server side. 
*/ false, &Err(Error::HttpStreamCreation), true, @@ -413,7 +419,8 @@ mod tests { let mut t = Test::new(StreamType::UniDi, Role::Client); t.decode( - &[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], // this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH + &[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], /* this is the same as a + * HTTP3_UNI_STREAM_TYPE_PUSH */ false, &Ok(( ReceiveOutput::NewStream(NewStreamType::Push(0xaaaa_aaaa)), diff --git a/neqo-http3/tests/httpconn.rs b/neqo-http3/tests/httpconn.rs index fc49851e5b..a0b2bcdb80 100644 --- a/neqo-http3/tests/httpconn.rs +++ b/neqo-http3/tests/httpconn.rs @@ -6,6 +6,11 @@ #![allow(unused_assignments)] +use std::{ + mem, + time::{Duration, Instant}, +}; + use neqo_common::{event::Provider, qtrace, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_http3::{ @@ -13,8 +18,6 @@ use neqo_http3::{ Http3ServerEvent, Http3State, Priority, }; use neqo_transport::{ConnectionError, ConnectionParameters, Error, Output, StreamType}; -use std::mem; -use std::time::{Duration, Instant}; use test_fixture::*; const RESPONSE_DATA: &[u8] = &[0x61, 0x62, 0x63]; @@ -96,7 +99,7 @@ fn connect_peers(hconn_c: &mut Http3Client, hconn_s: &mut Http3Server) -> Option let out = hconn_c.process(None, now()); // Initial let out = hconn_s.process(out.as_dgram_ref(), now()); // Initial + Handshake let out = hconn_c.process(out.as_dgram_ref(), now()); // ACK - mem::drop(hconn_s.process(out.as_dgram_ref(), now())); //consume ACK + mem::drop(hconn_s.process(out.as_dgram_ref(), now())); // consume ACK let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); hconn_c.authenticated(AuthenticationStatus::Ok, now()); @@ -126,7 +129,7 @@ fn connect_peers_with_network_propagation_delay( now += net_delay; let out = hconn_c.process(out.as_dgram_ref(), now); // ACK now += net_delay; - let out = hconn_s.process(out.as_dgram_ref(), now); //consume ACK + let out = 
hconn_s.process(out.as_dgram_ref(), now); // consume ACK assert!(out.dgram().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); diff --git a/neqo-http3/tests/priority.rs b/neqo-http3/tests/priority.rs index 4ecd2e7a40..cdec161058 100644 --- a/neqo-http3/tests/priority.rs +++ b/neqo-http3/tests/priority.rs @@ -4,14 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use neqo_common::event::Provider; +use std::time::Instant; +use neqo_common::event::Provider; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ Header, Http3Client, Http3ClientEvent, Http3Server, Http3ServerEvent, Http3State, Priority, }; - -use std::time::Instant; use test_fixture::*; fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index fb82350dd3..4e943d86cb 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -4,6 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, rc::Rc}; + use neqo_common::{event::Provider, Header}; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ @@ -12,8 +14,6 @@ use neqo_http3::{ WebTransportSessionAcceptAction, }; use neqo_transport::{StreamId, StreamType}; -use std::cell::RefCell; -use std::rc::Rc; use test_fixture::{ addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index 254b953f22..b1dae43c9c 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -7,14 +7,6 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use neqo_common::{event::Provider, hex, Datagram, IpTos}; -use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; -use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; -use neqo_transport::{ - Connection, ConnectionError, ConnectionEvent, ConnectionParameters, EmptyConnectionIdGenerator, - Error, Output, State, StreamId, StreamType, -}; - use std::{ cell::RefCell, cmp::min, @@ -31,6 +23,14 @@ use std::{ thread, time::{Duration, Instant}, }; + +use neqo_common::{event::Provider, hex, Datagram, IpTos}; +use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; +use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; +use neqo_transport::{ + Connection, ConnectionError, ConnectionEvent, ConnectionParameters, EmptyConnectionIdGenerator, + Error, Output, State, StreamId, StreamType, +}; use structopt::StructOpt; #[derive(Debug, StructOpt, Clone)] @@ -560,7 +560,8 @@ fn test_h3(nctx: &NetworkCtx, peer: &Peer, client: Connection, test: &Test) -> R } if *test == Test::D { - // Send another request, when the first one was send we probably did not have the peer's qpack parameter. + // Send another request, when the first one was send we probably did not have the peer's + // qpack parameter. 
let client_stream_id = hc .h3 .fetch( diff --git a/neqo-qpack/src/decoder.rs b/neqo-qpack/src/decoder.rs index 5b3b93dcee..2119db0256 100644 --- a/neqo-qpack/src/decoder.rs +++ b/neqo-qpack/src/decoder.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::convert::TryFrom; + +use neqo_common::{qdebug, Header}; +use neqo_transport::{Connection, StreamId}; + use crate::{ decoder_instructions::DecoderInstruction, encoder_instructions::{DecodedEncoderInstruction, EncoderInstructionReader}, @@ -14,9 +19,6 @@ use crate::{ table::HeaderTable, Error, QpackSettings, Res, }; -use neqo_common::{qdebug, Header}; -use neqo_transport::{Connection, StreamId}; -use std::convert::TryFrom; pub const QPACK_UNI_STREAM_TYPE_DECODER: u64 = 0x3; @@ -30,12 +32,13 @@ pub struct QPackDecoder { local_stream_id: Option, max_table_size: u64, max_blocked_streams: usize, - blocked_streams: Vec<(StreamId, u64)>, //stream_id and requested inserts count. + blocked_streams: Vec<(StreamId, u64)>, // stream_id and requested inserts count. stats: Stats, } impl QPackDecoder { /// # Panics + /// /// If settings include invalid values. #[must_use] pub fn new(qpack_settings: &QpackSettings) -> Self { @@ -67,6 +70,7 @@ impl QPackDecoder { } /// # Panics + /// /// If the number of blocked streams is too large. #[must_use] pub fn get_blocked_streams(&self) -> u16 { @@ -74,7 +78,9 @@ impl QPackDecoder { } /// returns a list of unblocked streams + /// /// # Errors + /// /// May return: `ClosedCriticalStream` if stream has been closed or `EncoderStream` /// in case of any other transport error. pub fn receive(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res> { @@ -164,8 +170,11 @@ impl QPackDecoder { } /// # Errors + /// /// May return an error in case of any transport error. TODO: define transport errors. + /// /// # Panics + /// /// Never, but rust doesn't know that. 
#[allow(clippy::map_err_ignore)] pub fn send(&mut self, conn: &mut Connection) -> Res<()> { @@ -186,6 +195,7 @@ impl QPackDecoder { } /// # Errors + /// /// May return `DecompressionFailed` if header block is incorrect or incomplete. pub fn refers_dynamic_table(&self, buf: &[u8]) -> Res { HeaderDecoder::new(buf).refers_dynamic_table(self.max_entries, self.table.base()) @@ -193,9 +203,13 @@ impl QPackDecoder { /// This function returns None if the stream is blocked waiting for table insertions. /// 'buf' must contain the complete header block. + /// /// # Errors + /// /// May return `DecompressionFailed` if header block is incorrect or incomplete. + /// /// # Panics + /// /// When there is a programming error. pub fn decode_header_block( &mut self, @@ -236,6 +250,7 @@ impl QPackDecoder { } /// # Panics + /// /// When a stream has already been added. pub fn add_send_stream(&mut self, stream_id: StreamId) { assert!( @@ -272,13 +287,15 @@ fn map_error(err: &Error) -> Error { #[cfg(test)] mod tests { - use super::{Connection, Error, QPackDecoder, Res}; - use crate::QpackSettings; + use std::{convert::TryFrom, mem}; + use neqo_common::Header; use neqo_transport::{StreamId, StreamType}; - use std::{convert::TryFrom, mem}; use test_fixture::now; + use super::{Connection, Error, QPackDecoder, Res}; + use crate::QpackSettings; + const STREAM_0: StreamId = StreamId::new(0); struct TestDecoder { @@ -434,7 +451,8 @@ mod tests { ); } - // this test tests header decoding, the header acks command and the insert count increment command. + // this test tests header decoding, the header acks command and the insert count increment + // command. #[test] fn test_duplicate() { let mut decoder = connect(); @@ -467,8 +485,8 @@ mod tests { fn test_encode_incr_encode_header_ack_some() { // 1. Decoder receives an instruction (header and value both as literal) // 2. Decoder process the instruction and sends an increment instruction. - // 3. 
Decoder receives another two instruction (header and value both as literal) and - // a header block. + // 3. Decoder receives another two instruction (header and value both as literal) and a + // header block. // 4. Now it sends only a header ack and an increment instruction with increment==1. let headers = vec![ Header::new("my-headera", "my-valuea"), @@ -504,8 +522,8 @@ mod tests { fn test_encode_incr_encode_header_ack_all() { // 1. Decoder receives an instruction (header and value both as literal) // 2. Decoder process the instruction and sends an increment instruction. - // 3. Decoder receives another instruction (header and value both as literal) and - // a header block. + // 3. Decoder receives another instruction (header and value both as literal) and a header + // block. // 4. Now it sends only a header ack. let headers = vec![ Header::new("my-headera", "my-valuea"), @@ -604,7 +622,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -683,7 +702,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], diff --git a/neqo-qpack/src/decoder_instructions.rs b/neqo-qpack/src/decoder_instructions.rs index eb8a331f3a..029cd61db6 100644 --- a/neqo-qpack/src/decoder_instructions.rs +++ b/neqo-qpack/src/decoder_instructions.rs @@ -4,15 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::prefix::{ - DECODER_HEADER_ACK, DECODER_INSERT_COUNT_INCREMENT, DECODER_STREAM_CANCELLATION, -}; -use crate::qpack_send_buf::QpackData; -use crate::reader::{IntReader, ReadByte}; -use crate::Res; +use std::mem; + use neqo_common::{qdebug, qtrace}; use neqo_transport::StreamId; -use std::mem; + +use crate::{ + prefix::{DECODER_HEADER_ACK, DECODER_INSERT_COUNT_INCREMENT, DECODER_STREAM_CANCELLATION}, + qpack_send_buf::QpackData, + reader::{IntReader, ReadByte}, + Res, +}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum DecoderInstruction { @@ -81,10 +83,11 @@ impl DecoderInstructionReader { } } - /// ### Errors - /// 1) `NeedMoreData` if the reader needs more data - /// 2) `ClosedCriticalStream` - /// 3) other errors will be translated to `DecoderStream` by the caller of this function. + /// # Errors + /// + /// 1) `NeedMoreData` if the reader needs more data + /// 2) `ClosedCriticalStream` + /// 3) other errors will be translated to `DecoderStream` by the caller of this function. pub fn read_instructions(&mut self, recv: &mut R) -> Res { qdebug!([self], "read a new instraction"); loop { @@ -137,11 +140,11 @@ impl DecoderInstructionReader { #[cfg(test)] mod test { - use super::{DecoderInstruction, DecoderInstructionReader, QpackData}; - use crate::reader::test_receiver::TestReceiver; - use crate::Error; use neqo_transport::StreamId; + use super::{DecoderInstruction, DecoderInstructionReader, QpackData}; + use crate::{reader::test_receiver::TestReceiver, Error}; + fn test_encoding_decoding(instruction: DecoderInstruction) { let mut buf = QpackData::default(); instruction.marshal(&mut buf); diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index 9893229dbc..f53cf51d85 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -4,19 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::decoder_instructions::{DecoderInstruction, DecoderInstructionReader}; -use crate::encoder_instructions::EncoderInstruction; -use crate::header_block::HeaderEncoder; -use crate::qlog; -use crate::qpack_send_buf::QpackData; -use crate::reader::ReceiverConnWrapper; -use crate::stats::Stats; -use crate::table::{HeaderTable, LookupResult, ADDITIONAL_TABLE_ENTRY_SIZE}; -use crate::{Error, QpackSettings, Res}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + convert::TryFrom, +}; + use neqo_common::{qdebug, qerror, qlog::NeqoQlog, qtrace, Header}; use neqo_transport::{Connection, Error as TransportError, StreamId}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; + +use crate::{ + decoder_instructions::{DecoderInstruction, DecoderInstructionReader}, + encoder_instructions::EncoderInstruction, + header_block::HeaderEncoder, + qlog, + qpack_send_buf::QpackData, + reader::ReceiverConnWrapper, + stats::Stats, + table::{HeaderTable, LookupResult, ADDITIONAL_TABLE_ENTRY_SIZE}, + Error, QpackSettings, Res, +}; pub const QPACK_UNI_STREAM_TYPE_ENCODER: u64 = 0x2; @@ -45,9 +51,9 @@ pub struct QPackEncoder { local_stream: LocalStreamState, max_blocked_streams: u16, // Remember header blocks that are referring to dynamic table. - // There can be multiple header blocks in one stream, headers, trailer, push stream request, etc. - // This HashMap maps a stream ID to a list of header blocks. Each header block is a list of - // referenced dynamic table entries. + // There can be multiple header blocks in one stream, headers, trailer, push stream request, + // etc. This HashMap maps a stream ID to a list of header blocks. Each header block is a + // list of referenced dynamic table entries. unacked_header_blocks: HashMap>>, blocked_stream_cnt: u16, use_huffman: bool, @@ -75,7 +81,9 @@ impl QPackEncoder { /// This function is use for setting encoders table max capacity. 
The value is received as /// a `SETTINGS_QPACK_MAX_TABLE_CAPACITY` setting parameter. + /// /// # Errors + /// /// `EncoderStream` if value is too big. /// `ChangeCapacity` if table capacity cannot be reduced. pub fn set_max_capacity(&mut self, cap: u64) -> Res<()> { @@ -103,7 +111,9 @@ impl QPackEncoder { /// This function is use for setting encoders max blocked streams. The value is received as /// a `SETTINGS_QPACK_BLOCKED_STREAMS` setting parameter. + /// /// # Errors + /// /// `EncoderStream` if value is too big. pub fn set_max_blocked_streams(&mut self, blocked_streams: u64) -> Res<()> { self.max_blocked_streams = u16::try_from(blocked_streams).or(Err(Error::EncoderStream))?; @@ -111,7 +121,9 @@ impl QPackEncoder { } /// Reads decoder instructions. + /// /// # Errors + /// /// May return: `ClosedCriticalStream` if stream has been closed or `DecoderStream` /// in case of any other transport error. pub fn receive(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> { @@ -221,14 +233,20 @@ impl QPackEncoder { } } - /// Inserts a new entry into a table and sends the corresponding instruction to a peer. An entry is added only - /// if it is possible to send the corresponding instruction immediately, i.e. the encoder stream is not - /// blocked by the flow control (or stream internal buffer(this is very unlikely)). - /// ### Errors + /// Inserts a new entry into a table and sends the corresponding instruction to a peer. An entry + /// is added only if it is possible to send the corresponding instruction immediately, i.e. + /// the encoder stream is not blocked by the flow control (or stream internal buffer(this is + /// very unlikely)). + /// + /// # Errors + /// /// `EncoderStreamBlocked` if the encoder stream is blocked by the flow control. /// `DynamicTableFull` if the dynamic table does not have enough space for the entry. - /// The function can return transport errors: `InvalidStreamId`, `InvalidInput` and `FinalSizeError`. 
+ /// The function can return transport errors: `InvalidStreamId`, `InvalidInput` and + /// `FinalSizeError`. + /// /// # Panics + /// /// When the insertion fails (it should not). pub fn send_and_insert( &mut self, @@ -279,7 +297,8 @@ impl QPackEncoder { stream_id: StreamId, ) -> Res<()> { if let Some(cap) = self.next_capacity { - // Check if it is possible to reduce the capacity, e.g. if enough space can be make free for the reduction. + // Check if it is possible to reduce the capacity, e.g. if enough space can be make free + // for the reduction. if cap < self.table.capacity() && !self.table.can_evict_to(cap) { return Err(Error::DynamicTableFull); } @@ -302,7 +321,9 @@ impl QPackEncoder { } /// Sends any qpack encoder instructions. + /// /// # Errors + /// /// returns `EncoderStream` in case of an error. pub fn send_encoder_updates(&mut self, conn: &mut Connection) -> Res<()> { match self.local_stream { @@ -338,10 +359,14 @@ impl QPackEncoder { } /// Encodes headers + /// /// # Errors + /// /// `ClosedCriticalStream` if the encoder stream is closed. /// `InternalError` if an unexpected error occurred. + /// /// # Panics + /// /// If there is a programming error. pub fn encode_header_block( &mut self, @@ -358,11 +383,9 @@ impl QPackEncoder { // to write to the encoder stream AND if it can't uses // literal instructions. // The errors can be: - // 1) `EncoderStreamBlocked` - this is an error that - // can occur. + // 1) `EncoderStreamBlocked` - this is an error that can occur. // 2) `InternalError` - this is unexpected error. - // 3) `ClosedCriticalStream` - this is error that should - // close the HTTP/3 session. + // 3) `ClosedCriticalStream` - this is error that should close the HTTP/3 session. // The last 2 errors are ignored here and will be picked up // by the main loop. 
encoder_blocked = true; @@ -406,8 +429,9 @@ impl QPackEncoder { self.table.add_ref(index); } } else if can_block && !encoder_blocked { - // Insert using an InsertWithNameLiteral instruction. This entry name does not match any name in the - // tables therefore we cannot use any other instruction. + // Insert using an InsertWithNameLiteral instruction. This entry name does not match + // any name in the tables therefore we cannot use any other + // instruction. if let Ok(index) = self.send_and_insert(conn, &name, &value) { encoded_h.encode_indexed_dynamic(index); ref_entries.insert(index); @@ -417,16 +441,15 @@ impl QPackEncoder { // to write to the encoder stream AND if it can't uses // literal instructions. // The errors can be: - // 1) `EncoderStreamBlocked` - this is an error that - // can occur. - // 2) `DynamicTableFull` - this is an error that - // can occur. + // 1) `EncoderStreamBlocked` - this is an error that can occur. + // 2) `DynamicTableFull` - this is an error that can occur. // 3) `InternalError` - this is unexpected error. - // 4) `ClosedCriticalStream` - this is error that should - // close the HTTP/3 session. + // 4) `ClosedCriticalStream` - this is error that should close the HTTP/3 + // session. // The last 2 errors are ignored here and will be picked up // by the main loop. - // As soon as one of the instructions cannot be written or the table is full, do not try again. + // As soon as one of the instructions cannot be written or the table is full, do + // not try again. encoder_blocked = true; encoded_h.encode_literal_with_name_literal(&name, &value); } @@ -458,7 +481,9 @@ impl QPackEncoder { } /// Encoder stream has been created. Add the stream id. + /// /// # Panics + /// /// If a stream has already been added. 
pub fn add_send_stream(&mut self, stream_id: StreamId) { if self.local_stream == LocalStreamState::NoStream { @@ -512,12 +537,14 @@ fn map_stream_send_atomic_error(err: &TransportError) -> Error { #[cfg(test)] mod tests { - use super::{Connection, Error, Header, QPackEncoder, Res}; - use crate::QpackSettings; - use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use std::mem; + + use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use test_fixture::{default_client, default_server, handshake, new_server, now, DEFAULT_ALPN}; + use super::{Connection, Error, Header, QPackEncoder, Res}; + use crate::QpackSettings; + struct TestEncoder { encoder: QPackEncoder, send_stream_id: StreamId, @@ -529,7 +556,8 @@ mod tests { impl TestEncoder { pub fn change_capacity(&mut self, capacity: u64) -> Res<()> { self.encoder.set_max_capacity(capacity).unwrap(); - // We will try to really change the table only when we send the change capacity instruction. + // We will try to really change the table only when we send the change capacity + // instruction. 
self.encoder.send_encoder_updates(&mut self.conn) } @@ -722,7 +750,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -796,7 +825,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -870,7 +900,8 @@ mod tests { assert!(res.is_ok()); encoder.send_instructions(HEADER_CONTENT_LENGTH_VALUE_1_NAME_LITERAL); - // insert "content-length: 12345 which will fail because the ntry in the table cannot be evicted. + // insert "content-length: 12345 which will fail because the ntry in the table cannot be + // evicted. let res = encoder .encoder @@ -921,7 +952,8 @@ mod tests { assert_eq!(&buf[..], ENCODE_INDEXED_REF_DYNAMIC); encoder.send_instructions(&[]); - // insert "content-length: 12345 which will fail because the entry in the table cannot be evicted + // insert "content-length: 12345 which will fail because the entry in the table cannot be + // evicted let res = encoder .encoder @@ -1004,8 +1036,8 @@ mod tests { encoder.send_instructions(&[]); - // The next one will not use the dynamic entry because it is exceeding the max_blocked_streams - // limit. + // The next one will not use the dynamic entry because it is exceeding the + // max_blocked_streams limit. 
let buf = encoder.encoder.encode_header_block( &mut encoder.conn, &[Header::new("content-length", "1234")], @@ -1099,7 +1131,8 @@ mod tests { assert_eq!(encoder.encoder.blocked_stream_cnt(), 1); - // The next one will not create a new entry because the encoder is on max_blocked_streams limit. + // The next one will not create a new entry because the encoder is on max_blocked_streams + // limit. let buf = encoder.encoder.encode_header_block( &mut encoder.conn, &[Header::new("name2", "value2")], @@ -1274,8 +1307,8 @@ mod tests { assert_eq!(encoder.encoder.blocked_stream_cnt(), 2); // receive a stream cancel for the first stream. - // This will remove the first stream as blocking but it will not mark the instruction as acked. - // and the second steam will still be blocking. + // This will remove the first stream as blocking but it will not mark the instruction as + // acked. and the second steam will still be blocking. recv_instruction(&mut encoder, STREAM_CANCELED_ID_1); // The stream is not blocking anymore because header ack also acks the instruction. @@ -1507,9 +1540,10 @@ mod tests { assert!(encoder.encoder.set_max_capacity(1000).is_ok()); encoder.send_instructions(CAP_INSTRUCTION_1000); - // Encode a header block with 2 headers. The first header will be added to the dynamic table. - // The second will not be added to the dynamic table, because the corresponding instruction - // cannot be written immediately due to the flow control limit. + // Encode a header block with 2 headers. The first header will be added to the dynamic + // table. The second will not be added to the dynamic table, because the + // corresponding instruction cannot be written immediately due to the flow control + // limit. let buf1 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ @@ -1524,7 +1558,8 @@ mod tests { // Assert that the second header is encoded as a literal with a name literal assert_eq!(buf1[3] & 0xf0, 0x20); - // Try to encode another header block. 
Here both headers will be encoded as a literal with a name literal + // Try to encode another header block. Here both headers will be encoded as a literal with a + // name literal let buf2 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ @@ -1542,8 +1577,8 @@ mod tests { let out = encoder.peer_conn.process(None, now()); mem::drop(encoder.conn.process(out.as_dgram_ref(), now())); - // Try writing a new header block. Now, headers will be added to the dynamic table again, because - // instructions can be sent. + // Try writing a new header block. Now, headers will be added to the dynamic table again, + // because instructions can be sent. let buf3 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ diff --git a/neqo-qpack/src/encoder_instructions.rs b/neqo-qpack/src/encoder_instructions.rs index 93be06bf7f..5564af969e 100644 --- a/neqo-qpack/src/encoder_instructions.rs +++ b/neqo-qpack/src/encoder_instructions.rs @@ -4,16 +4,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::prefix::{ - ENCODER_CAPACITY, ENCODER_DUPLICATE, ENCODER_INSERT_WITH_NAME_LITERAL, - ENCODER_INSERT_WITH_NAME_REF_DYNAMIC, ENCODER_INSERT_WITH_NAME_REF_STATIC, NO_PREFIX, -}; -use crate::qpack_send_buf::QpackData; -use crate::reader::{IntReader, LiteralReader, ReadByte, Reader}; -use crate::Res; -use neqo_common::{qdebug, qtrace}; use std::mem; +use neqo_common::{qdebug, qtrace}; + +use crate::{ + prefix::{ + ENCODER_CAPACITY, ENCODER_DUPLICATE, ENCODER_INSERT_WITH_NAME_LITERAL, + ENCODER_INSERT_WITH_NAME_REF_DYNAMIC, ENCODER_INSERT_WITH_NAME_REF_STATIC, NO_PREFIX, + }, + qpack_send_buf::QpackData, + reader::{IntReader, LiteralReader, ReadByte, Reader}, + Res, +}; + // The encoder only uses InsertWithNameLiteral, therefore clippy is complaining about dead_code. // We may decide to use othe instruction in the future. // All instructions are used for testing, therefore they are defined. 
@@ -183,10 +187,11 @@ impl EncoderInstructionReader { Ok(()) } - /// ### Errors - /// 1) `NeedMoreData` if the reader needs more data - /// 2) `ClosedCriticalStream` - /// 3) other errors will be translated to `EncoderStream` by the caller of this function. + /// # Errors + /// + /// 1) `NeedMoreData` if the reader needs more data + /// 2) `ClosedCriticalStream` + /// 3) other errors will be translated to `EncoderStream` by the caller of this function. pub fn read_instructions( &mut self, recv: &mut T, @@ -265,8 +270,7 @@ impl EncoderInstructionReader { mod test { use super::{EncoderInstruction, EncoderInstructionReader, QpackData}; - use crate::reader::test_receiver::TestReceiver; - use crate::Error; + use crate::{reader::test_receiver::TestReceiver, Error}; fn test_encoding_decoding(instruction: &EncoderInstruction, use_huffman: bool) { let mut buf = QpackData::default(); diff --git a/neqo-qpack/src/header_block.rs b/neqo-qpack/src/header_block.rs index 3b37db120e..2e15bdf1fe 100644 --- a/neqo-qpack/src/header_block.rs +++ b/neqo-qpack/src/header_block.rs @@ -4,6 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + mem, + ops::{Deref, Div}, +}; + +use neqo_common::{qtrace, Header}; + use crate::{ prefix::{ BASE_PREFIX_NEGATIVE, BASE_PREFIX_POSITIVE, HEADER_FIELD_INDEX_DYNAMIC, @@ -17,11 +24,6 @@ use crate::{ table::HeaderTable, Error, Res, }; -use neqo_common::{qtrace, Header}; -use std::{ - mem, - ops::{Deref, Div}, -}; #[derive(Default, Debug, PartialEq)] pub struct HeaderEncoder { diff --git a/neqo-qpack/src/huffman.rs b/neqo-qpack/src/huffman.rs index 31657ca826..283a501b32 100644 --- a/neqo-qpack/src/huffman.rs +++ b/neqo-qpack/src/huffman.rs @@ -4,11 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT}; -use crate::huffman_table::HUFFMAN_TABLE; -use crate::{Error, Res}; use std::convert::TryFrom; +use crate::{ + huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT}, + huffman_table::HUFFMAN_TABLE, + Error, Res, +}; + struct BitReader<'a> { input: &'a [u8], offset: usize, @@ -65,9 +68,14 @@ impl<'a> BitReader<'a> { } /// Decodes huffman encoded input. +/// /// # Errors -/// This function may return `HuffmanDecompressionFailed` if `input` is not a correct huffman-encoded array of bits. +/// +/// This function may return `HuffmanDecompressionFailed` if `input` is not a correct +/// huffman-encoded array of bits. +/// /// # Panics +/// /// Never, but rust can't know that. pub fn decode_huffman(input: &[u8]) -> Res> { let mut reader = BitReader::new(input); @@ -109,6 +117,7 @@ fn decode_character(reader: &mut BitReader) -> Res> { } /// # Panics +/// /// Never, but rust doesn't know that. #[must_use] pub fn encode_huffman(input: &[u8]) -> Vec { diff --git a/neqo-qpack/src/huffman_decode_helper.rs b/neqo-qpack/src/huffman_decode_helper.rs index 7589ebd11a..122226dd1f 100644 --- a/neqo-qpack/src/huffman_decode_helper.rs +++ b/neqo-qpack/src/huffman_decode_helper.rs @@ -4,10 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::huffman_table::HUFFMAN_TABLE; -use lazy_static::lazy_static; use std::convert::TryFrom; +use lazy_static::lazy_static; + +use crate::huffman_table::HUFFMAN_TABLE; + pub struct HuffmanDecoderNode { pub next: [Option>; 2], pub value: Option, diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index 86ccb11ff8..3f9c7b81f7 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -6,7 +6,8 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -// This is because of Encoder and Decoder structs. TODO: think about a better namings for crate and structs. 
+// This is because of Encoder and Decoder structs. TODO: think about a better namings for crate and +// structs. #![allow(clippy::module_name_repetitions)] pub mod decoder; @@ -47,7 +48,8 @@ pub enum Error { InternalError(u16), // These are internal errors, they will be transformed into one of the above. - NeedMoreData, // Return when an input stream does not have more data that a decoder needs.(It does not mean that a stream is closed.) + NeedMoreData, /* Return when an input stream does not have more data that a decoder + * needs.(It does not mean that a stream is closed.) */ HeaderLookup, HuffmanDecompressionFailed, BadUtf8, @@ -78,7 +80,8 @@ impl Error { } /// # Errors - /// Any error is mapped to the indicated type. + /// + /// Any error is mapped to the indicated type. fn map_error(r: Result, err: Self) -> Result { r.map_err(|e| { if matches!(e, Self::ClosedCriticalStream) { diff --git a/neqo-qpack/src/prefix.rs b/neqo-qpack/src/prefix.rs index 5019dd7d6d..0085de0df9 100644 --- a/neqo-qpack/src/prefix.rs +++ b/neqo-qpack/src/prefix.rs @@ -16,9 +16,10 @@ pub struct Prefix { impl Prefix { pub fn new(prefix: u8, len: u8) -> Self { // len should never be larger than 7. - // Most of Prefixes are instantiated as consts bellow. The only place where this construcrtor is used - // is in tests and when literals are encoded and the Huffman bit is added to one of the consts bellow. - // create_prefix guaranty that all const have len < 7 so we can safely assert that len is <=7. + // Most of Prefixes are instantiated as consts bellow. The only place where this + // construcrtor is used is in tests and when literals are encoded and the Huffman + // bit is added to one of the consts bellow. create_prefix guaranty that all const + // have len < 7 so we can safely assert that len is <=7. 
assert!(len <= 7); assert!((len == 0) || (prefix & ((1 << (8 - len)) - 1) == 0)); Self { @@ -110,7 +111,7 @@ create_prefix!(ENCODER_INSERT_WITH_NAME_LITERAL, 0x40, 2); create_prefix!(ENCODER_DUPLICATE, 0x00, 3); //===================================================================== -//Header block encoding prefixes +// Header block encoding prefixes //===================================================================== create_prefix!(BASE_PREFIX_POSITIVE, 0x00, 1); @@ -137,5 +138,6 @@ create_prefix!(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC, 0x40, 4, 0xD0); create_prefix!(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST, 0x00, 5, 0xF0); // | 0 | 0 | 1 | N | H | Index(3+) | -// N is ignored and H is not relevant for decoding this prefix, therefore the mask is 1110 0000 = 0xE0 +// N is ignored and H is not relevant for decoding this prefix, therefore the mask is 1110 0000 = +// 0xE0 create_prefix!(HEADER_FIELD_LITERAL_NAME_LITERAL, 0x20, 4, 0xE0); diff --git a/neqo-qpack/src/qlog.rs b/neqo-qpack/src/qlog.rs index c6ae6b5d0f..8d48efb0aa 100644 --- a/neqo-qpack/src/qlog.rs +++ b/neqo-qpack/src/qlog.rs @@ -6,11 +6,9 @@ // Functions that handle capturing QLOG traces. -use neqo_common::hex; -use neqo_common::qlog::NeqoQlog; +use neqo_common::{hex, qlog::NeqoQlog}; use qlog::events::{ - qpack::QpackInstructionTypeName, - qpack::{QPackInstruction, QpackInstructionParsed}, + qpack::{QPackInstruction, QpackInstructionParsed, QpackInstructionTypeName}, EventData, RawInfo, }; diff --git a/neqo-qpack/src/qpack_send_buf.rs b/neqo-qpack/src/qpack_send_buf.rs index 4fbdbf12bd..a443859081 100644 --- a/neqo-qpack/src/qpack_send_buf.rs +++ b/neqo-qpack/src/qpack_send_buf.rs @@ -4,11 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::huffman::encode_huffman; -use crate::prefix::Prefix; +use std::{convert::TryFrom, ops::Deref}; + use neqo_common::Encoder; -use std::convert::TryFrom; -use std::ops::Deref; + +use crate::{huffman::encode_huffman, prefix::Prefix}; #[derive(Default, Debug, PartialEq)] pub(crate) struct QpackData { diff --git a/neqo-qpack/src/reader.rs b/neqo-qpack/src/reader.rs index f47471005d..ff9c42b246 100644 --- a/neqo-qpack/src/reader.rs +++ b/neqo-qpack/src/reader.rs @@ -4,22 +4,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{huffman::decode_huffman, prefix::Prefix, Error, Res}; +use std::{convert::TryInto, mem, str}; + use neqo_common::{qdebug, qerror}; use neqo_transport::{Connection, StreamId}; -use std::{convert::TryInto, mem, str}; + +use crate::{huffman::decode_huffman, prefix::Prefix, Error, Res}; pub trait ReadByte { /// # Errors - /// Return error occurred while reading a byte. - /// The exact error depends on trait implementation. + /// + /// Return error occurred while reading a byte. + /// The exact error depends on trait implementation. fn read_byte(&mut self) -> Res; } pub trait Reader { /// # Errors - /// Return error occurred while reading date into a buffer. - /// The exact error depends on trait implementation. + /// + /// Return error occurred while reading date into a buffer. + /// The exact error depends on trait implementation. fn read(&mut self, buf: &mut [u8]) -> Res; } @@ -154,7 +158,9 @@ pub struct IntReader { impl IntReader { /// `IntReader` is created by suppling the first byte anf prefix length. /// A varint may take only one byte, In that case already the first by has set state to done. + /// /// # Panics + /// /// When `prefix_len` is 8 or larger. #[must_use] pub fn new(first_byte: u8, prefix_len: u8) -> Self { @@ -174,6 +180,7 @@ impl IntReader { } /// # Panics + /// /// Never, but rust doesn't know that. 
#[must_use] pub fn make(first_byte: u8, prefixes: &[Prefix]) -> Self { @@ -187,7 +194,9 @@ impl IntReader { /// This function reads bytes until the varint is decoded or until stream/buffer does not /// have any more date. + /// /// # Errors + /// /// Possible errors are: /// 1) `NeedMoreData` if the reader needs more data, /// 2) `IntegerOverflow`, @@ -245,7 +254,9 @@ impl LiteralReader { /// Creates `LiteralReader` with the first byte. This constructor is always used /// when a litreral has a prefix. /// For literals without a prefix please use the default constructor. + /// /// # Panics + /// /// If `prefix_len` is 8 or more. #[must_use] pub fn new_with_first_byte(first_byte: u8, prefix_len: u8) -> Self { @@ -261,13 +272,17 @@ impl LiteralReader { /// This function reads bytes until the literal is decoded or until stream/buffer does not /// have any more date ready. + /// /// # Errors + /// /// Possible errors are: /// 1) `NeedMoreData` if the reader needs more data, /// 2) `IntegerOverflow` /// 3) Any `ReadByte`'s error /// It returns value if reading the literal is done or None if it needs more data. + /// /// # Panics + /// /// When this object is complete. pub fn read(&mut self, s: &mut T) -> Res> { loop { @@ -309,7 +324,9 @@ impl LiteralReader { /// This is a helper function used only by `ReceiverBufferWrapper`, therefore it returns /// `DecompressionFailed` if any error happens. +/// /// # Errors +/// /// If an parsing error occurred, the function returns `BadUtf8`. 
pub fn parse_utf8(v: &[u8]) -> Res<&str> { str::from_utf8(v).map_err(|_| Error::BadUtf8) @@ -318,9 +335,10 @@ pub fn parse_utf8(v: &[u8]) -> Res<&str> { #[cfg(test)] pub(crate) mod test_receiver { - use super::{Error, ReadByte, Reader, Res}; use std::collections::VecDeque; + use super::{Error, ReadByte, Reader, Res}; + #[derive(Default)] pub struct TestReceiver { buf: VecDeque, @@ -358,11 +376,12 @@ pub(crate) mod test_receiver { #[cfg(test)] mod tests { + use test_receiver::TestReceiver; + use super::{ parse_utf8, str, test_receiver, Error, IntReader, LiteralReader, ReadByte, ReceiverBufferWrapper, Res, }; - use test_receiver::TestReceiver; const TEST_CASES_NUMBERS: [(&[u8], u8, u64); 7] = [ (&[0xEA], 3, 10), diff --git a/neqo-qpack/src/table.rs b/neqo-qpack/src/table.rs index cc9844ee27..7ce8572542 100644 --- a/neqo-qpack/src/table.rs +++ b/neqo-qpack/src/table.rs @@ -4,11 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::static_table::{StaticTableEntry, HEADER_STATIC_TABLE}; -use crate::{Error, Res}; +use std::{collections::VecDeque, convert::TryFrom}; + use neqo_common::qtrace; -use std::collections::VecDeque; -use std::convert::TryFrom; + +use crate::{ + static_table::{StaticTableEntry, HEADER_STATIC_TABLE}, + Error, Res, +}; pub const ADDITIONAL_TABLE_ENTRY_SIZE: usize = 32; @@ -106,9 +109,12 @@ impl HeaderTable { } /// Change the dynamic table capacity. - /// ### Errors + /// + /// # Errors + /// /// `ChangeCapacity` if table capacity cannot be reduced. - /// The table cannot be reduce if there are entries that are referred at the moment or their inserts are unacked. + /// The table cannot be reduce if there are entries that are referred at the moment or their + /// inserts are unacked. pub fn set_capacity(&mut self, cap: u64) -> Res<()> { qtrace!([self], "set capacity to {}", cap); if !self.evict_to(cap) { @@ -119,7 +125,9 @@ impl HeaderTable { } /// Get a static entry with `index`. 
- /// ### Errors + /// + /// # Errors + /// /// `HeaderLookup` if the index does not exist in the static table. pub fn get_static(index: u64) -> Res<&'static StaticTableEntry> { let inx = usize::try_from(index).or(Err(Error::HeaderLookup))?; @@ -151,7 +159,9 @@ impl HeaderTable { } /// Get a entry in the dynamic table. - /// ### Errors + /// + /// # Errors + /// /// `HeaderLookup` if entry does not exist. pub fn get_dynamic(&self, index: u64, base: u64, post: bool) -> Res<&DynamicTableEntry> { let inx = if post { @@ -186,8 +196,8 @@ impl HeaderTable { } /// Look for a header pair. - /// The function returns `LookupResult`: `index`, `static_table` (if it is a static table entry) and `value_matches` - /// (if the header value matches as well not only header name) + /// The function returns `LookupResult`: `index`, `static_table` (if it is a static table entry) + /// and `value_matches` (if the header value matches as well not only header name) pub fn lookup(&mut self, name: &[u8], value: &[u8], can_block: bool) -> Option { qtrace!( [self], @@ -280,9 +290,11 @@ impl HeaderTable { } /// Insert a new entry. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. + /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. pub fn insert(&mut self, name: &[u8], value: &[u8]) -> Res { qtrace!([self], "insert name={:?} value={:?}", name, value); let entry = DynamicTableEntry { @@ -304,9 +316,11 @@ impl HeaderTable { } /// Insert a new entry with the name refer to by a index to static or dynamic table. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. 
+ /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. /// `HeaderLookup` if the index dos not exits in the static/dynamic table. pub fn insert_with_name_ref( &mut self, @@ -336,9 +350,11 @@ impl HeaderTable { } /// Duplicate an entry. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. + /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. /// `HeaderLookup` if the index dos not exits in the static/dynamic table. pub fn duplicate(&mut self, index: u64) -> Res { qtrace!([self], "duplicate entry={}", index); @@ -355,7 +371,9 @@ impl HeaderTable { } /// Increment number of acknowledge entries. - /// ### Errors + /// + /// # Errors + /// /// `IncrementAck` if ack is greater than actual number of inserts. 
pub fn increment_acked(&mut self, increment: u64) -> Res<()> { qtrace!([self], "increment acked by {}", increment); diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index cabae35232..590e0d55db 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -27,9 +27,6 @@ use std::{ use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; use mio_extras::timer::{Builder, Timeout, Timer}; -use neqo_transport::ConnectionIdGenerator; -use structopt::StructOpt; - use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -40,8 +37,10 @@ use neqo_http3::{ }; use neqo_transport::{ server::ValidateAddress, tparams::PreferredAddress, CongestionControlAlgorithm, - ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, Version, + ConnectionIdGenerator, ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, + Version, }; +use structopt::StructOpt; use crate::old_https::Http09Server; diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index 61ebd53258..f254446695 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -7,14 +7,9 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use std::cell::RefCell; -use std::collections::HashMap; -use std::fmt::Display; -use std::path::PathBuf; -use std::rc::Rc; -use std::time::Instant; - -use regex::Regex; +use std::{ + cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, +}; use neqo_common::{event::Provider, hex, qdebug, Datagram}; use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay, Cipher}; @@ -23,6 +18,7 @@ use neqo_transport::{ server::{ActiveConnectionRef, Server, ValidateAddress}, ConnectionEvent, ConnectionIdGenerator, ConnectionParameters, Output, State, StreamId, }; +use regex::Regex; use super::{qns_read_response, 
Args, HttpServer}; diff --git a/neqo-transport/src/ackrate.rs b/neqo-transport/src/ackrate.rs index 6c4ae44f86..cf68f9021f 100644 --- a/neqo-transport/src/ackrate.rs +++ b/neqo-transport/src/ackrate.rs @@ -7,16 +7,14 @@ // Management of the peer's ack rate. #![deny(clippy::pedantic)] -use crate::connection::params::ACK_RATIO_SCALE; -use crate::frame::FRAME_TYPE_ACK_FREQUENCY; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; +use std::{cmp::max, convert::TryFrom, time::Duration}; use neqo_common::qtrace; -use std::cmp::max; -use std::convert::TryFrom; -use std::time::Duration; + +use crate::{ + connection::params::ACK_RATIO_SCALE, frame::FRAME_TYPE_ACK_FREQUENCY, packet::PacketBuilder, + recovery::RecoveryToken, stats::FrameStats, +}; #[derive(Debug, Clone)] pub struct AckRate { diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index fcb8106742..9105c89a54 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -6,22 +6,23 @@ // This file implements functions necessary for address validation. +use std::{ + convert::TryFrom, + net::{IpAddr, SocketAddr}, + time::{Duration, Instant}, +}; + use neqo_common::{qinfo, qtrace, Decoder, Encoder, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, selfencrypt::SelfEncrypt, }; - -use crate::cid::ConnectionId; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::{Error, Res}; - use smallvec::SmallVec; -use std::convert::TryFrom; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; + +use crate::{ + cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Error, + Res, +}; /// A prefix we add to Retry tokens to distinguish them from NEW_TOKEN tokens. 
const TOKEN_IDENTIFIER_RETRY: &[u8] = &[0x52, 0x65, 0x74, 0x72, 0x79]; @@ -460,9 +461,10 @@ impl NewTokenSender { #[cfg(test)] mod tests { - use super::NewTokenState; use neqo_common::Role; + use super::NewTokenState; + const ONE: &[u8] = &[1, 2, 3]; const TWO: &[u8] = &[4, 5]; diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index c1d8fd08a6..6f4a01d795 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -14,7 +14,6 @@ use std::{ }; use super::CongestionControl; - use crate::{ cc::MAX_DATAGRAM_SIZE, packet::PacketNumber, @@ -537,6 +536,14 @@ impl ClassicCongestionControl { #[cfg(test)] mod tests { + use std::{ + convert::TryFrom, + time::{Duration, Instant}, + }; + + use neqo_common::qinfo; + use test_fixture::now; + use super::{ ClassicCongestionControl, WindowAdjustment, CWND_INITIAL, CWND_MIN, PERSISTENT_CONG_THRESH, }; @@ -551,12 +558,6 @@ mod tests { rtt::RttEstimate, tracking::SentPacket, }; - use neqo_common::qinfo; - use std::{ - convert::TryFrom, - time::{Duration, Instant}, - }; - use test_fixture::now; const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); diff --git a/neqo-transport/src/cc/cubic.rs b/neqo-transport/src/cc/cubic.rs index 1a2928cdf7..c04a29b443 100644 --- a/neqo-transport/src/cc/cubic.rs +++ b/neqo-transport/src/cc/cubic.rs @@ -6,12 +6,15 @@ #![deny(clippy::pedantic)] -use std::fmt::{self, Display}; -use std::time::{Duration, Instant}; +use std::{ + convert::TryFrom, + fmt::{self, Display}, + time::{Duration, Instant}, +}; -use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE_F64}; use neqo_common::qtrace; -use std::convert::TryFrom; + +use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE_F64}; // CUBIC congestion control @@ -163,8 +166,8 @@ impl WindowAdjustment for Cubic { // of `MAX_DATAGRAM_SIZE` to match the increase of `target - cwnd / cwnd` as defined // in the specification (Sections 
4.4 and 4.5). // The amount of data required therefore reduces asymptotically as the target increases. - // If the target is not significantly higher than the congestion window, require a very large - // amount of acknowledged data (effectively block increases). + // If the target is not significantly higher than the congestion window, require a very + // large amount of acknowledged data (effectively block increases). let mut acked_to_increase = MAX_DATAGRAM_SIZE_F64 * curr_cwnd_f64 / (target_cwnd - curr_cwnd_f64).max(1.0); @@ -178,9 +181,10 @@ impl WindowAdjustment for Cubic { fn reduce_cwnd(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize) { let curr_cwnd_f64 = convert_to_f64(curr_cwnd); // Fast Convergence - // If congestion event occurs before the maximum congestion window before the last congestion event, - // we reduce the the maximum congestion window and thereby W_max. - // check cwnd + MAX_DATAGRAM_SIZE instead of cwnd because with cwnd in bytes, cwnd may be slightly off. + // If congestion event occurs before the maximum congestion window before the last + // congestion event, we reduce the the maximum congestion window and thereby W_max. + // check cwnd + MAX_DATAGRAM_SIZE instead of cwnd because with cwnd in bytes, cwnd may be + // slightly off. 
self.last_max_cwnd = if curr_cwnd_f64 + MAX_DATAGRAM_SIZE_F64 < self.last_max_cwnd { curr_cwnd_f64 * CUBIC_FAST_CONVERGENCE } else { diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index 0321ab1de5..a1a43bd157 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -7,15 +7,16 @@ // Congestion control #![deny(clippy::pedantic)] -use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; -use neqo_common::qlog::NeqoQlog; - use std::{ fmt::{Debug, Display}, str::FromStr, time::{Duration, Instant}, }; +use neqo_common::qlog::NeqoQlog; + +use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; + mod classic_cc; mod cubic; mod new_reno; diff --git a/neqo-transport/src/cc/new_reno.rs b/neqo-transport/src/cc/new_reno.rs index d34cdfbab9..e51b3d6cc0 100644 --- a/neqo-transport/src/cc/new_reno.rs +++ b/neqo-transport/src/cc/new_reno.rs @@ -7,10 +7,12 @@ // Congestion control #![deny(clippy::pedantic)] -use std::fmt::{self, Display}; +use std::{ + fmt::{self, Display}, + time::{Duration, Instant}, +}; use crate::cc::classic_cc::WindowAdjustment; -use std::time::{Duration, Instant}; #[derive(Debug, Default)] pub struct NewReno {} diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index b24f1fc118..0c82e47817 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -7,6 +7,14 @@ #![allow(clippy::cast_possible_truncation)] #![allow(clippy::cast_sign_loss)] +use std::{ + convert::TryFrom, + ops::Sub, + time::{Duration, Instant}, +}; + +use test_fixture::now; + use crate::{ cc::{ classic_cc::{ClassicCongestionControl, CWND_INITIAL}, @@ -20,12 +28,6 @@ use crate::{ rtt::RttEstimate, tracking::SentPacket, }; -use std::{ - convert::TryFrom, - ops::Sub, - time::{Duration, Instant}, -}; -use test_fixture::now; const RTT: Duration = Duration::from_millis(100); const RTT_ESTIMATE: RttEstimate = 
RttEstimate::from_duration(Duration::from_millis(100)); @@ -109,7 +111,7 @@ fn tcp_phase() { for _ in 0..num_tcp_increases { let cwnd_rtt_start = cubic.cwnd(); - //Expected acks during a period of RTT / CUBIC_ALPHA. + // Expected acks during a period of RTT / CUBIC_ALPHA. let acks = expected_tcp_acks(cwnd_rtt_start); // The time between acks if they are ideally paced over a RTT. let time_increase = RTT / u32::try_from(cwnd_rtt_start / MAX_DATAGRAM_SIZE).unwrap(); @@ -145,9 +147,10 @@ fn tcp_phase() { let expected_ack_tcp_increase = expected_tcp_acks(cwnd_rtt_start); assert!(num_acks < expected_ack_tcp_increase); - // This first increase after a TCP phase may be shorter than what it would take by a regular cubic phase, - // because of the proper byte counting and the credit it already had before entering this phase. Therefore - // We will perform another round and compare it to expected increase using the cubic equation. + // This first increase after a TCP phase may be shorter than what it would take by a regular + // cubic phase, because of the proper byte counting and the credit it already had before + // entering this phase. Therefore We will perform another round and compare it to expected + // increase using the cubic equation. 
let cwnd_rtt_start_after_tcp = cubic.cwnd(); let elapsed_time = now - start_time; @@ -167,12 +170,12 @@ fn tcp_phase() { let expected_ack_tcp_increase2 = expected_tcp_acks(cwnd_rtt_start_after_tcp); assert!(num_acks2 < expected_ack_tcp_increase2); - // The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be calculates from: - // W_cubic(elapsed_time + t_to_increase) - W_cubis(elapsed_time) = MAX_DATAGRAM_SIZE => - // CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL - - // CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL = MAX_DATAGRAM_SIZE => - // t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) - elapsed_time - // (t_to_increase is in seconds) + // The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be + // calculates from: W_cubic(elapsed_time + t_to_increase) - W_cubis(elapsed_time) = + // MAX_DATAGRAM_SIZE => CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE + + // CWND_INITIAL - CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL = + // MAX_DATAGRAM_SIZE => t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) - + // elapsed_time (t_to_increase is in seconds) // number of ack needed is t_to_increase / time_increase. let expected_ack_cubic_increase = ((((1.0 + CUBIC_C * (elapsed_time).as_secs_f64().powi(3)) / CUBIC_C).cbrt() @@ -180,15 +183,16 @@ fn tcp_phase() { / time_increase.as_secs_f64()) .ceil() as u64; // num_acks is very close to the calculated value. The exact value is hard to calculate - // because the proportional increase(i.e. curr_cwnd_f64 / (target - curr_cwnd_f64) * MAX_DATAGRAM_SIZE_F64) - // and the byte counting. + // because the proportional increase(i.e. curr_cwnd_f64 / (target - curr_cwnd_f64) * + // MAX_DATAGRAM_SIZE_F64) and the byte counting. 
assert_eq!(num_acks2, expected_ack_cubic_increase + 2); } #[test] fn cubic_phase() { let mut cubic = ClassicCongestionControl::new(Cubic::default()); - // Set last_max_cwnd to a higher number make sure that cc is the cubic phase (cwnd is calculated by the cubic equation). + // Set last_max_cwnd to a higher number make sure that cc is the cubic phase (cwnd is calculated + // by the cubic equation). cubic.set_last_max_cwnd(CWND_INITIAL_10_F64); // Set ssthresh to something small to make sure that cc is in the congection avoidance phase. cubic.set_ssthresh(1); @@ -205,7 +209,7 @@ fn cubic_phase() { let num_rtts_w_max = (k / RTT.as_secs_f64()).round() as u64; for _ in 0..num_rtts_w_max { let cwnd_rtt_start = cubic.cwnd(); - //Expected acks + // Expected acks let acks = cwnd_rtt_start / MAX_DATAGRAM_SIZE; let time_increase = RTT / u32::try_from(acks).unwrap(); for _ in 0..acks { @@ -264,7 +268,8 @@ fn congestion_event_congestion_avoidance() { // Set ssthresh to something small to make sure that cc is in the congection avoidance phase. cubic.set_ssthresh(1); - // Set last_max_cwnd to something smaller than cwnd so that the fast convergence is not triggered. + // Set last_max_cwnd to something smaller than cwnd so that the fast convergence is not + // triggered. 
cubic.set_last_max_cwnd(3.0 * MAX_DATAGRAM_SIZE_F64); _ = fill_cwnd(&mut cubic, 0, now()); diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index f86e87b953..a73844a755 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -7,6 +7,10 @@ // Congestion control #![deny(clippy::pedantic)] +use std::time::Duration; + +use test_fixture::now; + use crate::{ cc::{ new_reno::NewReno, ClassicCongestionControl, CongestionControl, CWND_INITIAL, @@ -17,9 +21,6 @@ use crate::{ tracking::SentPacket, }; -use std::time::Duration; -use test_fixture::now; - const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(98)); @@ -169,8 +170,8 @@ fn issue_1465() { cwnd_is_default(&cc); assert_eq!(cc.bytes_in_flight(), 3 * MAX_DATAGRAM_SIZE); - // advance one rtt to detect lost packet there this simplifies the timers, because on_packet_loss - // would only be called after RTO, but that is not relevant to the problem + // advance one rtt to detect lost packet there this simplifies the timers, because + // on_packet_loss would only be called after RTO, but that is not relevant to the problem now += RTT; cc.on_packets_lost(Some(now), None, PTO, &[p1]); diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index eefc3104a9..7096ae1874 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -6,15 +6,6 @@ // Representation and management of connection IDs. 
-use crate::{ - frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken, - stats::FrameStats, Error, Res, -}; - -use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder}; -use neqo_crypto::random; - -use smallvec::SmallVec; use std::{ borrow::Borrow, cell::{Ref, RefCell}, @@ -24,6 +15,15 @@ use std::{ rc::Rc, }; +use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder}; +use neqo_crypto::random; +use smallvec::SmallVec; + +use crate::{ + frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken, + stats::FrameStats, Error, Res, +}; + pub const MAX_CONNECTION_ID_LEN: usize = 20; pub const LOCAL_ACTIVE_CID_LIMIT: usize = 8; pub const CONNECTION_ID_SEQNO_INITIAL: u64 = 0; @@ -421,8 +421,9 @@ pub struct ConnectionIdManager { /// The `ConnectionIdGenerator` instance that is used to create connection IDs. generator: Rc>, /// The connection IDs that we will accept. - /// This includes any we advertise in `NEW_CONNECTION_ID` that haven't been bound to a path yet. - /// During the handshake at the server, it also includes the randomized DCID pick by the client. + /// This includes any we advertise in `NEW_CONNECTION_ID` that haven't been bound to a path + /// yet. During the handshake at the server, it also includes the randomized DCID pick by + /// the client. connection_ids: ConnectionIdStore<()>, /// The maximum number of connection IDs this will accept. This is at least 2 and won't /// be more than `LOCAL_ACTIVE_CID_LIMIT`. @@ -595,9 +596,10 @@ impl ConnectionIdManager { #[cfg(test)] mod tests { - use super::*; use test_fixture::fixture_init; + use super::*; + #[test] fn generate_initial_cid() { fixture_init(); diff --git a/neqo-transport/src/connection/idle.rs b/neqo-transport/src/connection/idle.rs index da1c520777..e33f3defb3 100644 --- a/neqo-transport/src/connection/idle.rs +++ b/neqo-transport/src/connection/idle.rs @@ -4,13 +4,15 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use crate::recovery::RecoveryToken; -use neqo_common::qtrace; use std::{ cmp::{max, min}, time::{Duration, Instant}, }; +use neqo_common::qtrace; + +use crate::recovery::RecoveryToken; + #[derive(Debug, Clone)] /// There's a little bit of different behavior for resetting idle timeout. See /// -transport 10.2 ("Idle Timeout"). diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 7e8c1d4737..8aaf987db9 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -6,6 +6,29 @@ // The class implementing a QUIC connection. +use std::{ + cell::RefCell, + cmp::{max, min}, + convert::TryFrom, + fmt::{self, Debug}, + mem, + net::{IpAddr, SocketAddr}, + ops::RangeInclusive, + rc::{Rc, Weak}, + time::{Duration, Instant}, +}; + +use neqo_common::{ + event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, + qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, +}; +use neqo_crypto::{ + agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, + HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, + Server, ZeroRttChecker, +}; +use smallvec::SmallVec; + use crate::{ addr_valid::{AddressValidation, NewTokenState}, cid::{ @@ -37,27 +60,6 @@ use crate::{ version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; -use neqo_common::{ - event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, - qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, -}; -use neqo_crypto::{ - agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, - HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, - Server, ZeroRttChecker, -}; -use smallvec::SmallVec; -use std::{ - 
cell::RefCell, - cmp::{max, min}, - convert::TryFrom, - fmt::{self, Debug}, - mem, - net::{IpAddr, SocketAddr}, - ops::RangeInclusive, - rc::{Rc, Weak}, - time::{Duration, Instant}, -}; mod idle; pub mod params; @@ -66,16 +68,16 @@ mod state; #[cfg(test)] pub mod test_internal; -pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; +use idle::IdleTimeout; pub use params::ConnectionParameters; +use params::PreferredAddressConfig; #[cfg(test)] pub use params::ACK_RATIO_SCALE; -pub use state::{ClosingFrame, State}; - -use idle::IdleTimeout; -use params::PreferredAddressConfig; use saved::SavedDatagrams; use state::StateSignaling; +pub use state::{ClosingFrame, State}; + +pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; #[derive(Debug, Default)] struct Packet(Vec); @@ -476,7 +478,9 @@ impl Connection { /// Set a local transport parameter, possibly overriding a default value. /// This only sets transport parameters without dealing with other aspects of /// setting the value. + /// /// # Panics + /// /// This panics if the transport parameter is known to this crate. pub fn set_local_tparam(&self, tp: TransportParameterId, value: TransportParameter) -> Res<()> { #[cfg(not(test))] @@ -494,9 +498,9 @@ impl Connection { } /// `odcid` is their original choice for our CID, which we get from the Retry token. - /// `remote_cid` is the value from the Source Connection ID field of - /// an incoming packet: what the peer wants us to use now. - /// `retry_cid` is what we asked them to use when we sent the Retry. + /// `remote_cid` is the value from the Source Connection ID field of an incoming packet: what + /// the peer wants us to use now. `retry_cid` is what we asked them to use when we sent the + /// Retry. 
pub(crate) fn set_retry_cids( &mut self, odcid: ConnectionId, @@ -642,7 +646,9 @@ impl Connection { /// problem for short-lived connections, where the connection is closed before any events are /// released. This function retrieves the token, without waiting for a `NEW_TOKEN` frame to /// arrive. + /// /// # Panics + /// /// If this is called on a server. pub fn take_resumption_token(&mut self, now: Instant) -> Option { assert_eq!(self.role, Role::Client); @@ -849,8 +855,8 @@ impl Connection { qwarn!([self], "Closing again after error {:?}", err); } State::Init => { - // We have not even sent anything just close the connection without sending any error. - // This may happen when client_start fails. + // We have not even sent anything just close the connection without sending any + // error. This may happen when client_start fails. self.set_state(State::Closed(error)); } State::WaitInitial => { @@ -1672,6 +1678,7 @@ impl Connection { /// Either way, the path is probed and will be abandoned if the probe fails. /// /// # Errors + /// /// Fails if this is not a client, not confirmed, or there are not enough connection /// IDs available to use. pub fn migrate( @@ -2962,7 +2969,9 @@ impl Connection { /// Create a stream. /// Returns new stream id + /// /// # Errors + /// /// `ConnectionState` if the connecton stat does not allow to create streams. /// `StreamLimitError` if we are limiied by server's stream concurence. pub fn stream_create(&mut self, st: StreamType) -> Res { @@ -2984,7 +2993,9 @@ impl Connection { } /// Set the priority of a stream. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist. pub fn stream_priority( &mut self, @@ -2999,7 +3010,9 @@ impl Connection { } /// Set the SendOrder of a stream. 
Re-enqueues to keep the ordering correct + /// /// # Errors + /// /// Returns InvalidStreamId if the stream id doesn't exist pub fn stream_sendorder( &mut self, @@ -3010,7 +3023,9 @@ impl Connection { } /// Set the Fairness of a stream + /// /// # Errors + /// /// Returns InvalidStreamId if the stream id doesn't exist pub fn stream_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { self.streams.set_fairness(stream_id, fairness) @@ -3029,7 +3044,9 @@ impl Connection { /// Send data on a stream. /// Returns how many bytes were successfully sent. Could be less /// than total, based on receiver credit space available, etc. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist, /// `InvalidInput` if length of `data` is zero, /// `FinalSizeError` if the stream has already been closed. @@ -3040,7 +3057,9 @@ impl Connection { /// Send all data or nothing on a stream. May cause DATA_BLOCKED or /// STREAM_DATA_BLOCKED frames to be sent. /// Returns true if data was successfully sent, otherwise false. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist, /// `InvalidInput` if length of `data` is zero, /// `FinalSizeError` if the stream has already been closed. @@ -3081,7 +3100,9 @@ impl Connection { /// Read buffered data from stream. bool says whether read bytes includes /// the final data on stream. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. /// `NoMoreData` if data and fin bit were previously read by the application. pub fn stream_recv(&mut self, stream_id: StreamId, data: &mut [u8]) -> Res<(usize, bool)> { @@ -3100,7 +3121,9 @@ impl Connection { } /// Increases `max_stream_data` for a `stream_id`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. 
pub fn set_stream_max_data(&mut self, stream_id: StreamId, max_data: u64) -> Res<()> { @@ -3114,7 +3137,9 @@ impl Connection { /// (if `keep` is `true`) or no longer important (if `keep` is `false`). If any /// stream is marked this way, PING frames will be used to keep the connection /// alive, even when there is no activity. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. pub fn stream_keep_alive(&mut self, stream_id: StreamId, keep: bool) -> Res<()> { @@ -3128,7 +3153,9 @@ impl Connection { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the /// packet number, ack frames, etc. + /// /// # Error + /// /// The function returns `NotAvailable` if datagrams are not enabled. pub fn max_datagram_size(&self) -> Res { let max_dgram_size = self.quic_datagrams.remote_datagram_size(); @@ -3169,7 +3196,9 @@ impl Connection { } /// Queue a datagram for sending. + /// /// # Error + /// /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. The funcion does not check if the /// datagram can fit into a packet (i.e. MTU limit). This is checked during diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index 3d8dff67a6..48aba4303b 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -4,18 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT}; +use std::{cmp::max, convert::TryFrom, time::Duration}; + pub use crate::recovery::FAST_PTO_SCALE; -use crate::recv_stream::RECV_BUFFER_SIZE; -use crate::rtt::GRANULARITY; -use crate::stream_id::StreamType; -use crate::tparams::{self, PreferredAddress, TransportParameter, TransportParametersHandler}; -use crate::tracking::DEFAULT_ACK_DELAY; -use crate::version::{Version, VersionConfig}; -use crate::{CongestionControlAlgorithm, Res}; -use std::cmp::max; -use std::convert::TryFrom; -use std::time::Duration; +use crate::{ + connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT}, + recv_stream::RECV_BUFFER_SIZE, + rtt::GRANULARITY, + stream_id::StreamType, + tparams::{self, PreferredAddress, TransportParameter, TransportParametersHandler}, + tracking::DEFAULT_ACK_DELAY, + version::{Version, VersionConfig}, + CongestionControlAlgorithm, Res, +}; const LOCAL_MAX_DATA: u64 = 0x3FFF_FFFF_FFFF_FFFF; // 2^62-1 const LOCAL_STREAM_LIMIT_BIDI: u64 = 16; @@ -49,11 +50,14 @@ pub struct ConnectionParameters { cc_algorithm: CongestionControlAlgorithm, /// Initial connection-level flow control limit. max_data: u64, - /// Initial flow control limit for receiving data on bidirectional streams that the peer creates. + /// Initial flow control limit for receiving data on bidirectional streams that the peer + /// creates. max_stream_data_bidi_remote: u64, - /// Initial flow control limit for receiving data on bidirectional streams that this endpoint creates. + /// Initial flow control limit for receiving data on bidirectional streams that this endpoint + /// creates. max_stream_data_bidi_local: u64, - /// Initial flow control limit for receiving data on unidirectional streams that the peer creates. + /// Initial flow control limit for receiving data on unidirectional streams that the peer + /// creates. max_stream_data_uni: u64, /// Initial limit on bidirectional streams that the peer creates. 
max_streams_bidi: u64, @@ -147,6 +151,7 @@ impl ConnectionParameters { } /// # Panics + /// /// If v > 2^60 (the maximum allowed by the protocol). pub fn max_streams(mut self, stream_type: StreamType, v: u64) -> Self { assert!(v <= (1 << 60), "max_streams is too large"); @@ -162,7 +167,9 @@ impl ConnectionParameters { } /// Get the maximum stream data that we will accept on different types of streams. + /// /// # Panics + /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination. pub fn get_max_stream_data(&self, stream_type: StreamType, remote: bool) -> u64 { match (stream_type, remote) { @@ -176,7 +183,9 @@ impl ConnectionParameters { } /// Set the maximum stream data that we will accept on different types of streams. + /// /// # Panics + /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination /// or if v >= 62 (the maximum allowed by the protocol). pub fn max_stream_data(mut self, stream_type: StreamType, remote: bool, v: u64) -> Self { @@ -224,6 +233,7 @@ impl ConnectionParameters { } /// # Panics + /// /// If `timeout` is 2^62 milliseconds or more. pub fn idle_timeout(mut self, timeout: Duration) -> Self { assert!(timeout.as_millis() < (1 << 62), "idle timeout is too long"); @@ -281,6 +291,7 @@ impl ConnectionParameters { /// congestion. /// /// # Panics + /// /// A value of 0 is invalid and will cause a panic. pub fn fast_pto(mut self, scale: u8) -> Self { assert_ne!(scale, 0); diff --git a/neqo-transport/src/connection/saved.rs b/neqo-transport/src/connection/saved.rs index 368a859f5d..f5616c732a 100644 --- a/neqo-transport/src/connection/saved.rs +++ b/neqo-transport/src/connection/saved.rs @@ -4,12 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::mem; -use std::time::Instant; +use std::{mem, time::Instant}; -use crate::crypto::CryptoSpace; use neqo_common::{qdebug, qinfo, Datagram}; +use crate::crypto::CryptoSpace; + /// The number of datagrams that are saved during the handshake when /// keys to decrypt them are not yet available. const MAX_SAVED_DATAGRAMS: usize = 4; diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index ffd9f16b51..f739c147ab 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -4,20 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cmp::{min, Ordering}, + mem, + rc::Rc, + time::Instant, +}; + use neqo_common::Encoder; -use std::cmp::{min, Ordering}; -use std::mem; -use std::rc::Rc; -use std::time::Instant; -use crate::frame::{ - FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT, - FRAME_TYPE_HANDSHAKE_DONE, +use crate::{ + frame::{ + FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT, + FRAME_TYPE_HANDSHAKE_DONE, + }, + packet::PacketBuilder, + path::PathRef, + recovery::RecoveryToken, + ConnectionError, Error, Res, }; -use crate::packet::PacketBuilder; -use crate::path::PathRef; -use crate::recovery::RecoveryToken; -use crate::{ConnectionError, Error, Res}; #[derive(Clone, Debug, PartialEq, Eq)] /// The state of the Connection. diff --git a/neqo-transport/src/connection/tests/ackrate.rs b/neqo-transport/src/connection/tests/ackrate.rs index 3c909bcc70..1b83d42acd 100644 --- a/neqo-transport/src/connection/tests/ackrate.rs +++ b/neqo-transport/src/connection/tests/ackrate.rs @@ -4,6 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{mem, time::Duration}; + +use test_fixture::{addr_v4, assertions}; + use super::{ super::{ConnectionParameters, ACK_RATIO_SCALE}, ack_bytes, connect_rtt_idle, default_client, default_server, fill_cwnd, increase_cwnd, @@ -11,9 +15,6 @@ use super::{ }; use crate::stream_id::StreamType; -use std::{mem, time::Duration}; -use test_fixture::{addr_v4, assertions}; - /// With the default RTT here (100ms) and default ratio (4), endpoints won't send /// `ACK_FREQUENCY` as the ACK delay isn't different enough from the default. #[test] diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index 6c70e424ea..b3467ea67c 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -4,23 +4,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::super::Output; +use std::{convert::TryFrom, mem, time::Duration}; + +use neqo_common::{qdebug, qinfo, Datagram}; + use super::{ - ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, default_client, - default_server, fill_cwnd, induce_persistent_congestion, send_something, + super::Output, ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, + default_client, default_server, fill_cwnd, induce_persistent_congestion, send_something, CLIENT_HANDSHAKE_1RTT_PACKETS, DEFAULT_RTT, POST_HANDSHAKE_CWND, }; -use crate::cc::MAX_DATAGRAM_SIZE; -use crate::packet::PacketNumber; -use crate::recovery::{ACK_ONLY_SIZE_LIMIT, PACKET_THRESHOLD}; -use crate::sender::PACING_BURST_SIZE; -use crate::stream_id::StreamType; -use crate::tracking::DEFAULT_ACK_PACKET_TOLERANCE; - -use neqo_common::{qdebug, qinfo, Datagram}; -use std::convert::TryFrom; -use std::mem; -use std::time::Duration; +use crate::{ + cc::MAX_DATAGRAM_SIZE, + packet::PacketNumber, + recovery::{ACK_ONLY_SIZE_LIMIT, PACKET_THRESHOLD}, + sender::PACING_BURST_SIZE, + 
stream_id::StreamType, + tracking::DEFAULT_ACK_PACKET_TOLERANCE, +}; #[test] /// Verify initial CWND is honored. diff --git a/neqo-transport/src/connection/tests/close.rs b/neqo-transport/src/connection/tests/close.rs index 39b1106ce0..f45e77e549 100644 --- a/neqo-transport/src/connection/tests/close.rs +++ b/neqo-transport/src/connection/tests/close.rs @@ -4,14 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::super::{Connection, Output, State}; -use super::{connect, connect_force_idle, default_client, default_server, send_something}; -use crate::tparams::{self, TransportParameter}; -use crate::{AppError, ConnectionError, Error, ERROR_APPLICATION_CLOSE}; - use std::time::Duration; + use test_fixture::{self, datagram, now}; +use super::{ + super::{Connection, Output, State}, + connect, connect_force_idle, default_client, default_server, send_something, +}; +use crate::{ + tparams::{self, TransportParameter}, + AppError, ConnectionError, Error, ERROR_APPLICATION_CLOSE, +}; + fn assert_draining(c: &Connection, expected: &Error) { assert!(c.state().closed()); if let State::Draining { diff --git a/neqo-transport/src/connection/tests/datagram.rs b/neqo-transport/src/connection/tests/datagram.rs index 891773ddaa..5b7b8dc0b4 100644 --- a/neqo-transport/src/connection/tests/datagram.rs +++ b/neqo-transport/src/connection/tests/datagram.rs @@ -4,21 +4,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, convert::TryFrom, rc::Rc}; + +use neqo_common::event::Provider; +use test_fixture::now; + use super::{ assert_error, connect_force_idle, default_client, default_server, new_client, new_server, AT_LEAST_PTO, }; -use crate::events::{ConnectionEvent, OutgoingDatagramOutcome}; -use crate::frame::FRAME_TYPE_DATAGRAM; -use crate::packet::PacketBuilder; -use crate::quic_datagrams::MAX_QUIC_DATAGRAM; use crate::{ + events::{ConnectionEvent, OutgoingDatagramOutcome}, + frame::FRAME_TYPE_DATAGRAM, + packet::PacketBuilder, + quic_datagrams::MAX_QUIC_DATAGRAM, send_stream::{RetransmissionPriority, TransmissionPriority}, Connection, ConnectionError, ConnectionParameters, Error, StreamType, }; -use neqo_common::event::Provider; -use std::{cell::RefCell, convert::TryFrom, rc::Rc}; -use test_fixture::now; const DATAGRAM_LEN_MTU: u64 = 1310; const DATA_MTU: &[u8] = &[1; 1310]; @@ -323,7 +325,7 @@ fn datagram_lost() { let pings_sent = client.stats().frame_tx.ping; let dgram_lost = client.stats().datagram_tx.lost; let out = client.process_output(now).dgram(); - assert!(out.is_some()); //PING probing + assert!(out.is_some()); // PING probing // Datagram is not sent again. 
assert_eq!(client.stats().frame_tx.ping, pings_sent + 1); assert_eq!(client.stats().frame_tx.datagram, dgram_sent2); diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/fuzzing.rs index 75caa7e857..5425e1a16e 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/fuzzing.rs @@ -8,11 +8,12 @@ #![warn(clippy::pedantic)] #![cfg(feature = "fuzzing")] -use super::{connect_force_idle, default_client, default_server}; -use crate::StreamType; use neqo_crypto::FIXED_TAG_FUZZING; use test_fixture::now; +use super::{connect_force_idle, default_client, default_server}; +use crate::StreamType; + #[test] fn no_encryption() { const DATA_CLIENT: &[u8] = &[2; 40]; diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 33aff5d528..93385ac1bc 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -4,35 +4,40 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{Connection, Output, State}; +use std::{ + cell::RefCell, + convert::TryFrom, + mem, + net::{IpAddr, Ipv6Addr, SocketAddr}, + rc::Rc, + time::Duration, +}; + +use neqo_common::{event::Provider, qdebug, Datagram}; +use neqo_crypto::{ + constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, +}; +use test_fixture::{ + self, addr, assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, + split_datagram, +}; + use super::{ + super::{Connection, Output, State}, assert_error, connect, connect_force_idle, connect_with_rtt, default_client, default_server, get_tokens, handshake, maybe_authenticate, resumed_server, send_something, CountingConnectionIdGenerator, AT_LEAST_PTO, DEFAULT_RTT, DEFAULT_STREAM_DATA, }; -use crate::connection::AddressValidation; -use crate::events::ConnectionEvent; -use crate::path::PATH_MTU_V6; -use crate::server::ValidateAddress; -use crate::tparams::{TransportParameter, MIN_ACK_DELAY}; -use crate::tracking::DEFAULT_ACK_DELAY; use crate::{ + connection::AddressValidation, + events::ConnectionEvent, + path::PATH_MTU_V6, + server::ValidateAddress, + tparams::{TransportParameter, MIN_ACK_DELAY}, + tracking::DEFAULT_ACK_DELAY, ConnectionError, ConnectionParameters, EmptyConnectionIdGenerator, Error, StreamType, Version, }; -use neqo_common::{event::Provider, qdebug, Datagram}; -use neqo_crypto::{ - constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, -}; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::mem; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::rc::Rc; -use std::time::Duration; -use test_fixture::assertions::assert_coalesced_0rtt; -use test_fixture::{self, addr, assertions, datagram, fixture_init, now, split_datagram}; - const ECH_CONFIG_ID: u8 = 7; const ECH_PUBLIC_NAME: &str = "public.example"; @@ -128,7 +133,7 @@ fn no_alpn() { handshake(&mut client, &mut server, now(), Duration::new(0, 0)); // TODO (mt): errors are 
immediate, which means that we never send CONNECTION_CLOSE // and the client never sees the server's rejection of its handshake. - //assert_error(&client, ConnectionError::Transport(Error::CryptoAlert(120))); + // assert_error(&client, ConnectionError::Transport(Error::CryptoAlert(120))); assert_error( &server, &ConnectionError::Transport(Error::CryptoAlert(120)), diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index 1b7dac9de9..c33726917a 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -4,6 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + mem, + time::{Duration, Instant}, +}; + +use neqo_common::{qtrace, Encoder}; +use test_fixture::{self, now, split_datagram}; + use super::{ super::{Connection, ConnectionParameters, IdleTimeout, Output, State}, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, default_client, @@ -18,13 +26,6 @@ use crate::{ tracking::PacketNumberSpace, }; -use neqo_common::{qtrace, Encoder}; -use std::{ - mem, - time::{Duration, Instant}, -}; -use test_fixture::{self, now, split_datagram}; - fn default_timeout() -> Duration { ConnectionParameters::default().get_idle_timeout() } @@ -678,11 +679,14 @@ fn keep_alive_uni() { fn keep_alive_with_ack_eliciting_packet_lost() { const RTT: Duration = Duration::from_millis(500); // PTO will be ~1.1125s - // The idle time out will be set to ~ 5 * PTO. (IDLE_TIMEOUT/2 > pto and IDLE_TIMEOUT/2 < pto + 2pto) - // After handshake all packets will be lost. The following steps will happen after the handshake: + // The idle time out will be set to ~ 5 * PTO. (IDLE_TIMEOUT/2 > pto and IDLE_TIMEOUT/2 < pto + // + 2pto) After handshake all packets will be lost. 
The following steps will happen after + // the handshake: // - data will be sent on a stream that is marked for keep-alive, (at start time) - // - PTO timer will trigger first, and the data will be retransmited toghether with a PING, (at the start time + pto) - // - keep-alive timer will trigger and a keep-alive PING will be sent, (at the start time + IDLE_TIMEOUT / 2) + // - PTO timer will trigger first, and the data will be retransmited toghether with a PING, (at + // the start time + pto) + // - keep-alive timer will trigger and a keep-alive PING will be sent, (at the start time + + // IDLE_TIMEOUT / 2) // - PTO timer will trigger again. (at the start time + pto + 2*pto) // - Idle time out will trigger (at the timeout + IDLE_TIMEOUT) const IDLE_TIMEOUT: Duration = Duration::from_millis(6000); diff --git a/neqo-transport/src/connection/tests/keys.rs b/neqo-transport/src/connection/tests/keys.rs index a0e3b6596e..c247bba670 100644 --- a/neqo-transport/src/connection/tests/keys.rs +++ b/neqo-transport/src/connection/tests/keys.rs @@ -4,19 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::super::{ConnectionError, ERROR_AEAD_LIMIT_REACHED}; -use super::super::{Connection, ConnectionParameters, Error, Output, State, StreamType}; +use std::mem; + +use neqo_common::{qdebug, Datagram}; +use test_fixture::{self, now}; + use super::{ + super::{ + super::{ConnectionError, ERROR_AEAD_LIMIT_REACHED}, + Connection, ConnectionParameters, Error, Output, State, StreamType, + }, connect, connect_force_idle, default_client, default_server, maybe_authenticate, send_and_receive, send_something, AT_LEAST_PTO, }; -use crate::crypto::{OVERWRITE_INVOCATIONS, UPDATE_WRITE_KEYS_AT}; -use crate::packet::PacketNumber; -use crate::path::PATH_MTU_V6; - -use neqo_common::{qdebug, Datagram}; -use std::mem; -use test_fixture::{self, now}; +use crate::{ + crypto::{OVERWRITE_INVOCATIONS, UPDATE_WRITE_KEYS_AT}, + packet::PacketNumber, + path::PATH_MTU_V6, +}; fn check_discarded( peer: &mut Connection, diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 79c13faa77..8307a7dd84 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -4,6 +4,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + cell::RefCell, + net::{IpAddr, Ipv6Addr, SocketAddr}, + rc::Rc, + time::{Duration, Instant}, +}; + +use neqo_common::{Datagram, Decoder}; +use test_fixture::{ + self, addr, addr_v4, + assertions::{assert_v4_path, assert_v6_path}, + fixture_init, new_neqo_qlog, now, +}; + use super::{ super::{Connection, Output, State, StreamType}, connect_fail, connect_force_idle, connect_rtt_idle, default_client, default_server, @@ -20,19 +34,6 @@ use crate::{ ConnectionParameters, EmptyConnectionIdGenerator, Error, }; -use neqo_common::{Datagram, Decoder}; -use std::{ - cell::RefCell, - net::{IpAddr, Ipv6Addr, SocketAddr}, - rc::Rc, - time::{Duration, Instant}, -}; -use test_fixture::{ - self, addr, addr_v4, - assertions::{assert_v4_path, assert_v6_path}, - fixture_init, new_neqo_qlog, now, -}; - /// This should be a valid-seeming transport parameter. /// And it should have different values to `addr` and `addr_v4`. const SAMPLE_PREFERRED_ADDRESS: &[u8] = &[ diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index ab520c3198..8a999f4048 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -6,6 +6,20 @@ #![deny(clippy::pedantic)] +use std::{ + cell::RefCell, + cmp::min, + convert::TryFrom, + mem, + rc::Rc, + time::{Duration, Instant}, +}; + +use enum_map::enum_map; +use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; +use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; +use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; + use super::{Connection, ConnectionError, ConnectionId, Output, State}; use crate::{ addr_valid::{AddressValidation, ValidateAddress}, @@ -21,21 +35,6 @@ use crate::{ Version, }; -use std::{ - cell::RefCell, - cmp::min, - convert::TryFrom, - mem, - rc::Rc, - time::{Duration, Instant}, -}; - -use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; -use 
neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; -use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; - -use enum_map::enum_map; - // All the tests. mod ackrate; mod cc; @@ -405,7 +404,9 @@ fn increase_cwnd( } /// Receive multiple packets and generate an ack-only packet. +/// /// # Panics +/// /// The caller is responsible for ensuring that `dest` has received /// enough data that it wants to generate an ACK. This panics if /// no ACK frame is generated. diff --git a/neqo-transport/src/connection/tests/priority.rs b/neqo-transport/src/connection/tests/priority.rs index 5fb27b3a4d..1f86aa22e5 100644 --- a/neqo-transport/src/connection/tests/priority.rs +++ b/neqo-transport/src/connection/tests/priority.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{cell::RefCell, mem, rc::Rc}; + +use neqo_common::event::Provider; +use test_fixture::{self, now}; + use super::{ super::{Connection, Error, Output}, connect, default_client, default_server, fill_cwnd, maybe_authenticate, @@ -14,10 +19,6 @@ use crate::{ ConnectionEvent, StreamId, StreamType, }; -use neqo_common::event::Provider; -use std::{cell::RefCell, mem, rc::Rc}; -use test_fixture::{self, now}; - const BLOCK_SIZE: usize = 4_096; fn fill_stream(c: &mut Connection, id: StreamId) { diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 87b2b37839..0f12d03107 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -4,6 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + mem, + time::{Duration, Instant}, +}; + +use neqo_common::qdebug; +use neqo_crypto::AuthenticationStatus; +use test_fixture::{ + assertions::{assert_handshake, assert_initial}, + now, split_datagram, +}; + use super::{ super::{Connection, ConnectionParameters, Output, State}, assert_full_cwnd, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, cwnd, @@ -23,17 +35,6 @@ use crate::{ StreamType, }; -use neqo_common::qdebug; -use neqo_crypto::AuthenticationStatus; -use std::{ - mem, - time::{Duration, Instant}, -}; -use test_fixture::{ - assertions::{assert_handshake, assert_initial}, - now, split_datagram, -}; - #[test] fn pto_works_basic() { let mut client = default_client(); diff --git a/neqo-transport/src/connection/tests/resumption.rs b/neqo-transport/src/connection/tests/resumption.rs index fa56f6eae2..a8c45a9f06 100644 --- a/neqo-transport/src/connection/tests/resumption.rs +++ b/neqo-transport/src/connection/tests/resumption.rs @@ -4,18 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, mem, rc::Rc, time::Duration}; + +use test_fixture::{self, assertions, now}; + use super::{ connect, connect_with_rtt, default_client, default_server, exchange_ticket, get_tokens, new_client, resumed_server, send_something, AT_LEAST_PTO, }; -use crate::addr_valid::{AddressValidation, ValidateAddress}; -use crate::{ConnectionParameters, Error, Version}; - -use std::cell::RefCell; -use std::mem; -use std::rc::Rc; -use std::time::Duration; -use test_fixture::{self, assertions, now}; +use crate::{ + addr_valid::{AddressValidation, ValidateAddress}, + ConnectionParameters, Error, Version, +}; #[test] fn resume() { diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index d83ca07b61..586a537b9d 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{cmp::max, collections::HashMap, convert::TryFrom, mem}; + +use neqo_common::{event::Provider, qdebug}; +use test_fixture::now; + use super::{ super::State, assert_error, connect, connect_force_idle, default_client, default_server, maybe_authenticate, new_client, new_server, send_something, DEFAULT_STREAM_DATA, @@ -22,11 +27,6 @@ use crate::{ StreamId, StreamType, }; -use std::collections::HashMap; - -use neqo_common::{event::Provider, qdebug}; -use std::{cmp::max, convert::TryFrom, mem}; -use test_fixture::now; #[test] fn stream_create() { diff --git a/neqo-transport/src/connection/tests/vn.rs b/neqo-transport/src/connection/tests/vn.rs index e289bc654c..22f15c991c 100644 --- a/neqo-transport/src/connection/tests/vn.rs +++ b/neqo-transport/src/connection/tests/vn.rs @@ -4,19 +4,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState}; +use std::{mem, time::Duration}; + +use neqo_common::{event::Provider, Decoder, Encoder}; +use test_fixture::{self, assertions, datagram, now}; + use super::{ + super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState}, connect, connect_fail, default_client, default_server, exchange_ticket, new_client, new_server, send_something, }; -use crate::packet::PACKET_BIT_LONG; -use crate::tparams::{self, TransportParameter}; -use crate::{ConnectionParameters, Error, Version}; - -use neqo_common::{event::Provider, Decoder, Encoder}; -use std::mem; -use std::time::Duration; -use test_fixture::{self, assertions, datagram, now}; +use crate::{ + packet::PACKET_BIT_LONG, + tparams::{self, TransportParameter}, + ConnectionParameters, Error, Version, +}; // The expected PTO duration after the first Initial is sent. const INITIAL_PTO: Duration = Duration::from_millis(300); @@ -217,8 +219,8 @@ fn compatible_upgrade() { assert_eq!(server.version(), Version::Version2); } -/// When the first packet from the client is gigantic, the server might generate acknowledgment packets in -/// version 1. Both client and server need to handle that gracefully. +/// When the first packet from the client is gigantic, the server might generate acknowledgment +/// packets in version 1. Both client and server need to handle that gracefully. #[test] fn compatible_upgrade_large_initial() { let params = ConnectionParameters::default().versions( diff --git a/neqo-transport/src/connection/tests/zerortt.rs b/neqo-transport/src/connection/tests/zerortt.rs index f896b30730..0aa5573c98 100644 --- a/neqo-transport/src/connection/tests/zerortt.rs +++ b/neqo-transport/src/connection/tests/zerortt.rs @@ -4,20 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::Connection; -use super::{ - connect, default_client, default_server, exchange_ticket, new_server, resumed_server, - CountingConnectionIdGenerator, -}; -use crate::events::ConnectionEvent; -use crate::{ConnectionParameters, Error, StreamType, Version}; +use std::{cell::RefCell, rc::Rc}; use neqo_common::event::Provider; use neqo_crypto::{AllowZeroRtt, AntiReplay}; -use std::cell::RefCell; -use std::rc::Rc; use test_fixture::{self, assertions, now}; +use super::{ + super::Connection, connect, default_client, default_server, exchange_ticket, new_server, + resumed_server, CountingConnectionIdGenerator, +}; +use crate::{events::ConnectionEvent, ConnectionParameters, Error, StreamType, Version}; + #[test] fn zero_rtt_negotiate() { // Note that the two servers in this test will get different anti-replay filters. diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 898eb44372..4e152db0f2 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -16,7 +16,6 @@ use std::{ }; use neqo_common::{hex, hex_snip_middle, qdebug, qinfo, qtrace, Encoder, Role}; - use neqo_crypto::{ hkdf, hp::HpKey, Aead, Agent, AntiReplay, Cipher, Epoch, Error as CryptoError, HandshakeState, PrivateKey, PublicKey, Record, RecordList, ResumptionToken, SymKey, ZeroRttChecker, @@ -1542,8 +1541,8 @@ impl CryptoStreams { } // Calculate length of data based on the minimum of: // - available data - // - remaining space, less the header, which counts only one byte - // for the length at first to avoid underestimating length + // - remaining space, less the header, which counts only one byte for the length at + // first to avoid underestimating length let length = min(data.len(), builder.remaining() - header_len); header_len += Encoder::varint_len(u64::try_from(length).unwrap()) - 1; let length = min(data.len(), builder.remaining() - header_len); diff --git a/neqo-transport/src/dump.rs b/neqo-transport/src/dump.rs index 7dac137340..5d8a72f300 
100644 --- a/neqo-transport/src/dump.rs +++ b/neqo-transport/src/dump.rs @@ -7,13 +7,16 @@ // Enable just this file for logging to just see packets. // e.g. "RUST_LOG=neqo_transport::dump neqo-client ..." -use crate::connection::Connection; -use crate::frame::Frame; -use crate::packet::{PacketNumber, PacketType}; -use crate::path::PathRef; +use std::fmt::Write; + use neqo_common::{qdebug, Decoder}; -use std::fmt::Write; +use crate::{ + connection::Connection, + frame::Frame, + packet::{PacketNumber, PacketType}, + path::PathRef, +}; #[allow(clippy::module_name_repetitions)] pub fn dump_packet( diff --git a/neqo-transport/src/events.rs b/neqo-transport/src/events.rs index 93cb63a86f..88a85250ee 100644 --- a/neqo-transport/src/events.rs +++ b/neqo-transport/src/events.rs @@ -6,17 +6,18 @@ // Collecting a list of events relevant to whoever is using the Connection. -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; - -use crate::connection::State; -use crate::quic_datagrams::DatagramTracking; -use crate::stream_id::{StreamId, StreamType}; -use crate::{AppError, Stats}; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + use neqo_common::event::Provider as EventProvider; use neqo_crypto::ResumptionToken; +use crate::{ + connection::State, + quic_datagrams::DatagramTracking, + stream_id::{StreamId, StreamType}, + AppError, Stats, +}; + #[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] pub enum OutgoingDatagramOutcome { DroppedTooBig, diff --git a/neqo-transport/src/fc.rs b/neqo-transport/src/fc.rs index 090afdc538..a219ca7e8d 100644 --- a/neqo-transport/src/fc.rs +++ b/neqo-transport/src/fc.rs @@ -7,6 +7,14 @@ // Tracks possibly-redundant flow control signals from other code and converts // into flow control frames needing to be sent to the remote. 
+use std::{ + convert::TryFrom, + fmt::Debug, + ops::{Deref, DerefMut, Index, IndexMut}, +}; + +use neqo_common::{qtrace, Role}; + use crate::{ frame::{ FRAME_TYPE_DATA_BLOCKED, FRAME_TYPE_MAX_DATA, FRAME_TYPE_MAX_STREAMS_BIDI, @@ -19,13 +27,6 @@ use crate::{ stream_id::{StreamId, StreamType}, Error, Res, }; -use neqo_common::{qtrace, Role}; - -use std::{ - convert::TryFrom, - fmt::Debug, - ops::{Deref, DerefMut, Index, IndexMut}, -}; #[derive(Debug)] pub struct SenderFlowControl @@ -575,6 +576,8 @@ impl IndexMut for LocalStreamLimits { #[cfg(test)] mod test { + use neqo_common::{Encoder, Role}; + use super::{LocalStreamLimits, ReceiverFlowControl, RemoteStreamLimits, SenderFlowControl}; use crate::{ packet::PacketBuilder, @@ -582,7 +585,6 @@ mod test { stream_id::{StreamId, StreamType}, Error, }; - use neqo_common::{Encoder, Role}; #[test] fn blocked_at_zero() { diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 8081baef6c..f3d567ac7c 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -6,15 +6,16 @@ // Directly relating to QUIC frames. 
-use neqo_common::{qtrace, Decoder}; +use std::{convert::TryFrom, ops::RangeInclusive}; -use crate::cid::MAX_CONNECTION_ID_LEN; -use crate::packet::PacketType; -use crate::stream_id::{StreamId, StreamType}; -use crate::{AppError, ConnectionError, Error, Res, TransportError}; +use neqo_common::{qtrace, Decoder}; -use std::convert::TryFrom; -use std::ops::RangeInclusive; +use crate::{ + cid::MAX_CONNECTION_ID_LEN, + packet::PacketType, + stream_id::{StreamId, StreamType}, + AppError, ConnectionError, Error, Res, TransportError, +}; #[allow(clippy::module_name_repetitions)] pub type FrameType = u64; @@ -612,9 +613,10 @@ impl<'a> Frame<'a> { #[cfg(test)] mod tests { - use super::*; use neqo_common::{Decoder, Encoder}; + use super::*; + fn just_dec(f: &Frame, s: &str) { let encoded = Encoder::from_hex(s); let decoded = Frame::decode(&mut encoded.as_decoder()).unwrap(); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index 35bdd7d34a..d10ea7e9e6 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -51,16 +51,13 @@ pub use self::{ events::{ConnectionEvent, ConnectionEvents}, frame::CloseError, quic_datagrams::DatagramTracking, + recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}, + send_stream::{SendStreamStats, SEND_BUFFER_SIZE}, stats::Stats, stream_id::{StreamId, StreamType}, version::Version, }; -pub use self::{ - recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}, - send_stream::{SendStreamStats, SEND_BUFFER_SIZE}, -}; - pub type TransportError = u64; const ERROR_APPLICATION_CLOSE: TransportError = 12; const ERROR_CRYPTO_BUFFER_EXCEEDED: TransportError = 13; diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index f1cec80ac6..e5214c1bc8 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -7,12 +7,14 @@ // Pacer #![deny(clippy::pedantic)] -use neqo_common::qtrace; +use std::{ + cmp::min, + convert::TryFrom, + fmt::{Debug, Display}, + time::{Duration, Instant}, +}; -use std::cmp::min; -use 
std::convert::TryFrom; -use std::fmt::{Debug, Display}; -use std::time::{Duration, Instant}; +use neqo_common::qtrace; /// This value determines how much faster the pacer operates than the /// congestion window. @@ -123,10 +125,12 @@ impl Debug for Pacer { #[cfg(test)] mod tests { - use super::Pacer; use std::time::Duration; + use test_fixture::now; + use super::Pacer; + const RTT: Duration = Duration::from_millis(1000); const PACKET: usize = 1000; const CWND: usize = PACKET * 10; diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 080cf6649a..0968bb9ae2 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -5,16 +5,6 @@ // except according to those terms. // Encoding and decoding packets off the wire. -use crate::{ - cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, - crypto::{CryptoDxState, CryptoSpace, CryptoStates}, - version::{Version, WireVersion}, - Error, Res, -}; - -use neqo_common::{hex, hex_with_len, qtrace, qwarn, Decoder, Encoder}; -use neqo_crypto::random; - use std::{ cmp::min, convert::TryFrom, @@ -24,6 +14,16 @@ use std::{ time::Instant, }; +use neqo_common::{hex, hex_with_len, qtrace, qwarn, Decoder, Encoder}; +use neqo_crypto::random; + +use crate::{ + cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, + crypto::{CryptoDxState, CryptoSpace, CryptoStates}, + version::{Version, WireVersion}, + Error, Res, +}; + pub const PACKET_BIT_LONG: u8 = 0x80; const PACKET_BIT_SHORT: u8 = 0x00; const PACKET_BIT_FIXED_QUIC: u8 = 0x40; @@ -501,8 +501,8 @@ pub struct PublicPacket<'a> { dcid: ConnectionIdRef<'a>, /// The source connection ID, if this is a long header packet. scid: Option>, - /// Any token that is included in the packet (Retry always has a token; Initial sometimes does). - /// This is empty when there is no token. + /// Any token that is included in the packet (Retry always has a token; Initial sometimes + /// does). 
This is empty when there is no token. token: &'a [u8], /// The size of the header, not including the packet number. header_len: usize, @@ -865,13 +865,14 @@ impl Deref for DecryptedPacket { #[cfg(all(test, not(feature = "fuzzing")))] mod tests { + use neqo_common::Encoder; + use test_fixture::{fixture_init, now}; + use super::*; use crate::{ crypto::{CryptoDxState, CryptoStates}, EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version, }; - use neqo_common::Encoder; - use test_fixture::{fixture_init, now}; const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; const SERVER_CID: &[u8] = &[0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5]; @@ -1023,7 +1024,8 @@ mod tests { assert_eq!(&decrypted[..], SAMPLE_SHORT_PAYLOAD); } - /// By telling the decoder that the connection ID is shorter than it really is, we get a decryption error. + /// By telling the decoder that the connection ID is shorter than it really is, we get a + /// decryption error. #[test] fn decode_short_bad_cid() { fixture_init(); diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index e9a7e90ab9..a1333a0150 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -6,13 +6,12 @@ #![deny(clippy::pedantic)] -use crate::version::Version; -use crate::{Error, Res}; +use std::cell::RefCell; use neqo_common::qerror; use neqo_crypto::{hkdf, Aead, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use std::cell::RefCell; +use crate::{version::Version, Error, Res}; /// The AEAD used for Retry is fixed, so use thread local storage. 
fn make_aead(version: Version) -> Aead { diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 2ab90c169c..06cc8a6a61 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -17,6 +17,9 @@ use std::{ time::{Duration, Instant}, }; +use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; +use neqo_crypto::random; + use crate::{ ackrate::{AckRate, PeerAckDelay}, cc::CongestionControlAlgorithm, @@ -31,9 +34,6 @@ use crate::{ Error, Res, Stats, }; -use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; -use neqo_crypto::random; - /// This is the MTU that we assume when using IPv6. /// We use this size for Initial packets, so we don't need to worry about probing for support. /// If the path doesn't support this MTU, we will assume that it doesn't support QUIC. @@ -498,7 +498,7 @@ enum ProbeState { } impl ProbeState { - /// Determine whether the current state requires probing. + /// Determine whether the current state requires probing. fn probe_needed(&self) -> bool { matches!(self, Self::ProbeNeeded { .. }) } @@ -1008,7 +1008,8 @@ impl Path { .map_or(usize::MAX, |limit| { let budget = if limit == 0 { // If we have received absolutely nothing thus far, then this endpoint - // is the one initiating communication on this path. Allow enough space for probing. + // is the one initiating communication on this path. Allow enough space for + // probing. 
self.mtu() * 5 } else { limit diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 1639da6e74..434395fd23 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -13,6 +13,7 @@ use std::{ time::Duration, }; +use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder}; use qlog::events::{ connectivity::{ConnectionStarted, ConnectionState, ConnectionStateUpdated}, quic::{ @@ -21,8 +22,6 @@ use qlog::events::{ }, EventData, RawInfo, }; - -use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder}; use smallvec::SmallVec; use crate::{ diff --git a/neqo-transport/src/quic_datagrams.rs b/neqo-transport/src/quic_datagrams.rs index e9c4497cde..07f3594768 100644 --- a/neqo-transport/src/quic_datagrams.rs +++ b/neqo-transport/src/quic_datagrams.rs @@ -6,14 +6,17 @@ // https://datatracker.ietf.org/doc/html/draft-ietf-quic-datagram -use crate::frame::{FRAME_TYPE_DATAGRAM, FRAME_TYPE_DATAGRAM_WITH_LEN}; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::{events::OutgoingDatagramOutcome, ConnectionEvents, Error, Res, Stats}; +use std::{cmp::min, collections::VecDeque, convert::TryFrom}; + use neqo_common::Encoder; -use std::cmp::min; -use std::collections::VecDeque; -use std::convert::TryFrom; + +use crate::{ + events::OutgoingDatagramOutcome, + frame::{FRAME_TYPE_DATAGRAM, FRAME_TYPE_DATAGRAM_WITH_LEN}, + packet::PacketBuilder, + recovery::RecoveryToken, + ConnectionEvents, Error, Res, Stats, +}; pub const MAX_QUIC_DATAGRAM: u64 = 65535; @@ -140,7 +143,9 @@ impl QuicDatagrams { } /// Returns true if there was an unsent datagram that has been dismissed. + /// /// # Error + /// /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. The funcion does not check if the /// datagram can fit into a packet (i.e. MTU limit). 
This is checked during diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index a640b75371..d90989b486 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -17,9 +17,8 @@ use std::{ time::{Duration, Instant}, }; -use smallvec::{smallvec, SmallVec}; - use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn}; +use smallvec::{smallvec, SmallVec}; use crate::{ ackrate::AckRate, @@ -464,7 +463,9 @@ impl LossRecoverySpaces { /// Drop a packet number space and return all the packets that were /// outstanding, so that those can be marked as lost. + /// /// # Panics + /// /// If the space has already been removed. pub fn drop_space(&mut self, space: PacketNumberSpace) -> impl IntoIterator { let sp = match space { @@ -526,9 +527,9 @@ impl PtoState { /// And the number to declare lost when the PTO timer is hit. fn pto_packet_count(space: PacketNumberSpace, rx_count: usize) -> usize { if space == PacketNumberSpace::Initial && rx_count == 0 { - // For the Initial space, we only send one packet on PTO if we have not received any packets - // from the peer yet. This avoids sending useless PING-only packets when the Client Initial - // is deemed lost. + // For the Initial space, we only send one packet on PTO if we have not received any + // packets from the peer yet. This avoids sending useless PING-only packets + // when the Client Initial is deemed lost. 
1 } else { MAX_PTO_PACKET_COUNT @@ -1017,6 +1018,17 @@ impl ::std::fmt::Display for LossRecovery { #[cfg(test)] mod tests { + use std::{ + cell::RefCell, + convert::TryInto, + ops::{Deref, DerefMut, RangeInclusive}, + rc::Rc, + time::{Duration, Instant}, + }; + + use neqo_common::qlog::NeqoQlog; + use test_fixture::{addr, now}; + use super::{ LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket, FAST_PTO_SCALE, }; @@ -1028,15 +1040,6 @@ mod tests { rtt::RttEstimate, stats::{Stats, StatsCell}, }; - use neqo_common::qlog::NeqoQlog; - use std::{ - cell::RefCell, - convert::TryInto, - ops::{Deref, DerefMut, RangeInclusive}, - rc::Rc, - time::{Duration, Instant}, - }; - use test_fixture::{addr, now}; // Shorthand for a time in milliseconds. const fn ms(t: u64) -> Duration { diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index 04db42d36b..0b2863c425 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -8,6 +8,7 @@ // incoming STREAM frames. use std::{ + cell::RefCell, cmp::max, collections::BTreeMap, convert::TryFrom, @@ -15,6 +16,7 @@ use std::{ rc::{Rc, Weak}, }; +use neqo_common::{qtrace, Role}; use smallvec::SmallVec; use crate::{ @@ -28,8 +30,6 @@ use crate::{ stream_id::StreamId, AppError, Error, Res, }; -use neqo_common::{qtrace, Role}; -use std::cell::RefCell; const RX_STREAM_DATA_WINDOW: u64 = 0x10_0000; // 1MiB @@ -768,6 +768,7 @@ impl RecvStream { } /// # Errors + /// /// `NoMoreData` if data and fin bit were previously read by the application. pub fn read(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { let data_recvd_state = matches!(self.state, RecvStreamState::DataRecvd { .. 
}); @@ -965,10 +966,12 @@ impl RecvStream { #[cfg(test)] mod tests { - use super::*; - use neqo_common::Encoder; use std::ops::Range; + use neqo_common::Encoder; + + use super::*; + const SESSION_WINDOW: usize = 1024; fn recv_ranges(ranges: &[Range], available: usize) { diff --git a/neqo-transport/src/rtt.rs b/neqo-transport/src/rtt.rs index a5ceb37da2..4b05198bc9 100644 --- a/neqo-transport/src/rtt.rs +++ b/neqo-transport/src/rtt.rs @@ -8,17 +8,21 @@ #![deny(clippy::pedantic)] -use std::cmp::{max, min}; -use std::time::{Duration, Instant}; +use std::{ + cmp::{max, min}, + time::{Duration, Instant}, +}; use neqo_common::{qlog::NeqoQlog, qtrace}; -use crate::ackrate::{AckRate, PeerAckDelay}; -use crate::packet::PacketBuilder; -use crate::qlog::{self, QlogMetric}; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::tracking::PacketNumberSpace; +use crate::{ + ackrate::{AckRate, PeerAckDelay}, + packet::PacketBuilder, + qlog::{self, QlogMetric}, + recovery::RecoveryToken, + stats::FrameStats, + tracking::PacketNumberSpace, +}; /// The smallest time that the system timer (via `sleep()`, `nanosleep()`, /// `select()`, or similar) can reliably deliver; see `neqo_common::hrtime`. diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index e171dfab83..5feb785ac6 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -11,16 +11,15 @@ use std::{ cmp::{max, min, Ordering}, collections::{BTreeMap, VecDeque}, convert::TryFrom, + hash::{Hash, Hasher}, mem, ops::Add, rc::Rc, }; use indexmap::IndexMap; -use smallvec::SmallVec; -use std::hash::{Hash, Hasher}; - use neqo_common::{qdebug, qerror, qinfo, qtrace, Encoder, Role}; +use smallvec::SmallVec; use crate::{ events::ConnectionEvents, @@ -1280,7 +1279,8 @@ pub struct OrderGroupIter<'a> { // We store the next position in the OrderGroup. 
// Otherwise we'd need an explicit "done iterating" call to be made, or implement Drop to // copy the value back. - // This is where next was when we iterated for the first time; when we get back to that we stop. + // This is where next was when we iterated for the first time; when we get back to that we + // stop. started_at: Option, } @@ -1321,7 +1321,10 @@ impl OrderGroup { pub fn insert(&mut self, stream_id: StreamId) { match self.vec.binary_search(&stream_id) { - Ok(_) => panic!("Duplicate stream_id {}", stream_id), // element already in vector @ `pos` + Ok(_) => { + // element already in vector @ `pos` + panic!("Duplicate stream_id {}", stream_id) + } Err(pos) => self.vec.insert(pos, stream_id), } } @@ -1331,7 +1334,10 @@ impl OrderGroup { Ok(pos) => { self.vec.remove(pos); } - Err(_) => panic!("Missing stream_id {}", stream_id), // element already in vector @ `pos` + Err(_) => { + // element already in vector @ `pos` + panic!("Missing stream_id {}", stream_id) + } } } } @@ -1634,10 +1640,10 @@ pub struct SendStreamRecoveryToken { #[cfg(test)] mod tests { - use super::*; + use neqo_common::{event::Provider, hex_with_len, qtrace}; + use super::*; use crate::events::ConnectionEvent; - use neqo_common::{event::Provider, hex_with_len, qtrace}; fn connection_fc(limit: u64) -> Rc>> { Rc::new(RefCell::new(SenderFlowControl::new((), limit))) diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 0c1e66ff9a..9a00dfc7a7 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -8,16 +8,19 @@ #![deny(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] -use crate::cc::{ - ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno, +use std::{ + fmt::{self, Debug, Display}, + time::{Duration, Instant}, }; -use crate::pace::Pacer; -use crate::rtt::RttEstimate; -use crate::tracking::SentPacket; + use neqo_common::qlog::NeqoQlog; -use std::fmt::{self, Debug, Display}; -use 
std::time::{Duration, Instant}; +use crate::{ + cc::{ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno}, + pace::Pacer, + rtt::RttEstimate, + tracking::SentPacket, +}; /// The number of packets we allow to burst from the pacer. pub const PACING_BURST_SIZE: usize = 2; diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 288ec1a605..12a7d2f9e0 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -6,6 +6,18 @@ // This file implements a server that can handle multiple connections. +use std::{ + cell::RefCell, + collections::{HashMap, HashSet, VecDeque}, + fs::OpenOptions, + mem, + net::SocketAddr, + ops::{Deref, DerefMut}, + path::PathBuf, + rc::{Rc, Weak}, + time::{Duration, Instant}, +}; + use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, timer::Timer, Datagram, Decoder, Role, @@ -25,18 +37,6 @@ use crate::{ ConnectionParameters, Res, Version, }; -use std::{ - cell::RefCell, - collections::{HashMap, HashSet, VecDeque}, - fs::OpenOptions, - mem, - net::SocketAddr, - ops::{Deref, DerefMut}, - path::PathBuf, - rc::{Rc, Weak}, - time::{Duration, Instant}, -}; - pub enum InitialResult { Accept, Drop, @@ -190,11 +190,11 @@ impl Server { /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. - /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This - /// will be passed the value of the `extra` argument that was passed to - /// `Connection::send_ticket` to see if it is OK. - /// * `cid_generator` is responsible for generating connection IDs and parsing them; - /// connection IDs produced by the manager cannot be zero-length. + /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. 
This will be passed the + /// value of the `extra` argument that was passed to `Connection::send_ticket` to see if it is + /// OK. + /// * `cid_generator` is responsible for generating connection IDs and parsing them; connection + /// IDs produced by the manager cannot be zero-length. pub fn new( now: Instant, certs: &[impl AsRef], @@ -615,7 +615,8 @@ impl Server { qdebug!([self], "Drop initial: too short"); return None; } - // Copy values from `packet` because they are currently still borrowing from `dgram`. + // Copy values from `packet` because they are currently still borrowing from + // `dgram`. let initial = InitialDetails::new(&packet); self.handle_initial(initial, dgram, now) } diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index 9e956eb02e..d6c7a911f9 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -7,8 +7,6 @@ // Tracking of some useful statistics. #![deny(clippy::pedantic)] -use crate::packet::PacketNumber; -use neqo_common::qinfo; use std::{ cell::RefCell, fmt::{self, Debug}, @@ -17,6 +15,10 @@ use std::{ time::Duration, }; +use neqo_common::qinfo; + +use crate::packet::PacketNumber; + pub(crate) const MAX_PTO_COUNTS: usize = 16; #[derive(Default, Clone)] @@ -176,6 +178,7 @@ impl Stats { } /// # Panics + /// /// When preconditions are violated. 
pub fn add_pto_count(&mut self, count: usize) { debug_assert!(count > 0); diff --git a/neqo-transport/src/stream_id.rs b/neqo-transport/src/stream_id.rs index 51df2ca9fb..f3b07b86a8 100644 --- a/neqo-transport/src/stream_id.rs +++ b/neqo-transport/src/stream_id.rs @@ -133,9 +133,10 @@ impl ::std::fmt::Display for StreamId { #[cfg(test)] mod test { - use super::StreamId; use neqo_common::Role; + use super::StreamId; + #[test] fn bidi_stream_properties() { let id1 = StreamId::from(16); diff --git a/neqo-transport/src/streams.rs b/neqo-transport/src/streams.rs index 507cfbc214..7cbb29ce02 100644 --- a/neqo-transport/src/streams.rs +++ b/neqo-transport/src/streams.rs @@ -5,6 +5,10 @@ // except according to those terms. // Stream management for a connection. +use std::{cell::RefCell, cmp::Ordering, rc::Rc}; + +use neqo_common::{qtrace, qwarn, Role}; + use crate::{ fc::{LocalStreamLimits, ReceiverFlowControl, RemoteStreamLimits, SenderFlowControl}, frame::Frame, @@ -17,9 +21,6 @@ use crate::{ tparams::{self, TransportParametersHandler}, ConnectionEvents, Error, Res, }; -use neqo_common::{qtrace, qwarn, Role}; -use std::cmp::Ordering; -use std::{cell::RefCell, rc::Rc}; pub type SendOrder = i64; @@ -438,9 +439,10 @@ impl Streams { if st == StreamType::BiDi { // From the local perspective, this is a local- originated BiDi stream. From the - // remote perspective, this is a remote-originated BiDi stream. Therefore, look at - // the local transport parameters for the INITIAL_MAX_STREAM_DATA_BIDI_LOCAL value - // to decide how much this endpoint will allow its peer to send. + // remote perspective, this is a remote-originated BiDi stream. Therefore, look + // at the local transport parameters for the + // INITIAL_MAX_STREAM_DATA_BIDI_LOCAL value to decide how + // much this endpoint will allow its peer to send. 
let recv_initial_max_stream_data = self .tps .borrow() diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index ea5f78fc36..1297829094 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -6,10 +6,12 @@ // Transport parameters. See -transport section 7.3. -use crate::{ - cid::{ConnectionId, ConnectionIdEntry, CONNECTION_ID_SEQNO_PREFERRED, MAX_CONNECTION_ID_LEN}, - version::{Version, VersionConfig, WireVersion}, - Error, Res, +use std::{ + cell::RefCell, + collections::HashMap, + convert::TryFrom, + net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, + rc::Rc, }; use neqo_common::{hex, qdebug, qinfo, qtrace, Decoder, Encoder, Role}; @@ -19,12 +21,10 @@ use neqo_crypto::{ random, HandshakeMessage, ZeroRttCheckResult, ZeroRttChecker, }; -use std::{ - cell::RefCell, - collections::HashMap, - convert::TryFrom, - net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, - rc::Rc, +use crate::{ + cid::{ConnectionId, ConnectionIdEntry, CONNECTION_ID_SEQNO_PREFERRED, MAX_CONNECTION_ID_LEN}, + version::{Version, VersionConfig, WireVersion}, + Error, Res, }; pub type TransportParameterId = u64; @@ -71,6 +71,7 @@ impl PreferredAddress { /// Make a new preferred address configuration. /// /// # Panics + /// /// If neither address is provided, or if either address is of the wrong type. #[must_use] pub fn new(v4: Option, v6: Option) -> Self { @@ -1023,7 +1024,8 @@ mod tests { fn active_connection_id_limit_min_2() { let mut tps = TransportParameters::default(); - // Intentionally set an invalid value for the ACTIVE_CONNECTION_ID_LIMIT transport parameter. + // Intentionally set an invalid value for the ACTIVE_CONNECTION_ID_LIMIT transport + // parameter. 
tps.params .insert(ACTIVE_CONNECTION_ID_LIMIT, TransportParameter::Integer(1)); diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 32f1c8d1b7..62e7398ede 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -18,6 +18,7 @@ use std::{ use neqo_common::{qdebug, qinfo, qtrace, qwarn}; use neqo_crypto::{Epoch, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL}; +use smallvec::{smallvec, SmallVec}; use crate::{ packet::{PacketBuilder, PacketNumber, PacketType}, @@ -26,8 +27,6 @@ use crate::{ Error, Res, }; -use smallvec::{smallvec, SmallVec}; - // TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574 #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq)] pub enum PacketNumberSpace { @@ -750,6 +749,11 @@ impl Default for AckTracker { #[cfg(test)] mod tests { + use std::collections::HashSet; + + use lazy_static::lazy_static; + use neqo_common::Encoder; + use super::{ AckTracker, Duration, Instant, PacketNumberSpace, PacketNumberSpaceSet, RecoveryToken, RecvdPackets, MAX_TRACKED_RANGES, @@ -759,9 +763,6 @@ mod tests { packet::{PacketBuilder, PacketNumber}, stats::FrameStats, }; - use lazy_static::lazy_static; - use neqo_common::Encoder; - use std::collections::HashSet; const RTT: Duration = Duration::from_millis(100); lazy_static! { diff --git a/neqo-transport/src/version.rs b/neqo-transport/src/version.rs index 4cb9b964ce..13db0bf024 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -4,10 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{Error, Res}; -use neqo_common::qdebug; use std::convert::TryFrom; +use neqo_common::qdebug; + +use crate::{Error, Res}; + pub type WireVersion = u32; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index 1a414df5b0..a43f91e3fe 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -8,6 +8,8 @@ #![warn(clippy::pedantic)] #![allow(unused)] +use std::{cell::RefCell, convert::TryFrom, mem, ops::Range, rc::Rc}; + use neqo_common::{event::Provider, hex_with_len, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, @@ -21,12 +23,6 @@ use neqo_transport::{ }; use test_fixture::{self, default_client, now, CountingConnectionIdGenerator}; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::mem; -use std::ops::Range; -use std::rc::Rc; - /// Create a server. This is different than the one in the fixture, which is a single connection. 
pub fn new_server(params: ConnectionParameters) -> Server { Server::new( diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index 7597c81621..91dbbf31cc 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -8,14 +8,13 @@ #![deny(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] +use std::{cell::RefCell, rc::Rc}; + use neqo_transport::{ Connection, ConnectionParameters, RandomConnectionIdGenerator, State, Version, }; use test_fixture::{self, datagram, now}; -use std::cell::RefCell; -use std::rc::Rc; - const INITIAL_PACKET_V2: &[u8] = &[ 0xd7, 0x6b, 0x33, 0x43, 0xcf, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00, 0x44, 0x9e, 0xa0, 0xc9, 0x5e, 0x82, 0xff, 0xe6, 0x7b, 0x6a, 0xbc, 0xdb, 0x42, 0x98, 0xb4, 0x85, diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 661909fd22..4cbf57f405 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -9,12 +9,13 @@ mod common; +use std::convert::TryFrom; + use common::{ apply_header_protection, decode_initial_header, initial_aead_and_hp, remove_header_protection, }; use neqo_common::{Datagram, Decoder, Encoder, Role}; use neqo_transport::{ConnectionError, ConnectionParameters, Error, State, Version}; -use std::convert::TryFrom; use test_fixture::{self, default_client, default_server, new_client, now, split_datagram}; #[test] diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index e2389090a7..5d67ca7938 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -9,14 +9,14 @@ mod sim; +use std::{ops::Range, time::Duration}; + use neqo_transport::{ConnectionError, ConnectionParameters, Error, State}; use sim::{ connection::{ConnectionNode, ReachState, ReceiveData, SendData}, network::{Delay, Drop, TailDrop}, Simulator, }; -use std::ops::Range; -use std::time::Duration; /// The amount of transfer. 
Much more than this takes a surprising amount of time. const TRANSFER_AMOUNT: usize = 1 << 20; // 1M diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index eb20b8144a..93759c7df9 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -10,6 +10,13 @@ mod common; +use std::{ + convert::TryFrom, + mem, + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Duration, +}; + use common::{ apply_header_protection, connected_server, decode_initial_header, default_server, generate_ticket, initial_aead_and_hp, remove_header_protection, @@ -17,10 +24,6 @@ use common::{ use neqo_common::{hex_with_len, qdebug, qtrace, Datagram, Encoder, Role}; use neqo_crypto::AuthenticationStatus; use neqo_transport::{server::ValidateAddress, ConnectionError, Error, State, StreamType}; -use std::convert::TryFrom; -use std::mem; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::time::Duration; use test_fixture::{self, assertions, datagram, default_client, now, split_datagram}; #[test] diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index 2f1ee3b493..d6c9c2df95 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -9,11 +9,12 @@ mod common; +use std::{cell::RefCell, convert::TryFrom, mem, net::SocketAddr, rc::Rc, time::Duration}; + use common::{ apply_header_protection, connect, connected_server, decode_initial_header, default_server, find_ticket, generate_ticket, initial_aead_and_hp, new_server, remove_header_protection, }; - use neqo_common::{qtrace, Datagram, Decoder, Encoder, Role}; use neqo_crypto::{ generate_ech_keys, AllowZeroRtt, AuthenticationStatus, ZeroRttCheckResult, ZeroRttChecker, @@ -27,12 +28,12 @@ use test_fixture::{ CountingConnectionIdGenerator, }; -use std::{cell::RefCell, convert::TryFrom, mem, net::SocketAddr, rc::Rc, time::Duration}; - /// Take a pair of connections in any state and complete the handshake. 
/// The `datagram` argument is a packet that was received from the server. /// See `connect` for what this returns. +/// /// # Panics +/// /// Only when the connection fails. pub fn complete_connection( client: &mut Connection, diff --git a/neqo-transport/tests/sim/connection.rs b/neqo-transport/tests/sim/connection.rs index b624c119bd..45a5234512 100644 --- a/neqo-transport/tests/sim/connection.rs +++ b/neqo-transport/tests/sim/connection.rs @@ -6,18 +6,20 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; -use neqo_common::{event::Provider, qdebug, qtrace, Datagram}; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{ - Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, -}; use std::{ cmp::min, fmt::{self, Debug}, time::Instant, }; +use neqo_common::{event::Provider, qdebug, qtrace, Datagram}; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{ + Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, +}; + +use super::{Node, Rng}; + /// The status of the processing of an event. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum GoalStatus { diff --git a/neqo-transport/tests/sim/delay.rs b/neqo-transport/tests/sim/delay.rs index 95188c0562..34cb923084 100644 --- a/neqo-transport/tests/sim/delay.rs +++ b/neqo-transport/tests/sim/delay.rs @@ -6,14 +6,18 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; +use std::{ + collections::BTreeMap, + convert::TryFrom, + fmt::{self, Debug}, + ops::Range, + time::{Duration, Instant}, +}; + use neqo_common::Datagram; use neqo_transport::Output; -use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::fmt::{self, Debug}; -use std::ops::Range; -use std::time::{Duration, Instant}; + +use super::{Node, Rng}; /// An iterator that shares a `Random` instance and produces uniformly /// random `Duration`s within a specified range. 
diff --git a/neqo-transport/tests/sim/drop.rs b/neqo-transport/tests/sim/drop.rs index d42913d99d..629fbf48d3 100644 --- a/neqo-transport/tests/sim/drop.rs +++ b/neqo-transport/tests/sim/drop.rs @@ -6,11 +6,15 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; +use std::{ + fmt::{self, Debug}, + time::Instant, +}; + use neqo_common::{qtrace, Datagram}; use neqo_transport::Output; -use std::fmt::{self, Debug}; -use std::time::Instant; + +use super::{Node, Rng}; /// A random dropper. pub struct Drop { diff --git a/neqo-transport/tests/sim/mod.rs b/neqo-transport/tests/sim/mod.rs index f7646aac56..9ab9d57a4a 100644 --- a/neqo-transport/tests/sim/mod.rs +++ b/neqo-transport/tests/sim/mod.rs @@ -14,23 +14,23 @@ mod drop; pub mod rng; mod taildrop; +use std::{ + cell::RefCell, + cmp::min, + convert::TryFrom, + fmt::Debug, + rc::Rc, + time::{Duration, Instant}, +}; + use neqo_common::{qdebug, qinfo, qtrace, Datagram, Encoder}; use neqo_transport::Output; use rng::Random; -use std::cell::RefCell; -use std::cmp::min; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::rc::Rc; -use std::time::{Duration, Instant}; use test_fixture::{self, now}; - use NodeState::{Active, Idle, Waiting}; pub mod network { - pub use super::delay::Delay; - pub use super::drop::Drop; - pub use super::taildrop::TailDrop; + pub use super::{delay::Delay, drop::Drop, taildrop::TailDrop}; } type Rng = Rc>; diff --git a/neqo-transport/tests/sim/rng.rs b/neqo-transport/tests/sim/rng.rs index d314e8b36f..af4f70eb5f 100644 --- a/neqo-transport/tests/sim/rng.rs +++ b/neqo-transport/tests/sim/rng.rs @@ -4,9 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{convert::TryFrom, ops::Range}; + use neqo_common::Decoder; -use std::convert::TryFrom; -use std::ops::Range; /// An implementation of a xoshiro256** pseudorandom generator. 
pub struct Random { diff --git a/neqo-transport/tests/sim/taildrop.rs b/neqo-transport/tests/sim/taildrop.rs index 7346b27178..26813800c9 100644 --- a/neqo-transport/tests/sim/taildrop.rs +++ b/neqo-transport/tests/sim/taildrop.rs @@ -6,14 +6,18 @@ #![allow(clippy::module_name_repetitions)] -use super::Node; +use std::{ + cmp::max, + collections::VecDeque, + convert::TryFrom, + fmt::{self, Debug}, + time::{Duration, Instant}, +}; + use neqo_common::{qtrace, Datagram}; use neqo_transport::Output; -use std::cmp::max; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::{self, Debug}; -use std::time::{Duration, Instant}; + +use super::Node; /// One second in nanoseconds. const ONE_SECOND_NS: u128 = 1_000_000_000; diff --git a/test-fixture/src/assertions.rs b/test-fixture/src/assertions.rs index 339f11df64..7e772daabf 100644 --- a/test-fixture/src/assertions.rs +++ b/test-fixture/src/assertions.rs @@ -4,12 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{addr, addr_v4}; +use std::{ + convert::{TryFrom, TryInto}, + net::SocketAddr, +}; + use neqo_common::{Datagram, Decoder}; -use neqo_transport::version::WireVersion; -use neqo_transport::Version; -use std::convert::{TryFrom, TryInto}; -use std::net::SocketAddr; +use neqo_transport::{version::WireVersion, Version}; + +use crate::{addr, addr_v4}; const PACKET_TYPE_MASK: u8 = 0b1011_0000; @@ -32,7 +35,9 @@ fn assert_long_packet_type(b: u8, v1_expected: u8, version: Version) { } /// Simple checks for the version being correct. +/// /// # Panics +/// /// If this is not a long header packet with the given version. pub fn assert_version(payload: &[u8], v: u32) { let mut dec = Decoder::from(payload); @@ -41,7 +46,9 @@ pub fn assert_version(payload: &[u8], v: u32) { } /// Simple checks for a Version Negotiation packet. +/// /// # Panics +/// /// If this is clearly not a Version Negotiation packet. 
pub fn assert_vn(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -53,7 +60,9 @@ pub fn assert_vn(payload: &[u8]) { } /// Do a simple decode of the datagram to verify that it is coalesced. +/// /// # Panics +/// /// If the tests fail. pub fn assert_coalesced_0rtt(payload: &[u8]) { assert!(payload.len() >= 1200); @@ -71,6 +80,7 @@ pub fn assert_coalesced_0rtt(payload: &[u8]) { } /// # Panics +/// /// If the tests fail. pub fn assert_retry(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -80,7 +90,9 @@ pub fn assert_retry(payload: &[u8]) { } /// Assert that this is an Initial packet with (or without) a token. +/// /// # Panics +/// /// If the tests fail. pub fn assert_initial(payload: &[u8], expect_token: bool) { let mut dec = Decoder::from(payload); @@ -94,7 +106,9 @@ pub fn assert_initial(payload: &[u8], expect_token: bool) { } /// Assert that this is a Handshake packet. +/// /// # Panics +/// /// If the tests fail. pub fn assert_handshake(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -104,6 +118,7 @@ pub fn assert_handshake(payload: &[u8]) { } /// # Panics +/// /// If the tests fail. pub fn assert_no_1rtt(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -135,6 +150,7 @@ pub fn assert_no_1rtt(payload: &[u8]) { } /// # Panics +/// /// When the path doesn't use the given socket address at both ends. pub fn assert_path(dgram: &Datagram, path_addr: SocketAddr) { assert_eq!(dgram.source(), path_addr); @@ -142,6 +158,7 @@ pub fn assert_path(dgram: &Datagram, path_addr: SocketAddr) { } /// # Panics +/// /// When the path doesn't use the default v4 socket address at both ends. pub fn assert_v4_path(dgram: &Datagram, padded: bool) { assert_path(dgram, addr_v4()); @@ -151,6 +168,7 @@ pub fn assert_v4_path(dgram: &Datagram, padded: bool) { } /// # Panics +/// /// When the path doesn't use the default v6 socket address at both ends. 
pub fn assert_v6_path(dgram: &Datagram, padded: bool) { assert_path(dgram, addr()); diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index e431ace9a1..8635e8a840 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -7,22 +7,6 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -use neqo_common::{ - event::Provider, - hex, - qlog::{new_trace, NeqoQlog}, - qtrace, Datagram, Decoder, IpTos, Role, -}; - -use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; -use neqo_http3::{Http3Client, Http3Parameters, Http3Server}; -use neqo_transport::{ - version::WireVersion, Connection, ConnectionEvent, ConnectionId, ConnectionIdDecoder, - ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, State, Version, -}; - -use qlog::{events::EventImportance, streamer::QlogStreamer}; - use std::{ cell::RefCell, cmp::max, @@ -36,6 +20,19 @@ use std::{ }; use lazy_static::lazy_static; +use neqo_common::{ + event::Provider, + hex, + qlog::{new_trace, NeqoQlog}, + qtrace, Datagram, Decoder, IpTos, Role, +}; +use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; +use neqo_http3::{Http3Client, Http3Parameters, Http3Server}; +use neqo_transport::{ + version::WireVersion, Connection, ConnectionEvent, ConnectionId, ConnectionIdDecoder, + ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, State, Version, +}; +use qlog::{events::EventImportance, streamer::QlogStreamer}; pub mod assertions; @@ -64,15 +61,19 @@ fn earlier() -> Instant { /// The current time for the test. Which is in the future, /// because 0-RTT tests need to run at least `ANTI_REPLAY_WINDOW` in the past. +/// /// # Panics +/// /// When the setup fails. #[must_use] pub fn now() -> Instant { earlier().checked_add(ANTI_REPLAY_WINDOW).unwrap() } -// Create a default anti-replay context. +/// Create a default anti-replay context. +/// /// # Panics +/// /// When the setup fails. 
#[must_use] pub fn anti_replay() -> AntiReplay { @@ -140,7 +141,9 @@ impl ConnectionIdGenerator for CountingConnectionIdGenerator { } /// Create a new client. +/// /// # Panics +/// /// If this doesn't work. #[must_use] pub fn new_client(params: ConnectionParameters) -> Connection { @@ -179,7 +182,9 @@ pub fn default_server_h3() -> Connection { } /// Create a transport server with a configuration. +/// /// # Panics +/// /// If this doesn't work. #[must_use] pub fn new_server(alpn: &[impl AsRef], params: ConnectionParameters) -> Connection { @@ -229,6 +234,7 @@ pub fn handshake(client: &mut Connection, server: &mut Connection) { } /// # Panics +/// /// When the connection fails. #[must_use] pub fn connect() -> (Connection, Connection) { @@ -241,7 +247,9 @@ pub fn connect() -> (Connection, Connection) { } /// Create a http3 client with default configuration. +/// /// # Panics +/// /// When the client can't be created. #[must_use] pub fn default_http3_client() -> Http3Client { @@ -262,7 +270,9 @@ pub fn default_http3_client() -> Http3Client { } /// Create a http3 client. +/// /// # Panics +/// /// When the client can't be created. #[must_use] pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { @@ -279,7 +289,9 @@ pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { } /// Create a http3 server with default configuration. +/// /// # Panics +/// /// When the server can't be created. #[must_use] pub fn default_http3_server() -> Http3Server { @@ -366,7 +378,9 @@ impl ToString for SharedVec { /// Returns a pair of new enabled `NeqoQlog` that is backed by a [`Vec`] /// together with a [`Cursor>`] that can be used to read the contents of /// the log. +/// /// # Panics +/// /// Panics if the log cannot be created. 
#[must_use] pub fn new_neqo_qlog() -> (NeqoQlog, SharedVec) { From 5e3269670aaee2997b49275671a4b6281fdebcc4 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 1 Feb 2024 17:31:24 +0200 Subject: [PATCH 101/321] ci: Further lower the RTT for the `idle_timeout_crazy_rtt` test (#1611) Since what we had before was still larger than our max. PTO, making deadlock more likely with 10% loss. --- neqo-transport/tests/network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index 5d67ca7938..8c388457c5 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -67,7 +67,7 @@ simulate!( ))) ] ), - Delay::new(weeks(15)..weeks(15)), + Delay::new(weeks(6)..weeks(6)), Drop::percentage(10), ConnectionNode::new_server( ConnectionParameters::default().idle_timeout(weeks(1000)), @@ -78,7 +78,7 @@ simulate!( ))) ] ), - Delay::new(weeks(10)..weeks(10)), + Delay::new(weeks(8)..weeks(8)), Drop::percentage(10), ], ); From 20c8e8c77e650de0f45492442bd55b499ff71755 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 1 Feb 2024 17:30:53 +0100 Subject: [PATCH 102/321] refactor(server): replace mio with tokio (#1581) * refactor(server): replace mio with tokio * Move ready logic into fn * Extend expect docs * Restrict tokio features * Only process datagram once * Remove superfluous pub * fmt * Fix busy loop * Fold `ServersRunner::init into ServersRunner::new * Fix imports --------- Signed-off-by: Max Inden --- neqo-server/Cargo.toml | 4 +- neqo-server/src/main.rs | 251 +++++++++++++++------------------------- 2 files changed, 95 insertions(+), 160 deletions(-) diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index b3f8aae462..1d6b5df86b 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -7,9 +7,8 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] +futures = "0.3" log = {version = "0.4.17", default-features = false} -mio = 
"0.6.23" -mio-extras = "2.0.6" neqo-common = { path="./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } @@ -18,6 +17,7 @@ neqo-transport = { path = "./../neqo-transport" } qlog = "0.11.0" regex = "1.9" structopt = "0.3" +tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } [features] deny-warnings = [] diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 590e0d55db..0000ea4f80 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -10,23 +10,27 @@ use std::{ cell::RefCell, cmp::min, - collections::{HashMap, HashSet}, + collections::HashMap, convert::TryFrom, fmt::{self, Display}, fs::OpenOptions, io, io::Read, - mem, net::{SocketAddr, ToSocketAddrs}, path::PathBuf, + pin::Pin, process::exit, rc::Rc, str::FromStr, time::{Duration, Instant}, }; -use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; -use mio_extras::timer::{Builder, Timeout, Timer}; +use futures::{ + future::{select, select_all, Either}, + FutureExt, +}; +use tokio::{net::UdpSocket, time::Sleep}; + use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -44,7 +48,6 @@ use structopt::StructOpt; use crate::old_https::Http09Server; -const TIMER_TOKEN: Token = Token(0xffff_ffff); const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10); mod old_https; @@ -316,8 +319,8 @@ impl QuicParameters { } } -fn emit_packet(socket: &mut UdpSocket, out_dgram: Datagram) { - let sent = match socket.send_to(&out_dgram, &out_dgram.destination()) { +async fn emit_packet(socket: &mut UdpSocket, out_dgram: Datagram) { + let sent = match socket.send_to(&out_dgram, &out_dgram.destination()).await { Err(ref err) => { if err.kind() != io::ErrorKind::WouldBlock || err.kind() == io::ErrorKind::Interrupted { eprintln!("UDP send error: {err:?}"); @@ -594,7 +597,7 @@ fn 
read_dgram( local_address: &SocketAddr, ) -> Result, io::Error> { let buf = &mut [0u8; 2048]; - let (sz, remote_addr) = match socket.recv_from(&mut buf[..]) { + let (sz, remote_addr) = match socket.try_recv_from(&mut buf[..]) { Err(ref err) if err.kind() == io::ErrorKind::WouldBlock || err.kind() == io::ErrorKind::Interrupted => @@ -628,82 +631,36 @@ fn read_dgram( struct ServersRunner { args: Args, - poll: Poll, - hosts: Vec, server: Box, - timeout: Option, - sockets: Vec, - active_sockets: HashSet, - timer: Timer, + timeout: Option>>, + sockets: Vec<(SocketAddr, UdpSocket)>, } impl ServersRunner { pub fn new(args: Args) -> Result { - let server = Self::create_server(&args); - let mut runner = Self { - args, - poll: Poll::new()?, - hosts: Vec::new(), - server, - timeout: None, - sockets: Vec::new(), - active_sockets: HashSet::new(), - timer: Builder::default() - .tick_duration(Duration::from_millis(1)) - .build::(), - }; - runner.init()?; - Ok(runner) - } - - /// Init Poll for all hosts. Create sockets, and a map of the - /// socketaddrs to instances of the HttpServer handling that addr. 
- fn init(&mut self) -> Result<(), io::Error> { - self.hosts = self.args.listen_addresses(); - if self.hosts.is_empty() { + let hosts = args.listen_addresses(); + if hosts.is_empty() { eprintln!("No valid hosts defined"); return Err(io::Error::new(io::ErrorKind::InvalidInput, "No hosts")); } + let sockets = hosts + .into_iter() + .map(|host| { + let socket = std::net::UdpSocket::bind(host)?; + let local_addr = socket.local_addr()?; + println!("Server waiting for connection on: {local_addr:?}"); + socket.set_nonblocking(true)?; + Ok((host, UdpSocket::from_std(socket)?)) + }) + .collect::>()?; + let server = Self::create_server(&args); - for (i, host) in self.hosts.iter().enumerate() { - let socket = match UdpSocket::bind(host) { - Err(err) => { - eprintln!("Unable to bind UDP socket: {err}"); - return Err(err); - } - Ok(s) => s, - }; - - let local_addr = match socket.local_addr() { - Err(err) => { - eprintln!("Socket local address not bound: {err}"); - return Err(err); - } - Ok(s) => s, - }; - - print!("Server waiting for connection on: {local_addr:?}"); - // On Windows, this is not supported. - #[cfg(not(target_os = "windows"))] - if !socket.only_v6().unwrap_or(true) { - print!(" as well as V4"); - }; - println!(); - - self.poll.register( - &socket, - Token(i), - Ready::readable() | Ready::writable(), - PollOpt::edge(), - )?; - - self.sockets.push(socket); - } - - self.poll - .register(&self.timer, TIMER_TOKEN, Ready::readable(), PollOpt::edge())?; - - Ok(()) + Ok(Self { + args, + server, + timeout: None, + sockets, + }) } fn create_server(args: &Args) -> Box { @@ -741,110 +698,88 @@ impl ServersRunner { /// Tries to find a socket, but then just falls back to sending from the first. 
fn find_socket(&mut self, addr: SocketAddr) -> &mut UdpSocket { - let (first, rest) = self.sockets.split_first_mut().unwrap(); + let ((_host, first_socket), rest) = self.sockets.split_first_mut().unwrap(); rest.iter_mut() - .find(|s| { - s.local_addr() + .map(|(_host, socket)| socket) + .find(|socket| { + socket + .local_addr() .ok() .map_or(false, |socket_addr| socket_addr == addr) }) - .unwrap_or(first) + .unwrap_or(first_socket) } - fn process(&mut self, inx: usize, dgram: Option<&Datagram>) -> bool { - match self.server.process(dgram, self.args.now()) { - Output::Datagram(dgram) => { - let socket = self.find_socket(dgram.source()); - emit_packet(socket, dgram); - true - } - Output::Callback(new_timeout) => { - if let Some(to) = &self.timeout { - self.timer.cancel_timeout(to); + async fn process(&mut self, mut dgram: Option<&Datagram>) { + loop { + match self.server.process(dgram.take(), self.args.now()) { + Output::Datagram(dgram) => { + let socket = self.find_socket(dgram.source()); + emit_packet(socket, dgram).await; + } + Output::Callback(new_timeout) => { + qinfo!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; + } + Output::None => { + qdebug!("Output::None"); + break; } - - qinfo!("Setting timeout of {:?} for socket {}", new_timeout, inx); - self.timeout = Some(self.timer.set_timeout(new_timeout, inx)); - false - } - Output::None => { - qdebug!("Output::None"); - false } } } - fn process_datagrams_and_events( - &mut self, - inx: usize, - read_socket: bool, - ) -> Result<(), io::Error> { - if self.sockets.get_mut(inx).is_some() { - if read_socket { - loop { - let socket = self.sockets.get_mut(inx).unwrap(); - let dgram = read_dgram(socket, &self.hosts[inx])?; + // Wait for any of the sockets to be readable or the timeout to fire. 
+ async fn ready(&mut self) -> Result { + let sockets_ready = select_all( + self.sockets + .iter() + .map(|(_host, socket)| Box::pin(socket.readable())), + ) + .map(|(res, inx, _)| match res { + Ok(()) => Ok(Ready::Socket(inx)), + Err(e) => Err(e), + }); + let timeout_ready = self + .timeout + .as_mut() + .map(Either::Left) + .unwrap_or(Either::Right(futures::future::pending())) + .map(|()| Ok(Ready::Timeout)); + select(sockets_ready, timeout_ready).await.factor_first().0 + } + + async fn run(&mut self) -> Result<(), io::Error> { + loop { + match self.ready().await? { + Ready::Socket(inx) => loop { + let (host, socket) = self.sockets.get_mut(inx).unwrap(); + let dgram = read_dgram(socket, host)?; if dgram.is_none() { break; } - _ = self.process(inx, dgram.as_ref()); + self.process(dgram.as_ref()).await; + }, + Ready::Timeout => { + self.timeout = None; + self.process(None).await; } - } else { - _ = self.process(inx, None); } - self.server.process_events(&self.args, self.args.now()); - if self.process(inx, None) { - self.active_sockets.insert(inx); - } - } - Ok(()) - } - - fn process_active_conns(&mut self) -> Result<(), io::Error> { - let curr_active = mem::take(&mut self.active_sockets); - for inx in curr_active { - self.process_datagrams_and_events(inx, false)?; - } - Ok(()) - } - fn process_timeout(&mut self) -> Result<(), io::Error> { - while let Some(inx) = self.timer.poll() { - qinfo!("Timer expired for {:?}", inx); - self.process_datagrams_and_events(inx, false)?; + self.server.process_events(&self.args, self.args.now()); + self.process(None).await; } - Ok(()) } +} - pub fn run(&mut self) -> Result<(), io::Error> { - let mut events = Events::with_capacity(1024); - loop { - // If there are active servers do not block in poll. 
- self.poll.poll( - &mut events, - if self.active_sockets.is_empty() { - None - } else { - Some(Duration::from_millis(0)) - }, - )?; - - for event in &events { - if event.token() == TIMER_TOKEN { - self.process_timeout()?; - } else { - if !event.readiness().is_readable() { - continue; - } - self.process_datagrams_and_events(event.token().0, true)?; - } - } - self.process_active_conns()?; - } - } +enum Ready { + Socket(usize), + Timeout, } -fn main() -> Result<(), io::Error> { +#[tokio::main] +async fn main() -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; let mut args = Args::from_args(); @@ -896,5 +831,5 @@ fn main() -> Result<(), io::Error> { } let mut servers_runner = ServersRunner::new(args)?; - servers_runner.run() + servers_runner.run().await } From 9493b22861bc87e169b6fca724b1216145c24146 Mon Sep 17 00:00:00 2001 From: Gabriel Grubba <70247653+Grubba27@users.noreply.github.com> Date: Fri, 2 Feb 2024 07:39:40 +0100 Subject: [PATCH 103/321] chore: Removed InternalErrors added in #1085 (#1323) * removed internal errors * adjusted formating * Fix issues * Remove numeric error codes from `InternalError`. 
* cargo fmt & clippy --------- Signed-off-by: Lars Eggert Co-authored-by: Lars Eggert --- neqo-qpack/src/encoder.rs | 4 +- neqo-qpack/src/lib.rs | 2 +- neqo-server/src/main.rs | 3 +- neqo-transport/src/addr_valid.rs | 6 +- neqo-transport/src/cid.rs | 4 -- neqo-transport/src/connection/mod.rs | 9 +-- neqo-transport/src/connection/state.rs | 3 - neqo-transport/src/crypto.rs | 13 ++--- neqo-transport/src/lib.rs | 2 +- neqo-transport/src/packet/mod.rs | 2 +- neqo-transport/src/packet/retry.rs | 2 +- neqo-transport/src/path.rs | 25 +++----- neqo-transport/src/tracking.rs | 79 +++++++++++--------------- 13 files changed, 56 insertions(+), 98 deletions(-) diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index f53cf51d85..c7921ee2c0 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -312,7 +312,7 @@ impl QPackEncoder { false, "can_evict_to should have checked and make sure this operation is possible" ); - return Err(Error::InternalError(1)); + return Err(Error::InternalError); } self.max_entries = cap / 32; self.next_capacity = None; @@ -530,7 +530,7 @@ fn map_stream_send_atomic_error(err: &TransportError) -> Error { } _ => { debug_assert!(false, "Unexpected error"); - Error::InternalError(2) + Error::InternalError } } } diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index 3f9c7b81f7..1581712017 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -45,7 +45,7 @@ pub enum Error { EncoderStream, DecoderStream, ClosedCriticalStream, - InternalError(u16), + InternalError, // These are internal errors, they will be transformed into one of the above. 
NeedMoreData, /* Return when an input stream does not have more data that a decoder diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 0000ea4f80..9b924504cc 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -29,8 +29,6 @@ use futures::{ future::{select, select_all, Either}, FutureExt, }; -use tokio::{net::UdpSocket, time::Sleep}; - use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -45,6 +43,7 @@ use neqo_transport::{ Version, }; use structopt::StructOpt; +use tokio::{net::UdpSocket, time::Sleep}; use crate::old_https::Http09Server; diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index 9105c89a54..b5ed2d07d1 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -20,8 +20,7 @@ use neqo_crypto::{ use smallvec::SmallVec; use crate::{ - cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Error, - Res, + cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Res, }; /// A prefix we add to Retry tokens to distinguish them from NEW_TOKEN tokens. 
@@ -434,9 +433,6 @@ impl NewTokenSender { builder.encode_varint(crate::frame::FRAME_TYPE_NEW_TOKEN); builder.encode_vvec(&t.token); - if builder.len() > builder.limit() { - return Err(Error::InternalError(7)); - } tokens.push(RecoveryToken::NewToken(t.seqno)); stats.new_token += 1; diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 7096ae1874..be202daf25 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -532,10 +532,6 @@ impl ConnectionIdManager { builder.encode_varint(0u64); builder.encode_vec(1, &entry.cid); builder.encode(&entry.srt); - if builder.len() > builder.limit() { - return Err(Error::InternalError(8)); - } - stats.new_connection_id += 1; Ok(true) } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 8aaf987db9..e42eeabde6 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1935,9 +1935,6 @@ impl Connection { .as_ref() .unwrap_or(&close) .write_frame(&mut builder); - if builder.len() > builder.limit() { - return Err(Error::InternalError(10)); - } encoder = builder.build(tx)?; } @@ -1982,7 +1979,7 @@ impl Connection { if builder.is_full() { return Ok(()); } - self.paths.write_frames(builder, tokens, frame_stats)?; + self.paths.write_frames(builder, tokens, frame_stats); if builder.is_full() { return Ok(()); } @@ -2107,7 +2104,7 @@ impl Connection { builder, &mut tokens, stats, - )?; + ); } let ack_end = builder.len(); @@ -2122,7 +2119,7 @@ impl Connection { &mut self.stats.borrow_mut().frame_tx, full_mtu, now, - )? 
{ + ) { builder.enable_padding(true); } } diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index f739c147ab..9afb42174f 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -218,9 +218,6 @@ impl StateSignaling { if matches!(self, Self::HandshakeDone) && builder.remaining() >= 1 { *self = Self::Idle; builder.encode_varint(FRAME_TYPE_HANDSHAKE_DONE); - if builder.len() > builder.limit() { - return Err(Error::InternalError(14)); - } Ok(Some(RecoveryToken::HandshakeDone)) } else { Ok(None) diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 4e152db0f2..f6cc7c0e2f 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -225,7 +225,7 @@ impl Crypto { self.tls.read_secret(TLS_EPOCH_ZERO_RTT), ), }; - let secret = secret.ok_or(Error::InternalError(1))?; + let secret = secret.ok_or(Error::InternalError)?; self.states .set_0rtt_keys(version, dir, &secret, cipher.unwrap()); Ok(true) @@ -259,12 +259,12 @@ impl Crypto { let read_secret = self .tls .read_secret(TLS_EPOCH_HANDSHAKE) - .ok_or(Error::InternalError(2))?; + .ok_or(Error::InternalError)?; let cipher = match self.tls.info() { None => self.tls.preinfo()?.cipher_suite(), Some(info) => Some(info.cipher_suite()), } - .ok_or(Error::InternalError(3))?; + .ok_or(Error::InternalError)?; self.states .set_handshake_keys(self.version, &write_secret, &read_secret, cipher); qdebug!([self], "Handshake keys installed"); @@ -288,7 +288,7 @@ impl Crypto { let read_secret = self .tls .read_secret(TLS_EPOCH_APPLICATION_DATA) - .ok_or(Error::InternalError(4))?; + .ok_or(Error::InternalError)?; self.states .set_application_read_key(version, read_secret, expire_0rtt)?; qdebug!([self], "application read keys installed"); @@ -662,7 +662,7 @@ impl CryptoDxState { // The numbers in `Self::limit` assume a maximum packet size of 2^11. 
if body.len() > 2048 { debug_assert!(false); - return Err(Error::InternalError(12)); + return Err(Error::InternalError); } self.invoked()?; @@ -1550,9 +1550,6 @@ impl CryptoStreams { builder.encode_varint(crate::frame::FRAME_TYPE_CRYPTO); builder.encode_varint(offset); builder.encode_vvec(&data[..length]); - if builder.len() > builder.limit() { - return Err(Error::InternalError(15)); - } cs.tx.mark_as_sent(offset, length); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index d10ea7e9e6..ecea2453f1 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -68,7 +68,7 @@ pub enum Error { NoError, // Each time tihe error is return a different parameter is supply. // This will be use to distinguish each occurance of this error. - InternalError(u16), + InternalError, ConnectionRefused, FlowControlError, StreamLimitError, diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 0968bb9ae2..ccfd212d5f 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -356,7 +356,7 @@ impl PacketBuilder { if self.len() > self.limit { qwarn!("Packet contents are more than the limit"); debug_assert!(false); - return Err(Error::InternalError(5)); + return Err(Error::InternalError); } self.pad_for_crypto(crypto); diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index a1333a0150..004e9de6e7 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -45,7 +45,7 @@ where .try_with(|aead| f(&aead.borrow())) .map_err(|e| { qerror!("Unable to access Retry AEAD: {:?}", e); - Error::InternalError(6) + Error::InternalError })? 
} diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 06cc8a6a61..d6920c8d94 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -31,7 +31,7 @@ use crate::{ sender::PacketSender, stats::FrameStats, tracking::{PacketNumberSpace, SentPacket}, - Error, Res, Stats, + Stats, }; /// This is the MTU that we assume when using IPv6. @@ -415,7 +415,7 @@ impl Paths { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { while let Some(seqno) = self.to_retire.pop() { if builder.remaining() < 1 + Encoder::varint_len(seqno) { self.to_retire.push(seqno); @@ -423,9 +423,6 @@ impl Paths { } builder.encode_varint(FRAME_TYPE_RETIRE_CONNECTION_ID); builder.encode_varint(seqno); - if builder.len() > builder.limit() { - return Err(Error::InternalError(20)); - } tokens.push(RecoveryToken::RetireConnectionId(seqno)); stats.retire_connection_id += 1; } @@ -434,8 +431,6 @@ impl Paths { self.primary() .borrow_mut() .write_cc_frames(builder, tokens, stats); - - Ok(()) } pub fn lost_retire_cid(&mut self, lost: u64) { @@ -774,9 +769,9 @@ impl Path { stats: &mut FrameStats, mtu: bool, // Whether the packet we're writing into will be a full MTU. now: Instant, - ) -> Res { + ) -> bool { if builder.remaining() < 9 { - return Ok(false); + return false; } // Send PATH_RESPONSE. @@ -784,9 +779,6 @@ impl Path { qtrace!([self], "Responding to path challenge {}", hex(challenge)); builder.encode_varint(FRAME_TYPE_PATH_RESPONSE); builder.encode(&challenge[..]); - if builder.len() > builder.limit() { - return Err(Error::InternalError(21)); - } // These frames are not retransmitted in the usual fashion. // There is no token, therefore we need to count `all` specially. 
@@ -794,7 +786,7 @@ impl Path { stats.all += 1; if builder.remaining() < 9 { - return Ok(true); + return true; } true } else { @@ -807,9 +799,6 @@ impl Path { let data = <[u8; 8]>::try_from(&random(8)[..]).unwrap(); builder.encode_varint(FRAME_TYPE_PATH_CHALLENGE); builder.encode(&data); - if builder.len() > builder.limit() { - return Err(Error::InternalError(22)); - } // As above, no recovery token. stats.path_challenge += 1; @@ -821,9 +810,9 @@ impl Path { mtu, sent: now, }; - Ok(true) + true } else { - Ok(resp_sent) + resp_sent } } diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 62e7398ede..64d00257d3 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -24,7 +24,6 @@ use crate::{ packet::{PacketBuilder, PacketNumber, PacketType}, recovery::RecoveryToken, stats::FrameStats, - Error, Res, }; // TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574 @@ -724,14 +723,10 @@ impl AckTracker { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { if let Some(space) = self.get_mut(pn_space) { space.write_frame(now, rtt, builder, tokens, stats); - if builder.len() > builder.limit() { - return Err(Error::InternalError(24)); - } } - Ok(()) } } @@ -1060,16 +1055,14 @@ mod tests { let mut tokens = Vec::new(); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut tokens, - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut tokens, + &mut stats, + ); assert_eq!(stats.ack, 1); // Mark another packet as received so we have cause to send another ACK in that space. 
@@ -1088,16 +1081,14 @@ mod tests { assert!(tracker .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) .is_none()); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut tokens, - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut tokens, + &mut stats, + ); assert_eq!(stats.ack, 1); if let RecoveryToken::Ack(tok) = &tokens[0] { tracker.acked(tok); // Should be a noop. @@ -1121,16 +1112,14 @@ mod tests { builder.set_limit(10); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut Vec::new(), - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut Vec::new(), + &mut stats, + ); assert_eq!(stats.ack, 0); assert_eq!(builder.len(), 1); // Only the short packet header has been added. } @@ -1154,16 +1143,14 @@ mod tests { builder.set_limit(32); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut Vec::new(), - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut Vec::new(), + &mut stats, + ); assert_eq!(stats.ack, 1); let mut dec = builder.as_decoder(); From 43e3a3f1073e4b1c8699be83011bd63f083af1cf Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 2 Feb 2024 19:09:56 +1100 Subject: [PATCH 104/321] ci: Switch to rust nightly for CI canary (#1613) * Switch to rust nightly for CI And fix the pre-commit hook to check using nightly. This is because we now rely on rust nightly for some rustfmt configuration options. This will make our canary builds a little more forward-looking which comes with a little less stability, but I think that's better than adding a specific build just for formatting. 
* fix: Add a configuration for "Semantic PR" Since this PR hits the "missing config" issue that happens when one isn't super-diligent with semantic commits )which we shouldn't be). * fix: Add quotes * chore: cargo fmt It seem like the CI check works now :-) * fix: Try and avoid `Error: file `false` does not exist` * Skip the quotes * See if Windows likes a non-existent path better * Try an empty temp file * Another go * While I'm here, make the various builds less verbose --------- Co-authored-by: Lars Eggert --- .github/semantic.yml | 3 +++ .github/workflows/check.yml | 16 ++++++++++------ hooks/pre-commit | 12 ++++++++++-- 3 files changed, 23 insertions(+), 8 deletions(-) create mode 100644 .github/semantic.yml diff --git a/.github/semantic.yml b/.github/semantic.yml new file mode 100644 index 0000000000..be3439f6b9 --- /dev/null +++ b/.github/semantic.yml @@ -0,0 +1,3 @@ +enabled: true +titleOnly: true +targetUrl: "https://www.conventionalcommits.org/en/v1.0.0/#summary" diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index ec541066bb..0ac81a64d5 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-13, windows-latest] - rust-toolchain: [1.70.0, stable, beta] + rust-toolchain: [1.70.0, stable, nightly] runs-on: ${{ matrix.os }} defaults: run: @@ -109,7 +109,7 @@ jobs: - name: Build run: | - cargo +${{ matrix.rust-toolchain }} build -v --all-targets + cargo +${{ matrix.rust-toolchain }} build --all-targets echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_PATH" @@ -134,16 +134,20 @@ jobs: RUST_LOG: warn - name: Check formatting - run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check + run: | + if [ "${{ matrix.rust-toolchain }}" != "nightly" ]; then + export 
CONFIG_PATH="--config-path=$(mktemp)" + fi + cargo +${{ matrix.rust-toolchain }} fmt --all -- --check $CONFIG_PATH if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy -v --tests -- -D warnings + run: cargo +${{ matrix.rust-toolchain }} clippy --tests -- -D warnings if: success() || failure() - continue-on-error: ${{ matrix.rust-toolchain == 'beta' }} + continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} - name: Check rustdoc links - run: cargo +${{ matrix.rust-toolchain }} doc --verbose --workspace --no-deps --document-private-items + run: cargo +${{ matrix.rust-toolchain }} doc --workspace --no-deps --document-private-items env: RUSTDOCFLAGS: "--deny rustdoc::broken_intra_doc_links --deny warnings" if: success() || failure() diff --git a/hooks/pre-commit b/hooks/pre-commit index 2a6022b3d4..377a70c89d 100755 --- a/hooks/pre-commit +++ b/hooks/pre-commit @@ -32,12 +32,20 @@ if [[ ./neqo-crypto/bindings/bindings.toml -nt ./neqo-crypto/src/lib.rs ]]; then exit 1 fi +toolchain=nightly +fmtconfig="$root/.rustfmt.toml" +if cargo "+$toolchain" version >/dev/null; then + echo "warning: A rust $toolchain toolchain is recommended to check formatting." + toolchain=stable + fmtconfig=/dev/null +fi + # Check formatting. trap 'git stash pop -q' EXIT git stash push -k -u -q -m "pre-commit stash" -if ! errors=($(cargo fmt -- --check -l)); then +if ! errors=($(cargo "+$toolchain" fmt -- --check -l --config-path="$fmtconfig")); then echo "Formatting errors found." 
- echo "Run \`cargo fmt\` to fix the following files:" + echo "Run \`cargo fmt +$toolchain\` to fix the following files:" for err in "${errors[@]}"; do echo " $err" done From 72e670c9adbe85fb6c9582eb7be44bf090a48573 Mon Sep 17 00:00:00 2001 From: jesup Date: Fri, 2 Feb 2024 03:38:09 -0500 Subject: [PATCH 105/321] test: Benchmarks for RxStreamOrderer (#1609) * Benchmarks for RxStreamOrderer * ci: Run clippy on benches * Address various code review comments --------- Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 2 +- neqo-transport/Cargo.toml | 8 +++++++- neqo-transport/benches/rx_stream_orderer.rs | 20 ++++++++++++++++++++ neqo-transport/src/lib.rs | 5 ++++- 4 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 neqo-transport/benches/rx_stream_orderer.rs diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 0ac81a64d5..cf87472b87 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -142,7 +142,7 @@ jobs: if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy --tests -- -D warnings + run: cargo +${{ matrix.rust-toolchain }} clippy --tests --benches -- -D warnings if: success() || failure() continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index ae33822018..e119f074c2 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -9,13 +9,14 @@ license = "MIT OR Apache-2.0" [dependencies] indexmap = "1.9.3" lazy_static = "1.4" -log = {version = "0.4.17", default-features = false} +log = { version = "0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } qlog = "0.11.0" smallvec = "1.11.1" [dev-dependencies] +criterion = "0.5.1" enum-map = "2.7" test-fixture = { path = "../test-fixture" } @@ -23,3 +24,8 @@ test-fixture = { path = "../test-fixture" } bench = [] deny-warnings = [] fuzzing = 
["neqo-crypto/fuzzing"] + +[[bench]] +name = "rx_stream_orderer" +harness = false +required-features = ["bench"] diff --git a/neqo-transport/benches/rx_stream_orderer.rs b/neqo-transport/benches/rx_stream_orderer.rs new file mode 100644 index 0000000000..03b401ba06 --- /dev/null +++ b/neqo-transport/benches/rx_stream_orderer.rs @@ -0,0 +1,20 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use neqo_transport::recv_stream::RxStreamOrderer; + +fn rx_stream_orderer() { + let mut rx = RxStreamOrderer::new(); + let data: &[u8] = &[0; 1337]; + + for i in 0..100000 { + rx.inbound_frame(i * 1337, data); + } +} + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("RxStreamOrderer::inbound_frame()", |b| { + b.iter(rx_stream_orderer) + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index ecea2453f1..de6898f3f8 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -26,6 +26,9 @@ mod path; mod qlog; mod quic_datagrams; mod recovery; +#[cfg(feature = "bench")] +pub mod recv_stream; +#[cfg(not(feature = "bench"))] mod recv_stream; mod rtt; mod send_stream; @@ -181,7 +184,7 @@ impl From for Error { } impl ::std::error::Error for Error { - fn source(&self) -> Option<&(dyn ::std::error::Error + 'static)> { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Self::CryptoError(e) => Some(e), _ => None, From f1792ed75ef564ed419acea67b7139ad952f0a5a Mon Sep 17 00:00:00 2001 From: jesup Date: Fri, 2 Feb 2024 03:57:01 -0500 Subject: [PATCH 106/321] perf: Optimize packet reception for the common case of adding to the end (#1587) * Optimize packet reception for the common case of adding to the end * Review changes * fix for exact overlap --------- Co-authored-by: Lars Eggert --- neqo-transport/src/recv_stream.rs | 68 +++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 22 deletions(-) diff --git 
a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index 0b2863c425..06ca59685d 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -200,26 +200,49 @@ impl RxStreamOrderer { false }; - // Now handle possible overlap with next entries - let mut to_remove = SmallVec::<[_; 8]>::new(); let mut to_add = new_data; - - for (&next_start, next_data) in self.data_ranges.range_mut(new_start..) { - let next_end = next_start + u64::try_from(next_data.len()).unwrap(); - let overlap = new_end.saturating_sub(next_start); - if overlap == 0 { - break; - } else if next_end >= new_end { - qtrace!( - "New frame {}-{} overlaps with next frame by {}, truncating", - new_start, - new_end, - overlap - ); - let truncate_to = new_data.len() - usize::try_from(overlap).unwrap(); - to_add = &new_data[..truncate_to]; - break; - } else { + if self + .data_ranges + .last_entry() + .map_or(false, |e| *e.key() >= new_start) + { + // Is this at the end (common case)? If so, nothing to do in this block + // Common case: + // PPPPPP -> PPPPPP + // NNNNNNN NNNNNNN + // or + // PPPPPP -> PPPPPP + // NNNNNNN NNNNNNN + // + // Not the common case, handle possible overlap with next entries + // PPPPPP AAA -> PPPPPP + // NNNNNNN NNNNNNN + // or + // PPPPPP AAAA -> PPPPPP AAAA + // NNNNNNN NNNNN + // or (this is where to_remove is used) + // PPPPPP AA -> PPPPPP + // NNNNNNN NNNNNNN + + let mut to_remove = SmallVec::<[_; 8]>::new(); + + for (&next_start, next_data) in self.data_ranges.range_mut(new_start..) 
{ + let next_end = next_start + u64::try_from(next_data.len()).unwrap(); + let overlap = new_end.saturating_sub(next_start); + if overlap == 0 { + // Fills in the hole, exactly (probably common) + break; + } else if next_end >= new_end { + qtrace!( + "New frame {}-{} overlaps with next frame by {}, truncating", + new_start, + new_end, + overlap + ); + let truncate_to = new_data.len() - usize::try_from(overlap).unwrap(); + to_add = &new_data[..truncate_to]; + break; + } qtrace!( "New frame {}-{} spans entire next frame {}-{}, replacing", new_start, @@ -228,11 +251,12 @@ impl RxStreamOrderer { next_end ); to_remove.push(next_start); + // Continue, since we may have more overlaps } - } - for start in to_remove { - self.data_ranges.remove(&start); + for start in to_remove { + self.data_ranges.remove(&start); + } } if !to_add.is_empty() { From d62e5f45cff954bc0ced546604b3ccb2d946bf28 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 2 Feb 2024 11:07:33 +0200 Subject: [PATCH 107/321] ci: Use codecov-action@v4 to avoid GitHub warning about node@16 (#1614) --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index cf87472b87..7951477355 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -153,7 +153,7 @@ jobs: if: success() || failure() - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: file: lcov.info fail_ci_if_error: false From 141b5ea0ba1f7c9067afde2dc747b8f8688039ef Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 2 Feb 2024 12:12:25 +0100 Subject: [PATCH 108/321] fix(transport/dump): check module not max log level and move to conn (#1601) This commit contains two changes: The first change. 
To avoid expensive packet decoding and string encoding, the `neqo-transport` `dump_packet` function first checks the current log level: ``` rust if ::log::Level::Debug > ::log::max_level() { return; } ``` This is problematic when e.g. setting `RUST_LOG=info,neqo_crypto=debug`. While `::log::max_level` will return `::log::Level::Debug`, given the `neqo_crypto=debug`, the log level of the `neqo_transport` crate and `dump` module will be `info`. Thus the packet will be de-/encoded, but never logged, given that the `qdebug!` call calls `log::debug!`, which will check the module log level and not use the maximum log level. Instead, with this commit, `dump_packet` checks the module log level. ``` rust if !log::log_enabled!(log::Level::Debug) { return; } ``` The second change. `dump_packet` is in `neqo_transport::dump`, but only called in `neqo_transport::connection`. `RUST_LOG=info,neqo_transport::connection=debug` will thus not dump the packet as `dump_packet` checks its own log level, not the log level of its call side. To remove this small footgun, move `dump_packet` to `neqo_transport::connection::dump` and thus enable logging on e.g. `RUST_LOG=info,neqo_transport::connection=debug`. Note that the `dump` module was never exposed beyond `neqo_transport` and thus this is not a breaking change to other crates. An alternative, arguably more complex, approach would be to write `dump_packet` as a proc macro. 
Co-authored-by: Lars Eggert --- neqo-transport/src/{ => connection}/dump.rs | 2 +- neqo-transport/src/connection/mod.rs | 5 ++--- neqo-transport/src/lib.rs | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) rename neqo-transport/src/{ => connection}/dump.rs (96%) diff --git a/neqo-transport/src/dump.rs b/neqo-transport/src/connection/dump.rs similarity index 96% rename from neqo-transport/src/dump.rs rename to neqo-transport/src/connection/dump.rs index 5d8a72f300..77d51c605c 100644 --- a/neqo-transport/src/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -27,7 +27,7 @@ pub fn dump_packet( pn: PacketNumber, payload: &[u8], ) { - if ::log::Level::Debug > ::log::max_level() { + if !log::log_enabled!(log::Level::Debug) { return; } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index e42eeabde6..2de388418a 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -36,7 +36,6 @@ use crate::{ ConnectionIdRef, ConnectionIdStore, LOCAL_ACTIVE_CID_LIMIT, }, crypto::{Crypto, CryptoDxState, CryptoSpace}, - dump::*, events::{ConnectionEvent, ConnectionEvents, OutgoingDatagramOutcome}, frame::{ CloseError, Frame, FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, @@ -60,14 +59,14 @@ use crate::{ version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; - +mod dump; mod idle; pub mod params; mod saved; mod state; #[cfg(test)] pub mod test_internal; - +use dump::dump_packet; use idle::IdleTimeout; pub use params::ConnectionParameters; use params::PreferredAddressConfig; diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index de6898f3f8..ecf7ee2f73 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -16,7 +16,6 @@ mod cc; mod cid; mod connection; mod crypto; -mod dump; mod events; mod fc; mod frame; From 0a68ea25a21284e58ac235bb95df7fca250e5e66 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Fri, 2 Feb 2024 15:34:21 +0100 
Subject: [PATCH 109/321] Use temp rev for qlog (#1617) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index fba2110d6d..3e5be383f2 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -15,7 +15,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } structopt = "0.3" url = "~2.5.0" diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index b7136aaa60..25b74609d3 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -12,7 +12,7 @@ enum-map = "2.7" env_logger = { version = "0.10", default-features = false } lazy_static = "1.4" log = { version = "0.4", default-features = false } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } time = {version = "0.3.23", features = ["formatting"]} [dev-dependencies] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 9956cef05c..064d884279 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -14,7 +14,7 @@ neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } sfv = "0.9.3" smallvec = "1.11.1" url = "2.5" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 31a1bf28e6..8ac57b1eea 100644 --- a/neqo-qpack/Cargo.toml +++ 
b/neqo-qpack/Cargo.toml @@ -12,7 +12,7 @@ log = {version = "~0.4.17", default-features = false} neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } static_assertions = "~1.1.0" [dev-dependencies] diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 1d6b5df86b..0129fd04e4 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } regex = "1.9" structopt = "0.3" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index e119f074c2..98df1017cf 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -12,7 +12,7 @@ lazy_static = "1.4" log = { version = "0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } smallvec = "1.11.1" [dev-dependencies] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 2c163fbb07..751046471e 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } [features] 
deny-warnings = [] From 9489511f7c82786f55bc9c713cddbff825507ed7 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Fri, 2 Feb 2024 15:52:06 +0100 Subject: [PATCH 110/321] v0.7.0 (#1618) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 3e5be383f2..5419e8a5f8 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.9" +version = "0.7.0" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 25b74609d3..de754531be 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.9" +version = "0.7.0" authors = ["Bobby Holley "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index c5909ac5e5..492e501e58 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.9" +version = "0.7.0" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 064d884279..f05cae5f03 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.9" +version = "0.7.0" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 8b298167f2..ce5bd9af8b 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.9" +version = "0.7.0" authors 
= ["EKR "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 8ac57b1eea..96531550bd 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.9" +version = "0.7.0" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 0129fd04e4..09a7d4aa3c 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.9" +version = "0.7.0" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 98df1017cf..b1d86fc789 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.9" +version = "0.7.0" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.70.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 751046471e..f0feace31d 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.9" +version = "0.7.0" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" From 0b4b9382a9da349e761bf70017f39508c2ae9a04 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Mon, 5 Feb 2024 08:28:12 +0100 Subject: [PATCH 111/321] Add tc commands for upload testing on Linux. 
(#1596) * Add tc commands * fix nits * address comments * use eval --------- Co-authored-by: Lars Eggert --- test/upload_test.sh | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/test/upload_test.sh b/test/upload_test.sh index d75bcda1bd..40c3aaaeb9 100755 --- a/test/upload_test.sh +++ b/test/upload_test.sh @@ -5,9 +5,14 @@ set -e server_address=127.0.0.1 server_port=4433 upload_size=8388608 -client="cargo run --release --bin neqo-client -- http://$server_address:$server_port/ --test upload --upload-size $upload_size" +cc=cubic +client="cargo run --release --bin neqo-client -- http://$server_address:$server_port/ --test upload --upload-size $upload_size --cc $cc" server="cargo run --release --bin neqo-server -- --db ../test-fixture/db $server_address:$server_port" server_pid=0 +pacing=true +if [ "$pacing" = true ]; then + client="$client --pacing" +fi # Define two indexed arrays to store network conditions network_conditions=("cable" "3g_slow" "DSL" "LTE" "fast wifi") @@ -17,10 +22,6 @@ plrs=("0.0001" "0.0005" "0.001" "0.002" "0.005") runs=1 -echo -n "Enter root password: " -read -s root_password -echo - setup_network_conditions() { bw="$1" delay_ms="$2" @@ -38,7 +39,8 @@ setup_network_conditions() { # Convert BDP to kilobytes bdp_kb=$(echo "scale=2; $bdp_bits / 8 / 1024" | bc) - bdp_kb_rounded_up=$(printf "%.0f" "$bdp_kb") + bdp_kb_rounded_up=$(LC_NUMERIC=C printf "%.0f" "$bdp_kb") + # if we are on MacOS X, configure the firewall to add delay and queue traffic if [ -x /usr/sbin/dnctl ]; then @@ -50,13 +52,20 @@ setup_network_conditions() { "sudo pfctl -e || true" ) else - # TODO implement commands for linux - return 0 + bw_in_bits_per_sec="${bw%/s}" + bdp_bytes=$(echo "scale=2; $bdp_bits / 8" | bc) + bdp_bytes_rounded_up=$(LC_NUMERIC=C printf "%.0f" "$bdp_bytes") + plr_p=$(echo "scale=4; $plr * 100" | bc) + plr_p=$(LC_NUMERIC=C printf "%.2f" "$plr_p") + set_condition_commands=( + "sudo tc qdisc add dev lo 
root handle 1: tbf rate $bw_in_bits_per_sec burst $bdp_bytes_rounded_up limit 30000" + "sudo tc qdisc add dev lo parent 1:1 handle 10: netem delay ${delay_ms}ms loss ${plr_p}%" + ) fi for command in "${set_condition_commands[@]}"; do - echo $command - echo $root_password | sudo -S bash -c "$command" + echo "$command" + eval "$command" done } @@ -67,12 +76,13 @@ stop_network_conditions() { "sudo dnctl -q flush" ) else - # TODO implement commands for linux - return 0 + stop_condition_commands=( + "sudo tc qdisc del dev lo root" + ) fi - for command in "${set_condition_commands[@]}"; do - echo $root_password | sudo -S bash -c "$command" + for command in "${stop_condition_commands[@]}"; do + eval "$command" done } @@ -80,7 +90,7 @@ stop_server() { echo "stop server" server_pid=$(pgrep -f "neqo-server") # Kill the server - kill $server_pid + kill "$server_pid" } start_test() { From cb2d6230c250a9716198458c254331965b640efe Mon Sep 17 00:00:00 2001 From: jesup Date: Mon, 5 Feb 2024 02:44:13 -0500 Subject: [PATCH 112/321] perf: Add a cache for first_unmarked_range() (#1582) * Cache results of RangeTracker::first_unmarked_range() Coalesce additions to the tail of a RangeTracker tree to avoid unnecessary tree growth * clean up * more cleanup * review fixes * allow overlap * final_cleanup * Review responses --------- Co-authored-by: Lars Eggert --- neqo-transport/src/send_stream.rs | 61 ++++++++++++++++++++++++++----- 1 file changed, 51 insertions(+), 10 deletions(-) diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 5feb785ac6..0464b3e490 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -145,8 +145,10 @@ enum RangeState { /// range implies needing-to-be-sent, either initially or as a retransmission. #[derive(Debug, Default, PartialEq)] struct RangeTracker { - // offset, (len, RangeState). Use u64 for len because ranges can exceed 32bits. + /// offset, (len, RangeState). 
Use u64 for len because ranges can exceed 32bits. used: BTreeMap, + /// this is a cache for first_unmarked_range(), which we check a log + first_unmarked: Option<(u64, Option)>, } impl RangeTracker { @@ -166,19 +168,46 @@ impl RangeTracker { /// Find the first unmarked range. If all are contiguous, this will return /// (highest_offset(), None). - fn first_unmarked_range(&self) -> (u64, Option) { + fn first_unmarked_range(&mut self) -> (u64, Option) { let mut prev_end = 0; + if let Some(first_unmarked) = self.first_unmarked { + return first_unmarked; + } + for (cur_off, (cur_len, _)) in &self.used { if prev_end == *cur_off { prev_end = cur_off + cur_len; } else { - return (prev_end, Some(cur_off - prev_end)); + let res = (prev_end, Some(cur_off - prev_end)); + self.first_unmarked = Some(res); + return res; } } + self.first_unmarked = Some((prev_end, None)); (prev_end, None) } + /// Check for the common case of adding to the end. If we can, do it and + /// return true. + fn extend_final_range(&mut self, new_off: u64, new_len: u64, new_state: RangeState) -> bool { + if let Some(mut last) = self.used.last_entry() { + let prev_off = *last.key(); + let (prev_len, prev_state) = last.get_mut(); + // allow for overlap between new chunk and the last entry + if new_off >= prev_off + && new_off <= prev_off + *prev_len + && new_off + new_len > prev_off + *prev_len + && new_state == *prev_state + { + // simple case, extend the last entry + *prev_len = new_off + new_len - prev_off; + return true; + } + } + false + } + /// Turn one range into a list of subranges that align with existing /// ranges. /// Check impermissible overlaps in subregions: Sent cannot overwrite Acked. 
@@ -207,6 +236,8 @@ impl RangeTracker { let mut tmp_len = new_len; let mut v = Vec::new(); + // we already handled the case of a simple extension of the last item + // cut previous overlapping range if needed let prev = self.used.range_mut(..tmp_off).next_back(); if let Some((prev_off, (prev_len, prev_state))) = prev { @@ -300,6 +331,10 @@ impl RangeTracker { return; } + self.first_unmarked = None; + if self.extend_final_range(off, len as u64, state) { + return; + } let subranges = self.chunk_range_on_edges(off, len as u64, state); for (sub_off, sub_len, sub_state) in subranges { @@ -315,6 +350,7 @@ impl RangeTracker { return; } + self.first_unmarked = None; let len = u64::try_from(len).unwrap(); let end_off = off + len; @@ -404,7 +440,7 @@ impl TxBuffer { can_buffer } - pub fn next_bytes(&self) -> Option<(u64, &[u8])> { + pub fn next_bytes(&mut self) -> Option<(u64, &[u8])> { let (start, maybe_len) = self.ranges.first_unmarked_range(); if start == self.retired + u64::try_from(self.buffered()).unwrap() { @@ -766,11 +802,13 @@ impl SendStream { /// offset. fn next_bytes(&mut self, retransmission_only: bool) -> Option<(u64, &[u8])> { match self.state { - SendStreamState::Send { ref send_buf, .. } => { - send_buf.next_bytes().and_then(|(offset, slice)| { + SendStreamState::Send { + ref mut send_buf, .. + } => { + let result = send_buf.next_bytes(); + if let Some((offset, slice)) = result { if retransmission_only { qtrace!( - [self], "next_bytes apply retransmission limit at {}", self.retransmission_offset ); @@ -786,13 +824,16 @@ impl SendStream { } else { Some((offset, slice)) } - }) + } else { + None + } } SendStreamState::DataSent { - ref send_buf, + ref mut send_buf, fin_sent, .. 
} => { + let used = send_buf.used(); // immutable first let bytes = send_buf.next_bytes(); if bytes.is_some() { bytes @@ -800,7 +841,7 @@ impl SendStream { None } else { // Send empty stream frame with fin set - Some((send_buf.used(), &[])) + Some((used, &[])) } } SendStreamState::Ready { .. } From 9260ba3a82ebd7721a6a537120d2c9b5ecf94e62 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 5 Feb 2024 21:14:36 +1100 Subject: [PATCH 113/321] feat: Use the simulator as a benchmark (#1616) * Moving simulator to the test fixture * Split the simulator into two pieces in preparation for bench * Remove println statements * Fix server fmt * chore: add a file that I missed * Add a fixed seed for the test And increase the time available for benching. --------- Signed-off-by: Lars Eggert Co-authored-by: Lars Eggert --- neqo-transport/Cargo.toml | 7 +- neqo-transport/benches/transfer.rs | 64 ++++++++++ neqo-transport/tests/network.rs | 114 ++++++++++------- test-fixture/Cargo.toml | 2 +- test-fixture/src/lib.rs | 1 + .../src}/sim/connection.rs | 71 ++++++++--- .../tests => test-fixture/src}/sim/delay.rs | 1 + .../tests => test-fixture/src}/sim/drop.rs | 6 + .../tests => test-fixture/src}/sim/mod.rs | 120 +++++++++++++----- .../tests => test-fixture/src}/sim/net.rs | 0 .../tests => test-fixture/src}/sim/rng.rs | 7 +- .../src}/sim/taildrop.rs | 20 ++- 12 files changed, 309 insertions(+), 104 deletions(-) create mode 100644 neqo-transport/benches/transfer.rs rename {neqo-transport/tests => test-fixture/src}/sim/connection.rs (81%) rename {neqo-transport/tests => test-fixture/src}/sim/delay.rs (99%) rename {neqo-transport/tests => test-fixture/src}/sim/drop.rs (90%) rename {neqo-transport/tests => test-fixture/src}/sim/mod.rs (72%) rename {neqo-transport/tests => test-fixture/src}/sim/net.rs (100%) rename {neqo-transport/tests => test-fixture/src}/sim/rng.rs (92%) rename {neqo-transport/tests => test-fixture/src}/sim/taildrop.rs (95%) diff --git a/neqo-transport/Cargo.toml 
b/neqo-transport/Cargo.toml index b1d86fc789..00c46eb37b 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -16,7 +16,7 @@ qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a01307 smallvec = "1.11.1" [dev-dependencies] -criterion = "0.5.1" +criterion = { version = "0.5.1", features = ["html_reports"] } enum-map = "2.7" test-fixture = { path = "../test-fixture" } @@ -25,6 +25,11 @@ bench = [] deny-warnings = [] fuzzing = ["neqo-crypto/fuzzing"] +[[bench]] +name = "transfer" +harness = false +required-features = ["bench"] + [[bench]] name = "rx_stream_orderer" harness = false diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs new file mode 100644 index 0000000000..59f0264a98 --- /dev/null +++ b/neqo-transport/benches/transfer.rs @@ -0,0 +1,64 @@ +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion}; +use test_fixture::{ + boxed, + sim::{ + connection::{ConnectionNode, ReceiveData, SendData}, + network::{Delay, TailDrop}, + Simulator, + }, +}; + +const ZERO: Duration = Duration::from_millis(0); +const JITTER: Duration = Duration::from_millis(10); +const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte + +fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option>) { + c.bench_function(label, |b| { + b.iter_batched( + || { + let nodes = boxed![ + ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), + TailDrop::dsl_uplink(), + Delay::new(ZERO..JITTER), + ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), + TailDrop::dsl_downlink(), + Delay::new(ZERO..JITTER), + ]; + let mut sim = Simulator::new(label, nodes); + if let Some(seed) = &seed { + sim.seed_str(seed); + } + sim.setup() + }, + |sim| { + sim.run(); + }, + SmallInput, + ) + }); +} + +fn benchmark_transfer_variable(c: &mut Criterion) { + benchmark_transfer( + c, + "Run multiple transfers with varying seeds", + 
std::env::var("SIMULATION_SEED").ok(), + ); +} + +fn benchmark_transfer_fixed(c: &mut Criterion) { + benchmark_transfer( + c, + "Run multiple transfers with the same seed", + Some("62df6933ba1f543cece01db8f27fb2025529b27f93df39e19f006e1db3b8c843"), + ); +} + +criterion_group! { + name = transfer; + config = Criterion::default().warm_up_time(Duration::from_secs(5)).measurement_time(Duration::from_secs(15)); + targets = benchmark_transfer_variable, benchmark_transfer_fixed +} +criterion_main!(transfer); diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index 8c388457c5..d7a537159b 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -7,15 +7,17 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -mod sim; - use std::{ops::Range, time::Duration}; use neqo_transport::{ConnectionError, ConnectionParameters, Error, State}; -use sim::{ - connection::{ConnectionNode, ReachState, ReceiveData, SendData}, - network::{Delay, Drop, TailDrop}, - Simulator, +use test_fixture::{ + boxed, + sim::{ + connection::{ConnectionNode, ReachState, ReceiveData, SendData}, + network::{Delay, Drop, TailDrop}, + Simulator, + }, + simulate, }; /// The amount of transfer. Much more than this takes a surprising amount of time. 
@@ -32,26 +34,28 @@ const fn weeks(m: u32) -> Duration { simulate!( connect_direct, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), ] ); simulate!( idle_timeout, [ - ConnectionNode::default_client(boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ]), - ConnectionNode::default_server(boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ]), + ConnectionNode::default_client(boxed![ReachState::new(State::Closed( + ConnectionError::Transport(Error::IdleTimeout) + ))]), + ConnectionNode::default_server(boxed![ReachState::new(State::Closed( + ConnectionError::Transport(Error::IdleTimeout) + ))]), ] ); @@ -60,23 +64,19 @@ simulate!( [ ConnectionNode::new_client( ConnectionParameters::default().idle_timeout(weeks(1000)), - boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ] + boxed![ReachState::new(State::Confirmed),], + boxed![ReachState::new(State::Closed(ConnectionError::Transport( + Error::IdleTimeout + )))] ), Delay::new(weeks(6)..weeks(6)), Drop::percentage(10), ConnectionNode::new_server( ConnectionParameters::default().idle_timeout(weeks(1000)), - boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ] + boxed![ReachState::new(State::Confirmed),], + boxed![ReachState::new(State::Closed(ConnectionError::Transport( + Error::IdleTimeout + )))] ), Delay::new(weeks(8)..weeks(8)), Drop::percentage(10), @@ 
-94,9 +94,17 @@ simulate!( simulate!( connect_fixed_rtt, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), Delay::new(DELAY..DELAY), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), Delay::new(DELAY..DELAY), ], ); @@ -104,22 +112,38 @@ simulate!( simulate!( connect_taildrop_jitter, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - TailDrop::dsl_uplink(), - Delay::new(ZERO..JITTER), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), TailDrop::dsl_downlink(), Delay::new(ZERO..JITTER), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + TailDrop::dsl_uplink(), + Delay::new(ZERO..JITTER), ], ); simulate!( connect_taildrop, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - TailDrop::dsl_uplink(), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), TailDrop::dsl_downlink(), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + TailDrop::dsl_uplink(), ], ); @@ -139,9 +163,9 @@ simulate!( transfer_taildrop, [ ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_uplink(), - ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), TailDrop::dsl_downlink(), + ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), + 
TailDrop::dsl_uplink(), ], ); @@ -149,10 +173,10 @@ simulate!( transfer_taildrop_jitter, [ ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_uplink(), + TailDrop::dsl_downlink(), Delay::new(ZERO..JITTER), ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_downlink(), + TailDrop::dsl_uplink(), Delay::new(ZERO..JITTER), ], ); diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index f0feace31d..6dfe8d7f4c 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -17,4 +17,4 @@ neqo-transport = { path = "../neqo-transport" } qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } [features] -deny-warnings = [] +deny-warnings = [] \ No newline at end of file diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 8635e8a840..2c94767a97 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -35,6 +35,7 @@ use neqo_transport::{ use qlog::{events::EventImportance, streamer::QlogStreamer}; pub mod assertions; +pub mod sim; /// The path for the database used in tests. 
pub const NSS_DB_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/db"); diff --git a/neqo-transport/tests/sim/connection.rs b/test-fixture/src/sim/connection.rs similarity index 81% rename from neqo-transport/tests/sim/connection.rs rename to test-fixture/src/sim/connection.rs index 45a5234512..d05979cfca 100644 --- a/neqo-transport/tests/sim/connection.rs +++ b/test-fixture/src/sim/connection.rs @@ -12,13 +12,16 @@ use std::{ time::Instant, }; -use neqo_common::{event::Provider, qdebug, qtrace, Datagram}; +use neqo_common::{event::Provider, qdebug, qinfo, qtrace, Datagram}; use neqo_crypto::AuthenticationStatus; use neqo_transport::{ Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, }; -use super::{Node, Rng}; +use crate::{ + boxed, + sim::{Node, Rng}, +}; /// The status of the processing of an event. #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -33,7 +36,7 @@ pub enum GoalStatus { /// A goal for the connection. /// Goals can be accomplished in any order. -pub trait ConnectionGoal { +pub trait ConnectionGoal: Debug { fn init(&mut self, _c: &mut Connection, _now: Instant) {} /// Perform some processing. 
fn process(&mut self, _c: &mut Connection, _now: Instant) -> GoalStatus { @@ -47,36 +50,49 @@ pub trait ConnectionGoal { pub struct ConnectionNode { c: Connection, + setup_goals: Vec>, goals: Vec>, } impl ConnectionNode { pub fn new_client( params: ConnectionParameters, + setup: impl IntoIterator>, goals: impl IntoIterator>, ) -> Self { Self { - c: test_fixture::new_client(params), + c: crate::new_client(params), + setup_goals: setup.into_iter().collect(), goals: goals.into_iter().collect(), } } pub fn new_server( params: ConnectionParameters, + setup: impl IntoIterator>, goals: impl IntoIterator>, ) -> Self { Self { - c: test_fixture::new_server(test_fixture::DEFAULT_ALPN, params), + c: crate::new_server(crate::DEFAULT_ALPN, params), + setup_goals: setup.into_iter().collect(), goals: goals.into_iter().collect(), } } pub fn default_client(goals: impl IntoIterator>) -> Self { - Self::new_client(ConnectionParameters::default(), goals) + Self::new_client( + ConnectionParameters::default(), + boxed![ReachState::new(State::Confirmed)], + goals, + ) } pub fn default_server(goals: impl IntoIterator>) -> Self { - Self::new_server(ConnectionParameters::default(), goals) + Self::new_server( + ConnectionParameters::default(), + boxed![ReachState::new(State::Confirmed)], + goals, + ) } #[allow(dead_code)] @@ -89,13 +105,20 @@ impl ConnectionNode { self.goals.push(goal); } + /// On the first call to this method, the setup goals will turn into the active goals. + /// On the second call, they will be swapped back and the main goals will run. + fn setup_goals(&mut self, now: Instant) { + std::mem::swap(&mut self.goals, &mut self.setup_goals); + for g in &mut self.goals { + g.init(&mut self.c, now); + } + } + /// Process all goals using the given closure and return whether any were active. fn process_goals(&mut self, mut f: F) -> bool where F: FnMut(&mut Box, &mut Connection) -> GoalStatus, { - // Waiting on drain_filter... 
- // self.goals.drain_filter(|g| f(g, &mut self.c, &e)).count(); let mut active = false; let mut i = 0; while i < self.goals.len() { @@ -114,15 +137,13 @@ impl ConnectionNode { impl Node for ConnectionNode { fn init(&mut self, _rng: Rng, now: Instant) { - for g in &mut self.goals { - g.init(&mut self.c, now); - } + self.setup_goals(now); } - fn process(&mut self, mut d: Option, now: Instant) -> Output { + fn process(&mut self, mut dgram: Option, now: Instant) -> Output { _ = self.process_goals(|goal, c| goal.process(c, now)); loop { - let res = self.c.process(d.take().as_ref(), now); + let res = self.c.process(dgram.take().as_ref(), now); let mut active = false; while let Some(e) = self.c.next_event() { @@ -145,12 +166,18 @@ impl Node for ConnectionNode { } } + fn prepare(&mut self, now: Instant) { + assert!(self.done(), "ConnectionNode::prepare: setup not complete"); + self.setup_goals(now); + assert!(!self.done(), "ConnectionNode::prepare: setup not complete"); + } + fn done(&self) -> bool { self.goals.is_empty() } fn print_summary(&self, test_name: &str) { - println!("{}: {:?}", test_name, self.c.stats()); + qinfo!("{}: {:?}", test_name, self.c.stats()); } } @@ -160,12 +187,15 @@ impl Debug for ConnectionNode { } } +/// A target for a connection that involves reaching a given connection state. #[derive(Debug, Clone)] pub struct ReachState { target: State, } impl ReachState { + /// Create a new instance that intends to reach the indicated state. + #[must_use] pub fn new(target: State) -> Self { Self { target } } @@ -186,13 +216,15 @@ impl ConnectionGoal for ReachState { } } -#[derive(Debug)] +/// A target for a connection that involves sending a given amount of data on the indicated stream. 
+#[derive(Debug, Clone)] pub struct SendData { remaining: usize, stream_id: Option, } impl SendData { + #[must_use] pub fn new(amount: usize) -> Self { Self { remaining: amount, @@ -248,9 +280,7 @@ impl ConnectionGoal for SendData { match e { ConnectionEvent::SendStreamCreatable { stream_type: StreamType::UniDi, - } - // TODO(mt): remove the second condition when #842 is fixed. - | ConnectionEvent::StateChange(_) => { + } => { self.make_stream(c); GoalStatus::Active } @@ -270,12 +300,13 @@ impl ConnectionGoal for SendData { } /// Receive a prescribed amount of data from any stream. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ReceiveData { remaining: usize, } impl ReceiveData { + #[must_use] pub fn new(amount: usize) -> Self { Self { remaining: amount } } diff --git a/neqo-transport/tests/sim/delay.rs b/test-fixture/src/sim/delay.rs similarity index 99% rename from neqo-transport/tests/sim/delay.rs rename to test-fixture/src/sim/delay.rs index 34cb923084..e66e65f9d8 100644 --- a/neqo-transport/tests/sim/delay.rs +++ b/test-fixture/src/sim/delay.rs @@ -58,6 +58,7 @@ pub struct Delay { } impl Delay { + #[must_use] pub fn new(bounds: Range) -> Self { Self { random: RandomDelay::new(bounds), diff --git a/neqo-transport/tests/sim/drop.rs b/test-fixture/src/sim/drop.rs similarity index 90% rename from neqo-transport/tests/sim/drop.rs rename to test-fixture/src/sim/drop.rs index 629fbf48d3..6529a95d04 100644 --- a/neqo-transport/tests/sim/drop.rs +++ b/test-fixture/src/sim/drop.rs @@ -27,6 +27,7 @@ impl Drop { /// Make a new random drop generator. Each `drop` is called, this generates a /// random value between 0 and `max` (exclusive). If this value is less than /// `threshold` a value of `true` is returned. + #[must_use] pub fn new(threshold: u64, max: u64) -> Self { Self { threshold, @@ -36,11 +37,16 @@ impl Drop { } /// Generate random drops with the given percentage. 
+ #[must_use] pub fn percentage(pct: u8) -> Self { // Multiply by 10 so that the random number generator works more efficiently. Self::new(u64::from(pct) * 10, 1000) } + /// Determine whether or not to drop a packet. + /// # Panics + /// When this is invoked after test configuration has been torn down, + /// such that the RNG is no longer available. pub fn drop(&mut self) -> bool { let mut rng = self.rng.as_ref().unwrap().borrow_mut(); let r = rng.random_from(0..self.max); diff --git a/neqo-transport/tests/sim/mod.rs b/test-fixture/src/sim/mod.rs similarity index 72% rename from neqo-transport/tests/sim/mod.rs rename to test-fixture/src/sim/mod.rs index 9ab9d57a4a..f4b7a52739 100644 --- a/neqo-transport/tests/sim/mod.rs +++ b/test-fixture/src/sim/mod.rs @@ -4,10 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Tests with simulated network -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] - +/// Tests with simulated network components. pub mod connection; mod delay; mod drop; @@ -19,6 +16,7 @@ use std::{ cmp::min, convert::TryFrom, fmt::Debug, + ops::{Deref, DerefMut}, rc::Rc, time::{Duration, Instant}, }; @@ -26,9 +24,10 @@ use std::{ use neqo_common::{qdebug, qinfo, qtrace, Datagram, Encoder}; use neqo_transport::Output; use rng::Random; -use test_fixture::{self, now}; use NodeState::{Active, Idle, Waiting}; +use crate::now; + pub mod network { pub use super::{delay::Delay, drop::Drop, taildrop::TailDrop}; } @@ -78,17 +77,21 @@ pub trait Node: Debug { /// Perform processing. This optionally takes a datagram and produces either /// another data, a time that the simulator needs to wait, or nothing. fn process(&mut self, d: Option, now: Instant) -> Output; + /// This is called after setup is complete and before the main processing starts. + fn prepare(&mut self, _now: Instant) {} /// An node can report when it considers itself "done". 
+ /// Prior to calling `prepare`, this should return `true` if it is ready. fn done(&self) -> bool { true } + /// Print out a summary of the state of the node. fn print_summary(&self, _test_name: &str) {} } /// The state of a single node. Nodes will be activated if they are `Active` /// or if the previous node in the loop generated a datagram. Nodes that return /// `true` from `Node::done` will be activated as normal. -#[derive(Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] enum NodeState { /// The node just produced a datagram. It should be activated again as soon as possible. Active, @@ -114,6 +117,19 @@ impl NodeHolder { } } +impl Deref for NodeHolder { + type Target = dyn Node; + fn deref(&self) -> &Self::Target { + self.node.as_ref() + } +} + +impl DerefMut for NodeHolder { + fn deref_mut(&mut self) -> &mut Self::Target { + self.node.as_mut() + } +} + pub struct Simulator { name: String, nodes: Vec, @@ -146,7 +162,8 @@ impl Simulator { } /// Seed from a hex string. - /// Though this is convenient, it panics if this isn't a 64 character hex string. + /// # Panics + /// When the provided string is not 32 bytes of hex (64 characters). pub fn seed_str(&mut self, seed: impl AsRef) { let seed = Encoder::from_hex(seed); self.seed(<[u8; 32]>::try_from(seed.as_ref()).unwrap()); @@ -164,18 +181,8 @@ impl Simulator { next.expect("a node cannot be idle and not done") } - /// Runs the simulation. 
- pub fn run(mut self) -> Duration { - let start = now(); - let mut now = start; + fn process_loop(&mut self, start: Instant, mut now: Instant) -> Instant { let mut dgram = None; - - for n in &mut self.nodes { - n.node.init(self.rng.clone(), now); - } - println!("{}: seed {}", self.name, self.rng.borrow().seed_str()); - - let real_start = Instant::now(); loop { for n in &mut self.nodes { if dgram.is_none() && !n.ready(now) { @@ -184,7 +191,7 @@ impl Simulator { } qdebug!([self.name], "processing {:?}", n.node); - let res = n.node.process(dgram.take(), now); + let res = n.process(dgram.take(), now); n.state = match res { Output::Datagram(d) => { qtrace!([self.name], " => datagram {}", d.len()); @@ -198,21 +205,14 @@ impl Simulator { } Output::None => { qtrace!([self.name], " => nothing"); - assert!(n.node.done(), "nodes have to be done when they go idle"); + assert!(n.done(), "nodes should be done when they go idle"); Idle } }; } - if self.nodes.iter().all(|n| n.node.done()) { - let real_elapsed = real_start.elapsed(); - println!("{}: real elapsed time: {:?}", self.name, real_elapsed); - let elapsed = now - start; - println!("{}: simulated elapsed time: {:?}", self.name, elapsed); - for n in &self.nodes { - n.node.print_summary(&self.name); - } - return elapsed; + if self.nodes.iter().all(|n| n.done()) { + return now; } if dgram.is_none() { @@ -229,4 +229,66 @@ impl Simulator { } } } + + #[must_use] + pub fn setup(mut self) -> ReadySimulator { + let start = now(); + + qinfo!("{}: seed {}", self.name, self.rng.borrow().seed_str()); + for n in &mut self.nodes { + n.init(self.rng.clone(), start); + } + + let setup_start = Instant::now(); + let now = self.process_loop(start, start); + let setup_time = now - start; + qinfo!( + "{t}: Setup took {wall:?} (wall) {setup_time:?} (simulated)", + t = self.name, + wall = setup_start.elapsed(), + ); + + for n in &mut self.nodes { + n.prepare(now); + } + + ReadySimulator { + sim: self, + start, + now, + } + } + + /// Runs the 
simulation. + /// # Panics + /// When sanity checks fail in unexpected ways; this is a testing function after all. + pub fn run(self) { + self.setup().run(); + } + + fn print_summary(&self) { + for n in &self.nodes { + n.print_summary(&self.name); + } + } +} + +pub struct ReadySimulator { + sim: Simulator, + start: Instant, + now: Instant, +} + +impl ReadySimulator { + pub fn run(mut self) { + let real_start = Instant::now(); + let end = self.sim.process_loop(self.start, self.now); + let sim_time = end - self.now; + qinfo!( + "{t}: Simulation took {wall:?} (wall) {sim_time:?} (simulated)", + t = self.sim.name, + wall = real_start.elapsed(), + ); + self.sim.print_summary(); + } } diff --git a/neqo-transport/tests/sim/net.rs b/test-fixture/src/sim/net.rs similarity index 100% rename from neqo-transport/tests/sim/net.rs rename to test-fixture/src/sim/net.rs diff --git a/neqo-transport/tests/sim/rng.rs b/test-fixture/src/sim/rng.rs similarity index 92% rename from neqo-transport/tests/sim/rng.rs rename to test-fixture/src/sim/rng.rs index af4f70eb5f..094c5fd791 100644 --- a/neqo-transport/tests/sim/rng.rs +++ b/test-fixture/src/sim/rng.rs @@ -14,6 +14,8 @@ pub struct Random { } impl Random { + #[must_use] + #[allow(clippy::missing_panics_doc)] // These are impossible. pub fn new(seed: [u8; 32]) -> Self { assert!(seed.iter().any(|&x| x != 0)); let mut dec = Decoder::from(&seed); @@ -48,6 +50,7 @@ impl Random { /// Generate a random value from the range. /// If the range is empty or inverted (`range.start > range.end`), then /// this returns the value of `range.start` without generating any random values. + #[must_use] pub fn random_from(&mut self, range: Range) -> u64 { let max = range.end.saturating_sub(range.start); if max == 0 { @@ -55,7 +58,6 @@ impl Random { } let shift = (max - 1).leading_zeros(); - assert_ne!(max, 0); loop { let r = self.random() >> shift; if r < max { @@ -64,7 +66,8 @@ impl Random { } } - /// Get the seed necessary to continue from this point. 
+ /// Get the seed necessary to continue from the current state of the RNG. + #[must_use] pub fn seed_str(&self) -> String { format!( "{:8x}{:8x}{:8x}{:8x}", diff --git a/neqo-transport/tests/sim/taildrop.rs b/test-fixture/src/sim/taildrop.rs similarity index 95% rename from neqo-transport/tests/sim/taildrop.rs rename to test-fixture/src/sim/taildrop.rs index 26813800c9..c23dae10c6 100644 --- a/neqo-transport/tests/sim/taildrop.rs +++ b/test-fixture/src/sim/taildrop.rs @@ -14,7 +14,7 @@ use std::{ time::{Duration, Instant}, }; -use neqo_common::{qtrace, Datagram}; +use neqo_common::{qinfo, qtrace, Datagram}; use neqo_transport::Output; use super::Node; @@ -23,6 +23,7 @@ use super::Node; const ONE_SECOND_NS: u128 = 1_000_000_000; /// This models a link with a tail drop router at the front of it. +#[derive(Clone)] pub struct TailDrop { /// An overhead associated with each entry. This accounts for /// layer 2, IP, and UDP overheads. @@ -60,6 +61,7 @@ pub struct TailDrop { impl TailDrop { /// Make a new taildrop node with the given rate, queue capacity, and link delay. + #[must_use] pub fn new(rate: usize, capacity: usize, delay: Duration) -> Self { Self { overhead: 64, @@ -80,12 +82,14 @@ impl TailDrop { /// A tail drop queue on a 10Mbps link (approximated to 1 million bytes per second) /// with a fat 32k buffer (about 30ms), and the default forward delay of 50ms. - pub fn dsl_uplink() -> Self { + #[must_use] + pub fn dsl_downlink() -> Self { TailDrop::new(1_000_000, 32_768, Duration::from_millis(50)) } - /// Cut downlink to one fifth of the uplink (2Mbps), and reduce the buffer to 1/4. - pub fn dsl_downlink() -> Self { + /// Cut uplink to one fifth of the downlink (2Mbps), and reduce the buffer to 1/4. 
+ #[must_use] + pub fn dsl_uplink() -> Self { TailDrop::new(200_000, 8_192, Duration::from_millis(50)) } @@ -174,9 +178,13 @@ impl Node for TailDrop { } fn print_summary(&self, test_name: &str) { - println!( + qinfo!( "{}: taildrop: rx {} drop {} tx {} maxq {}", - test_name, self.received, self.dropped, self.delivered, self.maxq, + test_name, + self.received, + self.dropped, + self.delivered, + self.maxq, ); } } From 40a9346f4368b34f94f60aa3c192181128c12d02 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Mon, 5 Feb 2024 14:52:07 +0100 Subject: [PATCH 114/321] qlog 0.12.0 (#1621) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 5419e8a5f8..ddf33cca08 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -15,7 +15,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" structopt = "0.3" url = "~2.5.0" diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index de754531be..183a4d9450 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -12,7 +12,7 @@ enum-map = "2.7" env_logger = { version = "0.10", default-features = false } lazy_static = "1.4" log = { version = "0.4", default-features = false } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" time = {version = "0.3.23", features = ["formatting"]} [dev-dependencies] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index f05cae5f03..1605a2b609 100644 --- a/neqo-http3/Cargo.toml 
+++ b/neqo-http3/Cargo.toml @@ -14,7 +14,7 @@ neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" sfv = "0.9.3" smallvec = "1.11.1" url = "2.5" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 96531550bd..229345e977 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -12,7 +12,7 @@ log = {version = "~0.4.17", default-features = false} neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" static_assertions = "~1.1.0" [dev-dependencies] diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 09a7d4aa3c..d4ee99c9b0 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" regex = "1.9" structopt = "0.3" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 00c46eb37b..c75142e102 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -12,7 +12,7 @@ lazy_static = "1.4" log = { version = "0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" 
smallvec = "1.11.1" [dev-dependencies] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 6dfe8d7f4c..ed480c9c26 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" [features] deny-warnings = [] \ No newline at end of file From 9a394e95fe5562070bb8139562ea94b709b9a7f2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 5 Feb 2024 16:29:05 +0200 Subject: [PATCH 115/321] ci: Build docker image for multiple architectures (#1619) * ci: Build docker image for multiple architectures * Print executable name and version * Better way to print versions during qns run --- .github/workflows/qns.yml | 6 +++++- qns/interop.sh | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 353d0ae696..ea9c7fb041 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -5,7 +5,7 @@ on: - cron: '42 3 * * 2,5' # Runs at 03:42 UTC (m and h chosen arbitrarily) twice a week. 
workflow_dispatch: pull_request: - branch: ["main"] + branches: ["main"] paths: - 'qns/**' - '.github/workflows/qns.yml' @@ -13,6 +13,9 @@ jobs: docker-image: runs-on: ubuntu-latest steps: + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -47,3 +50,4 @@ jobs: RUST_VERSION=stable cache-from: type=gha cache-to: type=gha,mode=max + platforms: linux/amd64, linux/arm64 diff --git a/qns/interop.sh b/qns/interop.sh index 3c828ead9e..4baa6b7e8f 100755 --- a/qns/interop.sh +++ b/qns/interop.sh @@ -13,6 +13,7 @@ case "$ROLE" in client) /wait-for-it.sh sim:57832 -s -t 30 sleep 5 + neqo-client --help | head -n 1 RUST_LOG=debug RUST_BACKTRACE=1 neqo-client --cc cubic --qns-test "$TESTCASE" \ --qlog-dir "$QLOGDIR" --output-dir /downloads $REQUESTS ;; @@ -27,6 +28,7 @@ case "$ROLE" in -name "$CERT" -passout pass: -out "$P12CERT" pk12util -d "sql:$DB" -i "$P12CERT" -W '' certutil -L -d "sql:$DB" -n "$CERT" + neqo-server --help | head -n 1 RUST_LOG=info RUST_BACKTRACE=1 neqo-server --cc cubic --qns-test "$TESTCASE" \ --qlog-dir "$QLOGDIR" -d "$DB" -k "$CERT" [::]:443 ;; From 3587c23aca441a27266267c5692fb70d29e825b0 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 6 Feb 2024 06:48:49 +0100 Subject: [PATCH 116/321] refactor(client): replace mio with tokio (#1612) * refactor(client): replace mio with tokio - Use `tokio` instead of `mio`. - Refactor `neqo-client` to be more consistent with `neqo-server`. - Introduce `read_datagram` and `ready`. - Introduce `ClientRunner` and `old::ClientRunner` (consistent with `ServersRunner`). - Fold `handle_test` into `client` (now `ClientRunner::new`). 
* http3/client_events: remove previous hot-fix * transport/tests/handshake: add (for now failing) test * fix(transport/connection): emit AuthenticationNeeded once * Assert that we have the certificate too; add comments --------- Co-authored-by: Martin Thomson --- neqo-client/Cargo.toml | 4 +- neqo-client/src/main.rs | 609 +++++++++--------- neqo-transport/src/connection/mod.rs | 8 +- .../src/connection/tests/handshake.rs | 51 ++ 4 files changed, 369 insertions(+), 303 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index ddf33cca08..4ca69647e1 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -9,7 +9,8 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] -mio = "~0.6.23" +futures = "0.3" +log = {version = "0.4.17", default-features = false} neqo-common = { path="./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } @@ -17,6 +18,7 @@ neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } qlog = "0.12.0" structopt = "0.3" +tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = "~2.5.0" [features] diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 3db90aac10..d8444542ea 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -13,9 +13,10 @@ use std::{ convert::TryFrom, fmt::{self, Display}, fs::{create_dir_all, File, OpenOptions}, - io::{self, ErrorKind, Write}, + io::{self, Write}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}, path::PathBuf, + pin::Pin, process::exit, rc::Rc, str::FromStr, @@ -23,8 +24,13 @@ use std::{ }; use common::IpTos; -use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; -use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; +use futures::{ + future::{select, Either}, + FutureExt, TryFutureExt, +}; +use neqo_common::{ + self as common, event::Provider, hex, 
qdebug, qinfo, qlog::NeqoQlog, Datagram, Role, +}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, AuthenticationStatus, Cipher, ResumptionToken, @@ -39,6 +45,7 @@ use neqo_transport::{ }; use qlog::{events::EventImportance, streamer::QlogStreamer}; use structopt::StructOpt; +use tokio::{net::UdpSocket, time::Sleep}; use url::{Origin, Url}; #[derive(Debug)] @@ -343,10 +350,17 @@ impl QuicParameters { } } -fn emit_datagram(socket: &mio::net::UdpSocket, d: Datagram) -> io::Result<()> { - let sent = socket.send_to(&d[..], &d.destination())?; - if sent != d.len() { - eprintln!("Unable to send all {} bytes of datagram", d.len()); +async fn emit_datagram(socket: &UdpSocket, out_dgram: Datagram) -> Result<(), io::Error> { + let sent = match socket.send_to(&out_dgram, &out_dgram.destination()).await { + Ok(res) => res, + Err(ref err) if err.kind() != io::ErrorKind::WouldBlock => { + eprintln!("UDP send error: {err:?}"); + 0 + } + Err(e) => return Err(e), + }; + if sent != out_dgram.len() { + eprintln!("Unable to send all {} bytes of datagram", out_dgram.len()); } Ok(()) } @@ -393,86 +407,59 @@ fn get_output_file( } } -fn process_loop( - local_addr: &SocketAddr, +enum Ready { + Socket, + Timeout, +} + +// Wait for the socket to be readable or the timeout to fire. 
+async fn ready( socket: &UdpSocket, - poll: &Poll, - client: &mut Http3Client, - handler: &mut Handler, -) -> Res { + mut timeout: Option<&mut Pin>>, +) -> Result { + let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket); + let timeout_ready = timeout + .as_mut() + .map(Either::Left) + .unwrap_or(Either::Right(futures::future::pending())) + .map(|()| Ok(Ready::Timeout)); + select(socket_ready, timeout_ready).await.factor_first().0 +} + +fn read_dgram( + socket: &UdpSocket, + local_address: &SocketAddr, +) -> Result, io::Error> { let buf = &mut [0u8; 2048]; - let mut events = Events::with_capacity(1024); - let mut timeout = Duration::new(0, 0); - loop { - poll.poll(&mut events, Some(timeout))?; - - let mut datagrams: Vec = Vec::new(); - 'read: loop { - match socket.recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == ErrorKind::WouldBlock - || err.kind() == ErrorKind::Interrupted => - { - break 'read - } - Err(ref err) => { - eprintln!("UDP error: {err}"); - exit(1); - } - Ok((sz, remote)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - break 'read; - } - if sz > 0 { - let d = - Datagram::new(remote, *local_addr, IpTos::default(), None, &buf[..sz]); - datagrams.push(d); - } - } - }; + let (sz, remote_addr) = match socket.try_recv_from(&mut buf[..]) { + Err(ref err) + if err.kind() == io::ErrorKind::WouldBlock + || err.kind() == io::ErrorKind::Interrupted => + { + return Ok(None) } - if !datagrams.is_empty() { - client.process_multiple_input(&datagrams, Instant::now()); - handler.maybe_key_update(client)?; + Err(err) => { + eprintln!("UDP recv error: {err:?}"); + return Err(err); } + Ok(res) => res, + }; - if let Http3State::Closed(..) 
= client.state() { - return Ok(client.state()); - } - - let mut exiting = !handler.handle(client)?; - - 'write: loop { - match client.process_output(Instant::now()) { - Output::Datagram(dgram) => { - if let Err(err) = emit_datagram(socket, dgram) { - if err.kind() == ErrorKind::WouldBlock - || err.kind() == ErrorKind::Interrupted - { - break 'write; - } - eprintln!("UDP write error: {err}"); - client.close(Instant::now(), 0, err.to_string()); - exiting = true; - break 'write; - } - } - Output::Callback(new_timeout) => { - timeout = new_timeout; - break 'write; - } - Output::None => { - // Not strictly necessary, since we're about to exit - exiting = true; - break 'write; - } - } - } + if sz == buf.len() { + eprintln!("Might have received more than {} bytes", buf.len()); + } - if exiting { - return Ok(client.state()); - } + if sz == 0 { + eprintln!("zero length datagram received?"); + Ok(None) + } else { + Ok(Some(Datagram::new( + remote_addr, + *local_address, + IpTos::default(), + None, + &buf[..sz], + ))) } } @@ -821,39 +808,122 @@ fn to_headers(values: &[impl AsRef]) -> Vec
{ .collect() } -#[allow(clippy::too_many_arguments)] -fn handle_test( - testcase: &String, - args: &mut Args, - socket: &UdpSocket, - poll: &Poll, +struct ClientRunner<'a> { local_addr: SocketAddr, - remote_addr: SocketAddr, - hostname: &str, - url_queue: VecDeque, - resumption_token: Option, -) -> Res> { - let key_update = KeyUpdateState(args.key_update); - if testcase.as_str() == "upload" { - let mut client = - create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) - .expect("failed to create client"); - args.method = String::from("POST"); + socket: &'a UdpSocket, + client: Http3Client, + handler: Handler<'a>, + timeout: Option>>, + args: &'a Args, +} + +impl<'a> ClientRunner<'a> { + async fn new( + args: &'a mut Args, + socket: &'a UdpSocket, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + url_queue: VecDeque, + resumption_token: Option, + ) -> Res> { + if let Some(testcase) = &args.test { + if testcase.as_str() != "upload" { + eprintln!("Unsupported test case: {testcase}"); + exit(127) + } + } + + let client = create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) + .expect("failed to create client"); + if args.test.is_some() { + args.method = String::from("POST"); + } + let key_update = KeyUpdateState(args.key_update); let url_handler = URLHandler { url_queue, stream_handlers: HashMap::new(), all_paths: Vec::new(), - handler_type: StreamHandlerType::Upload, + handler_type: if args.test.is_some() { + StreamHandlerType::Upload + } else { + StreamHandlerType::Download + }, args, }; - let mut h = Handler::new(url_handler, key_update, args.output_read_data); - process_loop(&local_addr, socket, poll, &mut client, &mut h)?; - } else { - eprintln!("Unsupported test case: {testcase}"); - exit(127) + let handler = Handler::new(url_handler, key_update, args.output_read_data); + + Ok(Self { + local_addr, + socket, + client, + handler, + timeout: None, + args, + }) + } + + async fn run(mut self) -> 
Res> { + loop { + if !self.handler.handle(&mut self.client)? { + break; + } + + self.process(None).await?; + + match ready(self.socket, self.timeout.as_mut()).await? { + Ready::Socket => loop { + let dgram = read_dgram(self.socket, &self.local_addr)?; + if dgram.is_none() { + break; + } + self.process(dgram.as_ref()).await?; + self.handler.maybe_key_update(&mut self.client)?; + }, + Ready::Timeout => { + self.timeout = None; + } + } + + if let Http3State::Closed(..) = self.client.state() { + break; + } + } + + let token = if self.args.test.is_none() && self.args.resume { + // If we haven't received an event, take a token if there is one. + // Lots of servers don't provide NEW_TOKEN, but a session ticket + // without NEW_TOKEN is better than nothing. + self.handler + .token + .take() + .or_else(|| self.client.take_resumption_token(Instant::now())) + } else { + None + }; + Ok(token) } - Ok(None) + async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { + loop { + match self.client.process(dgram.take(), Instant::now()) { + Output::Datagram(dgram) => { + emit_datagram(self.socket, dgram).await?; + } + Output::Callback(new_timeout) => { + qinfo!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; + } + Output::None => { + qdebug!("Output::None"); + break; + } + } + } + + Ok(()) + } } fn create_http3_client( @@ -899,58 +969,6 @@ fn create_http3_client( Ok(client) } -#[allow(clippy::too_many_arguments)] -fn client( - args: &mut Args, - socket: &UdpSocket, - poll: &Poll, - local_addr: SocketAddr, - remote_addr: SocketAddr, - hostname: &str, - url_queue: VecDeque, - resumption_token: Option, -) -> Res> { - let testcase = args.test.clone(); - if let Some(testcase) = testcase { - return handle_test( - &testcase, - args, - socket, - poll, - local_addr, - remote_addr, - hostname, - url_queue, - resumption_token, - ); - } - - let mut client = create_http3_client(args, local_addr, 
remote_addr, hostname, resumption_token) - .expect("failed to create client"); - let key_update = KeyUpdateState(args.key_update); - let url_handler = URLHandler { - url_queue, - stream_handlers: HashMap::new(), - all_paths: Vec::new(), - handler_type: StreamHandlerType::Download, - args, - }; - let mut h = Handler::new(url_handler, key_update, args.output_read_data); - - process_loop(&local_addr, socket, poll, &mut client, &mut h)?; - - let token = if args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. - h.token - .or_else(|| client.take_resumption_token(Instant::now())) - } else { - None - }; - Ok(token) -} - fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { if let Some(qlog_dir) = &args.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); @@ -980,7 +998,8 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { } } -fn main() -> Res<()> { +#[tokio::main] +async fn main() -> Res<()> { init(); let mut args = Args::from_args(); @@ -1059,21 +1078,15 @@ fn main() -> Res<()> { SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), }; - let socket = match UdpSocket::bind(&local_addr) { + let socket = match std::net::UdpSocket::bind(local_addr) { Err(e) => { eprintln!("Unable to bind UDP socket: {e}"); exit(1) } Ok(s) => s, }; - - let poll = Poll::new()?; - poll.register( - &socket, - Token(0), - Ready::readable() | Ready::writable(), - PollOpt::edge(), - )?; + socket.set_nonblocking(true)?; + let socket = UdpSocket::from_std(socket)?; let real_local = socket.local_addr().unwrap(); println!( @@ -1096,27 +1109,31 @@ fn main() -> Res<()> { first = false; token = if args.use_old_http { - old::old_client( + old::ClientRunner::new( &args, &socket, - &poll, real_local, remote_addr, &hostname, to_request, token, - )? + ) + .await? + .run() + .await? 
} else { - client( + ClientRunner::new( &mut args, &socket, - &poll, real_local, remote_addr, &hostname, to_request, token, - )? + ) + .await? + .run() + .await? }; } } @@ -1129,24 +1146,25 @@ mod old { cell::RefCell, collections::{HashMap, VecDeque}, fs::File, - io::{ErrorKind, Write}, + io::{self, Write}, net::SocketAddr, path::PathBuf, - process::exit, + pin::Pin, rc::Rc, - time::{Duration, Instant}, + time::Instant, }; - use mio::{Events, Poll}; - use neqo_common::{event::Provider, Datagram, IpTos}; + use neqo_common::{event::Provider, qdebug, qinfo, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, StreamType, }; + use tokio::{net::UdpSocket, time::Sleep}; use url::Url; - use super::{emit_datagram, get_output_file, qlog_new, Args, KeyUpdateState, Res}; + use super::{get_output_file, qlog_new, read_dgram, ready, Args, KeyUpdateState, Ready, Res}; + use crate::emit_datagram; struct HandlerOld<'b> { streams: HashMap>, @@ -1330,143 +1348,132 @@ mod old { } } - fn process_loop_old( - local_addr: &SocketAddr, - socket: &mio::net::UdpSocket, - poll: &Poll, - client: &mut Connection, - handler: &mut HandlerOld, - ) -> Res { - let buf = &mut [0u8; 2048]; - let mut events = Events::with_capacity(1024); - let mut timeout = Duration::new(0, 0); - loop { - poll.poll(&mut events, Some(timeout))?; - - 'read: loop { - match socket.recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == ErrorKind::WouldBlock - || err.kind() == ErrorKind::Interrupted => - { - break 'read - } - Err(ref err) => { - eprintln!("UDP error: {err}"); - exit(1); - } - Ok((sz, remote)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - break 'read; - } - if sz > 0 { - let d = Datagram::new( - remote, - *local_addr, - IpTos::default(), - None, - &buf[..sz], - ); - client.process_input(&d, Instant::now()); - 
handler.maybe_key_update(client)?; + pub struct ClientRunner<'a> { + local_addr: SocketAddr, + socket: &'a UdpSocket, + client: Connection, + handler: HandlerOld<'a>, + timeout: Option>>, + args: &'a Args, + } + + impl<'a> ClientRunner<'a> { + pub async fn new( + args: &'a Args, + socket: &'a UdpSocket, + local_addr: SocketAddr, + remote_addr: SocketAddr, + origin: &str, + url_queue: VecDeque, + token: Option, + ) -> Res> { + let alpn = match args.alpn.as_str() { + "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.alpn.as_str(), + _ => "hq-interop", + }; + + let mut client = Connection::new_client( + origin, + &[alpn], + Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), + local_addr, + remote_addr, + args.quic_parameters.get(alpn), + Instant::now(), + )?; + + if let Some(tok) = token { + client.enable_resumption(Instant::now(), tok)?; + } + + let ciphers = args.get_ciphers(); + if !ciphers.is_empty() { + client.set_ciphers(&ciphers)?; + } + + client.set_qlog(qlog_new(args, origin, client.odcid().unwrap())?); + + let key_update = KeyUpdateState(args.key_update); + let handler = HandlerOld { + streams: HashMap::new(), + url_queue, + all_paths: Vec::new(), + args, + token: None, + key_update, + }; + + Ok(Self { + local_addr, + socket, + client, + handler, + timeout: None, + args, + }) + } + + pub async fn run(mut self) -> Res> { + loop { + if !self.handler.handle(&mut self.client)? { + break; + } + + self.process(None).await?; + + match ready(self.socket, self.timeout.as_mut()).await? { + Ready::Socket => loop { + let dgram = read_dgram(self.socket, &self.local_addr)?; + if dgram.is_none() { + break; } + self.process(dgram.as_ref()).await?; + self.handler.maybe_key_update(&mut self.client)?; + }, + Ready::Timeout => { + self.timeout = None; } - }; - } + } - if let State::Closed(..) = client.state() { - return Ok(client.state().clone()); + if let State::Closed(..) 
= self.client.state() { + break; + } } - let mut exiting = !handler.handle(client)?; + let token = if self.args.resume { + // If we haven't received an event, take a token if there is one. + // Lots of servers don't provide NEW_TOKEN, but a session ticket + // without NEW_TOKEN is better than nothing. + self.handler + .token + .take() + .or_else(|| self.client.take_resumption_token(Instant::now())) + } else { + None + }; + + Ok(token) + } - 'write: loop { - match client.process_output(Instant::now()) { + async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { + loop { + match self.client.process(dgram.take(), Instant::now()) { Output::Datagram(dgram) => { - if let Err(e) = emit_datagram(socket, dgram) { - eprintln!("UDP write error: {e}"); - client.close(Instant::now(), 0, e.to_string()); - exiting = true; - break 'write; - } + emit_datagram(self.socket, dgram).await?; } Output::Callback(new_timeout) => { - timeout = new_timeout; - break 'write; + qinfo!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; } Output::None => { - // Not strictly necessary, since we're about to exit - exiting = true; - break 'write; + qdebug!("Output::None"); + break; } } } - if exiting { - return Ok(client.state().clone()); - } - } - } - - #[allow(clippy::too_many_arguments)] - pub fn old_client( - args: &Args, - socket: &mio::net::UdpSocket, - poll: &Poll, - local_addr: SocketAddr, - remote_addr: SocketAddr, - origin: &str, - url_queue: VecDeque, - token: Option, - ) -> Res> { - let alpn = match args.alpn.as_str() { - "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.alpn.as_str(), - _ => "hq-interop", - }; - - let mut client = Connection::new_client( - origin, - &[alpn], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - local_addr, - remote_addr, - args.quic_parameters.get(alpn), - Instant::now(), - )?; - - if let Some(tok) = token { - client.enable_resumption(Instant::now(), 
tok)?; - } - - let ciphers = args.get_ciphers(); - if !ciphers.is_empty() { - client.set_ciphers(&ciphers)?; + Ok(()) } - - client.set_qlog(qlog_new(args, origin, client.odcid().unwrap())?); - - let key_update = KeyUpdateState(args.key_update); - let mut h = HandlerOld { - streams: HashMap::new(), - url_queue, - all_paths: Vec::new(), - args, - token: None, - key_update, - }; - - process_loop_old(&local_addr, socket, poll, &mut client, &mut h)?; - - let token = if args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. - h.token - .or_else(|| client.take_resumption_token(Instant::now())) - } else { - None - }; - Ok(token) } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 2de388418a..1678e0b8bd 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -2585,10 +2585,16 @@ impl Connection { ) -> Res<()> { qtrace!([self], "Handshake space={} data={:0x?}", space, data); + let was_authentication_pending = + *self.crypto.tls.state() == HandshakeState::AuthenticationPending; let try_update = data.is_some(); match self.crypto.handshake(now, space, data)? 
{ HandshakeState::Authenticated(_) | HandshakeState::InProgress => (), - HandshakeState::AuthenticationPending => self.events.authentication_needed(), + HandshakeState::AuthenticationPending => { + if !was_authentication_pending { + self.events.authentication_needed() + } + } HandshakeState::EchFallbackAuthenticationPending(public_name) => self .events .ech_fallback_authentication_needed(public_name.clone()), diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 93385ac1bc..a91ecf1b4a 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -1135,3 +1135,54 @@ fn implicit_rtt_server() { // an RTT estimate from having discarded the Initial packet number space. assert_eq!(server.stats().rtt, RTT); } + +#[test] +fn emit_authentication_needed_once() { + let mut client = default_client(); + + let mut server = Connection::new_server( + test_fixture::LONG_CERT_KEYS, + test_fixture::DEFAULT_ALPN, + Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), + ConnectionParameters::default(), + ) + .expect("create a server"); + + let client1 = client.process(None, now()); + assert!(client1.as_dgram_ref().is_some()); + + // The entire server flight doesn't fit in a single packet because the + // certificate is large, therefore the server will produce 2 packets. + let server1 = server.process(client1.as_dgram_ref(), now()); + assert!(server1.as_dgram_ref().is_some()); + let server2 = server.process(None, now()); + assert!(server2.as_dgram_ref().is_some()); + + let authentication_needed_count = |client: &mut Connection| { + client + .events() + .filter(|e| matches!(e, ConnectionEvent::AuthenticationNeeded)) + .count() + }; + + // Upon receiving the first packet, the client has the server certificate, + // but not yet all required handshake data. 
It moves to + // `HandshakeState::AuthenticationPending` and emits a + // `ConnectionEvent::AuthenticationNeeded` event. + // + // Note that this is a tiny bit fragile in that it depends on having a certificate + // that is within a fairly narrow range of sizes. It has to fit in a single + // packet, but be large enough that the CertificateVerify message does not + // also fit in the same packet. Our default test setup achieves this, but + // changes to the setup might invalidate this test. + let _ = client.process(server1.as_dgram_ref(), now()); + assert_eq!(1, authentication_needed_count(&mut client)); + assert!(client.peer_certificate().is_some()); + + // The `AuthenticationNeeded` event is still pending a call to + // `Connection::authenticated`. On receiving the second packet from the + // server, the client must not emit a another + // `ConnectionEvent::AuthenticationNeeded`. + let _ = client.process(server2.as_dgram_ref(), now()); + assert_eq!(0, authentication_needed_count(&mut client)); +} From 9ebf23b3d4fa91fdf2ff6934a1fd115eb9658921 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 6 Feb 2024 18:00:18 +1100 Subject: [PATCH 117/321] Range tracker benchmark (#1622) Co-authored-by: Lars Eggert --- neqo-transport/Cargo.toml | 5 +++ neqo-transport/benches/range_tracker.rs | 44 +++++++++++++++++++++++++ neqo-transport/src/lib.rs | 3 ++ neqo-transport/src/send_stream.rs | 8 ++--- 4 files changed, 56 insertions(+), 4 deletions(-) create mode 100644 neqo-transport/benches/range_tracker.rs diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index c75142e102..49ece8661c 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -34,3 +34,8 @@ required-features = ["bench"] name = "rx_stream_orderer" harness = false required-features = ["bench"] + +[[bench]] +name = "range_tracker" +harness = false +required-features = ["bench"] \ No newline at end of file diff --git a/neqo-transport/benches/range_tracker.rs 
b/neqo-transport/benches/range_tracker.rs new file mode 100644 index 0000000000..a328866f7e --- /dev/null +++ b/neqo-transport/benches/range_tracker.rs @@ -0,0 +1,44 @@ +use criterion::{criterion_group, criterion_main, Criterion}; // black_box +use neqo_transport::send_stream::{RangeState, RangeTracker}; + +const CHUNK: u64 = 1000; +const END: u64 = 100_000; +fn build_coalesce(len: u64) -> RangeTracker { + let mut used = RangeTracker::default(); + used.mark_range(0, CHUNK as usize, RangeState::Acked); + used.mark_range(CHUNK, END as usize, RangeState::Sent); + // leave a gap or it will coalesce here + for i in 2..=len { + // These do not get immediately coalesced when marking since they're not at the end or start + used.mark_range(i * CHUNK, CHUNK as usize, RangeState::Acked); + } + used +} + +fn coalesce(c: &mut Criterion, count: u64) { + c.bench_function( + &format!("coalesce_acked_from_zero {count}+1 entries"), + |b| { + b.iter_batched_ref( + || build_coalesce(count), + |used| { + used.mark_range(CHUNK, CHUNK as usize, RangeState::Acked); + let tail = (count + 1) * CHUNK; + used.mark_range(tail, CHUNK as usize, RangeState::Sent); + used.mark_range(tail, CHUNK as usize, RangeState::Acked); + }, + criterion::BatchSize::SmallInput, + ) + }, + ); +} + +fn benchmark_coalesce(c: &mut Criterion) { + coalesce(c, 1); + coalesce(c, 3); + coalesce(c, 10); + coalesce(c, 1000); +} + +criterion_group!(benches, benchmark_coalesce); +criterion_main!(benches); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index ecf7ee2f73..2b5ad57579 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -30,6 +30,9 @@ pub mod recv_stream; #[cfg(not(feature = "bench"))] mod recv_stream; mod rtt; +#[cfg(feature = "bench")] +pub mod send_stream; +#[cfg(not(feature = "bench"))] mod send_stream; mod sender; pub mod server; diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 0464b3e490..62373e22f3 100644 --- 
a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -135,8 +135,8 @@ impl Default for RetransmissionPriority { } } -#[derive(Debug, PartialEq, Clone, Copy)] -enum RangeState { +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum RangeState { Sent, Acked, } @@ -144,7 +144,7 @@ enum RangeState { /// Track ranges in the stream as sent or acked. Acked implies sent. Not in a /// range implies needing-to-be-sent, either initially or as a retransmission. #[derive(Debug, Default, PartialEq)] -struct RangeTracker { +pub struct RangeTracker { /// offset, (len, RangeState). Use u64 for len because ranges can exceed 32bits. used: BTreeMap, /// this is a cache for first_unmarked_range(), which we check a log @@ -325,7 +325,7 @@ impl RangeTracker { } } - fn mark_range(&mut self, off: u64, len: usize, state: RangeState) { + pub fn mark_range(&mut self, off: u64, len: usize, state: RangeState) { if len == 0 { qinfo!("mark 0-length range at {}", off); return; From a5a2e13f7f0c57bc5d8ac0f11268ac6f9c7ac0a6 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 6 Feb 2024 19:07:34 +1100 Subject: [PATCH 118/321] Test benchmarks (#1623) * Don't build benches separately * Use --all-targets -F bench for build, test, and clippy * Restore build before transfer run --------- Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 7951477355..71b8e5655c 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -109,16 +109,14 @@ jobs: - name: Build run: | - cargo +${{ matrix.rust-toolchain }} build --all-targets + cargo +${{ matrix.rust-toolchain }} build --all-targets --features ci,bench echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "${{ github.workspace }}/dist/Debug/lib" 
>> "$GITHUB_PATH" - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info - - - name: Benches should at least build - run: cargo +${{ matrix.rust-toolchain }} build --features bench --benches + run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest \ + --all-targets --features ci,bench --no-fail-fast --lcov --output-path lcov.info - name: Run client/server transfer run: | @@ -142,7 +140,7 @@ jobs: if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy --tests --benches -- -D warnings + run: cargo +${{ matrix.rust-toolchain }} clippy --all-targets -- -D warnings if: success() || failure() continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} From 6ef2c5e84162735f55d0afea83556acceabfd2fa Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 6 Feb 2024 11:46:48 +0200 Subject: [PATCH 119/321] fix: Don't call `write_entry` from inside `debug_assert!` (#1624) * fix: Don't call `write_entry` from inside `debug_assert!` Fixes #1481 * Try and add some release builds/tests to the matrix * Fix * Type * Don't exit * Fix env * Remove echo step * Don't linebreak a `run:` statement, it makes it silently fail * Indicate correct lib dir for build type --------- Signed-off-by: Lars Eggert --- .github/workflows/check.yml | 26 +++++++++++++++++--------- neqo-transport/src/cid.rs | 2 +- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 71b8e5655c..bf04a94da4 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -18,6 +18,13 @@ jobs: matrix: os: [ubuntu-latest, macos-13, windows-latest] rust-toolchain: [1.70.0, stable, nightly] + type: [debug] + include: + - os: ubuntu-latest + rust-toolchain: stable + type: release + env: + BUILD_TYPE: ${{ matrix.type == 'release' && '--release' || '' }} runs-on: ${{ matrix.os }} defaults: 
run: @@ -109,21 +116,22 @@ jobs: - name: Build run: | - cargo +${{ matrix.rust-toolchain }} build --all-targets --features ci,bench - echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" - echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" - echo "${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_PATH" + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci,bench + echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_ENV" + echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_ENV" + echo "${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_PATH" + env: + LIB_DIR: ${{ matrix.type == 'release' && 'Release' || 'Debug' }} - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest \ - --all-targets --features ci,bench --no-fail-fast --lcov --output-path lcov.info + run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --all-targets --features ci,bench --no-fail-fast --lcov --output-path lcov.info - name: Run client/server transfer run: | - cargo +${{ matrix.rust-toolchain }} build --bin neqo-client --bin neqo-server - cargo +${{ matrix.rust-toolchain }} run --bin neqo-server -- $HOST:4433 & + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-server -- $HOST:4433 & PID=$! - cargo +${{ matrix.rust-toolchain }} run --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-client -- --output-dir . 
https://$HOST:4433/$SIZE kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index be202daf25..429751bef2 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -573,7 +573,7 @@ impl ConnectionIdManager { .add_local(ConnectionIdEntry::new(seqno, cid.clone(), ())); let entry = ConnectionIdEntry::new(seqno, cid, srt); - debug_assert!(self.write_entry(&entry, builder, stats)?); + self.write_entry(&entry, builder, stats)?; tokens.push(RecoveryToken::NewConnectionId(entry)); } } From a8a86863db8d1efa920a12edc99a764e2efeeb9f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 6 Feb 2024 11:57:12 +0200 Subject: [PATCH 120/321] chore: Fix clippy nightly nit (#1625) --- neqo-common/src/codec.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 57ff13f39f..620c669ae6 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -112,9 +112,7 @@ impl<'a> Decoder<'a> { /// Decodes a QUIC varint. 
pub fn decode_varint(&mut self) -> Option { - let Some(b1) = self.decode_byte() else { - return None; - }; + let b1 = self.decode_byte()?; match b1 >> 6 { 0 => Some(u64::from(b1 & 0x3f)), 1 => Some((u64::from(b1 & 0x3f) << 8) | self.decode_uint(1)?), From 816182fe204daf23d22328b36db49a35841cb41a Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 6 Feb 2024 21:53:40 +1100 Subject: [PATCH 121/321] Run client/server test with the ci,bench features (#1626) Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index bf04a94da4..fb2e3afcff 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -128,10 +128,10 @@ jobs: - name: Run client/server transfer run: | - cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-server -- $HOST:4433 & + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --features ci,bench --bin neqo-client --bin neqo-server + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --features ci,bench --bin neqo-server -- $HOST:4433 & PID=$! - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --features ci,bench --bin neqo-client -- --output-dir . 
https://$HOST:4433/$SIZE kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: From b16eaf98ae98ec817548060fe1df7d570375a43d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 6 Feb 2024 22:59:54 +0200 Subject: [PATCH 122/321] fix: More green for the neqo fields in the interop runner (#1627) * fix: Only allow v1 for most interop runner runs * Fix v2 --- neqo-client/src/main.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index d8444542ea..931ce5a6e4 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -1005,7 +1005,10 @@ async fn main() -> Res<()> { let mut args = Args::from_args(); if let Some(testcase) = args.qns_test.as_ref() { + // Only use v1 for most QNS tests. + args.quic_parameters.quic_version = vec![VersionArg(Version::Version1)]; match testcase.as_str() { + // TODO: Add "ecn" when that is ready. "http3" => {} "handshake" | "transfer" | "retry" => { args.use_old_http = true; @@ -1034,6 +1037,8 @@ async fn main() -> Res<()> { } "v2" => { args.use_old_http = true; + // Use default version set for this test (which allows compatible vneg.) 
+ args.quic_parameters.quic_version.clear(); } _ => exit(127), } From 09bb44146ed988963016038fb6d67565d3283363 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 7 Feb 2024 18:20:11 +1100 Subject: [PATCH 123/321] These addresses can now be constants (#1630) * These addresses can now be constants * Missed a few --- neqo-http3/src/connection_client.rs | 9 +-- .../tests/webtransport/mod.rs | 6 +- neqo-http3/tests/webtransport.rs | 6 +- .../src/connection/tests/ackrate.rs | 4 +- .../src/connection/tests/handshake.rs | 16 ++--- .../src/connection/tests/migration.rs | 72 +++++++++---------- neqo-transport/src/connection/tests/mod.rs | 6 +- neqo-transport/src/recovery.rs | 11 ++- test-fixture/src/assertions.rs | 6 +- test-fixture/src/lib.rs | 31 ++++---- 10 files changed, 91 insertions(+), 76 deletions(-) diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 5cc0541c0c..b98533b043 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -1306,8 +1306,9 @@ mod tests { StreamType, Version, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE, }; use test_fixture::{ - addr, anti_replay, default_server_h3, fixture_init, new_server, now, - CountingConnectionIdGenerator, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, + anti_replay, default_server_h3, fixture_init, new_server, now, + CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, DEFAULT_KEYS, + DEFAULT_SERVER_NAME, }; use super::{ @@ -1340,8 +1341,8 @@ mod tests { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, Http3Parameters::default() .connection_parameters( // Disable compatible upgrade, which complicates tests. 
diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs index 51dc47e4c1..3753c3122d 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs @@ -14,7 +14,7 @@ use neqo_common::event::Provider; use neqo_crypto::AuthenticationStatus; use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use test_fixture::{ - addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, + anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, }; @@ -38,8 +38,8 @@ pub fn default_http3_client(client_params: Http3Parameters) -> Http3Client { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, client_params, now(), ) diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index 4e943d86cb..b1e18a5a98 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -15,7 +15,7 @@ use neqo_http3::{ }; use neqo_transport::{StreamId, StreamType}; use test_fixture::{ - addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, + anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, }; @@ -24,8 +24,8 @@ fn connect() -> (Http3Client, Http3Server) { let mut client = Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, Http3Parameters::default().webtransport(true), now(), ) diff --git a/neqo-transport/src/connection/tests/ackrate.rs b/neqo-transport/src/connection/tests/ackrate.rs index 1b83d42acd..f0a1d17cd9 100644 --- 
a/neqo-transport/src/connection/tests/ackrate.rs +++ b/neqo-transport/src/connection/tests/ackrate.rs @@ -6,7 +6,7 @@ use std::{mem, time::Duration}; -use test_fixture::{addr_v4, assertions}; +use test_fixture::{assertions, DEFAULT_ADDR_V4}; use super::{ super::{ConnectionParameters, ACK_RATIO_SCALE}, @@ -164,7 +164,7 @@ fn migrate_ack_delay() { let mut now = connect_rtt_idle(&mut client, &mut server, DEFAULT_RTT); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); let client1 = send_something(&mut client, now); diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index a91ecf1b4a..52077c8e88 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -18,8 +18,8 @@ use neqo_crypto::{ constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, }; use test_fixture::{ - self, addr, assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, - split_datagram, + self, assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, + split_datagram, DEFAULT_ADDR, }; use super::{ @@ -122,8 +122,8 @@ fn no_alpn() { "example.com", &["bad-alpn"], Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, ConnectionParameters::default(), now(), ) @@ -251,8 +251,8 @@ fn chacha20poly1305() { test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, ConnectionParameters::default(), now(), ) @@ -730,8 +730,8 @@ fn connect_one_version() { test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, 
ConnectionParameters::default().versions(version, vec![version]), now(), ) diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 8307a7dd84..7a47ec4156 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -13,9 +13,9 @@ use std::{ use neqo_common::{Datagram, Decoder}; use test_fixture::{ - self, addr, addr_v4, + self, assertions::{assert_v4_path, assert_v6_path}, - fixture_init, new_neqo_qlog, now, + fixture_init, new_neqo_qlog, now, DEFAULT_ADDR, DEFAULT_ADDR_V4, }; use super::{ @@ -94,8 +94,8 @@ fn rebinding_port() { server.stream_close_send(stream_id).unwrap(); let dgram = server.process_output(now()).dgram(); let dgram = dgram.unwrap(); - assert_eq!(dgram.source(), addr()); - assert_eq!(dgram.destination(), new_port(addr())); + assert_eq!(dgram.source(), DEFAULT_ADDR); + assert_eq!(dgram.destination(), new_port(DEFAULT_ADDR)); } /// This simulates an attack where a valid packet is forwarded on @@ -109,7 +109,7 @@ fn path_forwarding_attack() { let mut now = now(); let dgram = send_something(&mut client, now); - let dgram = change_path(&dgram, addr_v4()); + let dgram = change_path(&dgram, DEFAULT_ADDR_V4); server.process_input(&dgram, now); // The server now probes the new (primary) path. @@ -188,7 +188,7 @@ fn migrate_immediate() { let now = now(); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); let client1 = send_something(&mut client, now); @@ -229,7 +229,7 @@ fn migrate_rtt() { let now = connect_rtt_idle(&mut client, &mut server, RTT); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); // The RTT might be increased for the new path, so allow a little flexibility. 
let rtt = client.paths.rtt(); @@ -245,7 +245,7 @@ fn migrate_immediate_fail() { let mut now = now(); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); @@ -293,7 +293,7 @@ fn migrate_same() { let now = now(); client - .migrate(Some(addr()), Some(addr()), true, now) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), true, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); @@ -320,7 +320,7 @@ fn migrate_same_fail() { let mut now = now(); client - .migrate(Some(addr()), Some(addr()), true, now) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), true, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); @@ -375,7 +375,7 @@ fn migration(mut client: Connection) { let now = now(); client - .migrate(Some(addr_v4()), Some(addr_v4()), false, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); @@ -449,8 +449,8 @@ fn migration_client_empty_cid() { test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, ConnectionParameters::default(), now(), ) @@ -568,22 +568,22 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So /// Migration works for a new port number. #[test] fn preferred_address_new_port() { - let a = addr(); + let a = DEFAULT_ADDR; preferred_address(a, a, new_port(a)); } /// Migration works for a new address too. #[test] fn preferred_address_new_address() { - let mut preferred = addr(); + let mut preferred = DEFAULT_ADDR; preferred.set_ip(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2))); - preferred_address(addr(), addr(), preferred); + preferred_address(DEFAULT_ADDR, DEFAULT_ADDR, preferred); } /// Migration works for IPv4 addresses. 
#[test] fn preferred_address_new_port_v4() { - let a = addr_v4(); + let a = DEFAULT_ADDR_V4; preferred_address(a, a, new_port(a)); } @@ -623,7 +623,7 @@ fn preferred_address_ignore_loopback() { /// A preferred address in the wrong address family is ignored. #[test] fn preferred_address_ignore_different_family() { - preferred_address_ignored(PreferredAddress::new_any(Some(addr_v4()), None)); + preferred_address_ignored(PreferredAddress::new_any(Some(DEFAULT_ADDR_V4), None)); } /// Disabling preferred addresses at the client means that it ignores a perfectly @@ -631,7 +631,7 @@ fn preferred_address_ignore_different_family() { #[test] fn preferred_address_disabled_client() { let mut client = new_client(ConnectionParameters::default().disable_preferred_address()); - let mut preferred = addr(); + let mut preferred = DEFAULT_ADDR; preferred.set_ip(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2))); let spa = PreferredAddress::new_any(None, Some(preferred)); let mut server = new_server(ConnectionParameters::default().preferred_address(spa)); @@ -643,7 +643,7 @@ fn preferred_address_disabled_client() { fn preferred_address_empty_cid() { fixture_init(); - let spa = PreferredAddress::new_any(None, Some(new_port(addr()))); + let spa = PreferredAddress::new_any(None, Some(new_port(DEFAULT_ADDR))); let res = Connection::new_server( test_fixture::DEFAULT_KEYS, test_fixture::DEFAULT_ALPN, @@ -706,33 +706,33 @@ fn preferred_address_client() { fn migration_invalid_state() { let mut client = default_client(); assert!(client - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); let mut server = default_server(); assert!(server - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); connect_force_idle(&mut client, &mut server); assert!(server - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), 
Some(DEFAULT_ADDR), false, now()) .is_err()); client.close(now(), 0, "closing"); assert!(client - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); let close = client.process(None, now()).dgram(); let dgram = server.process(close.as_ref(), now()).dgram(); assert!(server - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); client.process_input(&dgram.unwrap(), now()); assert!(client - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); } @@ -753,32 +753,32 @@ fn migration_invalid_address() { cant_migrate(None, None); // Providing a zero port number isn't valid. - let mut zero_port = addr(); + let mut zero_port = DEFAULT_ADDR; zero_port.set_port(0); cant_migrate(None, Some(zero_port)); cant_migrate(Some(zero_port), None); // An unspecified remote address is bad. - let mut remote_unspecified = addr(); + let mut remote_unspecified = DEFAULT_ADDR; remote_unspecified.set_ip(IpAddr::V6(Ipv6Addr::from(0))); cant_migrate(None, Some(remote_unspecified)); // Mixed address families is bad. - cant_migrate(Some(addr()), Some(addr_v4())); - cant_migrate(Some(addr_v4()), Some(addr())); + cant_migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR_V4)); + cant_migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR)); // Loopback to non-loopback is bad. 
- cant_migrate(Some(addr()), Some(loopback())); - cant_migrate(Some(loopback()), Some(addr())); + cant_migrate(Some(DEFAULT_ADDR), Some(loopback())); + cant_migrate(Some(loopback()), Some(DEFAULT_ADDR)); assert_eq!( client - .migrate(Some(addr()), Some(loopback()), true, now()) + .migrate(Some(DEFAULT_ADDR), Some(loopback()), true, now()) .unwrap_err(), Error::InvalidMigration ); assert_eq!( client - .migrate(Some(loopback()), Some(addr()), true, now()) + .migrate(Some(loopback()), Some(DEFAULT_ADDR), true, now()) .unwrap_err(), Error::InvalidMigration ); @@ -864,7 +864,7 @@ fn retire_prior_to_migration_failure() { let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); client - .migrate(Some(addr_v4()), Some(addr_v4()), false, now()) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now()) .unwrap(); // The client now probes the new path. @@ -919,7 +919,7 @@ fn retire_prior_to_migration_success() { let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); client - .migrate(Some(addr_v4()), Some(addr_v4()), false, now()) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now()) .unwrap(); // The client now probes the new path. 
diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index 8a999f4048..d958ecd70c 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -18,7 +18,7 @@ use std::{ use enum_map::enum_map; use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; -use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; +use test_fixture::{self, fixture_init, new_neqo_qlog, now, DEFAULT_ADDR}; use super::{Connection, ConnectionError, ConnectionId, Output, State}; use crate::{ @@ -107,8 +107,8 @@ pub fn new_client(params: ConnectionParameters) -> Connection { test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, params, now(), ) diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index d90989b486..ec1d7e4a3d 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -1027,7 +1027,7 @@ mod tests { }; use neqo_common::qlog::NeqoQlog; - use test_fixture::{addr, now}; + use test_fixture::{now, DEFAULT_ADDR}; use super::{ LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket, FAST_PTO_SCALE, @@ -1105,7 +1105,14 @@ mod tests { impl Default for Fixture { fn default() -> Self { const CC: CongestionControlAlgorithm = CongestionControlAlgorithm::NewReno; - let mut path = Path::temporary(addr(), addr(), CC, true, NeqoQlog::default(), now()); + let mut path = Path::temporary( + DEFAULT_ADDR, + DEFAULT_ADDR, + CC, + true, + NeqoQlog::default(), + now(), + ); path.make_permanent( None, ConnectionIdEntry::new(0, ConnectionId::from(&[1, 2, 3]), [0; 16]), diff --git a/test-fixture/src/assertions.rs b/test-fixture/src/assertions.rs index 7e772daabf..dd6d0330ef 100644 --- a/test-fixture/src/assertions.rs 
+++ b/test-fixture/src/assertions.rs @@ -12,7 +12,7 @@ use std::{ use neqo_common::{Datagram, Decoder}; use neqo_transport::{version::WireVersion, Version}; -use crate::{addr, addr_v4}; +use crate::{DEFAULT_ADDR, DEFAULT_ADDR_V4}; const PACKET_TYPE_MASK: u8 = 0b1011_0000; @@ -161,7 +161,7 @@ pub fn assert_path(dgram: &Datagram, path_addr: SocketAddr) { /// /// When the path doesn't use the default v4 socket address at both ends. pub fn assert_v4_path(dgram: &Datagram, padded: bool) { - assert_path(dgram, addr_v4()); + assert_path(dgram, DEFAULT_ADDR_V4); if padded { assert_eq!(dgram.len(), 1357 /* PATH_MTU_V4 */); } @@ -171,7 +171,7 @@ pub fn assert_v4_path(dgram: &Datagram, padded: bool) { /// /// When the path doesn't use the default v6 socket address at both ends. pub fn assert_v6_path(dgram: &Datagram, padded: bool) { - assert_path(dgram, addr()); + assert_path(dgram, DEFAULT_ADDR); if padded { assert_eq!(dgram.len(), 1337 /* PATH_MTU_V6 */); } diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 2c94767a97..96ed335a83 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -86,26 +86,33 @@ pub const DEFAULT_KEYS: &[&str] = &["key"]; pub const LONG_CERT_KEYS: &[&str] = &["A long cert"]; pub const DEFAULT_ALPN: &[&str] = &["alpn"]; pub const DEFAULT_ALPN_H3: &[&str] = &["h3"]; +pub const DEFAULT_ADDR: SocketAddr = addr(); +pub const DEFAULT_ADDR_V4: SocketAddr = addr_v4(); // Create a default datagram with the given data. #[must_use] pub fn datagram(data: Vec) -> Datagram { - Datagram::new(addr(), addr(), IpTos::default(), Some(128), data) + Datagram::new( + DEFAULT_ADDR, + DEFAULT_ADDR, + IpTos::default(), + Some(128), + data, + ) } /// Create a default socket address. #[must_use] -pub fn addr() -> SocketAddr { - // These could be const functions, but they aren't... 
+const fn addr() -> SocketAddr { let v6ip = IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 1)); SocketAddr::new(v6ip, 443) } /// An IPv4 version of the default socket address. #[must_use] -pub fn addr_v4() -> SocketAddr { - let localhost_v4 = IpAddr::V4(Ipv4Addr::from(0xc000_0201)); - SocketAddr::new(localhost_v4, addr().port()) +const fn addr_v4() -> SocketAddr { + let v4ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 1)); + SocketAddr::new(v4ip, DEFAULT_ADDR.port()) } /// This connection ID generation scheme is the worst, but it doesn't produce collisions. @@ -154,8 +161,8 @@ pub fn new_client(params: ConnectionParameters) -> Connection { DEFAULT_SERVER_NAME, DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, params.ack_ratio(255), // Tests work better with this set this way. now(), ) @@ -258,8 +265,8 @@ pub fn default_http3_client() -> Http3Client { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, Http3Parameters::default() .max_table_size_encoder(100) .max_table_size_decoder(100) @@ -281,8 +288,8 @@ pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, params, now(), ) From e45e234cb1a04cc47367fdbad181c00b414b52e3 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 7 Feb 2024 18:44:01 +1100 Subject: [PATCH 124/321] Drop the logging level for STREAM frame writing (#1629) This is very verbose and it pollutes things like interop runner traces too much. 
Co-authored-by: Lars Eggert --- neqo-transport/src/send_stream.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 62373e22f3..14c3326ca3 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -1620,16 +1620,16 @@ impl SendStreams { // Iterate the map, but only those without fairness, then iterate // OrderGroups, then iterate each group - qdebug!("processing streams... unfair:"); + qtrace!("processing streams... unfair:"); for stream in self.map.values_mut() { if !stream.is_fair() { - qdebug!(" {}", stream); + qtrace!(" {}", stream); if !stream.write_frames_with_early_return(priority, builder, tokens, stats) { break; } } } - qdebug!("fair streams:"); + qtrace!("fair streams:"); let stream_ids = self.regular.iter().chain( self.sendordered .values_mut() @@ -1639,9 +1639,9 @@ impl SendStreams { for stream_id in stream_ids { let stream = self.map.get_mut(&stream_id).unwrap(); if let Some(order) = stream.sendorder() { - qdebug!(" {} ({})", stream_id, order) + qtrace!(" {} ({})", stream_id, order) } else { - qdebug!(" None") + qtrace!(" None") } if !stream.write_frames_with_early_return(priority, builder, tokens, stats) { break; From 10e52843155a12b9ca9a1f901a9eb6be544ef817 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 7 Feb 2024 15:18:52 +0200 Subject: [PATCH 125/321] ci: Only upload coverage from stable debug builds to Codecov (#1631) * ci: Only upload coverage from stable debug builds to Codecov Eliminates some redundancy * Add .codecov.yml to the repository --- .codecov.yml | 5 +++++ .github/workflows/check.yml | 1 + 2 files changed, 6 insertions(+) create mode 100644 .codecov.yml diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000000..d55ee5edd6 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,5 @@ +# neqo has no test coverage for its example client, server and interop test +ignore: + - "neqo-client" + 
- "neqo-interop" + - "neqo-server" diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index fb2e3afcff..b3982a556a 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -164,3 +164,4 @@ jobs: file: lcov.info fail_ci_if_error: false token: ${{ secrets.CODECOV_TOKEN }} + if: matrix.type == 'debug' && matrix.rust-toolchain == 'stable' From b547c6f36a508f882df9f9fde84fd8ab46250ec1 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Wed, 7 Feb 2024 17:48:30 +0100 Subject: [PATCH 126/321] Add script to plot neqo's MOZ_LOG output from the congestion controller (#1536) --- test/README.md | 2 + test/mozlog-neqo-cwnd.py | 208 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 210 insertions(+) create mode 100755 test/mozlog-neqo-cwnd.py diff --git a/test/README.md b/test/README.md index ca1bc0684f..38acb7a90f 100644 --- a/test/README.md +++ b/test/README.md @@ -31,3 +31,5 @@ The `upload_test.sh` script automates testing network conditions for `neqo-clien 3. **Automated Test Execution**: The script sets up network conditions and runs `neqo-client` and `neqo-server` tests. 4. **Cleanup**: At the end, it resets network conditions and stops the server. +## Visualize log file +Run `./mozlog-neqo-cwnd.py upload.log` to view the logs with matplotlib and python. diff --git a/test/mozlog-neqo-cwnd.py b/test/mozlog-neqo-cwnd.py new file mode 100755 index 0000000000..f40c3d5606 --- /dev/null +++ b/test/mozlog-neqo-cwnd.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python3 + +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +# Author: Manuel Bucher +# Date: 2023-11-02 + +import matplotlib.pyplot as plt +import re +import sys +from collections import defaultdict +from datetime import datetime + +# 2023-11-02 13:32:28.450290 UTC - [Parent 31525: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] packet_sent this=0x7f84d3d31100, pn=111, ps=36 +# 2023-11-02 13:32:28.477635 UTC - [Parent 31525: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] packet_acked this=0x7f84d3d31100, pn=111, ps=36, ignored=0, lost=0 +# 2023-11-02 13:55:02.954829 UTC - [Parent 41203: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] packet_lost this=0x7f2864efcc80, pn=308694, ps=1337 +PATTERN = r" ([a-z_]+) this=0x([0-9a-f]+), pn=(\d+), ps=(\d+)" +events = re.compile(PATTERN) + +# 2023-11-02 13:32:28.477655 UTC - [Parent 31525: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] on_packets_acked this=0x7f84d3d31100, limited=1, bytes_in_flight=0, cwnd=13370, state=SlowStart, new_acked=36 +PATTERN = r" on_packets_acked this=0x([0-9a-f]+), limited=(\d+), bytes_in_flight=(\d+), cwnd=(\d+), state=([a-zA-Z]+), new_acked=(\d+)" +acked = re.compile(PATTERN) +# 2023-11-02 13:55:02.954909 UTC - [Parent 41203: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] on_packets_lost this=0x7f2864efcc80, bytes_in_flight=690883, cwnd=1520187, state=RecoveryStart +PATTERN = r" on_packets_lost this=0x([0-9a-f]+), bytes_in_flight=(\d+), cwnd=(\d+), state=([a-zA-Z]+)" +lost = re.compile(PATTERN) + + +def get_time(line): + # allow garbage data before timestamp + timestamp = line.split(" UTC", 1)[0].split(" ") + timestamp = timestamp[-2] + " " + timestamp[-1] + return datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f") + + +def main(): + if len(sys.argv) < 2: + print("usage:", sys.argv[0], "LOG_FILE") + return + + data = defaultdict( + lambda: { + "time": [], + "cwnd": [], + "bif": [], + "bif_limited": [], + "bif_limited_time": [], + 
"last_bytes_in_flight": 0, + "last_state": ("SlowStart", 0), + # color background depending on state + "bg_state": [], + "bg_time": [], + # event occurences + "p": {}, # pn -> y-axis (bytes in flight after packet sent) + "ps": defaultdict( + lambda: defaultdict(lambda: []) + ), # x/y coords of packet_sent/_acked/_lost + } + ) + + for line in open(sys.argv[1]): + if (result := acked.search(line)) is not None: + this = result.group(1) + now = get_time(line) + data[this]["time"].append(now) + data[this]["limited"] = bool(int(result.group(2))) + data[this]["last_bytes_in_flight"] = int(result.group(3)) + data[this]["bif"].append(data[this]["last_bytes_in_flight"]) + data[this]["cwnd"].append(int(result.group(4))) + state = result.group(5) + if data[this]["last_state"][0] != state: + data[this]["bg_state"].append(state) + data[this]["bg_time"].append(now) + data[this]["last_state"] = (state, now) + data[this]["new_acked"] = result.group(6) + if data[this]["limited"]: + data[this]["bif_limited"].append(data[this]["last_bytes_in_flight"]) + data[this]["bif_limited_time"].append(now) + elif (result := events.search(line)) is not None: + this = result.group(2) + now = get_time(line) + event = result.group(1) + pn = int(result.group(3)) + packet_size = int(result.group(4)) + if ( + event == "packet_sent" + or event == "packet_acked" + or event == "packet_lost" + ): + if event == "packet_sent": + data[this]["last_bytes_in_flight"] += packet_size + data[this]["p"][pn] = data[this]["last_bytes_in_flight"] + if data[this]["last_state"][0] == "RecoveryStart": + data[this]["bg_state"].append("CongestionAvoidance") + data[this]["bg_time"].append(now) + data[this]["last_state"] = ("CongestionAvoidance", now) + if data[this]["last_state"] == "PersistentCongestion": + data[this]["bg_state"].append("SlowStart") + data[this]["bg_time"].append(now) + data[this]["last_state"] = ("SlowStart", now) + # only remember events for packets where we sent the packet + if pn in data[this]["p"]: + 
data[this]["ps"][event]["time"].append(now) + data[this]["ps"][event]["bif"].append(data[this]["p"][pn]) + data[this]["ps"][event]["pn"].append(pn) + elif (result := lost.search(line)) is not None: + this = result.group(1) + now = get_time(line) + data[this]["time"].append(now) + data[this]["last_bytes_in_flight"] = int(result.group(2)) + data[this]["bif"].append(data[this]["last_bytes_in_flight"]) + data[this]["cwnd"].append(int(result.group(3))) + state = result.group(4) + if data[this]["last_state"][0] != state: + data[this]["bg_state"].append(state) + data[this]["bg_time"].append(now) + data[this]["last_state"] = (state, now) + + output = "" + output_num = 0 + for el in data: + if len(data[el]["time"]) > output_num: + output_num = len(data[el]["time"]) + output = el + fig, axs = plt.subplots(2, 1) + + data[output]["bg_time"].append(data[output]["time"][-1]) + for ax in axs: + color_background(ax, data[output]["bg_time"], data[output]["bg_state"]) + + # add plots + graph_pn(axs[0], data[output]) + graph_cwnd(axs[1], data[output]) + + # configure graph + axs[0].set_title(sys.argv[1].split("/")[-1]) + for ax in axs: + ax.legend() + plt.show() + + +COLORS = { + "packet_sent": "black", + "packet_lost": "red", + "packet_acked": "green", +} + + +# plot pn graph +def graph_pn(ax, output_data): + for event in ["packet_sent", "packet_acked", "packet_lost"]: + ax.scatter( + output_data["ps"][event]["time"], + output_data["ps"][event]["pn"], + label=event, + s=10, + color=COLORS[event], + ) + ax.set_xlabel("time in s") + ax.set_ylabel("packet_number") + + +# plot cwnd graph +def graph_cwnd(ax, output_data): + ax.plot(output_data["time"], output_data["cwnd"], label="cwnd") + ax.plot(output_data["time"], output_data["bif"], ".-", label="bytes in flight") + ax.plot( + output_data["bif_limited_time"], + output_data["bif_limited"], + "s", + label="app_limited", + ) + for event in ["packet_sent", "packet_lost"]: + ax.scatter( + output_data["ps"][event]["time"], + 
output_data["ps"][event]["bif"], + label=event, + s=10, + color=COLORS[event], + ) + ax.set_xlabel("time in s") + ax.set_ylabel("bytes") + + +def color_background(ax, time, states): + # change background depending on congestion controller state + state_colors = { + "SlowStart": "green", + "CongestionAvoidance": "blue", + "RecoveryStart": "gray", + "Recovery": "orange", + "PersistentCongestion": "purple", + } + legend = set() + for time_from, time_to, state in zip(time[:-1], time[1:], states): + color = state_colors[state] + if state in legend: + ax.axvspan(time_from, time_to, facecolor=color, alpha=0.3) + else: + legend.add(state) + ax.axvspan(time_from, time_to, facecolor=color, alpha=0.3, label=state) + + +if __name__ == "__main__": + main() From 90b718fefbdb569f01b987a887b9475fe303bc8a Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 8 Feb 2024 17:46:49 +1100 Subject: [PATCH 127/321] perf: Coalesce better (#1594) * benchmarks for coalesce/mark_range * benchmarks for coalesce/mark_range * Make the exposure conditional, fmt * Try to get something better im place * Refactor bench * Working much better now * That was tricky * Fix benches * Some debug assertions and better comments * Even better comments and such * Remove redundant variable * There is always just one contiguous acked range now * Better function comments. * Merge main * Test thoroughly, guided by coverage * Commentary * Tweak commentary. 
* Update Cargo.toml * Revert this bit * Rebase with upstream * Removing redundant use statements --------- Signed-off-by: Lars Eggert Co-authored-by: Randell Jesup Co-authored-by: Lars Eggert --- neqo-transport/benches/range_tracker.rs | 14 +- neqo-transport/src/connection/mod.rs | 2 + neqo-transport/src/send_stream.rs | 749 ++++++++++++++++++------ 3 files changed, 574 insertions(+), 191 deletions(-) diff --git a/neqo-transport/benches/range_tracker.rs b/neqo-transport/benches/range_tracker.rs index a328866f7e..6115542661 100644 --- a/neqo-transport/benches/range_tracker.rs +++ b/neqo-transport/benches/range_tracker.rs @@ -1,16 +1,16 @@ use criterion::{criterion_group, criterion_main, Criterion}; // black_box -use neqo_transport::send_stream::{RangeState, RangeTracker}; +use neqo_transport::send_stream::RangeTracker; const CHUNK: u64 = 1000; const END: u64 = 100_000; fn build_coalesce(len: u64) -> RangeTracker { let mut used = RangeTracker::default(); - used.mark_range(0, CHUNK as usize, RangeState::Acked); - used.mark_range(CHUNK, END as usize, RangeState::Sent); + used.mark_acked(0, CHUNK as usize); + used.mark_sent(CHUNK, END as usize); // leave a gap or it will coalesce here for i in 2..=len { // These do not get immediately coalesced when marking since they're not at the end or start - used.mark_range(i * CHUNK, CHUNK as usize, RangeState::Acked); + used.mark_acked(i * CHUNK, CHUNK as usize); } used } @@ -22,10 +22,10 @@ fn coalesce(c: &mut Criterion, count: u64) { b.iter_batched_ref( || build_coalesce(count), |used| { - used.mark_range(CHUNK, CHUNK as usize, RangeState::Acked); + used.mark_acked(CHUNK, CHUNK as usize); let tail = (count + 1) * CHUNK; - used.mark_range(tail, CHUNK as usize, RangeState::Sent); - used.mark_range(tail, CHUNK as usize, RangeState::Acked); + used.mark_sent(tail, CHUNK as usize); + used.mark_acked(tail, CHUNK as usize); }, criterion::BatchSize::SmallInput, ) diff --git a/neqo-transport/src/connection/mod.rs 
b/neqo-transport/src/connection/mod.rs index 1678e0b8bd..1d2cda64ff 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -59,6 +59,7 @@ use crate::{ version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; + mod dump; mod idle; pub mod params; @@ -66,6 +67,7 @@ mod saved; mod state; #[cfg(test)] pub mod test_internal; + use dump::dump_packet; use idle::IdleTimeout; pub use params::ConnectionParameters; diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 14c3326ca3..f2acf2c113 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -9,7 +9,7 @@ use std::{ cell::RefCell, cmp::{max, min, Ordering}, - collections::{BTreeMap, VecDeque}, + collections::{btree_map::Entry, BTreeMap, VecDeque}, convert::TryFrom, hash::{Hash, Hasher}, mem, @@ -18,7 +18,7 @@ use std::{ }; use indexmap::IndexMap; -use neqo_common::{qdebug, qerror, qinfo, qtrace, Encoder, Role}; +use neqo_common::{qdebug, qerror, qtrace, Encoder, Role}; use smallvec::SmallVec; use crate::{ @@ -136,7 +136,7 @@ impl Default for RetransmissionPriority { } #[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum RangeState { +enum RangeState { Sent, Acked, } @@ -145,38 +145,38 @@ pub enum RangeState { /// range implies needing-to-be-sent, either initially or as a retransmission. #[derive(Debug, Default, PartialEq)] pub struct RangeTracker { - /// offset, (len, RangeState). Use u64 for len because ranges can exceed 32bits. + /// The number of bytes that have been acknowledged starting from offset 0. + acked: u64, + /// A map that tracks the state of ranges. + /// Keys are the offset of the start of the range. + /// Values is a tuple of the range length and its state. used: BTreeMap, - /// this is a cache for first_unmarked_range(), which we check a log + /// This is a cache for the output of `first_unmarked_range`, which we check a lot. 
first_unmarked: Option<(u64, Option)>, } impl RangeTracker { fn highest_offset(&self) -> u64 { self.used - .range(..) - .next_back() - .map_or(0, |(k, (v, _))| *k + *v) + .last_key_value() + .map_or(self.acked, |(&k, &(v, _))| k + v) } fn acked_from_zero(&self) -> u64 { - self.used - .get(&0) - .filter(|(_, state)| *state == RangeState::Acked) - .map_or(0, |(v, _)| *v) + self.acked } /// Find the first unmarked range. If all are contiguous, this will return /// (highest_offset(), None). fn first_unmarked_range(&mut self) -> (u64, Option) { - let mut prev_end = 0; - if let Some(first_unmarked) = self.first_unmarked { return first_unmarked; } - for (cur_off, (cur_len, _)) in &self.used { - if prev_end == *cur_off { + let mut prev_end = self.acked; + + for (&cur_off, &(cur_len, _)) in &self.used { + if prev_end == cur_off { prev_end = cur_off + cur_len; } else { let res = (prev_end, Some(cur_off - prev_end)); @@ -188,160 +188,222 @@ impl RangeTracker { (prev_end, None) } - /// Check for the common case of adding to the end. If we can, do it and - /// return true. - fn extend_final_range(&mut self, new_off: u64, new_len: u64, new_state: RangeState) -> bool { - if let Some(mut last) = self.used.last_entry() { - let prev_off = *last.key(); - let (prev_len, prev_state) = last.get_mut(); - // allow for overlap between new chunk and the last entry - if new_off >= prev_off - && new_off <= prev_off + *prev_len - && new_off + new_len > prev_off + *prev_len - && new_state == *prev_state - { - // simple case, extend the last entry - *prev_len = new_off + new_len - prev_off; - return true; + /// When the range of acknowledged bytes from zero increases, we need to drop any + /// ranges within that span AND maybe extend it to include any adjacent acknowledged ranges. 
+ fn coalesce_acked(&mut self) { + while let Some(e) = self.used.first_entry() { + match self.acked.cmp(e.key()) { + Ordering::Greater => { + let (off, (len, state)) = e.remove_entry(); + let overflow = (off + len).saturating_sub(self.acked); + if overflow > 0 { + if state == RangeState::Acked { + self.acked += overflow; + } else { + self.used.insert(self.acked, (overflow, state)); + } + break; + } + } + Ordering::Equal => { + if e.get().1 == RangeState::Acked { + let (len, _) = e.remove(); + self.acked += len; + } + break; + } + Ordering::Less => break, } } - false } - /// Turn one range into a list of subranges that align with existing - /// ranges. - /// Check impermissible overlaps in subregions: Sent cannot overwrite Acked. - // - // e.g. given N is new and ABC are existing: - // NNNNNNNNNNNNNNNN - // AAAAA BBBCCCCC ...then we want 5 chunks: - // 1122222333444555 - // - // but also if we have this: - // NNNNNNNNNNNNNNNN - // AAAAAAAAAA BBBB ...then break existing A and B ranges up: - // - // 1111111122222233 - // aaAAAAAAAA BBbb - // - // Doing all this work up front should make handling each chunk much - // easier. - fn chunk_range_on_edges( - &mut self, - new_off: u64, - new_len: u64, - new_state: RangeState, - ) -> Vec<(u64, u64, RangeState)> { - let mut tmp_off = new_off; - let mut tmp_len = new_len; - let mut v = Vec::new(); - - // we already handled the case of a simple extension of the last item - - // cut previous overlapping range if needed - let prev = self.used.range_mut(..tmp_off).next_back(); - if let Some((prev_off, (prev_len, prev_state))) = prev { - let prev_state = *prev_state; - let overlap = (*prev_off + *prev_len).saturating_sub(new_off); - *prev_len -= overlap; - if overlap > 0 { - self.used.insert(new_off, (overlap, prev_state)); - } + /// Mark a range as acknowledged. 
This is simpler than marking a range as sent + /// because an acknowledged range can never turn back into a sent range, so + /// this function can just override the entire range. + /// + /// The only tricky parts are making sure that we maintain `self.acked`, + /// which is the first acknowledged range. And making sure that we don't create + /// ranges of the same type that are adjacent; these need to be merged. + pub fn mark_acked(&mut self, new_off: u64, new_len: usize) { + let end = new_off + u64::try_from(new_len).unwrap(); + let new_off = max(self.acked, new_off); + let mut new_len = end.saturating_sub(new_off); + if new_len == 0 { + return; } - let mut last_existing_remaining = None; - for (off, (len, state)) in self.used.range(tmp_off..tmp_off + tmp_len) { - // Create chunk for "overhang" before an existing range - if tmp_off < *off { - let sub_len = off - tmp_off; - v.push((tmp_off, sub_len, new_state)); - tmp_off += sub_len; - tmp_len -= sub_len; - } + self.first_unmarked = None; + if new_off == self.acked { + self.acked += new_len; + self.coalesce_acked(); + return; + } + let mut new_end = new_off + new_len; - // Create chunk to match existing range - let sub_len = min(*len, tmp_len); - let remaining_len = len - sub_len; - if new_state == RangeState::Sent && *state == RangeState::Acked { - qinfo!( - "Attempted to downgrade overlapping range Acked range {}-{} with Sent {}-{}", - off, - len, - new_off, - new_len - ); - } else { - v.push((tmp_off, sub_len, new_state)); + // Get all existing ranges that start within this new range. + let mut covered = self + .used + .range(new_off..new_end) + .map(|(&k, _)| k) + .collect::>(); + + if let Entry::Occupied(next_entry) = self.used.entry(new_end) { + // Check if the very next entry is the same type as this. + if next_entry.get().1 == RangeState::Acked { + // If is is acked, drop it and extend this new range. 
+ let (extra_len, _) = next_entry.remove(); + new_len += extra_len; + new_end += extra_len; } - tmp_off += sub_len; - tmp_len -= sub_len; - - if remaining_len > 0 { - last_existing_remaining = Some((*off, sub_len, remaining_len, *state)); + } else if let Some(last) = covered.pop() { + // Otherwise, the last of the existing ranges might overhang this one by some. + let (old_off, (old_len, old_state)) = self.used.remove_entry(&last).unwrap(); // can't fail + let remainder = (old_off + old_len).saturating_sub(new_end); + if remainder > 0 { + if old_state == RangeState::Acked { + // Just extend the current range. + new_len += remainder; + new_end += remainder; + } else { + self.used.insert(new_end, (remainder, RangeState::Sent)); + } } } - - // Maybe break last existing range in two so that a final chunk will - // have the same length as an existing range entry - if let Some((off, sub_len, remaining_len, state)) = last_existing_remaining { - *self.used.get_mut(&off).expect("must be there") = (sub_len, state); - self.used.insert(off + sub_len, (remaining_len, state)); + // All covered ranges can just be trashed. + for k in covered { + self.used.remove(&k); } - // Create final chunk if anything remains of the new range - if tmp_len > 0 { - v.push((tmp_off, tmp_len, new_state)); + // Now either merge with a preceding acked range + // or cut a preceding sent range as needed. + let prev = self.used.range_mut(..new_off).next_back(); + if let Some((prev_off, (prev_len, prev_state))) = prev { + let prev_end = *prev_off + *prev_len; + if prev_end >= new_off { + if *prev_state == RangeState::Sent { + *prev_len = new_off - *prev_off; + if prev_end > new_end { + // There is some extra sent range after the new acked range. 
+ self.used + .insert(new_end, (prev_end - new_end, RangeState::Sent)); + } + } else { + *prev_len = max(prev_end, new_end) - *prev_off; + return; + } + } + } + self.used.insert(new_off, (new_len, RangeState::Acked)); + } + + /// Turn a single sent range into a list of subranges that align with existing + /// acknowledged ranges. + /// + /// This is more complicated than adding acked ranges because any acked ranges + /// need to be kept in place, with sent ranges filling the gaps. + /// + /// This means: + /// ```ignore + /// AAA S AAAS AAAAA + /// + SSSSSSSSSSSSS + /// = AAASSSAAASSAAAAA + /// ``` + /// + /// But we also have to ensure that: + /// ```ignore + /// SSSS + /// + SS + /// = SSSSSS + /// ``` + /// and + /// ```ignore + /// SSSSS + /// + SS + /// = SSSSSS + /// ``` + pub fn mark_sent(&mut self, mut new_off: u64, new_len: usize) { + let new_end = new_off + u64::try_from(new_len).unwrap(); + new_off = max(self.acked, new_off); + let mut new_len = new_end.saturating_sub(new_off); + if new_len == 0 { + return; } - v - } + self.first_unmarked = None; - /// Merge contiguous Acked ranges into the first entry (0). This range may - /// be dropped from the send buffer. - fn coalesce_acked_from_zero(&mut self) { - let acked_range_from_zero = self + // Get all existing ranges that start within this new range. 
+ let covered = self .used - .get_mut(&0) - .filter(|(_, state)| *state == RangeState::Acked) - .map(|(len, _)| *len); - - if let Some(len_from_zero) = acked_range_from_zero { - let mut new_len_from_zero = len_from_zero; - - // See if there's another Acked range entry contiguous to this one - while let Some((next_len, _)) = self - .used - .get(&new_len_from_zero) - .filter(|(_, state)| *state == RangeState::Acked) - { - let to_remove = new_len_from_zero; - new_len_from_zero += *next_len; - self.used.remove(&to_remove); - } - - if len_from_zero != new_len_from_zero { - self.used.get_mut(&0).expect("must be there").0 = new_len_from_zero; + .range(new_off..(new_off + new_len)) + .map(|(&k, _)| k) + .collect::>(); + + if let Entry::Occupied(next_entry) = self.used.entry(new_end) { + if next_entry.get().1 == RangeState::Sent { + // Check if the very next entry is the same type as this, so it can be merged. + let (extra_len, _) = next_entry.remove(); + new_len += extra_len; } } - } - pub fn mark_range(&mut self, off: u64, len: usize, state: RangeState) { - if len == 0 { - qinfo!("mark 0-length range at {}", off); - return; - } + // Merge with any preceding sent range that might overlap, + // or cut the head of this if the preceding range is acked. + let prev = self.used.range(..new_off).next_back(); + if let Some((&prev_off, &(prev_len, prev_state))) = prev { + if prev_off + prev_len >= new_off { + let overlap = prev_off + prev_len - new_off; + new_len = new_len.saturating_sub(overlap); + if new_len == 0 { + // The previous range completely covers this one (no more to do). + return; + } - self.first_unmarked = None; - if self.extend_final_range(off, len as u64, state) { - return; + if prev_state == RangeState::Acked { + // The previous range is acked, so it cuts this one. + new_off += overlap; + } else { + // Extend the current range backwards. + new_off = prev_off; + new_len += prev_len; + // The previous range will be updated below. 
+ // It might need to be cut because of a covered acked range. + } + } } - let subranges = self.chunk_range_on_edges(off, len as u64, state); - for (sub_off, sub_len, sub_state) in subranges { - self.used.insert(sub_off, (sub_len, sub_state)); + // Now interleave new sent chunks with any existing acked chunks. + for old_off in covered { + let Entry::Occupied(e) = self.used.entry(old_off) else { + unreachable!(); + }; + let &(old_len, old_state) = e.get(); + if old_state == RangeState::Acked { + // Now we have to insert a chunk ahead of this acked chunk. + let chunk_len = old_off - new_off; + if chunk_len > 0 { + self.used.insert(new_off, (chunk_len, RangeState::Sent)); + } + let included = chunk_len + old_len; + new_len = new_len.saturating_sub(included); + if new_len == 0 { + return; + } + new_off += included; + } else { + let overhang = (old_off + old_len).saturating_sub(new_off + new_len); + new_len += overhang; + if *e.key() != new_off { + // Retain a sent entry at `new_off`. + // This avoids the work of removing and re-creating an entry. + // The value will be overwritten when the next insert occurs, + // either when this loop hits an acked range (above) + // or for any remainder (below). + e.remove(); + } + } } - self.coalesce_acked_from_zero(); + self.used.insert(new_off, (new_len, RangeState::Sent)); } fn unmark_range(&mut self, off: u64, len: usize) { @@ -420,7 +482,6 @@ impl RangeTracker { /// Buffer to contain queued bytes and track their state. 
#[derive(Debug, Default, PartialEq)] pub struct TxBuffer { - retired: u64, // contig acked bytes, no longer in buffer send_buf: VecDeque, // buffer of not-acked bytes ranges: RangeTracker, // ranges in buffer that have been sent or acked } @@ -443,13 +504,13 @@ impl TxBuffer { pub fn next_bytes(&mut self) -> Option<(u64, &[u8])> { let (start, maybe_len) = self.ranges.first_unmarked_range(); - if start == self.retired + u64::try_from(self.buffered()).unwrap() { + if start == self.retired() + u64::try_from(self.buffered()).unwrap() { return None; } // Convert from ranges-relative-to-zero to // ranges-relative-to-buffer-start - let buff_off = usize::try_from(start - self.retired).unwrap(); + let buff_off = usize::try_from(start - self.retired()).unwrap(); // Deque returns two slices. Create a subslice from whichever // one contains the first unmarked data. @@ -473,23 +534,21 @@ impl TxBuffer { } pub fn mark_as_sent(&mut self, offset: u64, len: usize) { - self.ranges.mark_range(offset, len, RangeState::Sent); + self.ranges.mark_sent(offset, len) } pub fn mark_as_acked(&mut self, offset: u64, len: usize) { - self.ranges.mark_range(offset, len, RangeState::Acked); + let prev_retired = self.retired(); + self.ranges.mark_acked(offset, len); - // We can drop contig acked range from the buffer - let new_retirable = self.ranges.acked_from_zero() - self.retired; + // Any newly-retired bytes can be dropped from the buffer. 
+ let new_retirable = self.retired() - prev_retired; debug_assert!(new_retirable <= self.buffered() as u64); - let keep_len = - self.buffered() - usize::try_from(new_retirable).expect("should fit in usize"); + let keep = self.buffered() - usize::try_from(new_retirable).unwrap(); // Truncate front - self.send_buf.rotate_left(self.buffered() - keep_len); - self.send_buf.truncate(keep_len); - - self.retired += new_retirable; + self.send_buf.rotate_left(self.buffered() - keep); + self.send_buf.truncate(keep); } pub fn mark_as_lost(&mut self, offset: u64, len: usize) { @@ -502,7 +561,7 @@ impl TxBuffer { } pub fn retired(&self) -> u64 { - self.retired + self.ranges.acked_from_zero() } fn buffered(&self) -> usize { @@ -514,7 +573,7 @@ impl TxBuffer { } fn used(&self) -> u64 { - self.retired + u64::try_from(self.buffered()).unwrap() + self.retired() + u64::try_from(self.buffered()).unwrap() } } @@ -1691,45 +1750,368 @@ mod tests { } #[test] - fn test_mark_range() { + fn mark_acked_from_zero() { let mut rt = RangeTracker::default(); // ranges can go from nothing->Sent if queued for retrans and then // acks arrive - rt.mark_range(5, 5, RangeState::Acked); + rt.mark_acked(5, 5); assert_eq!(rt.highest_offset(), 10); assert_eq!(rt.acked_from_zero(), 0); - rt.mark_range(10, 4, RangeState::Acked); + rt.mark_acked(10, 4); assert_eq!(rt.highest_offset(), 14); assert_eq!(rt.acked_from_zero(), 0); - rt.mark_range(0, 5, RangeState::Sent); + rt.mark_sent(0, 5); assert_eq!(rt.highest_offset(), 14); assert_eq!(rt.acked_from_zero(), 0); - rt.mark_range(0, 5, RangeState::Acked); + rt.mark_acked(0, 5); assert_eq!(rt.highest_offset(), 14); assert_eq!(rt.acked_from_zero(), 14); - rt.mark_range(12, 20, RangeState::Acked); + rt.mark_acked(12, 20); assert_eq!(rt.highest_offset(), 32); assert_eq!(rt.acked_from_zero(), 32); // ack the lot - rt.mark_range(0, 400, RangeState::Acked); + rt.mark_acked(0, 400); assert_eq!(rt.highest_offset(), 400); assert_eq!(rt.acked_from_zero(), 400); // acked 
trumps sent - rt.mark_range(0, 200, RangeState::Sent); + rt.mark_sent(0, 200); assert_eq!(rt.highest_offset(), 400); assert_eq!(rt.acked_from_zero(), 400); } + /// Check that marked_acked correctly handles all paths. + /// ```ignore + /// SSS SSSAAASSS + /// + AAAAAAAAA + /// = SSSAAAAAAAAASS + /// ``` + #[test] + fn mark_acked_1() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 3); + rt.mark_sent(6, 3); + rt.mark_acked(9, 3); + rt.mark_sent(12, 3); + + rt.mark_acked(3, 10); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (3, RangeState::Sent)); + canon.used.insert(3, (10, RangeState::Acked)); + canon.used.insert(13, (2, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. + /// ```ignore + /// SSS SSS AAA + /// + AAAAAAAAA + /// = SSAAAAAAAAAAAA + /// ``` + #[test] + fn mark_acked_2() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 3); + rt.mark_sent(6, 3); + rt.mark_acked(12, 3); + + rt.mark_acked(2, 10); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (2, RangeState::Sent)); + canon.used.insert(2, (13, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. + /// ```ignore + /// AASSS AAAA + /// + AAAAAAAAA + /// = AAAAAAAAAAAA + /// ``` + #[test] + fn mark_acked_3() { + let mut rt = RangeTracker::default(); + rt.mark_acked(1, 2); + rt.mark_sent(3, 3); + rt.mark_acked(8, 4); + + rt.mark_acked(0, 9); + + let canon = RangeTracker { + acked: 12, + ..RangeTracker::default() + }; + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. 
+ /// ```ignore + /// SSS + /// + AAAA + /// = AAAASS + /// ``` + #[test] + fn mark_acked_4() { + let mut rt = RangeTracker::default(); + rt.mark_sent(3, 3); + + rt.mark_acked(0, 4); + + let mut canon = RangeTracker { + acked: 4, + ..Default::default() + }; + canon.used.insert(4, (2, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. + /// ```ignore + /// AAAAAASSS + /// + AAA + /// = AAAAAASSS + /// ``` + #[test] + fn mark_acked_5() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 6); + rt.mark_sent(6, 3); + + rt.mark_acked(3, 3); + + let mut canon = RangeTracker { + acked: 6, + ..RangeTracker::default() + }; + canon.used.insert(6, (3, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. + /// ```ignore + /// AAA AAA AAA + /// + AAAAAAA + /// = AAAAAAAAAAAAA + /// ``` + #[test] + fn mark_acked_6() { + let mut rt = RangeTracker::default(); + rt.mark_acked(3, 3); + rt.mark_acked(8, 3); + rt.mark_acked(13, 3); + + rt.mark_acked(6, 7); + + let mut canon = RangeTracker::default(); + canon.used.insert(3, (13, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. + /// ```ignore + /// AAA AAA + /// + AAA + /// = AAAAAAAA + /// ``` + #[test] + fn mark_acked_7() { + let mut rt = RangeTracker::default(); + rt.mark_acked(3, 3); + rt.mark_acked(8, 3); + + rt.mark_acked(6, 3); + + let mut canon = RangeTracker::default(); + canon.used.insert(3, (8, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. 
+ /// ```ignore + /// SSSSSSSS + /// + AAAA + /// = SSAAAASS + /// ``` + #[test] + fn mark_acked_8() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 8); + + rt.mark_acked(2, 4); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (2, RangeState::Sent)); + canon.used.insert(2, (4, RangeState::Acked)); + canon.used.insert(6, (2, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_acked correctly handles all paths. + /// ```ignore + /// SSS + /// + AAA + /// = AAA SSS + /// ``` + #[test] + fn mark_acked_9() { + let mut rt = RangeTracker::default(); + rt.mark_sent(5, 3); + + rt.mark_acked(0, 3); + + let mut canon = RangeTracker { + acked: 3, + ..Default::default() + }; + canon.used.insert(5, (3, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_sent correctly handles all paths. + /// ```ignore + /// AAA AAA SSS + /// + SSSSSSSSSSSS + /// = AAASSSAAASSSSSS + /// ``` + #[test] + fn mark_sent_1() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 3); + rt.mark_acked(6, 3); + rt.mark_sent(12, 3); + + rt.mark_sent(0, 12); + + let mut canon = RangeTracker { + acked: 3, + ..RangeTracker::default() + }; + canon.used.insert(3, (3, RangeState::Sent)); + canon.used.insert(6, (3, RangeState::Acked)); + canon.used.insert(9, (6, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_sent correctly handles all paths. 
+ /// ```ignore + /// AAASS AAA S SSSS + /// + SSSSSSSSSSSSS + /// = AAASSSAAASSSSSSS + /// ``` + #[test] + fn mark_sent_2() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 3); + rt.mark_sent(3, 2); + rt.mark_acked(6, 3); + rt.mark_sent(10, 1); + rt.mark_sent(12, 4); + + rt.mark_sent(0, 13); + + let mut canon = RangeTracker { + acked: 3, + ..RangeTracker::default() + }; + canon.used.insert(3, (3, RangeState::Sent)); + canon.used.insert(6, (3, RangeState::Acked)); + canon.used.insert(9, (7, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_sent correctly handles all paths. + /// ```ignore + /// AAA AAA + /// + SSSS + /// = AAASSAAA + /// ``` + #[test] + fn mark_sent_3() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 3); + rt.mark_acked(5, 3); + + rt.mark_sent(2, 4); + + let mut canon = RangeTracker { + acked: 3, + ..RangeTracker::default() + }; + canon.used.insert(3, (2, RangeState::Sent)); + canon.used.insert(5, (3, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that marked_sent correctly handles all paths. + /// ```ignore + /// SSS AAA SS + /// + SSSSSSSS + /// = SSSSSAAASSSS + /// ``` + #[test] + fn mark_sent_4() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 3); + rt.mark_acked(5, 3); + rt.mark_sent(10, 2); + + rt.mark_sent(2, 8); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (5, RangeState::Sent)); + canon.used.insert(5, (3, RangeState::Acked)); + canon.used.insert(8, (4, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_sent correctly handles all paths. 
+ /// ```ignore + /// AAA + /// + SSSSSS + /// = AAASSS + /// ``` + #[test] + fn mark_sent_5() { + let mut rt = RangeTracker::default(); + rt.mark_acked(3, 3); + + rt.mark_sent(3, 6); + + let mut canon = RangeTracker::default(); + canon.used.insert(3, (3, RangeState::Acked)); + canon.used.insert(6, (3, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that marked_sent correctly handles all paths. + /// ```ignore + /// SSSSS + /// + SSS + /// = SSSSS + /// ``` + #[test] + fn mark_sent_6() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 5); + + rt.mark_sent(1, 3); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (5, RangeState::Sent)); + assert_eq!(rt, canon); + } + #[test] fn unmark_sent_start() { let mut rt = RangeTracker::default(); - rt.mark_range(0, 5, RangeState::Sent); + rt.mark_sent(0, 5); assert_eq!(rt.highest_offset(), 5); assert_eq!(rt.acked_from_zero(), 0); @@ -1743,13 +2125,13 @@ mod tests { fn unmark_sent_middle() { let mut rt = RangeTracker::default(); - rt.mark_range(0, 5, RangeState::Acked); + rt.mark_acked(0, 5); assert_eq!(rt.highest_offset(), 5); assert_eq!(rt.acked_from_zero(), 5); - rt.mark_range(5, 5, RangeState::Sent); + rt.mark_sent(5, 5); assert_eq!(rt.highest_offset(), 10); assert_eq!(rt.acked_from_zero(), 5); - rt.mark_range(10, 5, RangeState::Acked); + rt.mark_acked(10, 5); assert_eq!(rt.highest_offset(), 15); assert_eq!(rt.acked_from_zero(), 5); assert_eq!(rt.first_unmarked_range(), (15, None)); @@ -1764,10 +2146,10 @@ mod tests { fn unmark_sent_end() { let mut rt = RangeTracker::default(); - rt.mark_range(0, 5, RangeState::Acked); + rt.mark_acked(0, 5); assert_eq!(rt.highest_offset(), 5); assert_eq!(rt.acked_from_zero(), 5); - rt.mark_range(5, 5, RangeState::Sent); + rt.mark_sent(5, 5); assert_eq!(rt.highest_offset(), 10); assert_eq!(rt.acked_from_zero(), 5); assert_eq!(rt.first_unmarked_range(), (10, None)); @@ -1793,11 +2175,11 @@ mod tests { } #[test] - fn test_unmark_range() { + fn 
unmark_range() { let mut rt = RangeTracker::default(); - rt.mark_range(5, 5, RangeState::Acked); - rt.mark_range(10, 5, RangeState::Sent); + rt.mark_acked(5, 5); + rt.mark_sent(10, 5); // Should unmark sent but not acked range rt.unmark_range(7, 6); @@ -1813,11 +2195,11 @@ mod tests { (&13, &(2, RangeState::Sent)) ); assert!(rt.used.iter().nth(2).is_none()); - rt.mark_range(0, 5, RangeState::Sent); + rt.mark_sent(0, 5); let res = rt.first_unmarked_range(); assert_eq!(res, (10, Some(3))); - rt.mark_range(10, 3, RangeState::Sent); + rt.mark_sent(10, 3); let res = rt.first_unmarked_range(); assert_eq!(res, (15, None)); @@ -1874,7 +2256,7 @@ mod tests { Some((start, x)) if x.len() == 5 && start == five_bytes_from_end && x.iter().all(|ch| *ch == 1))); - assert_eq!(txb.retired, five_bytes_from_end); + assert_eq!(txb.retired(), five_bytes_from_end); assert_eq!(txb.buffered(), 35); // Marking that bit as sent should let the last contig bit be returned @@ -1951,7 +2333,7 @@ mod tests { } #[test] - fn test_stream_tx() { + fn stream_tx() { let conn_fc = connection_fc(4096); let conn_events = ConnectionEvents::default(); @@ -2436,8 +2818,7 @@ mod tests { ); let mut send_buf = TxBuffer::new(); - send_buf.retired = u64::try_from(offset).unwrap(); - send_buf.ranges.mark_range(0, offset, RangeState::Acked); + send_buf.ranges.mark_acked(0, offset); let mut fc = SenderFlowControl::new(StreamId::from(stream), MAX_VARINT); fc.consume(offset); let conn_fc = Rc::new(RefCell::new(SenderFlowControl::new((), MAX_VARINT))); From d1719b7e75c4a1929b1b7d00afbc4179abf6623e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 8 Feb 2024 09:17:36 +0200 Subject: [PATCH 128/321] ci: Use mozilla-actions/sccache-action (#1632) * ci: Use mozilla-actions/sccache-action * Move sscache after Rust toolchain installation, just in case --- .github/workflows/check.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 
b3982a556a..58b8d3d870 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -37,6 +37,14 @@ jobs: toolchain: ${{ matrix.rust-toolchain }} components: rustfmt, clippy, llvm-tools-preview + - name: Use sccache + uses: mozilla-actions/sccache-action@v0.0.4 + + - name: Enable sscache + run: | + echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + - name: Install dependencies (Linux) if: runner.os == 'Linux' env: From 5ad4cf2d4874091c4fb97432db30f7512dfa7c5d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 8 Feb 2024 11:00:10 +0200 Subject: [PATCH 129/321] chore: Add a `[workspace.package]` section and refer to it from members (#1633) * chore: Add a `[workspace.package]` section and refer to it from members This means we only need to change stuff in one place from now on. Also eliminates the need for the `update_version` script, so remove that. * `clippy.toml` is redundant with `rust-version` in the Cargo.toml https://github.com/rust-lang/rust-clippy?tab=readme-ov-file#specifying-the-minimum-supported-rust-version * Unify authors. Also revamp /neqo-qpack/Cargo.toml, which was forgotten. 
* Sort build * Argh --- Cargo.toml | 9 +++++++++ clippy.toml | 1 - neqo-client/Cargo.toml | 18 +++++++++--------- neqo-common/Cargo.toml | 14 ++++++++------ neqo-crypto/Cargo.toml | 18 ++++++++++-------- neqo-http3/Cargo.toml | 14 ++++++++------ neqo-interop/Cargo.toml | 14 ++++++++------ neqo-qpack/Cargo.toml | 14 ++++++++------ neqo-server/Cargo.toml | 16 +++++++++------- neqo-transport/Cargo.toml | 14 ++++++++------ test-fixture/Cargo.toml | 16 +++++++++------- update_version | 16 ---------------- 12 files changed, 86 insertions(+), 78 deletions(-) delete mode 100644 clippy.toml delete mode 100755 update_version diff --git a/Cargo.toml b/Cargo.toml index b3449d4a5e..39e0f76441 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,3 +10,12 @@ members = [ "neqo-interop", "test-fixture", ] + +[workspace.package] +homepage = "https://github.com/mozilla/neqo/" +repository = "https://github.com/mozilla/neqo/" +authors = ["The Neqo Authors "] +version = "0.7.0" +edition = "2018" +rust-version = "1.70.0" +license = "MIT OR Apache-2.0" diff --git a/clippy.toml b/clippy.toml deleted file mode 100644 index 1645c19f32..0000000000 --- a/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -msrv = "1.70.0" diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 4ca69647e1..7b90b01a77 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "neqo-client" -version = "0.7.0" -authors = ["Martin Thomson ", - "Dragana Damjanovic ", - "Andy Grover "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] futures = "0.3" -log = {version = "0.4.17", default-features = false} -neqo-common = { path="./../neqo-common" } +log = { version = "0.4.17", default-features = false } +neqo-common = { path = "./../neqo-common" } neqo-crypto 
= { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 183a4d9450..91b6458c0c 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,11 +1,13 @@ [package] name = "neqo-common" -version = "0.7.0" -authors = ["Bobby Holley "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" build = "build.rs" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] enum-map = "2.7" @@ -13,7 +15,7 @@ env_logger = { version = "0.10", default-features = false } lazy_static = "1.4" log = { version = "0.4", default-features = false } qlog = "0.12.0" -time = {version = "0.3.23", features = ["formatting"]} +time = { version = "0.3.23", features = ["formatting"] } [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 492e501e58..8a825b985c 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,19 +1,21 @@ [package] name = "neqo-crypto" -version = "0.7.0" -authors = ["Martin Thomson "] -edition = "2018" -rust-version = "1.70.0" build = "build.rs" -license = "MIT OR Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] -log = {version = "~0.4.17", default-features = false} +log = { version = "~0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } [build-dependencies] -bindgen = {version = "0.69.1", default-features = false, features= ["runtime"]} -mozbuild = {version = "0.1", optional = true} +bindgen = { version = "0.69.1", default-features = false, features = ["runtime"] } +mozbuild = { version = "0.1", 
optional = true } serde = "1.0.195" serde_derive = "1.0.195" toml = "0.5.11" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 1605a2b609..a078548f0f 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,15 +1,17 @@ [package] name = "neqo-http3" -version = "0.7.0" -authors = ["Dragana Damjanovic "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] enumset = "1.1.2" lazy_static = "1.4" -log = {version = "0.4.17", default-features = false} +log = { version = "0.4.17", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-qpack = { path = "./../neqo-qpack" } diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index ce5bd9af8b..34690cd7e1 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,14 +1,16 @@ [package] name = "neqo-interop" -version = "0.7.0" -authors = ["EKR "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] lazy_static = "1.4" -neqo-common = { path="./../neqo-common" } +neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 229345e977..32affad81b 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,14 +1,16 @@ [package] name = "neqo-qpack" -version = "0.7.0" -authors = ["Dragana Damjanovic "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" +authors.workspace = 
true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] lazy_static = "~1.4.0" -log = {version = "~0.4.17", default-features = false} +log = { version = "~0.4.17", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-transport = { path = "./../neqo-transport" } diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index d4ee99c9b0..1f9ad24795 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,15 +1,17 @@ [package] name = "neqo-server" -version = "0.7.0" -authors = ["Dragana Damjanovic "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] futures = "0.3" -log = {version = "0.4.17", default-features = false} -neqo-common = { path="./../neqo-common" } +log = { version = "0.4.17", default-features = false } +neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 49ece8661c..e0f26feca2 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "neqo-transport" -version = "0.7.0" -authors = ["EKR ", "Andy Grover "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] indexmap = "1.9.3" @@ -38,4 +40,4 @@ required-features = ["bench"] [[bench]] name = "range_tracker" harness 
= false -required-features = ["bench"] \ No newline at end of file +required-features = ["bench"] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index ed480c9c26..8fe2ac0602 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,14 +1,16 @@ [package] name = "test-fixture" -version = "0.7.0" -authors = ["Martin Thomson "] -edition = "2018" -rust-version = "1.70.0" -license = "MIT OR Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true [dependencies] lazy_static = "1.4" -log = {version = "0.4.17", default-features = false} +log = { version = "0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } @@ -17,4 +19,4 @@ neqo-transport = { path = "../neqo-transport" } qlog = "0.12.0" [features] -deny-warnings = [] \ No newline at end of file +deny-warnings = [] diff --git a/update_version b/update_version deleted file mode 100755 index 2be137665e..0000000000 --- a/update_version +++ /dev/null @@ -1,16 +0,0 @@ -#! /usr/bin/env bash - -pushd "$(dirname "$0")" > /dev/null -set -e - -[[ -n "$1" ]] || { echo "Usage: $0 "; exit 1; } - -while IFS= read -r -d '' entry -do - echo "$entry" - line=$(grep -n -m1 "version" "$entry" | cut -d: -f2) - current=$(echo "${line}" | awk -F'"' '{print $2}') - sed -i.bak "s/$current/$1/g" "$entry" && rm "$entry.bak" -done < <(find . 
-mindepth 2 -name Cargo.toml -print0) - -popd > /dev/null From 9e61f5e3769b8b8302be06e7e83a8922a635e5eb Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 8 Feb 2024 11:50:56 +0200 Subject: [PATCH 130/321] chore: Replace `structopt` with `clap` (#1634) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Replace `structopt` with `clap` Modern alternative avoiding some unmaintained dependencies. Fetching advisory database from `https://github.com/RustSec/advisory-db.git` Loaded 596 security advisories (from /Users/lars/.cargo/advisory-db) Updating crates.io index Scanning Cargo.lock for vulnerabilities (196 crate dependencies) Crate: ansi_term Version: 0.12.1 Warning: unmaintained Title: ansi_term is Unmaintained Date: 2021-08-18 ID: RUSTSEC-2021-0139 URL: https://rustsec.org/advisories/RUSTSEC-2021-0139 Dependency tree: ansi_term 0.12.1 └── clap 2.34.0 └── structopt 0.3.26 ├── neqo-server 0.7.0 └── neqo-client 0.7.0 Crate: atty Version: 0.2.14 Warning: unsound Title: Potential unaligned read Date: 2021-07-04 ID: RUSTSEC-2021-0145 URL: https://rustsec.org/advisories/RUSTSEC-2021-0145 Dependency tree: atty 0.2.14 └── clap 2.34.0 └── structopt 0.3.26 ├── neqo-server 0.7.0 └── neqo-client 0.7.0 warning: 2 allowed warnings found * Update neqo-server/src/main.rs Co-authored-by: Max Inden Signed-off-by: Lars Eggert * Address comments * Further simplifications * Don't need hex --------- Signed-off-by: Lars Eggert Co-authored-by: Max Inden --- neqo-client/Cargo.toml | 4 +- neqo-client/src/main.rs | 140 ++++++++++++++------------------------- neqo-interop/Cargo.toml | 3 +- neqo-interop/src/main.rs | 18 ++--- neqo-server/Cargo.toml | 3 +- neqo-server/src/main.rs | 94 ++++++++++++-------------- 6 files changed, 106 insertions(+), 156 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 7b90b01a77..545081d946 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,5 +1,6 @@ [package] name 
= "neqo-client" +description = "A basic QUIC HTTP/0.9 and HTTP/3 client." authors.workspace = true homepage.workspace = true repository.workspace = true @@ -9,7 +10,9 @@ rust-version.workspace = true license.workspace = true [dependencies] +clap = { version = "4.4.18", features = ["derive"] } futures = "0.3" +hex = "0.4.3" log = { version = "0.4.17", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } @@ -17,7 +20,6 @@ neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } qlog = "0.12.0" -structopt = "0.3" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = "~2.5.0" diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 931ce5a6e4..041989a1ce 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -19,10 +19,10 @@ use std::{ pin::Pin, process::exit, rc::Rc, - str::FromStr, time::{Duration, Instant}, }; +use clap::Parser; use common::IpTos; use futures::{ future::{select, Either}, @@ -44,7 +44,6 @@ use neqo_transport::{ EmptyConnectionIdGenerator, Error as TransportError, StreamId, StreamType, Version, }; use qlog::{events::EventImportance, streamer::QlogStreamer}; -use structopt::StructOpt; use tokio::{net::UdpSocket, time::Sleep}; use url::{Origin, Url}; @@ -88,6 +87,8 @@ impl Display for ClientError { } } +impl std::error::Error for ClientError {} + type Res = Result; /// Track whether a key update is needed. 
@@ -123,45 +124,10 @@ impl KeyUpdateState { } } -#[derive(Debug)] -struct HexArg(Vec); -impl FromStr for HexArg { - type Err = ClientError; - - fn from_str(s: &str) -> Res { - fn v(c: u8) -> Res { - match c { - b'A'..=b'F' => Ok(c - b'A' + 10), - b'a'..=b'f' => Ok(c - b'a' + 10), - b'0'..=b'9' => Ok(c - b'0'), - _ => Err(ClientError::ArgumentError("non-hex character")), - } - } - let s: &[u8] = s.as_ref(); - if s.len() % 2 != 0 { - return Err(ClientError::ArgumentError("invalid length")); - } - let mut buf = vec![0; s.len() / 2]; - for i in 0..buf.len() { - buf[i] = (v(s[i * 2])? << 4) | v(s[i * 2 + 1])?; - } - Ok(Self(buf)) - } -} - -impl AsRef<[u8]> for HexArg { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -#[derive(Debug, StructOpt)] -#[structopt( - name = "neqo-client", - about = "A basic QUIC HTTP/0.9 and HTTP/3 client." -)] +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] pub struct Args { - #[structopt(short = "a", long, default_value = "h3")] + #[arg(short = 'a', long, default_value = "h3")] /// ALPN labels to negotiate. /// /// This client still only does HTTP/3 no matter what the ALPN says. 
@@ -169,89 +135,89 @@ pub struct Args { urls: Vec, - #[structopt(short = "m", default_value = "GET")] + #[arg(short = 'm', default_value = "GET")] method: String, - #[structopt(short = "h", long, number_of_values = 2)] + #[arg(short = 'H', long, number_of_values = 2)] header: Vec, - #[structopt(name = "encoder-table-size", long, default_value = "16384")] + #[arg(name = "encoder-table-size", long, default_value = "16384")] max_table_size_encoder: u64, - #[structopt(name = "decoder-table-size", long, default_value = "16384")] + #[arg(name = "decoder-table-size", long, default_value = "16384")] max_table_size_decoder: u64, - #[structopt(name = "max-blocked-streams", short = "b", long, default_value = "10")] + #[arg(name = "max-blocked-streams", short = 'b', long, default_value = "10")] max_blocked_streams: u16, - #[structopt(name = "max-push", short = "p", long, default_value = "10")] + #[arg(name = "max-push", short = 'p', long, default_value = "10")] max_concurrent_push_streams: u64, - #[structopt(name = "use-old-http", short = "o", long)] + #[arg(name = "use-old-http", short = 'o', long)] /// Use http 0.9 instead of HTTP/3 use_old_http: bool, - #[structopt(name = "download-in-series", long)] + #[arg(name = "download-in-series", long)] /// Download resources in series using separate connections. download_in_series: bool, - #[structopt(name = "concurrency", long, default_value = "100")] + #[arg(name = "concurrency", long, default_value = "100")] /// The maximum number of requests to have outstanding at one time. 
concurrency: usize, - #[structopt(name = "output-read-data", long)] + #[arg(name = "output-read-data", long)] /// Output received data to stdout output_read_data: bool, - #[structopt(name = "qlog-dir", long)] + #[arg(name = "qlog-dir", long)] /// Enable QLOG logging and QLOG traces to this directory qlog_dir: Option, - #[structopt(name = "output-dir", long)] + #[arg(name = "output-dir", long)] /// Save contents of fetched URLs to a directory output_dir: Option, - #[structopt(name = "qns-test", long)] + #[arg(name = "qns-test", long)] /// Enable special behavior for use with QUIC Network Simulator qns_test: Option, - #[structopt(short = "r", long)] + #[arg(short = 'r', long)] /// Client attempts to resume by making multiple connections to servers. /// Requires that 2 or more URLs are listed for each server. /// Use this for 0-RTT: the stack always attempts 0-RTT on resumption. resume: bool, - #[structopt(name = "key-update", long)] + #[arg(name = "key-update", long)] /// Attempt to initiate a key update immediately after confirming the connection. key_update: bool, - #[structopt(short = "c", long, number_of_values = 1)] + #[arg(short = 'c', long, number_of_values = 1)] /// The set of TLS cipher suites to enable. /// From: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256. ciphers: Vec, - #[structopt(name = "ech", long)] + #[arg(name = "ech", long, value_parser = |s: &str| hex::decode(s))] /// Enable encrypted client hello (ECH). /// This takes an encoded ECH configuration in hexadecimal format. - ech: Option, + ech: Option>, - #[structopt(flatten)] + #[command(flatten)] quic_parameters: QuicParameters, - #[structopt(name = "ipv4-only", short = "4", long)] + #[arg(name = "ipv4-only", short = '4', long)] /// Connect only over IPv4 ipv4_only: bool, - #[structopt(name = "ipv6-only", short = "6", long)] + #[arg(name = "ipv6-only", short = '6', long)] /// Connect only over IPv6 ipv6_only: bool, /// The test that this client will run. 
Currently, we only support "upload". - #[structopt(name = "test", long)] + #[arg(name = "test", long)] test: Option, /// The request size that will be used for upload test. - #[structopt(name = "upload-size", long, default_value = "100")] + #[arg(name = "upload-size", long, default_value = "100")] upload_size: usize, } @@ -269,53 +235,45 @@ impl Args { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -struct VersionArg(Version); -impl FromStr for VersionArg { - type Err = ClientError; - - fn from_str(s: &str) -> Res { - let v = u32::from_str_radix(s, 16) - .map_err(|_| ClientError::ArgumentError("versions need to be specified in hex"))?; - Ok(Self(Version::try_from(v).map_err(|_| { - ClientError::ArgumentError("unknown version") - })?)) - } +fn from_str(s: &str) -> Res { + let v = u32::from_str_radix(s, 16) + .map_err(|_| ClientError::ArgumentError("versions need to be specified in hex"))?; + Version::try_from(v).map_err(|_| ClientError::ArgumentError("unknown version")) } -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] struct QuicParameters { - #[structopt( - short = "V", + #[arg( + short = 'Q', long, - multiple = true, - use_delimiter = true, - number_of_values = 1 - )] + num_args = 1.., + value_delimiter = ' ', + number_of_values = 1, + value_parser = from_str)] /// A list of versions to support, in hex. /// The first is the version to attempt. /// Adding multiple values adds versions in order of preference. /// If the first listed version appears in the list twice, the position /// of the second entry determines the preference order of that version. - quic_version: Vec, + quic_version: Vec, - #[structopt(long, default_value = "16")] + #[arg(long, default_value = "16")] /// Set the MAX_STREAMS_BIDI limit. max_streams_bidi: u64, - #[structopt(long, default_value = "16")] + #[arg(long, default_value = "16")] /// Set the MAX_STREAMS_UNI limit. 
max_streams_uni: u64, - #[structopt(long = "idle", default_value = "30")] + #[arg(long = "idle", default_value = "30")] /// The idle timeout for connections, in seconds. idle_timeout: u64, - #[structopt(long = "cc", default_value = "newreno")] + #[arg(long = "cc", default_value = "newreno")] /// The congestion controller to use. congestion_control: CongestionControlAlgorithm, - #[structopt(long = "pacing")] + #[arg(long = "pacing")] /// Whether pacing is enabled. pacing: bool, } @@ -335,7 +293,7 @@ impl QuicParameters { } else { &self.quic_version }; - params.versions(first.0, all.iter().map(|&x| x.0).collect()) + params.versions(first, all.to_vec()) } else { let version = match alpn { "h3" | "hq-interop" => Version::Version1, @@ -1002,11 +960,11 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { async fn main() -> Res<()> { init(); - let mut args = Args::from_args(); + let mut args = Args::parse(); if let Some(testcase) = args.qns_test.as_ref() { // Only use v1 for most QNS tests. - args.quic_parameters.quic_version = vec![VersionArg(Version::Version1)]; + args.quic_parameters.quic_version = vec![Version::Version1]; match testcase.as_str() { // TODO: Add "ecn" when that is ready. "http3" => {} diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 34690cd7e1..f049f2cb05 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "neqo-interop" +description = "A QUIC interop client." 
authors.workspace = true homepage.workspace = true repository.workspace = true @@ -9,13 +10,13 @@ rust-version.workspace = true license.workspace = true [dependencies] +clap = { version = "4.4.18", features = ["derive"] } lazy_static = "1.4" neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -structopt = "~0.3" [features] deny-warnings = [] diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index b1dae43c9c..455189dd61 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -24,6 +24,7 @@ use std::{ time::{Duration, Instant}, }; +use clap::Parser; use neqo_common::{event::Provider, hex, Datagram, IpTos}; use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; @@ -31,25 +32,24 @@ use neqo_transport::{ Connection, ConnectionError, ConnectionEvent, ConnectionParameters, EmptyConnectionIdGenerator, Error, Output, State, StreamId, StreamType, }; -use structopt::StructOpt; -#[derive(Debug, StructOpt, Clone)] -#[structopt(name = "neqo-interop", about = "A QUIC interop client.")] +#[derive(Debug, Parser, Clone)] +#[command(author, version, about, long_about = None)] struct Args { - #[structopt(short = "p", long)] + #[arg(short = 'p', long)] // Peers to include include: Vec, - #[structopt(short = "P", long)] + #[arg(short = 'P', long)] exclude: Vec, - #[structopt(short = "t", long)] + #[arg(short = 't', long)] include_tests: Vec, - #[structopt(short = "T", long)] + #[arg(short = 'T', long)] exclude_tests: Vec, - #[structopt(long, default_value = "5")] + #[arg(long, default_value = "5")] timeout: u64, } @@ -905,7 +905,7 @@ const TESTS: [Test; 7] = [ fn main() { let _tests = vec![Test::Connect]; - let args = Args::from_args(); + let args = Args::parse(); init(); 
Timer::set_timeout(Duration::from_secs(args.timeout)); diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 1f9ad24795..dbd7ef9284 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "neqo-server" +description = "A basic HTTP3 server." authors.workspace = true homepage.workspace = true repository.workspace = true @@ -9,6 +10,7 @@ rust-version.workspace = true license.workspace = true [dependencies] +clap = { version = "4.4.18", features = ["derive"] } futures = "0.3" log = { version = "0.4.17", default-features = false } neqo-common = { path = "./../neqo-common" } @@ -18,7 +20,6 @@ neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } qlog = "0.12.0" regex = "1.9" -structopt = "0.3" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } [features] diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 9b924504cc..e7dca12fe6 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -14,17 +14,16 @@ use std::{ convert::TryFrom, fmt::{self, Display}, fs::OpenOptions, - io, - io::Read, + io::{self, Read}, net::{SocketAddr, ToSocketAddrs}, path::PathBuf, pin::Pin, process::exit, rc::Rc, - str::FromStr, time::{Duration, Instant}, }; +use clap::Parser; use futures::{ future::{select, select_all, Either}, FutureExt, @@ -42,7 +41,6 @@ use neqo_transport::{ ConnectionIdGenerator, ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, Version, }; -use structopt::StructOpt; use tokio::{net::UdpSocket, time::Sleep}; use crate::old_https::Http09Server; @@ -91,66 +89,63 @@ impl Display for ServerError { } } -#[derive(Debug, StructOpt)] -#[structopt(name = "neqo-server", about = "A basic HTTP3 server.")] +impl std::error::Error for ServerError {} + +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] struct Args { /// List of IP:port to listen on - #[structopt(default_value = 
"[::]:4433")] + #[arg(default_value = "[::]:4433")] hosts: Vec, - #[structopt(name = "encoder-table-size", long, default_value = "16384")] + #[arg(name = "encoder-table-size", long, default_value = "16384")] max_table_size_encoder: u64, - #[structopt(name = "decoder-table-size", long, default_value = "16384")] + #[arg(name = "decoder-table-size", long, default_value = "16384")] max_table_size_decoder: u64, - #[structopt(short = "b", long, default_value = "10")] + #[arg(short = 'b', long, default_value = "10")] max_blocked_streams: u16, - #[structopt( - short = "d", - long, - default_value = "./test-fixture/db", - parse(from_os_str) - )] + #[arg(short = 'd', long, default_value = "./test-fixture/db")] /// NSS database directory. db: PathBuf, - #[structopt(short = "k", long, default_value = "key")] + #[arg(short = 'k', long, default_value = "key")] /// Name of key from NSS database. key: String, - #[structopt(short = "a", long, default_value = "h3")] + #[arg(short = 'a', long, default_value = "h3")] /// ALPN labels to negotiate. /// /// This server still only does HTTP3 no matter what the ALPN says. alpn: String, - #[structopt(name = "qlog-dir", long)] + #[arg(name = "qlog-dir", long, value_parser=clap::value_parser!(PathBuf))] /// Enable QLOG logging and QLOG traces to this directory qlog_dir: Option, - #[structopt(name = "qns-test", long)] + #[arg(name = "qns-test", long)] /// Enable special behavior for use with QUIC Network Simulator qns_test: Option, - #[structopt(name = "use-old-http", short = "o", long)] + #[arg(name = "use-old-http", short = 'o', long)] /// Use http 0.9 instead of HTTP/3 use_old_http: bool, - #[structopt(flatten)] + #[command(flatten)] quic_parameters: QuicParameters, - #[structopt(name = "retry", long)] + #[arg(name = "retry", long)] /// Force a retry retry: bool, - #[structopt(short = "c", long, number_of_values = 1)] + #[arg(short = 'c', long, number_of_values = 1)] /// The set of TLS cipher suites to enable. 
/// From: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256. ciphers: Vec, - #[structopt(name = "ech", long)] + #[arg(name = "ech", long)] /// Enable encrypted client hello (ECH). /// This generates a new set of ECH keys when it is invoked. /// The resulting configuration is printed to stdout in hexadecimal format. @@ -200,53 +195,46 @@ impl Args { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -struct VersionArg(Version); -impl FromStr for VersionArg { - type Err = ServerError; - - fn from_str(s: &str) -> Result { - let v = u32::from_str_radix(s, 16) - .map_err(|_| ServerError::ArgumentError("versions need to be specified in hex"))?; - Ok(Self(Version::try_from(v).map_err(|_| { - ServerError::ArgumentError("unknown version") - })?)) - } +fn from_str(s: &str) -> Result { + let v = u32::from_str_radix(s, 16) + .map_err(|_| ServerError::ArgumentError("versions need to be specified in hex"))?; + Version::try_from(v).map_err(|_| ServerError::ArgumentError("unknown version")) } -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] struct QuicParameters { - #[structopt( - short = "V", + #[arg( + short = 'Q', long, - multiple = true, - use_delimiter = true, - number_of_values = 1 + num_args = 1.., + value_delimiter = ' ', + number_of_values = 1, + value_parser = from_str )] /// A list of versions to support in order of preference, in hex. - quic_version: Vec, + quic_version: Vec, - #[structopt(long, default_value = "16")] + #[arg(long, default_value = "16")] /// Set the MAX_STREAMS_BIDI limit. max_streams_bidi: u64, - #[structopt(long, default_value = "16")] + #[arg(long, default_value = "16")] /// Set the MAX_STREAMS_UNI limit. max_streams_uni: u64, - #[structopt(long = "idle", default_value = "30")] + #[arg(long = "idle", default_value = "30")] /// The idle timeout for connections, in seconds. 
idle_timeout: u64, - #[structopt(long = "cc", default_value = "newreno")] + #[arg(long = "cc", default_value = "newreno")] /// The congestion controller to use. congestion_control: CongestionControlAlgorithm, - #[structopt(name = "preferred-address-v4", long)] + #[arg(name = "preferred-address-v4", long)] /// An IPv4 address for the server preferred address. preferred_address_v4: Option, - #[structopt(name = "preferred-address-v6", long)] + #[arg(name = "preferred-address-v6", long)] /// An IPv6 address for the server preferred address. preferred_address_v6: Option, } @@ -312,7 +300,7 @@ impl QuicParameters { } if let Some(first) = self.quic_version.first() { - params = params.versions(first.0, self.quic_version.iter().map(|&v| v.0).collect()); + params = params.versions(*first, self.quic_version.to_vec()); } params } @@ -781,7 +769,7 @@ enum Ready { async fn main() -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; - let mut args = Args::from_args(); + let mut args = Args::parse(); assert!(!args.key.is_empty(), "Need at least one key"); init_db(args.db.clone()); @@ -792,7 +780,7 @@ async fn main() -> Result<(), io::Error> { // only. Exceptions are testcases `versionnegotiation` (not yet // implemented) and `v2`. if testcase != "v2" { - args.quic_parameters.quic_version = vec![VersionArg(Version::Version1)]; + args.quic_parameters.quic_version = vec![Version::Version1]; } } else { qwarn!("Both -V and --qns-test were set. 
Ignoring testcase specific versions."); From e7277f910c31285372b6b19788518d97ac0d2031 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 07:13:01 +0200 Subject: [PATCH 131/321] chore: Drop patch versions from deps (#1635) --- neqo-client/Cargo.toml | 11 ++++++----- neqo-common/Cargo.toml | 5 +++-- neqo-crypto/Cargo.toml | 12 +++++++----- neqo-http3/Cargo.toml | 11 ++++++----- neqo-interop/Cargo.toml | 3 ++- neqo-qpack/Cargo.toml | 9 +++++---- neqo-server/Cargo.toml | 7 ++++--- neqo-transport/Cargo.toml | 11 ++++++----- test-fixture/Cargo.toml | 5 +++-- 9 files changed, 42 insertions(+), 32 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 545081d946..5899475e06 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -10,18 +10,19 @@ rust-version.workspace = true license.workspace = true [dependencies] -clap = { version = "4.4.18", features = ["derive"] } +# neqo-client is not used in Firefox, so we can be liberal with dependency versions +clap = { version = "4.4", features = ["derive"] } futures = "0.3" -hex = "0.4.3" -log = { version = "0.4.17", default-features = false } +hex = "0.4" +log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12.0" +qlog = "0.12" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } -url = "~2.5.0" +url = "2.5" [features] deny-warnings = [] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 91b6458c0c..2fe80edb43 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -10,12 +10,13 @@ rust-version.workspace = true license.workspace = true [dependencies] +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 enum-map = "2.7" env_logger = { version = "0.10", 
default-features = false } lazy_static = "1.4" log = { version = "0.4", default-features = false } -qlog = "0.12.0" -time = { version = "0.3.23", features = ["formatting"] } +qlog = "0.12" +time = { version = "0.3", features = ["formatting"] } [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 8a825b985c..5f3ebea1f4 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -10,15 +10,17 @@ rust-version.workspace = true license.workspace = true [dependencies] -log = { version = "~0.4.17", default-features = false } +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } [build-dependencies] -bindgen = { version = "0.69.1", default-features = false, features = ["runtime"] } +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +bindgen = { version = "0.69", default-features = false, features = ["runtime"] } mozbuild = { version = "0.1", optional = true } -serde = "1.0.195" -serde_derive = "1.0.195" -toml = "0.5.11" +serde = "1.0" +serde_derive = "1.0" +toml = "0.5" [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index a078548f0f..5aee1dda12 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -9,16 +9,17 @@ rust-version.workspace = true license.workspace = true [dependencies] -enumset = "1.1.2" +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +enumset = "1.1" lazy_static = "1.4" -log = { version = "0.4.17", default-features = false } +log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12.0" -sfv = "0.9.3" -smallvec = "1.11.1" +qlog = 
"0.12" +sfv = "0.9" +smallvec = "1.11" url = "2.5" [dev-dependencies] diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index f049f2cb05..e3c2869fa3 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -10,7 +10,8 @@ rust-version.workspace = true license.workspace = true [dependencies] -clap = { version = "4.4.18", features = ["derive"] } +# neqo-interop is not used in Firefox, so we can be liberal with dependency versions +clap = { version = "4.4", features = ["derive"] } lazy_static = "1.4" neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 32affad81b..2be8513ecc 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -9,13 +9,14 @@ rust-version.workspace = true license.workspace = true [dependencies] -lazy_static = "~1.4.0" -log = { version = "~0.4.17", default-features = false } +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +lazy_static = "1.4" +log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12.0" -static_assertions = "~1.1.0" +qlog = "0.12" +static_assertions = "1.1" [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index dbd7ef9284..ffba762f23 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -10,15 +10,16 @@ rust-version.workspace = true license.workspace = true [dependencies] -clap = { version = "4.4.18", features = ["derive"] } +# neqo-server is not used in Firefox, so we can be liberal with dependency versions +clap = { version = "4.4", features = ["derive"] } futures = "0.3" -log = { version = "0.4.17", default-features = false } +log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { 
path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12.0" +qlog = "0.12" regex = "1.9" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index e0f26feca2..b1ff5b01bd 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -9,16 +9,17 @@ rust-version.workspace = true license.workspace = true [dependencies] -indexmap = "1.9.3" +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +indexmap = "1.9" lazy_static = "1.4" -log = { version = "0.4.17", default-features = false } +log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } -qlog = "0.12.0" -smallvec = "1.11.1" +qlog = "0.12" +smallvec = "1.11" [dev-dependencies] -criterion = { version = "0.5.1", features = ["html_reports"] } +criterion = { version = "0.5", features = ["html_reports"] } enum-map = "2.7" test-fixture = { path = "../test-fixture" } diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 8fe2ac0602..aebeba04f4 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -9,14 +9,15 @@ rust-version.workspace = true license.workspace = true [dependencies] +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 lazy_static = "1.4" -log = { version = "0.4.17", default-features = false } +log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -qlog = "0.12.0" +qlog = "0.12" [features] deny-warnings = [] From 303d9fd4ecea5a71e33e2562593961703d50424c Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 
2024 09:08:40 +0200 Subject: [PATCH 132/321] chore: FF now using Rust 1.74.0 (#1639) * chore: FF now using Rust 1.74.0 This should address the CI build errors with `clap`. * Remove clippy allows --- .github/workflows/check.yml | 4 +++- Cargo.toml | 4 +++- neqo-crypto/src/ech.rs | 2 -- neqo-crypto/src/hkdf.rs | 1 - neqo-crypto/src/hp.rs | 5 ----- neqo-crypto/src/p11.rs | 1 - neqo-transport/src/connection/mod.rs | 1 - 7 files changed, 6 insertions(+), 12 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 58b8d3d870..639f67f69e 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -17,7 +17,9 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-13, windows-latest] - rust-toolchain: [1.70.0, stable, nightly] + # Don't increase beyond what Firefox is currently using: + # https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule + rust-toolchain: [1.74.0, stable, nightly] type: [debug] include: - os: ubuntu-latest diff --git a/Cargo.toml b/Cargo.toml index 39e0f76441..eb6a0b7207 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,5 +17,7 @@ repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] version = "0.7.0" edition = "2018" -rust-version = "1.70.0" license = "MIT OR Apache-2.0" +# Don't increase beyond what Firefox is currently using: +# https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule +rust-version = "1.74.0" diff --git a/neqo-crypto/src/ech.rs b/neqo-crypto/src/ech.rs index 1f54c4592e..109d745520 100644 --- a/neqo-crypto/src/ech.rs +++ b/neqo-crypto/src/ech.rs @@ -113,7 +113,6 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { // If we have tracing on, try to ensure that key data can be read. let insensitive_secret_ptr = if log::log_enabled!(log::Level::Trace) { - #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. 
unsafe { p11::PK11_GenerateKeyPairWithOpFlags( *slot, @@ -131,7 +130,6 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { }; assert_eq!(insensitive_secret_ptr.is_null(), public_ptr.is_null()); let secret_ptr = if insensitive_secret_ptr.is_null() { - #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. unsafe { p11::PK11_GenerateKeyPairWithOpFlags( *slot, diff --git a/neqo-crypto/src/hkdf.rs b/neqo-crypto/src/hkdf.rs index e3cf77418c..058a63b2bd 100644 --- a/neqo-crypto/src/hkdf.rs +++ b/neqo-crypto/src/hkdf.rs @@ -70,7 +70,6 @@ pub fn import_key(version: Version, buf: &[u8]) -> Res { return Err(Error::UnsupportedVersion); } let slot = Slot::internal()?; - #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. let key_ptr = unsafe { PK11_ImportDataKey( *slot, diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index 2479eff8f5..27d9a6dc79 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -76,7 +76,6 @@ impl HpKey { let l = label.as_bytes(); let mut secret: *mut PK11SymKey = null_mut(); - #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. let (mech, key_size) = match cipher { TLS_AES_128_GCM_SHA256 => (CK_MECHANISM_TYPE::from(CKM_AES_ECB), 16), TLS_AES_256_GCM_SHA384 => (CK_MECHANISM_TYPE::from(CKM_AES_ECB), 32), @@ -104,8 +103,6 @@ impl HpKey { let res = match cipher { TLS_AES_128_GCM_SHA256 | TLS_AES_256_GCM_SHA384 => { - // TODO: Remove when we bump the MSRV to 1.74.0. - #[allow(clippy::useless_conversion)] let context_ptr = unsafe { PK11_CreateContextBySymKey( mech, @@ -181,8 +178,6 @@ impl HpKey { }; let mut output_len: c_uint = 0; let mut param_item = Item::wrap_struct(¶ms); - // TODO: Remove when we bump the MSRV to 1.74.0. 
- #[allow(clippy::useless_conversion)] secstatus_to_res(unsafe { PK11_Encrypt( **key, diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index 508d240062..7a89f14c2b 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -139,7 +139,6 @@ impl PrivateKey { /// When the values are too large to fit. So never. pub fn key_data(&self) -> Res> { let mut key_item = Item::make_empty(); - #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0. secstatus_to_res(unsafe { PK11_ReadRawAttribute( PK11ObjectType::PK11_TypePrivKey, diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 1d2cda64ff..d33f77ed6b 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1578,7 +1578,6 @@ impl Connection { /// During connection setup, the first path needs to be setup. /// This uses the connection IDs that were provided during the handshake /// to setup that path. - #[allow(clippy::or_fun_call)] // Remove when MSRV >= 1.59 fn setup_handshake_path(&mut self, path: &PathRef, now: Instant) { self.paths.make_permanent( path, From daa9394e55c73e00ed6ce91bf6a58c35194fa43b Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 9 Feb 2024 18:41:23 +1100 Subject: [PATCH 133/321] OnceCell is now available to us (#1637) * Use OnceCell/OnceLock instead of lazy_static See https://docs.rs/once_cell/latest/once_cell/#faq for details. OnceCell/OnceLock are now available to us as we use rust 1.70. * Remove OnceResult This existed because we didn't have OnceCell. Now we do, so it can go. 
* Merge main * Wrap this one comment --- neqo-common/Cargo.toml | 1 - neqo-common/src/log.rs | 17 ++- neqo-common/src/timer.rs | 61 ++++----- neqo-crypto/src/lib.rs | 92 +++++++------ neqo-crypto/src/once.rs | 44 ------- neqo-crypto/src/time.rs | 14 +- neqo-http3/Cargo.toml | 1 - neqo-http3/tests/send_message.rs | 31 ++--- neqo-interop/Cargo.toml | 1 - neqo-interop/src/main.rs | 13 +- neqo-qpack/Cargo.toml | 1 - neqo-qpack/src/huffman.rs | 4 +- neqo-qpack/src/huffman_decode_helper.rs | 9 +- neqo-transport/Cargo.toml | 1 - neqo-transport/src/tracking.rs | 163 ++++++++++++------------ test-fixture/Cargo.toml | 1 - test-fixture/src/lib.rs | 14 +- 17 files changed, 205 insertions(+), 263 deletions(-) delete mode 100644 neqo-crypto/src/once.rs diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 2fe80edb43..f27d97b42a 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -13,7 +13,6 @@ license.workspace = true # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 enum-map = "2.7" env_logger = { version = "0.10", default-features = false } -lazy_static = "1.4" log = { version = "0.4", default-features = false } qlog = "0.12" time = { version = "0.3", features = ["formatting"] } diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index d9c30b98b1..1b181de56b 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -6,10 +6,13 @@ #![allow(clippy::module_name_repetitions)] -use std::{io::Write, sync::Once, time::Instant}; +use std::{ + io::Write, + sync::{Once, OnceLock}, + time::{Duration, Instant}, +}; use env_logger::Builder; -use lazy_static::lazy_static; #[macro_export] macro_rules! do_log { @@ -42,17 +45,17 @@ macro_rules! log_subject { }}; } -static INIT_ONCE: Once = Once::new(); - -lazy_static! 
{ - static ref START_TIME: Instant = Instant::now(); +fn since_start() -> Duration { + static START_TIME: OnceLock = OnceLock::new(); + START_TIME.get_or_init(Instant::now).elapsed() } pub fn init() { + static INIT_ONCE: Once = Once::new(); INIT_ONCE.call_once(|| { let mut builder = Builder::from_env("RUST_LOG"); builder.format(|buf, record| { - let elapsed = START_TIME.elapsed(); + let elapsed = since_start(); writeln!( buf, "{}s{:3}ms {} {}", diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index e8532af442..6708c70963 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -247,49 +247,50 @@ impl Timer { #[cfg(test)] mod test { - use lazy_static::lazy_static; + use std::sync::OnceLock; use super::{Duration, Instant, Timer}; - lazy_static! { - static ref NOW: Instant = Instant::now(); + fn now() -> Instant { + static NOW: OnceLock = OnceLock::new(); + *NOW.get_or_init(Instant::now) } const GRANULARITY: Duration = Duration::from_millis(10); const CAPACITY: usize = 10; #[test] fn create() { - let t: Timer<()> = Timer::new(*NOW, GRANULARITY, CAPACITY); + let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY); assert_eq!(t.span(), Duration::from_millis(100)); assert_eq!(None, t.next_time()); } #[test] fn immediate_entry() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - t.add(*NOW, 12); - assert_eq!(*NOW, t.next_time().expect("should have an entry")); - let values: Vec<_> = t.take_until(*NOW).collect(); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + t.add(now(), 12); + assert_eq!(now(), t.next_time().expect("should have an entry")); + let values: Vec<_> = t.take_until(now()).collect(); assert_eq!(vec![12], values); } #[test] fn same_time() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); let v1 = 12; let v2 = 13; - t.add(*NOW, v1); - t.add(*NOW, v2); - assert_eq!(*NOW, t.next_time().expect("should have an entry")); - let values: Vec<_> = 
t.take_until(*NOW).collect(); + t.add(now(), v1); + t.add(now(), v2); + assert_eq!(now(), t.next_time().expect("should have an entry")); + let values: Vec<_> = t.take_until(now()).collect(); assert!(values.contains(&v1)); assert!(values.contains(&v2)); } #[test] fn add() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let near_future = *NOW + Duration::from_millis(17); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let near_future = now() + Duration::from_millis(17); let v = 9; t.add(near_future, v); assert_eq!(near_future, t.next_time().expect("should return a value")); @@ -305,8 +306,8 @@ mod test { #[test] fn add_future() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let future = *NOW + Duration::from_millis(117); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); let v = 9; t.add(future, v); assert_eq!(future, t.next_time().expect("should return a value")); @@ -315,8 +316,8 @@ mod test { #[test] fn add_far_future() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let far_future = *NOW + Duration::from_millis(892); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let far_future = now() + Duration::from_millis(892); let v = 9; t.add(far_future, v); assert_eq!(far_future, t.next_time().expect("should return a value")); @@ -333,12 +334,12 @@ mod test { ]; fn with_times() -> Timer { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); for (i, time) in TIMES.iter().enumerate() { - t.add(*NOW + *time, i); + t.add(now() + *time, i); } assert_eq!( - *NOW + *TIMES.iter().min().unwrap(), + now() + *TIMES.iter().min().unwrap(), t.next_time().expect("should have a time") ); t @@ -348,7 +349,7 @@ mod test { #[allow(clippy::needless_collect)] // false positive fn multiple_values() { let mut t = with_times(); - let values: Vec<_> = t.take_until(*NOW + *TIMES.iter().max().unwrap()).collect(); + let values: Vec<_> 
= t.take_until(now() + *TIMES.iter().max().unwrap()).collect(); for i in 0..TIMES.len() { assert!(values.contains(&i)); } @@ -358,7 +359,7 @@ mod test { #[allow(clippy::needless_collect)] // false positive fn take_far_future() { let mut t = with_times(); - let values: Vec<_> = t.take_until(*NOW + Duration::from_secs(100)).collect(); + let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect(); for i in 0..TIMES.len() { assert!(values.contains(&i)); } @@ -368,15 +369,15 @@ mod test { fn remove_each() { let mut t = with_times(); for (i, time) in TIMES.iter().enumerate() { - assert_eq!(Some(i), t.remove(*NOW + *time, |&x| x == i)); + assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i)); } assert_eq!(None, t.next_time()); } #[test] fn remove_future() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let future = *NOW + Duration::from_millis(117); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); let v = 9; t.add(future, v); @@ -385,9 +386,9 @@ mod test { #[test] fn remove_too_far_future() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let future = *NOW + Duration::from_millis(117); - let too_far_future = *NOW + t.span() + Duration::from_millis(117); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); + let too_far_future = now() + t.span() + Duration::from_millis(117); let v = 9; t.add(future, v); diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 05424ee1f3..8d2e3e4ccd 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -27,7 +27,6 @@ mod exp; pub mod ext; pub mod hkdf; pub mod hp; -mod once; #[macro_use] mod p11; mod prio; @@ -41,6 +40,7 @@ use std::{ ffi::CString, path::{Path, PathBuf}, ptr::null, + sync::OnceLock, }; #[cfg(not(feature = "fuzzing"))] @@ -49,7 +49,6 @@ pub use self::aead::RealAead as Aead; pub use self::aead::RealAead; #[cfg(feature = "fuzzing")] pub use 
self::aead_fuzzing::FuzzingAead as Aead; -use self::once::OnceResult; pub use self::{ agent::{ Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken, @@ -100,7 +99,7 @@ impl Drop for NssLoaded { } } -static mut INITIALIZED: OnceResult = OnceResult::new(); +static INITIALIZED: OnceLock = OnceLock::new(); fn already_initialized() -> bool { unsafe { nss::NSS_IsInitialized() != 0 } @@ -124,19 +123,18 @@ fn version_check() { pub fn init() { // Set time zero. time::init(); - unsafe { - INITIALIZED.call_once(|| { - version_check(); - if already_initialized() { - return NssLoaded::External; - } + _ = INITIALIZED.get_or_init(|| { + version_check(); + if already_initialized() { + return NssLoaded::External; + } - secstatus_to_res(nss::NSS_NoDB_Init(null())).expect("NSS_NoDB_Init failed"); - secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) }).expect("NSS_NoDB_Init failed"); + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) + .expect("NSS_SetDomesticPolicy failed"); - NssLoaded::NoDb - }); - } + NssLoaded::NoDb + }); } /// This enables SSLTRACE by calling a simple, harmless function to trigger its @@ -158,51 +156,47 @@ fn enable_ssl_trace() { /// If NSS cannot be initialized. 
pub fn init_db>(dir: P) { time::init(); - unsafe { - INITIALIZED.call_once(|| { - version_check(); - if already_initialized() { - return NssLoaded::External; - } + _ = INITIALIZED.get_or_init(|| { + version_check(); + if already_initialized() { + return NssLoaded::External; + } - let path = dir.into(); - assert!(path.is_dir()); - let pathstr = path.to_str().expect("path converts to string").to_string(); - let dircstr = CString::new(pathstr).unwrap(); - let empty = CString::new("").unwrap(); - secstatus_to_res(nss::NSS_Initialize( + let path = dir.into(); + assert!(path.is_dir()); + let pathstr = path.to_str().expect("path converts to string").to_string(); + let dircstr = CString::new(pathstr).unwrap(); + let empty = CString::new("").unwrap(); + secstatus_to_res(unsafe { + nss::NSS_Initialize( dircstr.as_ptr(), empty.as_ptr(), empty.as_ptr(), nss::SECMOD_DB.as_ptr().cast(), nss::NSS_INIT_READONLY, - )) - .expect("NSS_Initialize failed"); - - secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed"); - secstatus_to_res(ssl::SSL_ConfigServerSessionIDCache( - 1024, - 0, - 0, - dircstr.as_ptr(), - )) - .expect("SSL_ConfigServerSessionIDCache failed"); - - #[cfg(debug_assertions)] - enable_ssl_trace(); - - NssLoaded::Db(path.into_boxed_path()) - }); - } + ) + }) + .expect("NSS_Initialize failed"); + + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) + .expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { + ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr()) + }) + .expect("SSL_ConfigServerSessionIDCache failed"); + + #[cfg(debug_assertions)] + enable_ssl_trace(); + + NssLoaded::Db(path.into_boxed_path()) + }); } /// # Panics /// /// If NSS isn't initialized. 
pub fn assert_initialized() { - unsafe { - INITIALIZED.call_once(|| { - panic!("NSS not initialized with init or init_db"); - }); - } + INITIALIZED + .get() + .expect("NSS not initialized with init or init_db"); } diff --git a/neqo-crypto/src/once.rs b/neqo-crypto/src/once.rs deleted file mode 100644 index 80657cfe26..0000000000 --- a/neqo-crypto/src/once.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::sync::Once; - -#[allow(clippy::module_name_repetitions)] -pub struct OnceResult { - once: Once, - v: Option, -} - -impl OnceResult { - #[must_use] - pub const fn new() -> Self { - Self { - once: Once::new(), - v: None, - } - } - - pub fn call_once T>(&mut self, f: F) -> &T { - let v = &mut self.v; - self.once.call_once(|| { - *v = Some(f()); - }); - self.v.as_ref().unwrap() - } -} - -#[cfg(test)] -mod test { - use super::OnceResult; - - static mut STATIC_ONCE_RESULT: OnceResult = OnceResult::new(); - - #[test] - fn static_update() { - assert_eq!(*unsafe { STATIC_ONCE_RESULT.call_once(|| 23) }, 23); - assert_eq!(*unsafe { STATIC_ONCE_RESULT.call_once(|| 24) }, 23); - } -} diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index 84dbfdb4a5..205f32bbf0 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -12,13 +12,13 @@ use std::{ ops::Deref, os::raw::c_void, pin::Pin, + sync::OnceLock, time::{Duration, Instant}, }; use crate::{ agentio::as_c_void, err::{Error, Res}, - once::OnceResult, ssl::{PRFileDesc, SSLTimeFunc}, }; @@ -67,14 +67,13 @@ impl TimeZero { } } -static mut BASE_TIME: OnceResult = OnceResult::new(); +static BASE_TIME: OnceLock = OnceLock::new(); fn get_base() -> &'static TimeZero { - let f = || TimeZero { + BASE_TIME.get_or_init(|| TimeZero { instant: Instant::now(), prtime: unsafe { PR_Now() }, - }; - unsafe { BASE_TIME.call_once(f) } + }) } 
pub(crate) fn init() { @@ -97,9 +96,8 @@ impl Deref for Time { impl From for Time { /// Convert from an Instant into a Time. fn from(t: Instant) -> Self { - // Call `TimeZero::baseline(t)` so that time zero can be set. - let f = || TimeZero::baseline(t); - _ = unsafe { BASE_TIME.call_once(f) }; + // Initialize `BASE_TIME` using `TimeZero::baseline(t)`. + BASE_TIME.get_or_init(|| TimeZero::baseline(t)); Self { t } } } diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 5aee1dda12..aa7d79f029 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -11,7 +11,6 @@ license.workspace = true [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 enumset = "1.1" -lazy_static = "1.4" log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } diff --git a/neqo-http3/tests/send_message.rs b/neqo-http3/tests/send_message.rs index 507c4bd552..fbf9a7a3ea 100644 --- a/neqo-http3/tests/send_message.rs +++ b/neqo-http3/tests/send_message.rs @@ -4,7 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use lazy_static::lazy_static; +use std::sync::OnceLock; + use neqo_common::event::Provider; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ @@ -15,14 +16,14 @@ use test_fixture::*; const RESPONSE_DATA: &[u8] = &[0x61, 0x62, 0x63]; -lazy_static! { - static ref RESPONSE_HEADER_NO_DATA: Vec
= - vec![Header::new(":status", "200"), Header::new("something", "3")]; +fn response_header_no_data() -> &'static Vec
{ + static HEADERS: OnceLock> = OnceLock::new(); + HEADERS.get_or_init(|| vec![Header::new(":status", "200"), Header::new("something", "3")]) } -lazy_static! { - static ref RESPONSE_HEADER_103: Vec
= - vec![Header::new(":status", "103"), Header::new("link", "...")]; +fn response_header_103() -> &'static Vec
{ + static HEADERS: OnceLock> = OnceLock::new(); + HEADERS.get_or_init(|| vec![Header::new(":status", "103"), Header::new("link", "...")]) } fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { @@ -68,7 +69,7 @@ fn send_trailers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> { } fn send_informational_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> { - request.send_headers(&RESPONSE_HEADER_103) + request.send_headers(response_header_103()) } fn send_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> { @@ -90,7 +91,7 @@ fn process_client_events(conn: &mut Http3Client) { Header::new(":status", "200"), Header::new("content-length", "3"), ]) - || (headers.as_ref() == *RESPONSE_HEADER_103) + || (headers.as_ref() == *response_header_103()) ); assert!(!fin); response_header_found = true; @@ -116,7 +117,7 @@ fn process_client_events_no_data(conn: &mut Http3Client) { while let Some(event) = conn.next_event() { match event { Http3ClientEvent::HeaderReady { headers, fin, .. 
} => { - assert_eq!(headers.as_ref(), *RESPONSE_HEADER_NO_DATA); + assert_eq!(headers.as_ref(), *response_header_no_data()); fin_received = fin; response_header_found = true; } @@ -201,7 +202,7 @@ fn response_trailers3() { #[test] fn response_trailers_no_data() { let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request(); - request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap(); + request.send_headers(response_header_no_data()).unwrap(); exchange_packets(&mut hconn_c, &mut hconn_s); send_trailers(&mut request).unwrap(); exchange_packets(&mut hconn_c, &mut hconn_s); @@ -258,10 +259,10 @@ fn trailers_after_close() { #[test] fn multiple_response_headers() { let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request(); - request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap(); + request.send_headers(response_header_no_data()).unwrap(); assert_eq!( - request.send_headers(&RESPONSE_HEADER_NO_DATA), + request.send_headers(response_header_no_data()), Err(Error::InvalidHeader) ); @@ -273,7 +274,7 @@ fn multiple_response_headers() { #[test] fn informational_after_response_headers() { let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request(); - request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap(); + request.send_headers(response_header_no_data()).unwrap(); assert_eq!( send_informational_headers(&mut request), @@ -307,7 +308,7 @@ fn non_trailers_headers_after_data() { exchange_packets(&mut hconn_c, &mut hconn_s); assert_eq!( - request.send_headers(&RESPONSE_HEADER_NO_DATA), + request.send_headers(response_header_no_data()), Err(Error::InvalidHeader) ); diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index e3c2869fa3..f5996032fb 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -12,7 +12,6 @@ license.workspace = true [dependencies] # neqo-interop is not used in Firefox, so we can be liberal with dependency versions clap = { version = "4.4", features = ["derive"] } 
-lazy_static = "1.4" neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index 455189dd61..fac76a4daa 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -14,12 +14,12 @@ use std::{ mem, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs, UdpSocket}, rc::Rc, + sync::OnceLock, }; // use std::path::PathBuf; use std::{ str::FromStr, string::ParseError, - sync::Mutex, thread, time::{Duration, Instant}, }; @@ -67,22 +67,21 @@ fn emit_datagram(socket: &UdpSocket, d: Datagram) { } } -lazy_static::lazy_static! { - static ref TEST_TIMEOUT: Mutex = Mutex::new(Duration::from_secs(5)); -} - +static TEST_TIMEOUT: OnceLock = OnceLock::new(); struct Timer { end: Instant, } impl Timer { pub fn new() -> Self { Self { - end: Instant::now() + *TEST_TIMEOUT.lock().unwrap(), + end: Instant::now() + *TEST_TIMEOUT.get_or_init(|| Duration::from_secs(5)), } } pub fn set_timeout(t: Duration) { - *TEST_TIMEOUT.lock().unwrap() = t; + TEST_TIMEOUT + .set(t) + .expect("failed to set a timeout because one was already set"); } pub fn check(&self) -> Result { diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 2be8513ecc..41f72f9ba2 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -10,7 +10,6 @@ license.workspace = true [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 -lazy_static = "1.4" log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } diff --git a/neqo-qpack/src/huffman.rs b/neqo-qpack/src/huffman.rs index 283a501b32..ee53a4e041 100644 --- a/neqo-qpack/src/huffman.rs +++ b/neqo-qpack/src/huffman.rs @@ -7,7 +7,7 @@ use std::convert::TryFrom; use crate::{ - huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT}, + 
huffman_decode_helper::{huffman_decoder_root, HuffmanDecoderNode}, huffman_table::HUFFMAN_TABLE, Error, Res, }; @@ -93,7 +93,7 @@ pub fn decode_huffman(input: &[u8]) -> Res> { } fn decode_character(reader: &mut BitReader) -> Res> { - let mut node: &HuffmanDecoderNode = &HUFFMAN_DECODE_ROOT; + let mut node: &HuffmanDecoderNode = huffman_decoder_root(); let mut i = 0; while node.value.is_none() { match reader.read_bit() { diff --git a/neqo-qpack/src/huffman_decode_helper.rs b/neqo-qpack/src/huffman_decode_helper.rs index 122226dd1f..1ce4485b0f 100644 --- a/neqo-qpack/src/huffman_decode_helper.rs +++ b/neqo-qpack/src/huffman_decode_helper.rs @@ -4,9 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::convert::TryFrom; - -use lazy_static::lazy_static; +use std::{convert::TryFrom, sync::OnceLock}; use crate::huffman_table::HUFFMAN_TABLE; @@ -15,8 +13,9 @@ pub struct HuffmanDecoderNode { pub value: Option, } -lazy_static! { - pub static ref HUFFMAN_DECODE_ROOT: HuffmanDecoderNode = make_huffman_tree(0, 0); +pub fn huffman_decoder_root() -> &'static HuffmanDecoderNode { + static ROOT: OnceLock = OnceLock::new(); + ROOT.get_or_init(|| make_huffman_tree(0, 0)) } fn make_huffman_tree(prefix: u32, len: u8) -> HuffmanDecoderNode { diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index b1ff5b01bd..55cc117f66 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -11,7 +11,6 @@ license.workspace = true [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 indexmap = "1.9" -lazy_static = "1.4" log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 64d00257d3..012c895a18 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -746,8 +746,8 @@ impl 
Default for AckTracker { mod tests { use std::collections::HashSet; - use lazy_static::lazy_static; use neqo_common::Encoder; + use test_fixture::now; use super::{ AckTracker, Duration, Instant, PacketNumberSpace, PacketNumberSpaceSet, RecoveryToken, @@ -760,16 +760,13 @@ mod tests { }; const RTT: Duration = Duration::from_millis(100); - lazy_static! { - static ref NOW: Instant = Instant::now(); - } fn test_ack_range(pns: &[PacketNumber], nranges: usize) { let mut rp = RecvdPackets::new(PacketNumberSpace::Initial); // Any space will do. let mut packets = HashSet::new(); for pn in pns { - rp.set_received(*NOW, *pn, true); + rp.set_received(now(), *pn, true); packets.insert(*pn); } @@ -824,7 +821,7 @@ mod tests { // This will add one too many disjoint ranges. for i in 0..=MAX_TRACKED_RANGES { - rp.set_received(*NOW, (i * 2) as u64, true); + rp.set_received(now(), (i * 2) as u64, true); } assert_eq!(rp.ranges.len(), MAX_TRACKED_RANGES); @@ -843,22 +840,22 @@ mod tests { // Only application data packets are delayed. let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW, RTT)); + assert!(!rp.ack_now(now(), RTT)); rp.ack_freq(0, COUNT, DELAY, false); // Some packets won't cause an ACK to be needed. for i in 0..COUNT { - rp.set_received(*NOW, i, true); - assert_eq!(Some(*NOW + DELAY), rp.ack_time()); - assert!(!rp.ack_now(*NOW, RTT)); - assert!(rp.ack_now(*NOW + DELAY, RTT)); + rp.set_received(now(), i, true); + assert_eq!(Some(now() + DELAY), rp.ack_time()); + assert!(!rp.ack_now(now(), RTT)); + assert!(rp.ack_now(now() + DELAY, RTT)); } // Exceeding COUNT will move the ACK time to now. 
- rp.set_received(*NOW, COUNT, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW, RTT)); + rp.set_received(now(), COUNT, true); + assert_eq!(Some(now()), rp.ack_time()); + assert!(rp.ack_now(now(), RTT)); } #[test] @@ -866,12 +863,12 @@ mod tests { for space in &[PacketNumberSpace::Initial, PacketNumberSpace::Handshake] { let mut rp = RecvdPackets::new(*space); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW, RTT)); + assert!(!rp.ack_now(now(), RTT)); // Any packet in these spaces is acknowledged straight away. - rp.set_received(*NOW, 0, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW, RTT)); + rp.set_received(now(), 0, true); + assert_eq!(Some(now()), rp.ack_time()); + assert!(rp.ack_now(now(), RTT)); } } @@ -879,12 +876,12 @@ mod tests { fn ooo_no_ack_delay_new() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW, RTT)); + assert!(!rp.ack_now(now(), RTT)); // Anything other than packet 0 is acknowledged immediately. - rp.set_received(*NOW, 1, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW, RTT)); + rp.set_received(now(), 1, true); + assert_eq!(Some(now()), rp.ack_time()); + assert!(rp.ack_now(now(), RTT)); } fn write_frame_at(rp: &mut RecvdPackets, now: Instant) { @@ -897,37 +894,37 @@ mod tests { } fn write_frame(rp: &mut RecvdPackets) { - write_frame_at(rp, *NOW); + write_frame_at(rp, now()); } #[test] fn ooo_no_ack_delay_fill() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); - rp.set_received(*NOW, 1, true); + rp.set_received(now(), 1, true); write_frame(&mut rp); // Filling in behind the largest acknowledged causes immediate ACK. - rp.set_received(*NOW, 0, true); + rp.set_received(now(), 0, true); write_frame(&mut rp); // Receiving the next packet won't elicit an ACK. 
- rp.set_received(*NOW, 2, true); - assert!(!rp.ack_now(*NOW, RTT)); + rp.set_received(now(), 2, true); + assert!(!rp.ack_now(now(), RTT)); } #[test] fn immediate_ack_after_rtt() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); - rp.set_received(*NOW, 1, true); + rp.set_received(now(), 1, true); write_frame(&mut rp); // Filling in behind the largest acknowledged causes immediate ACK. - rp.set_received(*NOW, 0, true); + rp.set_received(now(), 0, true); write_frame(&mut rp); // A new packet ordinarily doesn't result in an ACK, but this time it does. - rp.set_received(*NOW + RTT, 2, true); - write_frame_at(&mut rp, *NOW + RTT); + rp.set_received(now() + RTT, 2, true); + write_frame_at(&mut rp, now() + RTT); } #[test] @@ -937,29 +934,29 @@ mod tests { // Set tolerance to 2 and then it takes three packets. rp.ack_freq(0, 2, Duration::from_millis(10), true); - rp.set_received(*NOW, 1, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 2, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 3, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 1, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 2, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_eq!(Some(now()), rp.ack_time()); } #[test] fn ooo_no_ack_delay_threshold_gap() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); - rp.set_received(*NOW, 1, true); + rp.set_received(now(), 1, true); write_frame(&mut rp); // Set tolerance to 2 and then it takes three packets. 
rp.ack_freq(0, 2, Duration::from_millis(10), true); - rp.set_received(*NOW, 3, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 4, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 5, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 4, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 5, true); + assert_eq!(Some(now()), rp.ack_time()); } /// Test that an in-order packet that is not ack-eliciting doesn't @@ -970,13 +967,13 @@ mod tests { rp.ack_freq(0, 1, Duration::from_millis(10), true); // This should be ignored. - rp.set_received(*NOW, 0, false); - assert_ne!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 0, false); + assert_ne!(Some(now()), rp.ack_time()); // Skip 1 (it has no effect). - rp.set_received(*NOW, 2, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 3, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 2, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_eq!(Some(now()), rp.ack_time()); } /// If a packet that is not ack-eliciting is reordered, that's fine too. @@ -986,16 +983,16 @@ mod tests { rp.ack_freq(0, 1, Duration::from_millis(10), false); // These are out of order, but they are not ack-eliciting. - rp.set_received(*NOW, 1, false); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 0, false); - assert_ne!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 1, false); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 0, false); + assert_ne!(Some(now()), rp.ack_time()); // These are in order. 
- rp.set_received(*NOW, 2, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 3, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 2, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_eq!(Some(now()), rp.ack_time()); } #[test] @@ -1007,23 +1004,23 @@ mod tests { tracker .get_mut(PacketNumberSpace::Handshake) .unwrap() - .set_received(*NOW, 0, false); - assert_eq!(None, tracker.ack_time(*NOW)); + .set_received(now(), 0, false); + assert_eq!(None, tracker.ack_time(now())); // This should be delayed. tracker .get_mut(PacketNumberSpace::ApplicationData) .unwrap() - .set_received(*NOW, 0, true); - assert_eq!(Some(*NOW + DELAY), tracker.ack_time(*NOW)); + .set_received(now(), 0, true); + assert_eq!(Some(now() + DELAY), tracker.ack_time(now())); // This should move the time forward. - let later = *NOW + (DELAY / 2); + let later = now() + (DELAY / 2); tracker .get_mut(PacketNumberSpace::Initial) .unwrap() .set_received(later, 0, true); - assert_eq!(Some(later), tracker.ack_time(*NOW)); + assert_eq!(Some(later), tracker.ack_time(now())); } #[test] @@ -1047,17 +1044,17 @@ mod tests { tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 0, true); + .set_received(now(), 0, true); // The reference time for `ack_time` has to be in the past or we filter out the timer. 
assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_some()); let mut tokens = Vec::new(); let mut stats = FrameStats::default(); tracker.write_frame( PacketNumberSpace::Initial, - *NOW, + now(), RTT, &mut builder, &mut tokens, @@ -1069,9 +1066,9 @@ mod tests { tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 1, true); + .set_received(now(), 1, true); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_some()); // Now drop that space. @@ -1079,11 +1076,11 @@ mod tests { assert!(tracker.get_mut(PacketNumberSpace::Initial).is_none()); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_none()); tracker.write_frame( PacketNumberSpace::Initial, - *NOW, + now(), RTT, &mut builder, &mut tokens, @@ -1103,9 +1100,9 @@ mod tests { tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 0, true); + .set_received(now(), 0, true); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_some()); let mut builder = PacketBuilder::short(Encoder::new(), false, []); @@ -1114,7 +1111,7 @@ mod tests { let mut stats = FrameStats::default(); tracker.write_frame( PacketNumberSpace::Initial, - *NOW, + now(), RTT, &mut builder, &mut Vec::new(), @@ -1130,13 +1127,13 @@ mod tests { tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 0, true); + .set_received(now(), 0, true); tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 2, true); + .set_received(now(), 2, true); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) 
.is_some()); let mut builder = PacketBuilder::short(Encoder::new(), false, []); @@ -1145,7 +1142,7 @@ mod tests { let mut stats = FrameStats::default(); tracker.write_frame( PacketNumberSpace::Initial, - *NOW, + now(), RTT, &mut builder, &mut Vec::new(), @@ -1168,19 +1165,19 @@ mod tests { let mut tracker = AckTracker::default(); // While we have multiple PN spaces, we ignore ACK timers from the past. - // Send out of order to cause the delayed ack timer to be set to `*NOW`. + // Send out of order to cause the delayed ack timer to be set to `now()`. tracker .get_mut(PacketNumberSpace::ApplicationData) .unwrap() - .set_received(*NOW, 3, true); - assert!(tracker.ack_time(*NOW + Duration::from_millis(1)).is_none()); + .set_received(now(), 3, true); + assert!(tracker.ack_time(now() + Duration::from_millis(1)).is_none()); // When we are reduced to one space, that filter is off. tracker.drop_space(PacketNumberSpace::Initial); tracker.drop_space(PacketNumberSpace::Handshake); assert_eq!( - tracker.ack_time(*NOW + Duration::from_millis(1)), - Some(*NOW) + tracker.ack_time(now() + Duration::from_millis(1)), + Some(now()) ); } diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index aebeba04f4..18bdb114be 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -10,7 +10,6 @@ license.workspace = true [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 -lazy_static = "1.4" log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 96ed335a83..daef4720df 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -8,7 +8,7 @@ #![warn(clippy::pedantic)] use std::{ - cell::RefCell, + cell::{OnceCell, RefCell}, cmp::max, convert::TryFrom, io::{Cursor, Result, Write}, @@ -19,7 +19,6 @@ use std::{ time::{Duration, Instant}, }; -use lazy_static::lazy_static; use 
neqo_common::{ event::Provider, hex, @@ -51,13 +50,14 @@ pub fn fixture_init() { // NSS operates in milliseconds and halves any value it is provided. pub const ANTI_REPLAY_WINDOW: Duration = Duration::from_millis(10); -lazy_static! { - static ref BASE_TIME: Instant = Instant::now(); -} - +/// A baseline time for all tests. This needs to be earlier than what `now()` produces +/// because of the need to have a span of time elapse for anti-replay purposes. fn earlier() -> Instant { + // Note: It is only OK to have a different base time for each thread because our tests are + // single-threaded. + thread_local!(static EARLIER: OnceCell = OnceCell::new()); fixture_init(); - *BASE_TIME + EARLIER.with(|b| *b.get_or_init(Instant::now)) } /// The current time for the test. Which is in the future, From bb74821940fb19699e0b91ae71f1b5b507bfca53 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 9 Feb 2024 18:58:23 +1100 Subject: [PATCH 134/321] Cache random (#1640) * Avoid allocation in random() This makes the function take its argument as a const generic argument, which allows the allocation to be performed on the stack. There are a few cases where we need to do in-place randomization of a different type of object (`SmallVec` in a few places) so I've also exposed an in-place mutation function. Next step is to cache the slot that this uses. 
* Cache randomness * Add a test for the cache * Remove dead code --------- Co-authored-by: Lars Eggert --- neqo-crypto/src/hkdf.rs | 21 +++-- neqo-crypto/src/lib.rs | 2 +- neqo-crypto/src/p11.rs | 99 +++++++++++++++++++--- neqo-crypto/src/selfencrypt.rs | 2 +- neqo-http3/src/frames/hframe.rs | 7 +- neqo-server/src/main.rs | 2 +- neqo-server/src/old_https.rs | 2 +- neqo-transport/src/cid.rs | 30 +++---- neqo-transport/src/connection/mod.rs | 4 +- neqo-transport/src/connection/tests/mod.rs | 2 +- neqo-transport/src/packet/mod.rs | 6 +- neqo-transport/src/path.rs | 2 +- neqo-transport/src/tparams.rs | 2 +- test-fixture/src/lib.rs | 2 +- test-fixture/src/sim/mod.rs | 2 +- test-fixture/src/sim/rng.rs | 7 +- 16 files changed, 136 insertions(+), 56 deletions(-) diff --git a/neqo-crypto/src/hkdf.rs b/neqo-crypto/src/hkdf.rs index 058a63b2bd..5d918ec13a 100644 --- a/neqo-crypto/src/hkdf.rs +++ b/neqo-crypto/src/hkdf.rs @@ -17,9 +17,10 @@ use crate::{ }, err::{Error, Res}, p11::{ - random, Item, PK11Origin, PK11SymKey, PK11_ImportDataKey, Slot, SymKey, CKA_DERIVE, + Item, PK11Origin, PK11SymKey, PK11_ImportDataKey, Slot, SymKey, CKA_DERIVE, CKM_HKDF_DERIVE, CK_ATTRIBUTE_TYPE, CK_MECHANISM_TYPE, }, + random, }; experimental_api!(SSL_HkdfExtract( @@ -40,24 +41,32 @@ experimental_api!(SSL_HkdfExpandLabel( secret: *mut *mut PK11SymKey, )); -fn key_size(version: Version, cipher: Cipher) -> Res { +const MAX_KEY_SIZE: usize = 48; +const fn key_size(version: Version, cipher: Cipher) -> Res { if version != TLS_VERSION_1_3 { return Err(Error::UnsupportedVersion); } - Ok(match cipher { + let size = match cipher { TLS_AES_128_GCM_SHA256 | TLS_CHACHA20_POLY1305_SHA256 => 32, TLS_AES_256_GCM_SHA384 => 48, _ => return Err(Error::UnsupportedCipher), - }) + }; + debug_assert!(size <= MAX_KEY_SIZE); + Ok(size) } /// Generate a random key of the right size for the given suite. /// /// # Errors /// -/// Only if NSS fails. +/// If the ciphersuite or protocol version is not supported. 
pub fn generate_key(version: Version, cipher: Cipher) -> Res { - import_key(version, &random(key_size(version, cipher)?)) + // With generic_const_expr, this becomes: + // import_key(version, &random::<{ key_size(version, cipher) }>()) + import_key( + version, + &random::()[0..key_size(version, cipher)?], + ) } /// Import a symmetric key for use with HKDF. diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 8d2e3e4ccd..4a23b5a7b1 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -63,7 +63,7 @@ pub use self::{ }, err::{Error, PRErrorCode, Res}, ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}, - p11::{random, PrivateKey, PublicKey, SymKey}, + p11::{random, randomize, PrivateKey, PublicKey, SymKey}, replay::AntiReplay, secrets::SecretDirection, ssl::Opt, diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index 7a89f14c2b..2225d5b211 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -10,6 +10,7 @@ #![allow(non_snake_case)] use std::{ + cell::RefCell, convert::TryFrom, mem, ops::{Deref, DerefMut}, @@ -289,31 +290,107 @@ impl Item { } } -/// Generate a randomized buffer. +/// Fill a buffer with randomness. /// /// # Panics /// /// When `size` is too large or NSS fails. 
-#[must_use] -pub fn random(size: usize) -> Vec { - let mut buf = vec![0; size]; - secstatus_to_res(unsafe { - PK11_GenerateRandom(buf.as_mut_ptr(), c_int::try_from(buf.len()).unwrap()) - }) - .unwrap(); +pub fn randomize>(mut buf: B) -> B { + let m_buf = buf.as_mut(); + let len = c_int::try_from(m_buf.len()).unwrap(); + secstatus_to_res(unsafe { PK11_GenerateRandom(m_buf.as_mut_ptr(), len) }).unwrap(); buf } +struct RandomCache { + cache: [u8; Self::SIZE], + used: usize, +} + +impl RandomCache { + const SIZE: usize = 256; + const CUTOFF: usize = 32; + + fn new() -> Self { + RandomCache { + cache: [0; Self::SIZE], + used: Self::SIZE, + } + } + + fn randomize>(&mut self, mut buf: B) -> B { + let m_buf = buf.as_mut(); + debug_assert!(m_buf.len() <= Self::CUTOFF); + let avail = Self::SIZE - self.used; + if m_buf.len() <= avail { + m_buf.copy_from_slice(&self.cache[self.used..self.used + m_buf.len()]); + self.used += m_buf.len(); + } else { + if avail > 0 { + m_buf[..avail].copy_from_slice(&self.cache[self.used..]); + } + randomize(&mut self.cache[..]); + self.used = m_buf.len() - avail; + m_buf[avail..].copy_from_slice(&self.cache[..self.used]); + } + buf + } +} + +/// Generate a randomized array. +/// +/// # Panics +/// +/// When `size` is too large or NSS fails. +#[must_use] +pub fn random() -> [u8; N] { + thread_local! { static CACHE: RefCell = RefCell::new(RandomCache::new()) }; + + let buf = [0; N]; + if N <= RandomCache::CUTOFF { + CACHE.with_borrow_mut(|c| c.randomize(buf)) + } else { + randomize(buf) + } +} + #[cfg(test)] mod test { use test_fixture::fixture_init; - use super::random; + use super::RandomCache; + use crate::{random, randomize}; #[test] fn randomness() { fixture_init(); - // If this ever fails, there is either a bug, or it's time to buy a lottery ticket. - assert_ne!(random(16), random(16)); + // If any of these ever fail, there is either a bug, or it's time to buy a lottery ticket. 
+ assert_ne!(random::<16>(), randomize([0; 16])); + assert_ne!([0; 16], random::<16>()); + assert_ne!([0; 64], random::<64>()); + } + + #[test] + fn cache_random_lengths() { + const ZERO: [u8; 256] = [0; 256]; + + fixture_init(); + let mut cache = RandomCache::new(); + let mut buf = [0; 256]; + let bits = usize::BITS - (RandomCache::CUTOFF - 1).leading_zeros(); + let mask = 0xff >> (u8::BITS - bits); + + for _ in 0..100 { + let len = loop { + let len = usize::from(random::<1>()[0] & mask) + 1; + if len <= RandomCache::CUTOFF { + break len; + } + }; + buf.fill(0); + if len >= 16 { + assert_ne!(&cache.randomize(&mut buf[..len])[..len], &ZERO[..len]); + } + } } } diff --git a/neqo-crypto/src/selfencrypt.rs b/neqo-crypto/src/selfencrypt.rs index b8a63153fd..1130c35250 100644 --- a/neqo-crypto/src/selfencrypt.rs +++ b/neqo-crypto/src/selfencrypt.rs @@ -82,7 +82,7 @@ impl SelfEncrypt { // opaque aead_encrypted(plaintext)[length as expanded]; // }; // AAD covers the entire header, plus the value of the AAD parameter that is provided. - let salt = random(Self::SALT_LENGTH); + let salt = random::<{ Self::SALT_LENGTH }>(); let cipher = self.make_aead(&self.key, &salt)?; let encoded_len = 2 + salt.len() + plaintext.len() + cipher.expansion(); diff --git a/neqo-http3/src/frames/hframe.rs b/neqo-http3/src/frames/hframe.rs index 83e69ba894..e69f7b449e 100644 --- a/neqo-http3/src/frames/hframe.rs +++ b/neqo-http3/src/frames/hframe.rs @@ -74,10 +74,7 @@ impl HFrame { Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID, Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST, Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH, - Self::Grease => { - let r = random(7); - Decoder::from(&r).decode_uint(7).unwrap() * 0x1f + 0x21 - } + Self::Grease => Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21, } } @@ -120,7 +117,7 @@ impl HFrame { } Self::Grease => { // Encode some number of random bytes. 
- let r = random(8); + let r = random::<8>(); enc.encode_vvec(&r[1..usize::from(1 + (r[0] & 0x7))]); } Self::PriorityUpdateRequest { diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index e7dca12fe6..be56d63586 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -573,7 +573,7 @@ impl HttpServer for SimpleServer { fn enable_ech(&mut self) -> &[u8] { let (sk, pk) = generate_ech_keys().expect("should create ECH keys"); self.server - .enable_ech(random(1)[0], "public.example", &sk, &pk) + .enable_ech(random::<1>()[0], "public.example", &sk, &pk) .unwrap(); self.server.ech_config() } diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index f254446695..bb67ab5c9d 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -246,7 +246,7 @@ impl HttpServer for Http09Server { fn enable_ech(&mut self) -> &[u8] { let (sk, pk) = generate_ech_keys().expect("generate ECH keys"); self.server - .enable_ech(random(1)[0], "public.example", &sk, &pk) + .enable_ech(random::<1>()[0], "public.example", &sk, &pk) .expect("enable ECH"); self.server.ech_config() } diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 429751bef2..8a0f7a3070 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -16,8 +16,8 @@ use std::{ }; use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder}; -use neqo_crypto::random; -use smallvec::SmallVec; +use neqo_crypto::{random, randomize}; +use smallvec::{smallvec, SmallVec}; use crate::{ frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken, @@ -41,14 +41,16 @@ pub struct ConnectionId { impl ConnectionId { pub fn generate(len: usize) -> Self { assert!(matches!(len, 0..=MAX_CONNECTION_ID_LEN)); - Self::from(random(len)) + let mut cid = smallvec![0; len]; + randomize(&mut cid); + Self { cid } } // Apply a wee bit of greasing here in picking a length between 8 and 20 bytes long. 
pub fn generate_initial() -> Self { - let v = random(1); + let v = random::<1>()[0]; // Bias selection toward picking 8 (>50% of the time). - let len: usize = max(8, 5 + (v[0] & (v[0] >> 4))).into(); + let len: usize = max(8, 5 + (v & (v >> 4))).into(); Self::generate(len) } @@ -75,12 +77,6 @@ impl From> for ConnectionId { } } -impl From> for ConnectionId { - fn from(cid: Vec) -> Self { - Self::from(SmallVec::from(cid)) - } -} - impl + ?Sized> From<&T> for ConnectionId { fn from(buf: &T) -> Self { Self::from(SmallVec::from(buf.as_ref())) @@ -222,7 +218,9 @@ impl ConnectionIdDecoder for RandomConnectionIdGenerator { impl ConnectionIdGenerator for RandomConnectionIdGenerator { fn generate_cid(&mut self) -> Option { - Some(ConnectionId::from(&random(self.len))) + let mut buf = smallvec![0; self.len]; + randomize(&mut buf); + Some(ConnectionId::from(buf)) } fn as_decoder(&self) -> &dyn ConnectionIdDecoder { @@ -250,8 +248,8 @@ pub struct ConnectionIdEntry { impl ConnectionIdEntry<[u8; 16]> { /// Create a random stateless reset token so that it is hard to guess the correct /// value and reset the connection. - fn random_srt() -> [u8; 16] { - <[u8; 16]>::try_from(&random(16)[..]).unwrap() + pub fn random_srt() -> [u8; 16] { + random::<16>() } /// Create the first entry, which won't have a stateless reset token. @@ -476,7 +474,7 @@ impl ConnectionIdManager { .add_local(ConnectionIdEntry::new(self.next_seqno, cid.clone(), ())); self.next_seqno += 1; - let srt = <[u8; 16]>::try_from(&random(16)[..]).unwrap(); + let srt = ConnectionIdEntry::random_srt(); Ok((cid, srt)) } else { Err(Error::ConnectionIdsExhausted) @@ -565,7 +563,7 @@ impl ConnectionIdManager { if let Some(cid) = maybe_cid { assert_ne!(cid.len(), 0); // TODO: generate the stateless reset tokens from the connection ID and a key. 
- let srt = <[u8; 16]>::try_from(&random(16)[..]).unwrap(); + let srt = ConnectionIdEntry::random_srt(); let seqno = self.next_seqno; self.next_seqno += 1; diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index d33f77ed6b..0b33bd15ec 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -23,7 +23,7 @@ use neqo_common::{ qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, }; use neqo_crypto::{ - agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, + agent::CertificateInfo, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, Server, ZeroRttChecker, }; @@ -2405,7 +2405,7 @@ impl Connection { } else { // The other side didn't provide a stateless reset token. // That's OK, they can try guessing this. - <[u8; 16]>::try_from(&random(16)[..]).unwrap() + ConnectionIdEntry::random_srt() }; self.paths .primary() diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index d958ecd70c..5470c18000 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -79,7 +79,7 @@ impl ConnectionIdDecoder for CountingConnectionIdGenerator { impl ConnectionIdGenerator for CountingConnectionIdGenerator { fn generate_cid(&mut self) -> Option { - let mut r = random(20); + let mut r = random::<20>(); r[0] = 8; r[1] = u8::try_from(self.counter >> 24).unwrap(); r[2] = u8::try_from((self.counter >> 16) & 0xff).unwrap(); diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index ccfd212d5f..7e19beba5f 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -271,7 +271,7 @@ impl PacketBuilder { let mask = if quic_bit { PACKET_BIT_FIXED_QUIC } else { 0 } | if self.is_long() { 0 } else { PACKET_BIT_SPIN }; let 
first = self.header.start; - self.encoder.as_mut()[first] ^= random(1)[0] & mask; + self.encoder.as_mut()[first] ^= random::<1>()[0] & mask; } /// For an Initial packet, encode the token. @@ -424,7 +424,7 @@ impl PacketBuilder { PACKET_BIT_LONG | PACKET_BIT_FIXED_QUIC | (PacketType::Retry.to_byte(version) << 4) - | (random(1)[0] & 0xf), + | (random::<1>()[0] & 0xf), ); encoder.encode_uint(4, version.wire_version()); encoder.encode_vec(1, dcid); @@ -448,7 +448,7 @@ impl PacketBuilder { versions: &[Version], ) -> Vec { let mut encoder = Encoder::default(); - let mut grease = random(4); + let mut grease = random::<4>(); // This will not include the "QUIC bit" sometimes. Intentionally. encoder.encode_byte(PACKET_BIT_LONG | (grease[3] & 0x7f)); encoder.encode(&[0; 4]); // Zero version == VN. diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index d6920c8d94..4246e6ed1c 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -796,7 +796,7 @@ impl Path { // Send PATH_CHALLENGE. if let ProbeState::ProbeNeeded { probe_count } = self.state { qtrace!([self], "Initiating path challenge {}", probe_count); - let data = <[u8; 8]>::try_from(&random(8)[..]).unwrap(); + let data = random::<8>(); builder.encode_varint(FRAME_TYPE_PATH_CHALLENGE); builder.encode(&data); diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 1297829094..638e3adf89 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -438,7 +438,7 @@ impl TransportParameters { /// Set version information. 
pub fn set_versions(&mut self, role: Role, versions: &VersionConfig) { - let rbuf = random(4); + let rbuf = random::<4>(); let mut other = Vec::with_capacity(versions.all().len() + 1); let mut dec = Decoder::new(&rbuf); let grease = (dec.decode_uint(4).unwrap() as u32) & 0xf0f0_f0f0 | 0x0a0a_0a0a; diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index daef4720df..923aa76c15 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -131,7 +131,7 @@ impl ConnectionIdDecoder for CountingConnectionIdGenerator { impl ConnectionIdGenerator for CountingConnectionIdGenerator { fn generate_cid(&mut self) -> Option { - let mut r = random(20); + let mut r = random::<20>(); // Randomize length, but ensure that the connection ID is long // enough to pass for an original destination connection ID. r[0] = max(8, 5 + ((r[0] >> 4) & r[0])); diff --git a/test-fixture/src/sim/mod.rs b/test-fixture/src/sim/mod.rs index f4b7a52739..cbea621f1b 100644 --- a/test-fixture/src/sim/mod.rs +++ b/test-fixture/src/sim/mod.rs @@ -158,7 +158,7 @@ impl Simulator { } pub fn seed(&mut self, seed: [u8; 32]) { - self.rng = Rc::new(RefCell::new(Random::new(seed))); + self.rng = Rc::new(RefCell::new(Random::new(&seed))); } /// Seed from a hex string. diff --git a/test-fixture/src/sim/rng.rs b/test-fixture/src/sim/rng.rs index 094c5fd791..913d7eae7a 100644 --- a/test-fixture/src/sim/rng.rs +++ b/test-fixture/src/sim/rng.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{convert::TryFrom, ops::Range}; +use std::ops::Range; use neqo_common::Decoder; @@ -16,7 +16,7 @@ pub struct Random { impl Random { #[must_use] #[allow(clippy::missing_panics_doc)] // These are impossible. 
- pub fn new(seed: [u8; 32]) -> Self { + pub fn new(seed: &[u8; 32]) -> Self { assert!(seed.iter().any(|&x| x != 0)); let mut dec = Decoder::from(&seed); Self { @@ -78,7 +78,6 @@ impl Random { impl Default for Random { fn default() -> Self { - let buf = neqo_crypto::random(32); - Random::new(<[u8; 32]>::try_from(&buf[..]).unwrap()) + Random::new(&neqo_crypto::random::<32>()) } } From 2f039e34225e0353600dbfca810cc7d47df2fc4d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 10:31:55 +0200 Subject: [PATCH 135/321] ci: Add benchmark run (#1641) * ci: Add benchmark run * Only run when CI succeeded * Generate debug info for bench builds * Try and fix the condition --- .github/workflows/bench.yml | 43 +++++++++++++++++++++++++++++++++++++ Cargo.toml | 5 +++++ 2 files changed, 48 insertions(+) create mode 100644 .github/workflows/bench.yml diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml new file mode 100644 index 0000000000..a1a8020699 --- /dev/null +++ b/.github/workflows/bench.yml @@ -0,0 +1,43 @@ +name: Bench +on: + workflow_run: + workflows: ["CI"] + types: [completed] +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + +jobs: + bench: + name: Benchmark + runs-on: self-hosted + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + # components: rustfmt, clippy, llvm-tools-preview + + - name: Use sccache + uses: mozilla-actions/sccache-action@v0.0.4 + + - name: Enable sscache + run: | + echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + + - name: Install dependencies (Linux) + env: + DEBIAN_FRONTEND: noninteractive + run: | + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + + - name: Checkout + uses: actions/checkout@v4 + + - name: Build + run: | + cargo bench --features ci,bench + diff 
--git a/Cargo.toml b/Cargo.toml index eb6a0b7207..3da82dab90 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,3 +21,8 @@ license = "MIT OR Apache-2.0" # Don't increase beyond what Firefox is currently using: # https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule rust-version = "1.74.0" + +[profile.bench] +# Inherits from the "release" profile, so just provide overrides here: +# https://doc.rust-lang.org/cargo/reference/profiles.html#release +debug = true From 526460761b7241f25147e4f415beaf34962b75d1 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 11:33:31 +0200 Subject: [PATCH 136/321] ci: Set up bencher (#1642) * ci: Set up bencher * Check out correct branch * Use benchmark-action/github-action-benchmark * Disable benchmark-action/github-action-benchmark until it's on the allowlist * Enable? * Fix? --- .github/workflows/bench.yml | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index a1a8020699..f1f059d5dc 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,11 +1,12 @@ name: Bench on: workflow_run: - workflows: ["CI"] + workflows: [CI] types: [completed] env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 + TOOLCHAIN: nightly jobs: bench: @@ -16,8 +17,7 @@ jobs: - name: Install Rust uses: dtolnay/rust-toolchain@master with: - toolchain: stable - # components: rustfmt, clippy, llvm-tools-preview + toolchain: $TOOLCHAIN - name: Use sccache uses: mozilla-actions/sccache-action@v0.0.4 @@ -27,17 +27,35 @@ jobs: echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" - - name: Install dependencies (Linux) + - name: Install dependencies env: DEBIAN_FRONTEND: noninteractive run: | - sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld 
python-is-python3 echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - name: Checkout uses: actions/checkout@v4 + with: + ref: ${{ github.event.workflow_run.head_branch }} - - name: Build - run: | - cargo bench --features ci,bench + - name: Benchmark + run: cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + + - name: Download previous benchmark results + uses: actions/cache@v4 + with: + path: ./cache + key: ${{ runner.os }}-benchmark + + # - name: Store current benchmark results + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # tool: 'cargo' + # output-file-path: output.txt + # external-data-json-path: ./cache/benchmark-data.json + # fail-on-alert: true + # github-token: ${{ secrets.GITHUB_TOKEN }} + # comment-on-alert: true + # summary-always: true From 6fc9606a013f69f763993733a4f01a69dd4048f8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 11:47:47 +0200 Subject: [PATCH 137/321] Fix chaining --- .github/workflows/check.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 639f67f69e..48df415cec 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -158,9 +158,8 @@ jobs: if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy --all-targets -- -D warnings + run: cargo +${{ matrix.rust-toolchain }} clippy --all-targets -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} if: success() || failure() - continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} - name: Check rustdoc links run: cargo +${{ matrix.rust-toolchain }} doc --workspace --no-deps --document-private-items From 52d6f35cc38ab1136b1c14148249446e9d138ca0 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 11:52:51 +0200 Subject: [PATCH 138/321] Add zlib --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml 
b/.github/workflows/bench.yml index f1f059d5dc..30fd72e959 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -31,7 +31,7 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run: | - sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld python-is-python3 + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld python-is-python3 zlib1g-dev echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - name: Checkout From 93d39297f74533f2edcddbcc7fa9c922a83e4b59 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 12:18:59 +0200 Subject: [PATCH 139/321] Configure and unconfigure bencher OS --- .github/workflows/bench.yml | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 30fd72e959..eb79fed1e0 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -13,6 +13,10 @@ jobs: name: Benchmark runs-on: self-hosted if: ${{ github.event.workflow_run.conclusion == 'success' }} + defaults: + run: + shell: bash + steps: - name: Install Rust uses: dtolnay/rust-toolchain@master @@ -31,7 +35,9 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run: | - sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld python-is-python3 zlib1g-dev + sudo apt-get install -y --no-install-recommends \ + git gyp mercurial ninja-build lld python-is-python3 zlib1g-dev \ + linux-tools-common linux-tools-generic linux-tools-$(uname -r) echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - name: Checkout @@ -39,8 +45,26 @@ jobs: with: ref: ${{ github.event.workflow_run.head_branch }} + - name: Prepare machine + run: | + # Disable turboboost + echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo + # Disable hyperthreading + echo off > /sys/devices/system/cpu/smt/control + # Use performance governor + cpupower frequency-set -g performance + - name: Benchmark - run: cargo 
+$TOOLCHAIN bench --features ci,bench | tee output.txt + run: nice -n -10 taskset -c 0 cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + + - name: Restore machine + run: | + # Enable turboboost + echo 0 > /sys/devices/system/cpu/intel_pstate/no_turbo + # Enable hyperthreading + echo on > /sys/devices/system/cpu/smt/control + # Use powersave governor + cpupower frequency-set -g powersave - name: Download previous benchmark results uses: actions/cache@v4 From 5c0afc97b9d7fd4444d3bdaa53770703246eb722 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 12:41:18 +0200 Subject: [PATCH 140/321] Use sudo --- .github/workflows/bench.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index eb79fed1e0..15c9696826 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -48,23 +48,23 @@ jobs: - name: Prepare machine run: | # Disable turboboost - echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo + echo 1 > sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo # Disable hyperthreading - echo off > /sys/devices/system/cpu/smt/control + echo off > sudo tee /sys/devices/system/cpu/smt/control # Use performance governor - cpupower frequency-set -g performance + sudo cpupower frequency-set -g performance - name: Benchmark - run: nice -n -10 taskset -c 0 cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + run: sudo nice -n -10 taskset -c 0 cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt - name: Restore machine run: | # Enable turboboost - echo 0 > /sys/devices/system/cpu/intel_pstate/no_turbo + echo 0 > sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo # Enable hyperthreading - echo on > /sys/devices/system/cpu/smt/control + echo on > sudo tee /sys/devices/system/cpu/smt/control # Use powersave governor - cpupower frequency-set -g powersave + sudo cpupower frequency-set -g powersave - name: Download previous 
benchmark results uses: actions/cache@v4 From 2201a7fc2c64729f3a9894d7fc792164ba14bb97 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 9 Feb 2024 21:42:06 +1100 Subject: [PATCH 141/321] Remove some TimeTravel error cases (#1636) * Remove some TimeTravel error cases This might make our code more robust. * Remove invalid test --------- Co-authored-by: Lars Eggert --- neqo-crypto/src/time.rs | 53 +++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index 205f32bbf0..2159dd7804 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -106,14 +106,17 @@ impl TryFrom for Time { type Error = Error; fn try_from(prtime: PRTime) -> Res { let base = get_base(); - if let Some(delta) = prtime.checked_sub(base.prtime) { - let d = Duration::from_micros(delta.try_into()?); - base.instant - .checked_add(d) - .map_or(Err(Error::TimeTravelError), |t| Ok(Self { t })) + let delta = prtime + .checked_sub(base.prtime) + .ok_or(Error::TimeTravelError)?; + let d = Duration::from_micros(u64::try_from(delta.abs())?); + let t = if delta >= 0 { + base.instant.checked_add(d) } else { - Err(Error::TimeTravelError) - } + base.instant.checked_sub(d) + }; + let t = t.ok_or(Error::TimeTravelError)?; + Ok(Self { t }) } } @@ -121,14 +124,21 @@ impl TryInto for Time { type Error = Error; fn try_into(self) -> Res { let base = get_base(); - let delta = self - .t - .checked_duration_since(base.instant) - .ok_or(Error::TimeTravelError)?; - if let Ok(d) = PRTime::try_from(delta.as_micros()) { - d.checked_add(base.prtime).ok_or(Error::TimeTravelError) + + if let Some(delta) = self.t.checked_duration_since(base.instant) { + if let Ok(d) = PRTime::try_from(delta.as_micros()) { + d.checked_add(base.prtime).ok_or(Error::TimeTravelError) + } else { + Err(Error::TimeTravelError) + } } else { - Err(Error::TimeTravelError) + // Try to go backwards from the base time. 
+ let backwards = base.instant - self.t; // infallible + if let Ok(d) = PRTime::try_from(backwards.as_micros()) { + base.prtime.checked_sub(d).ok_or(Error::TimeTravelError) + } else { + Err(Error::TimeTravelError) + } } } } @@ -226,16 +236,23 @@ mod test { } #[test] - fn past_time() { + fn past_prtime() { + const DELTA: Duration = Duration::from_secs(1); init(); let base = get_base(); - assert!(Time::try_from(base.prtime - 1).is_err()); + let delta_micros = PRTime::try_from(DELTA.as_micros()).unwrap(); + println!("{} - {}", base.prtime, delta_micros); + let t = Time::try_from(base.prtime - delta_micros).unwrap(); + assert_eq!(Instant::from(t) + DELTA, base.instant); } #[test] - fn negative_time() { + fn past_instant() { + const DELTA: Duration = Duration::from_secs(1); init(); - assert!(Time::try_from(-1).is_err()); + let base = get_base(); + let t = Time::from(base.instant.checked_sub(DELTA).unwrap()); + assert_eq!(Instant::from(t) + DELTA, base.instant); } #[test] From c7590f15f21bc0eda38d74463e3c3d7a58d27afe Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 13:11:27 +0200 Subject: [PATCH 142/321] Preserve user env (PATH) when calling sudo --- .github/workflows/bench.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 15c9696826..2a38bdb69b 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -45,25 +45,24 @@ jobs: with: ref: ${{ github.event.workflow_run.head_branch }} + # Disable turboboost, hyperthreading and use performance governor - name: Prepare machine run: | - # Disable turboboost echo 1 > sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo - # Disable hyperthreading echo off > sudo tee /sys/devices/system/cpu/smt/control - # Use performance governor sudo cpupower frequency-set -g performance + # Pin the benchmark to core 0 and run it at elevated priority. 
- name: Benchmark - run: sudo nice -n -10 taskset -c 0 cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + run: | + sudo -E nice -n -20 taskset -c 0 \ + cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine run: | - # Enable turboboost echo 0 > sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo - # Enable hyperthreading echo on > sudo tee /sys/devices/system/cpu/smt/control - # Use powersave governor sudo cpupower frequency-set -g powersave - name: Download previous benchmark results From 0c4983fef02813f0eb3f4cd30b7539212e43aac9 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 13:32:56 +0200 Subject: [PATCH 143/321] Always unconfigure the bencher. Also set PATH. --- .github/workflows/bench.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 2a38bdb69b..b6201558ac 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -25,9 +25,7 @@ jobs: - name: Use sccache uses: mozilla-actions/sccache-action@v0.0.4 - - - name: Enable sscache - run: | + - run: | echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" @@ -39,11 +37,10 @@ jobs: git gyp mercurial ninja-build lld python-is-python3 zlib1g-dev \ linux-tools-common linux-tools-generic linux-tools-$(uname -r) echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + echo "$HOME/.cargo/bin" >> $GITHUB_PATH - name: Checkout uses: actions/checkout@v4 - with: - ref: ${{ github.event.workflow_run.head_branch }} # Disable turboboost, hyperthreading and use performance governor - name: Prepare machine @@ -64,6 +61,7 @@ jobs: echo 0 > sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo echo on > sudo tee /sys/devices/system/cpu/smt/control sudo cpupower frequency-set -g powersave + if: success() || failure() - name: Download previous benchmark 
results uses: actions/cache@v4 From 0e6ba28e5946b1a96923acca7221d3f95e0e3f12 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 13:56:37 +0200 Subject: [PATCH 144/321] Debug env --- .github/workflows/bench.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index b6201558ac..6dd949cc1d 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -37,6 +37,7 @@ jobs: git gyp mercurial ninja-build lld python-is-python3 zlib1g-dev \ linux-tools-common linux-tools-generic linux-tools-$(uname -r) echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + echo "$HOME/.cargo/bin" echo "$HOME/.cargo/bin" >> $GITHUB_PATH - name: Checkout @@ -52,7 +53,9 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. - name: Benchmark run: | - sudo -E nice -n -20 taskset -c 0 \ + #sudo -E nice -n -20 + env + taskset -c 0 \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt # Enable turboboost, hyperthreading and use powersave governor From f55a0bef91f07e93c7b3a8556e4958b2a1ab1956 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 16:26:48 +0200 Subject: [PATCH 145/321] Tweak the run --- .github/workflows/bench.yml | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 6dd949cc1d..40f6f45885 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,8 +1,15 @@ name: Bench on: - workflow_run: - workflows: [CI] - types: [completed] + push: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] +# on: +# workflow_run: +# workflows: [CI] +# types: [completed] env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 @@ -12,7 +19,7 @@ jobs: bench: name: Benchmark runs-on: self-hosted - if: ${{ 
github.event.workflow_run.conclusion == 'success' }} + # if: ${{ github.event.workflow_run.conclusion == 'success' }} defaults: run: shell: bash @@ -34,7 +41,7 @@ jobs: DEBIAN_FRONTEND: noninteractive run: | sudo apt-get install -y --no-install-recommends \ - git gyp mercurial ninja-build lld python-is-python3 zlib1g-dev \ + git gyp mercurial ninja-build lld python-is-python3 zlib1g-dev libclang-dev \ linux-tools-common linux-tools-generic linux-tools-$(uname -r) echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" echo "$HOME/.cargo/bin" @@ -53,9 +60,8 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. - name: Benchmark run: | - #sudo -E nice -n -20 - env - taskset -c 0 \ + cargo +$TOOLCHAIN bench --features ci,bench --no-run + sudo nice -n -20 sudo -u "$USER" taskset -c 0 \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt # Enable turboboost, hyperthreading and use powersave governor From f807455997542386308e817f1fd1359940ff48af Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 16:31:31 +0200 Subject: [PATCH 146/321] Debug build error --- .github/workflows/bench.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 40f6f45885..5e03738967 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -11,6 +11,7 @@ on: # workflows: [CI] # types: [completed] env: + CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 TOOLCHAIN: nightly @@ -29,6 +30,7 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: $TOOLCHAIN + components: rustfmt - name: Use sccache uses: mozilla-actions/sccache-action@v0.0.4 @@ -47,8 +49,9 @@ jobs: echo "$HOME/.cargo/bin" echo "$HOME/.cargo/bin" >> $GITHUB_PATH - - name: Checkout + - name: Checkout and build uses: actions/checkout@v4 + - run: cargo +$TOOLCHAIN bench --features ci,bench --no-run # Disable turboboost, hyperthreading and use 
performance governor - name: Prepare machine @@ -60,7 +63,6 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. - name: Benchmark run: | - cargo +$TOOLCHAIN bench --features ci,bench --no-run sudo nice -n -20 sudo -u "$USER" taskset -c 0 \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt From 25c3f2f616500d00e17ddc7e83cbe66870b90269 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 16:50:15 +0200 Subject: [PATCH 147/321] Fix sudo tee sequence --- .github/workflows/bench.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 5e03738967..6677a09a02 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -56,8 +56,8 @@ jobs: # Disable turboboost, hyperthreading and use performance governor - name: Prepare machine run: | - echo 1 > sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo - echo off > sudo tee /sys/devices/system/cpu/smt/control + echo 1 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo + echo off | sudo tee /sys/devices/system/cpu/smt/control sudo cpupower frequency-set -g performance # Pin the benchmark to core 0 and run it at elevated priority. 
@@ -69,8 +69,8 @@ jobs: # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine run: | - echo 0 > sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo - echo on > sudo tee /sys/devices/system/cpu/smt/control + echo 0 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo + echo on | sudo tee /sys/devices/system/cpu/smt/control sudo cpupower frequency-set -g powersave if: success() || failure() From 1db51349c5842a931b2a642d072a4ff1b044fc9f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 16:54:52 +0200 Subject: [PATCH 148/321] sudo -i --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 6677a09a02..15a93a2f88 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -63,7 +63,7 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. - name: Benchmark run: | - sudo nice -n -20 sudo -u "$USER" taskset -c 0 \ + sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt # Enable turboboost, hyperthreading and use powersave governor From 377d49a4feead34597fda6d796605d9cf701b97d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 16:59:03 +0200 Subject: [PATCH 149/321] sudo -s --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 15a93a2f88..ea89d9d117 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -63,7 +63,7 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. 
- name: Benchmark run: | - sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" \ + sudo nice -n -20 taskset -c 0 sudo -s -u "$USER" \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt # Enable turboboost, hyperthreading and use powersave governor From f3738e177fe583ac5adf17d8ad1017897f7e7857 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 17:04:17 +0200 Subject: [PATCH 150/321] Run in workspace --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index ea89d9d117..69f69c6601 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -63,7 +63,7 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. - name: Benchmark run: | - sudo nice -n -20 taskset -c 0 sudo -s -u "$USER" \ + sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" -D ${{ github.workspace }} \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt # Enable turboboost, hyperthreading and use powersave governor From 41ff6d274ccf3db72febd47708051b8e1f44f3e0 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 17:08:31 +0200 Subject: [PATCH 151/321] pwd --- .github/workflows/bench.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 69f69c6601..aeb53a060d 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -63,8 +63,10 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. 
- name: Benchmark run: | - sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" -D ${{ github.workspace }} \ + sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" -D "$PWD" \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + env: + PWD: $(pwd) # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine From be988e9a77b5699cf258afd167bd94db6ada349d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 17:12:08 +0200 Subject: [PATCH 152/321] pwd, v2 --- .github/workflows/bench.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index aeb53a060d..16e5de5a7d 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -63,10 +63,9 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. - name: Benchmark run: | + export PWD=$(pwd) sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" -D "$PWD" \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt - env: - PWD: $(pwd) # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine From 52b557e28e4df47cf9451e8c2e3cd6e89b9e6fed Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 17:24:18 +0200 Subject: [PATCH 153/321] complexify --- .github/workflows/bench.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 16e5de5a7d..748be47637 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -63,9 +63,8 @@ jobs: # Pin the benchmark to core 0 and run it at elevated priority. 
- name: Benchmark run: | - export PWD=$(pwd) - sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" -D "$PWD" \ - cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" sh -c \ + "cd $(pwd) && cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt" # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine From 258cb7c1c9765cb3d3e4a637fd517e7ab5db40db Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 17:29:44 +0200 Subject: [PATCH 154/321] Give up on nice -20 for now --- .github/workflows/bench.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 748be47637..fa607c5445 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -49,9 +49,11 @@ jobs: echo "$HOME/.cargo/bin" echo "$HOME/.cargo/bin" >> $GITHUB_PATH - - name: Checkout and build + - name: Checkout uses: actions/checkout@v4 - - run: cargo +$TOOLCHAIN bench --features ci,bench --no-run + + - name: Build + run: cargo +$TOOLCHAIN bench --features ci,bench --no-run # Disable turboboost, hyperthreading and use performance governor - name: Prepare machine @@ -61,10 +63,11 @@ jobs: sudo cpupower frequency-set -g performance # Pin the benchmark to core 0 and run it at elevated priority. + # TODO: Figure out a way to run this with nice -20. 
- name: Benchmark run: | - sudo nice -n -20 taskset -c 0 sudo -i -u "$USER" sh -c \ - "cd $(pwd) && cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt" + taskset -c 0 \ + cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine From 85948cfa9a8f221da92ac4796e3984619a8fa924 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 9 Feb 2024 17:52:12 +0200 Subject: [PATCH 155/321] Chain to run after CI --- .github/workflows/bench.yml | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index fa607c5445..004d853aff 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,15 +1,8 @@ name: Bench on: - push: - branches: ["main"] - paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] - pull_request: - branches: ["main"] - paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] -# on: -# workflow_run: -# workflows: [CI] -# types: [completed] + workflow_run: + workflows: [CI] + types: [completed] env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_TERM_COLOR: always @@ -32,9 +25,11 @@ jobs: toolchain: $TOOLCHAIN components: rustfmt - - name: Use sccache + - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.4 - - run: | + + - name: Enable sccache + run: | echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" From 045e7fb5c06111cd519e6ca599061935dc85fce9 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 12 Feb 2024 09:15:59 +0200 Subject: [PATCH 156/321] Take away ability to use sudo --- .github/workflows/bench.yml | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 004d853aff..083b6e9ef5 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -25,6 +25,9 @@ jobs: toolchain: 
$TOOLCHAIN components: rustfmt + - name: Configure Rust + run: echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.4 @@ -33,17 +36,6 @@ jobs: echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" - - name: Install dependencies - env: - DEBIAN_FRONTEND: noninteractive - run: | - sudo apt-get install -y --no-install-recommends \ - git gyp mercurial ninja-build lld python-is-python3 zlib1g-dev libclang-dev \ - linux-tools-common linux-tools-generic linux-tools-$(uname -r) - echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - echo "$HOME/.cargo/bin" - echo "$HOME/.cargo/bin" >> $GITHUB_PATH - - name: Checkout uses: actions/checkout@v4 @@ -52,10 +44,7 @@ jobs: # Disable turboboost, hyperthreading and use performance governor - name: Prepare machine - run: | - echo 1 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo - echo off | sudo tee /sys/devices/system/cpu/smt/control - sudo cpupower frequency-set -g performance + run: /home/lars/bin/prep.sh # Pin the benchmark to core 0 and run it at elevated priority. # TODO: Figure out a way to run this with nice -20. 
@@ -66,10 +55,7 @@ jobs: # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine - run: | - echo 0 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo - echo on | sudo tee /sys/devices/system/cpu/smt/control - sudo cpupower frequency-set -g powersave + run: /home/lars/bin/unprep.sh if: success() || failure() - name: Download previous benchmark results From 4e5cf36b68f60c69b4219e63111783d2623c736f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 12 Feb 2024 09:19:31 +0200 Subject: [PATCH 157/321] Runs this directly while debugging --- .github/workflows/bench.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 083b6e9ef5..bd97bc568c 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,8 +1,14 @@ name: Bench on: - workflow_run: - workflows: [CI] - types: [completed] + push: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + # workflow_run: + # workflows: [CI] + # types: [completed] env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_TERM_COLOR: always From f7ba952fc9a728a368464c97be5b438ab984b91a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 12 Feb 2024 09:31:45 +0200 Subject: [PATCH 158/321] Change sudo scripts --- .github/workflows/bench.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index bd97bc568c..6b77d024a2 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -50,7 +50,7 @@ jobs: # Disable turboboost, hyperthreading and use performance governor - name: Prepare machine - run: /home/lars/bin/prep.sh + run: sudo /root/bin/prep.sh # Pin the benchmark to core 0 and run it at elevated priority. # TODO: Figure out a way to run this with nice -20. 
@@ -61,7 +61,7 @@ jobs: # Enable turboboost, hyperthreading and use powersave governor - name: Restore machine - run: /home/lars/bin/unprep.sh + run: sudo /root/bin/unprep.sh if: success() || failure() - name: Download previous benchmark results From 67223360d18000b44f0358cfe746356e89601089 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 12 Feb 2024 09:44:18 +0200 Subject: [PATCH 159/321] Run benchmark at elevated priority --- .github/workflows/bench.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 6b77d024a2..795f7757b3 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -53,10 +53,9 @@ jobs: run: sudo /root/bin/prep.sh # Pin the benchmark to core 0 and run it at elevated priority. - # TODO: Figure out a way to run this with nice -20. - name: Benchmark run: | - taskset -c 0 \ + nice -n -20 taskset -c 0 \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt # Enable turboboost, hyperthreading and use powersave governor From 5b51402d7e1b6aafc2f65cefbf525497ad4bab63 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 12 Feb 2024 09:52:31 +0200 Subject: [PATCH 160/321] Only run benches on successful PRs --- .github/workflows/bench.yml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 795f7757b3..e36eac0020 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,14 +1,8 @@ name: Bench on: - push: - branches: ["main"] - paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] - pull_request: - branches: ["main"] - paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] - # workflow_run: - # workflows: [CI] - # types: [completed] + workflow_run: + workflows: [CI] + types: [completed] env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_TERM_COLOR: always @@ -19,7 +13,7 @@ jobs: bench: name: Benchmark runs-on: self-hosted 
- # if: ${{ github.event.workflow_run.conclusion == 'success' }} + if: ${{ github.event.workflow_run.conclusion == 'success' }} defaults: run: shell: bash @@ -48,7 +42,7 @@ jobs: - name: Build run: cargo +$TOOLCHAIN bench --features ci,bench --no-run - # Disable turboboost, hyperthreading and use performance governor + # Disable turboboost, hyperthreading and use performance governor. - name: Prepare machine run: sudo /root/bin/prep.sh @@ -58,7 +52,7 @@ jobs: nice -n -20 taskset -c 0 \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt - # Enable turboboost, hyperthreading and use powersave governor + # Enable turboboost, hyperthreading and use powersave governor. - name: Restore machine run: sudo /root/bin/unprep.sh if: success() || failure() From f38f7c44afe3ff0fbdd0ccb0de529d13c0b31f98 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 12 Feb 2024 20:12:09 +1100 Subject: [PATCH 161/321] Add a wrapper for slice::from_raw_parts (#1644) * Add a wrapper for slice::from_raw_parts And add a lint to prevent the method from being invoked directly. The documentation on the wrapper explains everything. 
* Add a newline to the file * Cleanup --------- Co-authored-by: Lars Eggert --- clippy.toml | 3 +++ neqo-crypto/src/agent.rs | 9 +++------ neqo-crypto/src/agentio.rs | 7 ++++--- neqo-crypto/src/cert.rs | 11 +++++------ neqo-crypto/src/ech.rs | 7 +++---- neqo-crypto/src/ext.rs | 3 ++- neqo-crypto/src/lib.rs | 25 +++++++++++++++++++++++++ neqo-crypto/src/p11.rs | 13 +++++++------ 8 files changed, 52 insertions(+), 26 deletions(-) create mode 100644 clippy.toml diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000000..e928b4be64 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,3 @@ +disallowed-methods = [ + { path = "std::slice::from_raw_parts", reason = "see null_safe_slice" } +] diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index cd0bb4cb12..85fc496841 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -33,6 +33,7 @@ use crate::{ ech, err::{is_blocked, secstatus_to_res, Error, PRErrorCode, Res}, ext::{ExtensionHandler, ExtensionTracker}, + null_safe_slice, p11::{self, PrivateKey, PublicKey}, prio, replay::AntiReplay, @@ -897,7 +898,7 @@ impl Client { let resumption = arg.cast::>().as_mut().unwrap(); let len = usize::try_from(len).unwrap(); let mut v = Vec::with_capacity(len); - v.extend_from_slice(std::slice::from_raw_parts(token, len)); + v.extend_from_slice(null_safe_slice(token, len)); qinfo!( [format!("{fd:p}")], "Got resumption token {}", @@ -1105,11 +1106,7 @@ impl Server { } let check_state = arg.cast::().as_mut().unwrap(); - let token = if client_token.is_null() { - &[] - } else { - std::slice::from_raw_parts(client_token, usize::try_from(client_token_len).unwrap()) - }; + let token = null_safe_slice(client_token, usize::try_from(client_token_len).unwrap()); match check_state.checker.check(token) { ZeroRttCheckResult::Accept => ssl::SSLHelloRetryRequestAction::ssl_hello_retry_accept, ZeroRttCheckResult::Fail => ssl::SSLHelloRetryRequestAction::ssl_hello_retry_fail, diff --git a/neqo-crypto/src/agentio.rs 
b/neqo-crypto/src/agentio.rs index 2bcc540530..1b0cf11ba7 100644 --- a/neqo-crypto/src/agentio.rs +++ b/neqo-crypto/src/agentio.rs @@ -20,7 +20,7 @@ use neqo_common::{hex, hex_with_len, qtrace}; use crate::{ constants::{ContentType, Epoch}, err::{nspr, Error, PR_SetError, Res}, - prio, ssl, + null_safe_slice, prio, ssl, }; // Alias common types. @@ -100,7 +100,7 @@ impl RecordList { ) -> ssl::SECStatus { let records = arg.cast::().as_mut().unwrap(); - let slice = std::slice::from_raw_parts(data, len as usize); + let slice = null_safe_slice(data, len); records.append(epoch, ContentType::try_from(ct).unwrap(), slice); ssl::SECSuccess } @@ -178,6 +178,7 @@ impl AgentIoInput { return Err(Error::NoDataAvailable); } + #[allow(clippy::disallowed_methods)] // We just checked if this was empty. let src = unsafe { std::slice::from_raw_parts(self.input, amount) }; qtrace!([self], "read {}", hex(src)); let dst = unsafe { std::slice::from_raw_parts_mut(buf, amount) }; @@ -232,7 +233,7 @@ impl AgentIo { // Stage output from TLS into the output buffer. 
fn save_output(&mut self, buf: *const u8, count: usize) { - let slice = unsafe { std::slice::from_raw_parts(buf, count) }; + let slice = unsafe { null_safe_slice(buf, count) }; qtrace!([self], "save output {}", hex(slice)); self.output.extend_from_slice(slice); } diff --git a/neqo-crypto/src/cert.rs b/neqo-crypto/src/cert.rs index 64e63ec71a..2c16380ee0 100644 --- a/neqo-crypto/src/cert.rs +++ b/neqo-crypto/src/cert.rs @@ -7,13 +7,13 @@ use std::{ convert::TryFrom, ptr::{addr_of, NonNull}, - slice, }; use neqo_common::qerror; use crate::{ err::secstatus_to_res, + null_safe_slice, p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray}, ssl::{ PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps, @@ -52,7 +52,7 @@ fn stapled_ocsp_responses(fd: *mut PRFileDesc) -> Option>> { }; for idx in 0..len { let itemp: *const SECItem = unsafe { ocsp_ptr.as_ref().items.offset(idx).cast() }; - let item = unsafe { slice::from_raw_parts((*itemp).data, (*itemp).len as usize) }; + let item = unsafe { null_safe_slice((*itemp).data, (*itemp).len) }; ocsp_helper.push(item.to_owned()); } Some(ocsp_helper) @@ -68,9 +68,8 @@ fn signed_cert_timestamp(fd: *mut PRFileDesc) -> Option> { if unsafe { sct_ptr.as_ref().len == 0 || sct_ptr.as_ref().data.is_null() } { Some(Vec::new()) } else { - let sct_slice = unsafe { - slice::from_raw_parts(sct_ptr.as_ref().data, sct_ptr.as_ref().len as usize) - }; + let sct_slice = + unsafe { null_safe_slice(sct_ptr.as_ref().data, sct_ptr.as_ref().len) }; Some(sct_slice.to_owned()) } } @@ -105,7 +104,7 @@ impl<'a> Iterator for &'a mut CertificateInfo { let cert = unsafe { *self.cursor }.cert; secstatus_to_res(unsafe { CERT_GetCertificateDer(cert, &mut item) }) .expect("getting DER from certificate should work"); - Some(unsafe { std::slice::from_raw_parts(item.data, item.len as usize) }) + Some(unsafe { null_safe_slice(item.data, item.len) }) } } diff --git a/neqo-crypto/src/ech.rs b/neqo-crypto/src/ech.rs index 
109d745520..6f9a3ba4ce 100644 --- a/neqo-crypto/src/ech.rs +++ b/neqo-crypto/src/ech.rs @@ -15,7 +15,7 @@ use neqo_common::qtrace; use crate::{ err::{ssl::SSL_ERROR_ECH_RETRY_WITH_ECH, Error, Res}, - experimental_api, + experimental_api, null_safe_slice, p11::{ self, Item, PrivateKey, PublicKey, SECITEM_FreeItem, SECItem, SECKEYPrivateKey, SECKEYPublicKey, Slot, @@ -76,7 +76,7 @@ pub fn convert_ech_error(fd: *mut PRFileDesc, err: Error) -> Error { return Error::InternalError; } let buf = unsafe { - let slc = std::slice::from_raw_parts(item.data, usize::try_from(item.len).unwrap()); + let slc = null_safe_slice(item.data, item.len); let buf = Vec::from(slc); SECITEM_FreeItem(&mut item, PRBool::from(false)); buf @@ -101,8 +101,7 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { let oid_data = unsafe { p11::SECOID_FindOIDByTag(p11::SECOidTag::SEC_OID_CURVE25519) }; let oid = unsafe { oid_data.as_ref() }.ok_or(Error::InternalError)?; - let oid_slc = - unsafe { std::slice::from_raw_parts(oid.oid.data, usize::try_from(oid.oid.len).unwrap()) }; + let oid_slc = unsafe { null_safe_slice(oid.oid.data, oid.oid.len) }; let mut params: Vec = Vec::with_capacity(oid_slc.len() + 2); params.push(u8::try_from(p11::SEC_ASN1_OBJECT_ID).unwrap()); params.push(u8::try_from(oid.oid.len).unwrap()); diff --git a/neqo-crypto/src/ext.rs b/neqo-crypto/src/ext.rs index 310e87a1b7..d9f3195051 100644 --- a/neqo-crypto/src/ext.rs +++ b/neqo-crypto/src/ext.rs @@ -16,6 +16,7 @@ use crate::{ agentio::as_c_void, constants::{Extension, HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}, err::Res, + null_safe_slice, ssl::{ PRBool, PRFileDesc, SECFailure, SECStatus, SECSuccess, SSLAlertDescription, SSLExtensionHandler, SSLExtensionWriter, SSLHandshakeType, @@ -105,7 +106,7 @@ impl ExtensionTracker { alert: *mut SSLAlertDescription, arg: *mut c_void, ) -> SECStatus { - let d = std::slice::from_raw_parts(data, len as usize); + let d = null_safe_slice(data, len); 
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] Self::wrap_handler_call(arg, |handler| { // Cast is safe here because the message type is always part of the enum diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 4a23b5a7b1..3e9260813f 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -37,6 +37,7 @@ mod ssl; mod time; use std::{ + convert::TryFrom, ffi::CString, path::{Path, PathBuf}, ptr::null, @@ -200,3 +201,27 @@ pub fn assert_initialized() { .get() .expect("NSS not initialized with init or init_db"); } + +/// NSS tends to return empty "slices" with a null pointer, which will cause +/// `std::slice::from_raw_parts` to panic if passed directly. This wrapper avoids +/// that issue. It also performs conversion for lengths, as a convenience. +/// +/// # Panics +/// If the provided length doesn't fit into a `usize`. +/// +/// # Safety +/// The caller must adhere to the safety constraints of `std::slice::from_raw_parts`, +/// except that this will accept a null value for `data`. 
+unsafe fn null_safe_slice<'a, T>(data: *const u8, len: T) -> &'a [u8] +where + usize: TryFrom, +{ + if data.is_null() { + &[] + } else if let Ok(len) = usize::try_from(len) { + #[allow(clippy::disallowed_methods)] + std::slice::from_raw_parts(data, len) + } else { + panic!("null_safe_slice: size overflow"); + } +} diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index 2225d5b211..6ec9370360 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -20,7 +20,10 @@ use std::{ use neqo_common::hex_with_len; -use crate::err::{secstatus_to_res, Error, Res}; +use crate::{ + err::{secstatus_to_res, Error, Res}, + null_safe_slice, +}; #[allow(clippy::upper_case_acronyms)] #[allow(clippy::unreadable_literal)] @@ -148,9 +151,7 @@ impl PrivateKey { &mut key_item, ) })?; - let slc = unsafe { - std::slice::from_raw_parts(key_item.data, usize::try_from(key_item.len).unwrap()) - }; + let slc = unsafe { null_safe_slice(key_item.data, key_item.len) }; let key = Vec::from(slc); // The data that `key_item` refers to needs to be freed, but we can't // use the scoped `Item` implementation. This is OK as long as nothing @@ -206,7 +207,7 @@ impl SymKey { // This is accessing a value attached to the key, so we can treat this as a borrow. match unsafe { key_item.as_mut() } { None => Err(Error::InternalError), - Some(key) => Ok(unsafe { std::slice::from_raw_parts(key.data, key.len as usize) }), + Some(key) => Ok(unsafe { null_safe_slice(key.data, key.len) }), } } } @@ -285,7 +286,7 @@ impl Item { let b = self.ptr.as_ref().unwrap(); // Sanity check the type, as some types don't count bytes in `Item::len`. 
assert_eq!(b.type_, SECItemType::siBuffer); - let slc = std::slice::from_raw_parts(b.data, usize::try_from(b.len).unwrap()); + let slc = null_safe_slice(b.data, b.len); Vec::from(slc) } } From d1d5a96a44173194f69deafc3d9da02b38fc0e31 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 12 Feb 2024 20:26:20 +1100 Subject: [PATCH 162/321] Packet wasn't used (#1643) Co-authored-by: Lars Eggert --- neqo-transport/src/connection/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 0b33bd15ec..366eed07ed 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -80,9 +80,6 @@ pub use state::{ClosingFrame, State}; pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; -#[derive(Debug, Default)] -struct Packet(Vec); - /// The number of Initial packets that the client will send in response /// to receiving an undecryptable packet during the early part of the /// handshake. This is a hack, but a useful one. 
From 319b7849c3a122c144ca94088ea89d7f5c73057e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 12 Feb 2024 12:51:41 +0200 Subject: [PATCH 163/321] ci: Better chaining of bench.yml (#1646) * ci: Better chaining of bench.yml * Update check.yml * Update check.yml * Fire as reusable workflow * Remove condition * Remove ref * Add name --- .github/workflows/bench.yml | 7 ++----- .github/workflows/check.yml | 5 +++++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index e36eac0020..4e1c15ee26 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,8 +1,6 @@ name: Bench on: - workflow_run: - workflows: [CI] - types: [completed] + workflow_call: env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_TERM_COLOR: always @@ -13,7 +11,6 @@ jobs: bench: name: Benchmark runs-on: self-hosted - if: ${{ github.event.workflow_run.conclusion == 'success' }} defaults: run: shell: bash @@ -26,7 +23,7 @@ jobs: components: rustfmt - name: Configure Rust - run: echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + run: echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment" >> "$GITHUB_ENV" - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.4 diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 48df415cec..afab22ef48 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -174,3 +174,8 @@ jobs: fail_ci_if_error: false token: ${{ secrets.CODECOV_TOKEN }} if: matrix.type == 'debug' && matrix.rust-toolchain == 'stable' + + bench: + name: "Benchmark" + needs: [check] + uses: ./.github/workflows/bench.yml From 2c17470f083046f2245799696c47d21e427f35fb Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 12 Feb 2024 18:28:41 +0200 Subject: [PATCH 164/321] ci: Generate perf profile for transfer bench (#1647) * ci: Generate flamegraphs for benches * Run transfer flamegraph separately * Use /bin/nice * Rename 
step * Simplify matrix for testing * Generate and save perf data * Finalize * Undo commit * Try and force perf to create a file * Debug * Debug more * Give file name * Finalize for real * Typo --- .github/workflows/bench.yml | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 4e1c15ee26..a29abd39a4 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -43,13 +43,20 @@ jobs: - name: Prepare machine run: sudo /root/bin/prep.sh - # Pin the benchmark to core 0 and run it at elevated priority. + # Pin the benchmark run to core 0 and run all benchmarks at elevated priority. - name: Benchmark run: | nice -n -20 taskset -c 0 \ cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt - # Enable turboboost, hyperthreading and use powersave governor. + # Pin the transfer benchmark to core 0 and run it at elevated priority inside perf. + - name: Perf transfer benchmark + run: | + nice -n -20 taskset -c 0 \ + perf record -F997 --call-graph=lbr -o perf.data \ + cargo +$TOOLCHAIN bench --features ci,bench --bench transfer + + # Re-enable turboboost, hyperthreading and use powersave governor. - name: Restore machine run: sudo /root/bin/unprep.sh if: success() || failure() @@ -60,6 +67,8 @@ jobs: path: ./cache key: ${{ runner.os }}-benchmark + # TODO: Wait for this action to be allowlisted. And then figure out how to only upload + # benchmark data when the main branch is being updated. 
# - name: Store current benchmark results # uses: benchmark-action/github-action-benchmark@v1 # with: @@ -71,3 +80,14 @@ jobs: # comment-on-alert: true # summary-always: true + - name: Convert perf data + run: | + perf script -i perf.data -F +pid | zstd > perf.ff.data.zst + zstd perf.data + + - name: Archive perf data + uses: actions/upload-artifact@v4 + with: + name: ${{ github.head_ref || github.ref_name }}-perf + path: "*.zst" + compression-level: 0 From 721a1eff430f644e23a3e06ef4c5a248a0cbf592 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Tue, 13 Feb 2024 16:33:27 +0100 Subject: [PATCH 165/321] neqo v0.7.1 (#1650) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 3da82dab90..972c3febb4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ members = [ homepage = "https://github.com/mozilla/neqo/" repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] -version = "0.7.0" +version = "0.7.1" edition = "2018" license = "MIT OR Apache-2.0" # Don't increase beyond what Firefox is currently using: From 084a78978060742e3bbd649e266980e1d230e6b1 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 14 Feb 2024 11:04:55 +0200 Subject: [PATCH 166/321] ci: Bump MacOS runner to `macos-14` (#1653) * ci: Bump MacOS runner to `macos-14` To see if this magically fixes #1652 * llvm path is different on ARM-based MacOS Also check if Mozilla has larger runners available. 
* No large runners available :-( * Symlink python --- .github/workflows/check.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index afab22ef48..138d7482d0 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-13, windows-latest] + os: [ubuntu-latest, macos-14, windows-latest] # Don't increase beyond what Firefox is currently using: # https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule rust-toolchain: [1.74.0, stable, nightly] @@ -66,7 +66,8 @@ jobs: run: | csrutil status | grep disabled brew install ninja mercurial cargo-binstall llvm - echo "/usr/local/opt/llvm/bin" >> "$GITHUB_PATH" + echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" + ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python # python3 -m pip install gyp-next # Above does not work, since pypi only has gyp 0.15.0, which is too old # for the homebrew python3. Install from source instead. From 836e852616a26d5aa74031a5cf5d9930dedbadc7 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 14 Feb 2024 20:33:12 +1100 Subject: [PATCH 167/321] Clippy pedantic (#1649) * Add must_use everywhere clippy thinks we need to * Add panics docs everywhere * Remove some redundant else blocks * Fix or suppress similar_names lint * Automatic fix for doc_markdown lint * Remove unnecessary `Res` wrappings These are mostly the result of removing internal errors in our code. The cleanup there was incomplete. * Cleanup some unused_self instances, including some dead code * Use String::new instead of String::from("") * Remove deny-warnings feature Enable warnings for clippy::pedantic Disable clippy::module_name_repetitions for all lib.rs * Fix missing error doc on one! 
function * Invert the test in the pre-commit hook * Document errors on all public interfaces * Suppress excessive bools lint for args struct * Take suggested tweak for map_unwrap_or lint * Take Copy argument by value * Nest or patterns as suggested * Suppress one struct_field_names lint because I can't think of a better name * Remove unnecessary async on functions * Remove unnecessary wrap again * Remove implicit clone calls * Refactor client main a little to reduce its length * Pass by reference where the linter says we can * Remove some easy lints in the server main.rs * Another simple auto-fix lint * Merging match arms automatically * Avoid manual assertion * Remove integer casts where possible * Invert some conditions to avoid inversions * Suppress some too_many_lines lints * Remove some unnecessary borrows * Add some missing semicolons * Avoid a closure when not needed * Remove some Default::default usage * Fix some unreadable literals * qlog needs f32, which means precision loss * Stop allocating large buffers on the stack * Avoid wildcard matches * Fix panics doc * Reorder items * Remove extra deref * Fix format args * Iterate better * Update neqo-transport/src/qlog.rs Co-authored-by: Max Inden Signed-off-by: Lars Eggert * Remove unused path from Db arm of enum * Fix unknown lint warning * Clone directly rather than indirectly * Those any functions aren't used * Suppress warnings here for now * Fix some thread_local usages * Unused, remove * Update SharedVec as suggested, plus a bonus default thing --------- Signed-off-by: Lars Eggert Co-authored-by: Lars Eggert Co-authored-by: Max Inden --- hooks/pre-commit | 2 +- neqo-client/Cargo.toml | 2 - neqo-client/src/main.rs | 139 +++++++------- neqo-common/Cargo.toml | 1 - neqo-common/src/hrtime.rs | 4 +- neqo-common/src/lib.rs | 2 +- neqo-common/tests/log.rs | 3 +- neqo-crypto/Cargo.toml | 1 - neqo-crypto/build.rs | 1 - neqo-crypto/src/lib.rs | 20 +- neqo-crypto/src/p11.rs | 2 +- neqo-crypto/tests/aead.rs | 1 - 
neqo-crypto/tests/agent.rs | 1 - neqo-crypto/tests/ext.rs | 1 - neqo-crypto/tests/handshake.rs | 1 + neqo-crypto/tests/hkdf.rs | 1 - neqo-crypto/tests/hp.rs | 1 - neqo-crypto/tests/init.rs | 1 - neqo-crypto/tests/selfencrypt.rs | 1 - neqo-http3/Cargo.toml | 1 - neqo-http3/src/connection_client.rs | 2 +- .../extended_connect/webtransport_session.rs | 8 +- neqo-http3/src/lib.rs | 9 +- neqo-http3/src/recv_message.rs | 8 +- neqo-http3/src/send_message.rs | 6 +- neqo-http3/src/server.rs | 2 +- neqo-interop/Cargo.toml | 3 - neqo-interop/src/main.rs | 25 +-- neqo-qpack/Cargo.toml | 3 - neqo-qpack/src/lib.rs | 5 +- neqo-server/Cargo.toml | 2 - neqo-server/src/main.rs | 34 ++-- neqo-server/src/old_https.rs | 5 +- neqo-transport/Cargo.toml | 1 - neqo-transport/src/addr_valid.rs | 35 ++-- neqo-transport/src/cid.rs | 64 ++++--- neqo-transport/src/connection/dump.rs | 2 +- neqo-transport/src/connection/mod.rs | 179 ++++++++++++------ neqo-transport/src/connection/params.rs | 37 +++- neqo-transport/src/connection/state.rs | 19 +- .../src/connection/tests/fuzzing.rs | 1 - .../src/connection/tests/handshake.rs | 2 +- neqo-transport/src/connection/tests/idle.rs | 32 ++-- neqo-transport/src/connection/tests/mod.rs | 2 +- .../src/connection/tests/priority.rs | 2 +- .../src/connection/tests/resumption.rs | 6 +- neqo-transport/src/crypto.rs | 33 ++-- neqo-transport/src/fc.rs | 2 +- neqo-transport/src/frame.rs | 6 +- neqo-transport/src/lib.rs | 6 +- neqo-transport/src/packet/mod.rs | 25 ++- neqo-transport/src/path.rs | 4 +- neqo-transport/src/qlog.rs | 8 +- neqo-transport/src/quic_datagrams.rs | 2 +- neqo-transport/src/recv_stream.rs | 48 ++--- neqo-transport/src/send_stream.rs | 101 +++++----- neqo-transport/src/server.rs | 57 +++--- neqo-transport/src/stream_id.rs | 14 ++ neqo-transport/src/streams.rs | 49 +++-- neqo-transport/src/tparams.rs | 58 ++++-- neqo-transport/src/version.rs | 8 + neqo-transport/tests/common/mod.rs | 1 - neqo-transport/tests/connection.rs | 4 +- 
neqo-transport/tests/network.rs | 1 - neqo-transport/tests/retry.rs | 1 - neqo-transport/tests/server.rs | 1 - test-fixture/Cargo.toml | 2 - test-fixture/src/lib.rs | 17 +- 68 files changed, 617 insertions(+), 511 deletions(-) diff --git a/hooks/pre-commit b/hooks/pre-commit index 377a70c89d..9166f739b3 100755 --- a/hooks/pre-commit +++ b/hooks/pre-commit @@ -34,7 +34,7 @@ fi toolchain=nightly fmtconfig="$root/.rustfmt.toml" -if cargo "+$toolchain" version >/dev/null; then +if ! cargo "+$toolchain" version >/dev/null; then echo "warning: A rust $toolchain toolchain is recommended to check formatting." toolchain=stable fmtconfig=/dev/null diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 5899475e06..08fe2f8fcd 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -24,5 +24,3 @@ qlog = "0.12" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = "2.5" -[features] -deny-warnings = [] diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 041989a1ce..f94ff16837 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![warn(clippy::pedantic)] use std::{ cell::RefCell, @@ -126,6 +125,7 @@ impl KeyUpdateState { #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] +#[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. pub struct Args { #[arg(short = 'a', long, default_value = "h3")] /// ALPN labels to negotiate. @@ -233,6 +233,50 @@ impl Args { }) .collect::>() } + + fn update_for_tests(&mut self) { + let Some(testcase) = self.qns_test.as_ref() else { + return; + }; + + // Only use v1 for most QNS tests. + self.quic_parameters.quic_version = vec![Version::Version1]; + match testcase.as_str() { + // TODO: Add "ecn" when that is ready. 
+ "http3" => {} + "handshake" | "transfer" | "retry" => { + self.use_old_http = true; + } + "zerortt" | "resumption" => { + if self.urls.len() < 2 { + eprintln!("Warning: resumption tests won't work without >1 URL"); + exit(127); + } + self.use_old_http = true; + self.resume = true; + } + "multiconnect" => { + self.use_old_http = true; + self.download_in_series = true; + } + "chacha20" => { + self.use_old_http = true; + self.ciphers.clear(); + self.ciphers + .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); + } + "keyupdate" => { + self.use_old_http = true; + self.key_update = true; + } + "v2" => { + self.use_old_http = true; + // Use default version set for this test (which allows compatible vneg.) + self.quic_parameters.quic_version.clear(); + } + _ => exit(127), + } + } } fn from_str(s: &str) -> Res { @@ -378,8 +422,7 @@ async fn ready( let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket); let timeout_ready = timeout .as_mut() - .map(Either::Left) - .unwrap_or(Either::Right(futures::future::pending())) + .map_or(Either::Right(futures::future::pending()), Either::Left) .map(|()| Ok(Ready::Timeout)); select(socket_ready, timeout_ready).await.factor_first().0 } @@ -568,8 +611,8 @@ struct URLHandler<'a> { } impl<'a> URLHandler<'a> { - fn stream_handler(&mut self, stream_id: &StreamId) -> Option<&mut Box> { - self.stream_handlers.get_mut(stream_id) + fn stream_handler(&mut self, stream_id: StreamId) -> Option<&mut Box> { + self.stream_handlers.get_mut(&stream_id) } fn process_urls(&mut self, client: &mut Http3Client) { @@ -612,9 +655,11 @@ impl<'a> URLHandler<'a> { self.stream_handlers.insert(client_stream_id, handler); true } - Err(Error::TransportError(TransportError::StreamLimitError)) - | Err(Error::StreamLimitError) - | Err(Error::Unavailable) => { + Err( + Error::TransportError(TransportError::StreamLimitError) + | Error::StreamLimitError + | Error::Unavailable, + ) => { self.url_queue.push_front(url); false } @@ -640,6 
+685,11 @@ impl<'a> URLHandler<'a> { } struct Handler<'a> { + #[allow( + unknown_lints, + clippy::struct_field_names, + clippy::redundant_field_names + )] url_handler: URLHandler<'a>, key_update: KeyUpdateState, token: Option, @@ -678,7 +728,7 @@ impl<'a> Handler<'a> { fin, .. } => { - if let Some(handler) = self.url_handler.stream_handler(&stream_id) { + if let Some(handler) = self.url_handler.stream_handler(stream_id) { handler.process_header_ready(stream_id, fin, headers); } else { println!("Data on unexpected stream: {stream_id}"); @@ -690,7 +740,7 @@ impl<'a> Handler<'a> { } Http3ClientEvent::DataReadable { stream_id } => { let mut stream_done = false; - match self.url_handler.stream_handler(&stream_id) { + match self.url_handler.stream_handler(stream_id) { None => { println!("Data on unexpected stream: {stream_id}"); return Ok(false); @@ -725,7 +775,7 @@ impl<'a> Handler<'a> { } } Http3ClientEvent::DataWritable { stream_id } => { - match self.url_handler.stream_handler(&stream_id) { + match self.url_handler.stream_handler(stream_id) { None => { println!("Data on unexpected stream: {stream_id}"); return Ok(false); @@ -776,7 +826,7 @@ struct ClientRunner<'a> { } impl<'a> ClientRunner<'a> { - async fn new( + fn new( args: &'a mut Args, socket: &'a UdpSocket, local_addr: SocketAddr, @@ -784,7 +834,7 @@ impl<'a> ClientRunner<'a> { hostname: &str, url_queue: VecDeque, resumption_token: Option, - ) -> Res> { + ) -> ClientRunner<'a> { if let Some(testcase) = &args.test { if testcase.as_str() != "upload" { eprintln!("Unsupported test case: {testcase}"); @@ -811,14 +861,14 @@ impl<'a> ClientRunner<'a> { }; let handler = Handler::new(url_handler, key_update, args.output_read_data); - Ok(Self { + Self { local_addr, socket, client, handler, timeout: None, args, - }) + } } async fn run(mut self) -> Res> { @@ -929,7 +979,7 @@ fn create_http3_client( fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { if let Some(qlog_dir) = &args.qlog_dir { - let mut 
qlog_path = qlog_dir.to_path_buf(); + let mut qlog_path = qlog_dir.clone(); let filename = format!("{hostname}-{cid}.sqlog"); qlog_path.push(filename); @@ -961,46 +1011,7 @@ async fn main() -> Res<()> { init(); let mut args = Args::parse(); - - if let Some(testcase) = args.qns_test.as_ref() { - // Only use v1 for most QNS tests. - args.quic_parameters.quic_version = vec![Version::Version1]; - match testcase.as_str() { - // TODO: Add "ecn" when that is ready. - "http3" => {} - "handshake" | "transfer" | "retry" => { - args.use_old_http = true; - } - "zerortt" | "resumption" => { - if args.urls.len() < 2 { - eprintln!("Warning: resumption tests won't work without >1 URL"); - exit(127); - } - args.use_old_http = true; - args.resume = true; - } - "multiconnect" => { - args.use_old_http = true; - args.download_in_series = true; - } - "chacha20" => { - args.use_old_http = true; - args.ciphers.clear(); - args.ciphers - .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); - } - "keyupdate" => { - args.use_old_http = true; - args.key_update = true; - } - "v2" => { - args.use_old_http = true; - // Use default version set for this test (which allows compatible vneg.) - args.quic_parameters.quic_version.clear(); - } - _ => exit(127), - } - } + args.update_for_tests(); let urls_by_origin = args .urls @@ -1080,8 +1091,7 @@ async fn main() -> Res<()> { &hostname, to_request, token, - ) - .await? + )? .run() .await? } else { @@ -1094,7 +1104,6 @@ async fn main() -> Res<()> { to_request, token, ) - .await? .run() .await? 
}; @@ -1175,7 +1184,7 @@ mod old { self.streams.insert(client_stream_id, out_file); true } - Err(e @ Error::StreamLimitError) | Err(e @ Error::ConnectionState) => { + Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => { println!("Cannot create stream {e:?}"); self.url_queue.push_front(url); false @@ -1289,9 +1298,9 @@ mod old { self.download_urls(client); } } - ConnectionEvent::StateChange(State::WaitInitial) - | ConnectionEvent::StateChange(State::Handshaking) - | ConnectionEvent::StateChange(State::Connected) => { + ConnectionEvent::StateChange( + State::WaitInitial | State::Handshaking | State::Connected, + ) => { println!("{event:?}"); self.download_urls(client); } @@ -1321,7 +1330,7 @@ mod old { } impl<'a> ClientRunner<'a> { - pub async fn new( + pub fn new( args: &'a Args, socket: &'a UdpSocket, local_addr: SocketAddr, diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index f27d97b42a..25122e9b87 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -21,7 +21,6 @@ time = { version = "0.3", features = ["formatting"] } test-fixture = { path = "../test-fixture" } [features] -deny-warnings = [] ci = [] [target."cfg(windows)".dependencies.winapi] diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index 62d2567d42..d43fa882f7 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -340,9 +340,7 @@ impl Time { /// The handle can also be used to update the resolution. #[must_use] pub fn get(period: Duration) -> Handle { - thread_local! { - static HR_TIME: RefCell>> = RefCell::default(); - } + thread_local!(static HR_TIME: RefCell>> = RefCell::default()); HR_TIME.with(|r| { let mut b = r.borrow_mut(); diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index 853b05705b..ee97408a41 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -4,8 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. mod codec; mod datagram; diff --git a/neqo-common/tests/log.rs b/neqo-common/tests/log.rs index 33b42d1411..0c286528ed 100644 --- a/neqo-common/tests/log.rs +++ b/neqo-common/tests/log.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![warn(clippy::pedantic)] use neqo_common::{qdebug, qerror, qinfo, qtrace, qwarn}; diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 5f3ebea1f4..66cf9ca9f3 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -26,6 +26,5 @@ toml = "0.5" test-fixture = { path = "../test-fixture" } [features] -deny-warnings = [] gecko = ["mozbuild"] fuzzing = [] diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index a63c34dedb..57981416ef 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] use std::{ diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 3e9260813f..d092842705 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -4,13 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -// Bindgen auto generated code -// won't adhere to the clippy rules below -#![allow(clippy::module_name_repetitions)] -#![allow(clippy::unseparated_literal_suffix)] -#![allow(clippy::used_underscore_binding)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. 
+#![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code. mod aead; #[cfg(feature = "fuzzing")] @@ -36,13 +32,7 @@ pub mod selfencrypt; mod ssl; mod time; -use std::{ - convert::TryFrom, - ffi::CString, - path::{Path, PathBuf}, - ptr::null, - sync::OnceLock, -}; +use std::{convert::TryFrom, ffi::CString, path::PathBuf, ptr::null, sync::OnceLock}; #[cfg(not(feature = "fuzzing"))] pub use self::aead::RealAead as Aead; @@ -87,7 +77,7 @@ fn secstatus_to_res(code: nss::SECStatus) -> Res<()> { enum NssLoaded { External, NoDb, - Db(Box), + Db, } impl Drop for NssLoaded { @@ -189,7 +179,7 @@ pub fn init_db>(dir: P) { #[cfg(debug_assertions)] enable_ssl_trace(); - NssLoaded::Db(path.into_boxed_path()) + NssLoaded::Db }); } diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index 6ec9370360..4c22b3ec20 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -345,7 +345,7 @@ impl RandomCache { /// When `size` is too large or NSS fails. #[must_use] pub fn random() -> [u8; N] { - thread_local! 
{ static CACHE: RefCell = RefCell::new(RandomCache::new()) }; + thread_local!(static CACHE: RefCell = RefCell::new(RandomCache::new())); let buf = [0; N]; if N <= RandomCache::CUTOFF { diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index 0ee1e66c38..736acc31c2 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -1,4 +1,3 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index c2c83c467c..bbe0a7a646 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -1,4 +1,3 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] use std::boxed::Box; diff --git a/neqo-crypto/tests/ext.rs b/neqo-crypto/tests/ext.rs index 9ae81133f5..cb048d7a12 100644 --- a/neqo-crypto/tests/ext.rs +++ b/neqo-crypto/tests/ext.rs @@ -1,4 +1,3 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] use std::{cell::RefCell, rc::Rc}; diff --git a/neqo-crypto/tests/handshake.rs b/neqo-crypto/tests/handshake.rs index b2d8b9cc34..9aa88764c7 100644 --- a/neqo-crypto/tests/handshake.rs +++ b/neqo-crypto/tests/handshake.rs @@ -127,6 +127,7 @@ fn zero_rtt_setup( } } +#[must_use] pub fn resumption_setup(mode: Resumption) -> (Option, ResumptionToken) { fixture_init(); diff --git a/neqo-crypto/tests/hkdf.rs b/neqo-crypto/tests/hkdf.rs index b4dde482f8..6dc845e690 100644 --- a/neqo-crypto/tests/hkdf.rs +++ b/neqo-crypto/tests/hkdf.rs @@ -1,4 +1,3 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] use neqo_crypto::{ diff --git a/neqo-crypto/tests/hp.rs b/neqo-crypto/tests/hp.rs index 43b96869d8..dbfda8a95d 100644 --- a/neqo-crypto/tests/hp.rs +++ b/neqo-crypto/tests/hp.rs @@ -1,4 +1,3 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] use std::mem; diff --git 
a/neqo-crypto/tests/init.rs b/neqo-crypto/tests/init.rs index 21291ceebb..223580584a 100644 --- a/neqo-crypto/tests/init.rs +++ b/neqo-crypto/tests/init.rs @@ -1,4 +1,3 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] // This uses external interfaces to neqo_crypto rather than being a module diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index fd9d4ea1ea..1b145f5420 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -1,4 +1,3 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index aa7d79f029..ec4e185826 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -25,5 +25,4 @@ url = "2.5" test-fixture = { path = "../test-fixture" } [features] -deny-warnings = [] fuzzing = ["neqo-transport/fuzzing", "neqo-crypto/fuzzing"] diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index b98533b043..9711e094ea 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -1997,7 +1997,7 @@ mod tests { // The response header from PUSH_DATA (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x34) are // decoded into: fn check_push_response_header(header: &[Header]) { - let expected_push_response_header = vec![ + let expected_push_response_header = [ Header::new(":status", "200"), Header::new("content-length", "4"), ]; diff --git a/neqo-http3/src/features/extended_connect/webtransport_session.rs b/neqo-http3/src/features/extended_connect/webtransport_session.rs index adbdf07e11..bc33acb67c 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_session.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_session.rs @@ -4,9 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(clippy::module_name_repetitions)] - -use std::{any::Any, cell::RefCell, collections::BTreeSet, mem, rc::Rc}; +use std::{cell::RefCell, collections::BTreeSet, mem, rc::Rc}; use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; use neqo_qpack::{QPackDecoder, QPackEncoder}; @@ -473,10 +471,6 @@ impl HttpRecvStream for Rc> { fn priority_update_sent(&mut self) { self.borrow_mut().priority_update_sent(); } - - fn any(&self) -> &dyn Any { - self - } } impl SendStream for Rc> { diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index 635707ca7c..4113009eb3 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -4,8 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. /*! @@ -160,7 +160,7 @@ mod server_events; mod settings; mod stream_type_reader; -use std::{any::Any, cell::RefCell, fmt::Debug, rc::Rc}; +use std::{cell::RefCell, fmt::Debug, rc::Rc}; use buffered_send_stream::BufferedStream; pub use client_events::{Http3ClientEvent, WebTransportEvent}; @@ -509,8 +509,6 @@ trait HttpRecvStream: RecvStream { fn extended_connect_wait_for_response(&self) -> bool { false } - - fn any(&self) -> &dyn Any; } #[derive(Debug, PartialEq, Eq, Copy, Clone)] @@ -572,7 +570,9 @@ trait SendStream: Stream { fn has_data_to_send(&self) -> bool; fn stream_writable(&self); fn done(&self) -> bool; + #[allow(dead_code)] // https://github.com/mozilla/neqo/issues/1651 fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option) -> Res<()>; + #[allow(dead_code)] // https://github.com/mozilla/neqo/issues/1651 fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()>; /// # Errors @@ -627,7 +627,6 @@ trait HttpSendStream: SendStream { /// This can also return an error if the underlying stream is closed. 
fn send_headers(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()>; fn set_new_listener(&mut self, _conn_events: Box) {} - fn any(&self) -> &dyn Any; } trait SendStreamEvents: Debug { diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index 36e8f65b19..6feb017cbb 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -4,9 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{ - any::Any, cell::RefCell, cmp::min, collections::VecDeque, convert::TryFrom, fmt::Debug, rc::Rc, -}; +use std::{cell::RefCell, cmp::min, collections::VecDeque, convert::TryFrom, fmt::Debug, rc::Rc}; use neqo_common::{qdebug, qinfo, qtrace, Header}; use neqo_qpack::decoder::QPackDecoder; @@ -494,8 +492,4 @@ impl HttpRecvStream for RecvMessage { fn extended_connect_wait_for_response(&self) -> bool { matches!(self.state, RecvMessageState::ExtendedConnect) } - - fn any(&self) -> &dyn Any { - self - } } diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index 96156938a0..4d37dcc37f 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::{any::Any, cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; +use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; use neqo_qpack::encoder::QPackEncoder; @@ -332,10 +332,6 @@ impl HttpSendStream for SendMessage { self.stream_type = Http3StreamType::ExtendedConnect; self.conn_events = conn_events; } - - fn any(&self) -> &dyn Any { - self - } } impl ::std::fmt::Display for SendMessage { diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index b29f715451..71bd1acf9e 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -151,7 +151,7 @@ impl Http3Server { active_conns.dedup(); active_conns .iter() - .for_each(|conn| self.server.add_to_waiting(conn.clone())); + .for_each(|conn| self.server.add_to_waiting(conn)); for mut conn in active_conns { self.process_events(&mut conn, now); } diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index f5996032fb..227d480707 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -17,6 +17,3 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } - -[features] -deny-warnings = [] diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index fac76a4daa..74c70477fb 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![warn(clippy::pedantic)] use std::{ cell::RefCell, @@ -60,7 +59,7 @@ trait Handler { } } -fn emit_datagram(socket: &UdpSocket, d: Datagram) { +fn emit_datagram(socket: &UdpSocket, d: &Datagram) { let sent = socket.send(&d[..]).expect("Error sending datagram"); if sent != d.len() { eprintln!("Unable to send all {} bytes of datagram", d.len()); @@ -115,7 +114,7 @@ fn process_loop( match output { Output::Datagram(dgram) => { let dgram = handler.rewrite_out(&dgram).unwrap_or(dgram); - emit_datagram(&nctx.socket, dgram); + emit_datagram(&nctx.socket, &dgram); } Output::Callback(duration) => { let delay = min(timer.check()?, duration); @@ -284,7 +283,7 @@ fn process_loop_h3( loop { let output = handler.h3.conn().process_output(Instant::now()); match output { - Output::Datagram(dgram) => emit_datagram(&nctx.socket, dgram), + Output::Datagram(dgram) => emit_datagram(&nctx.socket, &dgram), Output::Callback(duration) => { let delay = min(timer.check()?, duration); nctx.socket.set_read_timeout(Some(delay)).unwrap(); @@ -399,10 +398,6 @@ impl Peer { SocketAddr::V6(..) 
=> SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), } } - - fn test_enabled(&self, _test: &Test) -> bool { - true - } } impl ToSocketAddrs for Peer { @@ -496,9 +491,9 @@ fn test_connect(nctx: &NetworkCtx, test: &Test, peer: &Peer) -> Result Result<(), String> { let client_stream_id = client.stream_create(StreamType::BiDi).unwrap(); - let req: String = "GET /10\r\n".to_string(); + let request: String = "GET /10\r\n".to_string(); client - .stream_send(client_stream_id, req.as_bytes()) + .stream_send(client_stream_id, request.as_bytes()) .unwrap(); let mut hc = H9Handler::default(); hc.streams.insert(client_stream_id); @@ -774,10 +769,6 @@ fn run_peer(args: &Args, peer: &'static Peer) -> Vec<(&'static Test, String)> { let mut children = Vec::new(); for test in &TESTS { - if !peer.test_enabled(test) { - continue; - } - if !args.include_tests.is_empty() && !args.include_tests.contains(&test.label()) { continue; } @@ -902,8 +893,6 @@ const TESTS: [Test; 7] = [ ]; fn main() { - let _tests = vec![Test::Connect]; - let args = Args::parse(); init(); Timer::set_timeout(Duration::from_secs(args.timeout)); @@ -935,7 +924,7 @@ fn main() { } } } - let mut letter_str = String::from(""); + let mut letter_str = String::new(); for l in &['V', 'H', 'D', 'C', 'R', 'Z', 'S', '3'] { if all_letters.contains(l) { letter_str.push(*l); diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 41f72f9ba2..b0111f0dfd 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -19,6 +19,3 @@ static_assertions = "1.1" [dev-dependencies] test-fixture = { path = "../test-fixture" } - -[features] -deny-warnings = [] diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index 1581712017..44b9463e55 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -4,11 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -// This is because of Encoder and Decoder structs. TODO: think about a better namings for crate and -// structs. -#![allow(clippy::module_name_repetitions)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. pub mod decoder; mod decoder_instructions; diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index ffba762f23..7a83685c9f 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -23,5 +23,3 @@ qlog = "0.12" regex = "1.9" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } -[features] -deny-warnings = [] diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index be56d63586..0c07cb61b7 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![warn(clippy::pedantic)] use std::{ cell::RefCell, @@ -246,25 +245,25 @@ impl QuicParameters { { let addr = opt .iter() - .flat_map(|spa| spa.to_socket_addrs().ok()) + .filter_map(|spa| spa.to_socket_addrs().ok()) .flatten() .find(f); - if opt.is_some() != addr.is_some() { - panic!( - "unable to resolve '{}' to an {} address", - opt.as_ref().unwrap(), - v - ); - } + assert_eq!( + opt.is_some(), + addr.is_some(), + "unable to resolve '{}' to an {} address", + opt.as_ref().unwrap(), + v, + ); addr } fn preferred_address_v4(&self) -> Option { - Self::get_sock_addr(&self.preferred_address_v4, "IPv4", |addr| addr.is_ipv4()) + Self::get_sock_addr(&self.preferred_address_v4, "IPv4", SocketAddr::is_ipv4) } fn preferred_address_v6(&self) -> Option { - Self::get_sock_addr(&self.preferred_address_v6, "IPv6", |addr| addr.is_ipv6()) + Self::get_sock_addr(&self.preferred_address_v6, "IPv6", SocketAddr::is_ipv6) } fn preferred_address(&self) -> Option { 
@@ -300,7 +299,7 @@ impl QuicParameters { } if let Some(first) = self.quic_version.first() { - params = params.versions(*first, self.quic_version.to_vec()); + params = params.versions(*first, self.quic_version.clone()); } params } @@ -731,8 +730,7 @@ impl ServersRunner { let timeout_ready = self .timeout .as_mut() - .map(Either::Left) - .unwrap_or(Either::Right(futures::future::pending())) + .map_or(Either::Right(futures::future::pending()), Either::Left) .map(|()| Ok(Ready::Timeout)); select(sockets_ready, timeout_ready).await.factor_first().0 } @@ -793,7 +791,7 @@ async fn main() -> Result<(), io::Error> { args.alpn = String::from(HQ_INTEROP); args.quic_parameters.max_streams_bidi = 100; } - "handshake" | "transfer" | "resumption" | "multiconnect" => { + "handshake" | "transfer" | "resumption" | "multiconnect" | "v2" => { args.use_old_http = true; args.alpn = String::from(HQ_INTEROP); } @@ -809,10 +807,6 @@ async fn main() -> Result<(), io::Error> { args.alpn = String::from(HQ_INTEROP); args.retry = true; } - "v2" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); - } _ => exit(127), } } diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index bb67ab5c9d..d45509d7d0 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![warn(clippy::pedantic)] use std::{ cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, @@ -178,7 +177,7 @@ impl Http09Server { .unwrap(); qdebug!("Wrote {}", sent); *offset += sent; - self.server.add_to_waiting(conn.clone()); + self.server.add_to_waiting(conn); if *offset == data.len() { eprintln!("Sent {sent} on {stream_id}, closing"); conn.borrow_mut().stream_close_send(stream_id).unwrap(); diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 55cc117f66..14141a5ab8 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -24,7 +24,6 @@ test-fixture = { path = "../test-fixture" } [features] bench = [] -deny-warnings = [] fuzzing = ["neqo-crypto/fuzzing"] [[bench]] diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index b5ed2d07d1..d1181be825 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -23,15 +23,15 @@ use crate::{ cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Res, }; -/// A prefix we add to Retry tokens to distinguish them from NEW_TOKEN tokens. +/// A prefix we add to Retry tokens to distinguish them from `NEW_TOKEN` tokens. const TOKEN_IDENTIFIER_RETRY: &[u8] = &[0x52, 0x65, 0x74, 0x72, 0x79]; -/// A prefix on NEW_TOKEN tokens, that is maximally Hamming distant from NEW_TOKEN. +/// A prefix on `NEW_TOKEN` tokens, that is maximally Hamming distant from `NEW_TOKEN`. /// Together, these need to have a low probability of collision, even if there is /// corruption of individual bits in transit. const TOKEN_IDENTIFIER_NEW_TOKEN: &[u8] = &[0xad, 0x9a, 0x8b, 0x8d, 0x86]; -/// The maximum number of tokens we'll save from NEW_TOKEN frames. -/// This should be the same as the value of MAX_TICKETS in neqo-crypto. +/// The maximum number of tokens we'll save from `NEW_TOKEN` frames. 
+/// This should be the same as the value of `MAX_TICKETS` in neqo-crypto. const MAX_NEW_TOKEN: usize = 4; /// The number of tokens we'll track for the purposes of looking for duplicates. /// This is based on how many might be received over a period where could be @@ -143,7 +143,7 @@ impl AddressValidation { self.generate_token(Some(dcid), peer_address, now) } - /// This generates a token for use with NEW_TOKEN. + /// This generates a token for use with `NEW_TOKEN`. pub fn generate_new_token(&self, peer_address: SocketAddr, now: Instant) -> Res> { self.generate_token(None, peer_address, now) } @@ -184,7 +184,7 @@ impl AddressValidation { /// Less than one difference per byte indicates that it is likely not a Retry. /// This generous interpretation allows for a lot of damage in transit. /// Note that if this check fails, then the token will be treated like it came - /// from NEW_TOKEN instead. If there truly is corruption of packets that causes + /// from `NEW_TOKEN` instead. If there truly is corruption of packets that causes /// validation failure, it will be a failure that we try to recover from. fn is_likely_retry(token: &[u8]) -> bool { let mut difference = 0; @@ -210,10 +210,9 @@ impl AddressValidation { if self.validation == ValidateAddress::Never { qinfo!("AddressValidation: no token; accepting"); return AddressValidationResult::Pass; - } else { - qinfo!("AddressValidation: no token; validating"); - return AddressValidationResult::Validate; } + qinfo!("AddressValidation: no token; validating"); + return AddressValidationResult::Validate; } if token.len() <= TOKEN_IDENTIFIER_RETRY.len() { // Treat bad tokens strictly. @@ -351,14 +350,13 @@ impl NewTokenState { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { if let Self::Server(ref mut sender) = self { - sender.write_frames(builder, tokens, stats)?; + sender.write_frames(builder, tokens, stats); } - Ok(()) } - /// If this a server, buffer a NEW_TOKEN for sending. 
+ /// If this a server, buffer a `NEW_TOKEN` for sending. /// If this is a client, panic. pub fn send_new_token(&mut self, token: Vec) { if let Self::Server(ref mut sender) = self { @@ -368,7 +366,7 @@ impl NewTokenState { } } - /// If this a server, process a lost signal for a NEW_TOKEN frame. + /// If this a server, process a lost signal for a `NEW_TOKEN` frame. /// If this is a client, panic. pub fn lost(&mut self, seqno: usize) { if let Self::Server(ref mut sender) = self { @@ -378,7 +376,7 @@ impl NewTokenState { } } - /// If this a server, process remove the acknowledged NEW_TOKEN frame. + /// If this a server, process remove the acknowledged `NEW_TOKEN` frame. /// If this is a client, panic. pub fn acked(&mut self, seqno: usize) { if let Self::Server(ref mut sender) = self { @@ -426,8 +424,8 @@ impl NewTokenSender { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { - for t in self.tokens.iter_mut() { + ) { + for t in &mut self.tokens { if t.needs_sending && t.len() <= builder.remaining() { t.needs_sending = false; @@ -438,11 +436,10 @@ impl NewTokenSender { stats.new_token += 1; } } - Ok(()) } pub fn lost(&mut self, seqno: usize) { - for t in self.tokens.iter_mut() { + for t in &mut self.tokens { if t.seqno == seqno { t.needs_sending = true; break; diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 8a0f7a3070..e876eba16d 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -39,6 +39,9 @@ pub struct ConnectionId { } impl ConnectionId { + /// # Panics + /// When `len` is larger than `MAX_CONNECTION_ID_LEN`. + #[must_use] pub fn generate(len: usize) -> Self { assert!(matches!(len, 0..=MAX_CONNECTION_ID_LEN)); let mut cid = smallvec![0; len]; @@ -47,6 +50,7 @@ impl ConnectionId { } // Apply a wee bit of greasing here in picking a length between 8 and 20 bytes long. 
+ #[must_use] pub fn generate_initial() -> Self { let v = random::<1>()[0]; // Bias selection toward picking 8 (>50% of the time). @@ -54,6 +58,7 @@ impl ConnectionId { Self::generate(len) } + #[must_use] pub fn as_cid_ref(&self) -> ConnectionIdRef { ConnectionIdRef::from(&self.cid[..]) } @@ -197,7 +202,7 @@ impl ConnectionIdGenerator for EmptyConnectionIdGenerator { } } -/// An RandomConnectionIdGenerator produces connection IDs of +/// An `RandomConnectionIdGenerator` produces connection IDs of /// a fixed length and random content. No effort is made to /// prevent collisions. pub struct RandomConnectionIdGenerator { @@ -205,6 +210,7 @@ pub struct RandomConnectionIdGenerator { } impl RandomConnectionIdGenerator { + #[must_use] pub fn new(len: usize) -> Self { Self { len } } @@ -232,7 +238,7 @@ impl ConnectionIdGenerator for RandomConnectionIdGenerator { } } -/// A single connection ID, as saved from NEW_CONNECTION_ID. +/// A single connection ID, as saved from `NEW_CONNECTION_ID`. /// This is templated so that the connection ID entries from a peer can be /// saved with a stateless reset token. Local entries don't need that. #[derive(Debug, PartialEq, Eq, Clone)] @@ -292,6 +298,23 @@ impl ConnectionIdEntry<[u8; 16]> { pub fn sequence_number(&self) -> u64 { self.seqno } + + /// Write the entry out in a `NEW_CONNECTION_ID` frame. + /// Returns `true` if the frame was written, `false` if there is insufficient space. 
+ pub fn write(&self, builder: &mut PacketBuilder, stats: &mut FrameStats) -> bool { + let len = 1 + Encoder::varint_len(self.seqno) + 1 + 1 + self.cid.len() + 16; + if builder.remaining() < len { + return false; + } + + builder.encode_varint(FRAME_TYPE_NEW_CONNECTION_ID); + builder.encode_varint(self.seqno); + builder.encode_varint(0u64); + builder.encode_vec(1, &self.cid); + builder.encode(&self.srt); + stats.new_connection_id += 1; + true + } } impl ConnectionIdEntry<()> { @@ -514,39 +537,19 @@ impl ConnectionIdManager { ); } - fn write_entry( - &mut self, - entry: &ConnectionIdEntry<[u8; 16]>, - builder: &mut PacketBuilder, - stats: &mut FrameStats, - ) -> Res { - let len = 1 + Encoder::varint_len(entry.seqno) + 1 + 1 + entry.cid.len() + 16; - if builder.remaining() < len { - return Ok(false); - } - - builder.encode_varint(FRAME_TYPE_NEW_CONNECTION_ID); - builder.encode_varint(entry.seqno); - builder.encode_varint(0u64); - builder.encode_vec(1, &entry.cid); - builder.encode(&entry.srt); - stats.new_connection_id += 1; - Ok(true) - } - pub fn write_frames( &mut self, builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { if self.generator.deref().borrow().generates_empty_cids() { debug_assert_eq!(self.generator.borrow_mut().generate_cid().unwrap().len(), 0); - return Ok(()); + return; } while let Some(entry) = self.lost_new_connection_id.pop() { - if self.write_entry(&entry, builder, stats)? { + if entry.write(builder, stats) { tokens.push(RecoveryToken::NewConnectionId(entry)); } else { // This shouldn't happen often. 
@@ -571,11 +574,10 @@ impl ConnectionIdManager { .add_local(ConnectionIdEntry::new(seqno, cid.clone(), ())); let entry = ConnectionIdEntry::new(seqno, cid, srt); - self.write_entry(&entry, builder, stats)?; + entry.write(builder, stats); tokens.push(RecoveryToken::NewConnectionId(entry)); } } - Ok(()) } pub fn lost(&mut self, entry: &ConnectionIdEntry<[u8; 16]>) { @@ -599,9 +601,11 @@ mod tests { fixture_init(); for _ in 0..100 { let cid = ConnectionId::generate_initial(); - if !matches!(cid.len(), 8..=MAX_CONNECTION_ID_LEN) { - panic!("connection ID {:?}", cid); - } + assert!( + matches!(cid.len(), 8..=MAX_CONNECTION_ID_LEN), + "connection ID length {:?}", + cid, + ); } } } diff --git a/neqo-transport/src/connection/dump.rs b/neqo-transport/src/connection/dump.rs index 77d51c605c..8811e4f05f 100644 --- a/neqo-transport/src/connection/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -31,7 +31,7 @@ pub fn dump_packet( return; } - let mut s = String::from(""); + let mut s = String::new(); let mut d = Decoder::from(payload); while d.remaining() > 0 { let Ok(f) = Frame::decode(&mut d) else { diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 366eed07ed..749cf315d3 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -48,6 +48,7 @@ use crate::{ recovery::{LossRecovery, RecoveryToken, SendProfile}, recv_stream::RecvStreamStats, rtt::GRANULARITY, + send_stream::SendStream, stats::{Stats, StatsCell}, stream_id::StreamType, streams::{SendOrder, Streams}, @@ -95,7 +96,7 @@ pub enum ZeroRttState { } #[derive(Clone, Debug, PartialEq, Eq)] -/// Type returned from process() and `process_output()`. Users are required to +/// Type returned from `process()` and `process_output()`. Users are required to /// call these repeatedly until `Callback` or `None` is returned. pub enum Output { /// Connection requires no action. 
@@ -118,6 +119,7 @@ impl Output { } /// Get a reference to the Datagram, if any. + #[must_use] pub fn as_dgram_ref(&self) -> Option<&Datagram> { match self { Self::Datagram(dg) => Some(dg), @@ -135,7 +137,7 @@ impl Output { } } -/// Used by inner functions like Connection::output. +/// Used by inner functions like `Connection::output`. enum SendOption { /// Yes, please send this datagram. Yes(Datagram), @@ -301,6 +303,8 @@ impl Connection { const LOOSE_TIMER_RESOLUTION: Duration = Duration::from_millis(50); /// Create a new QUIC connection with Client role. + /// # Errors + /// When NSS fails and an agent cannot be created. pub fn new_client( server_name: impl Into, protocols: &[impl AsRef], @@ -337,6 +341,8 @@ impl Connection { } /// Create a new QUIC connection with Server role. + /// # Errors + /// When NSS fails and an agent cannot be created. pub fn new_server( certs: &[impl AsRef], protocols: &[impl AsRef], @@ -426,6 +432,8 @@ impl Connection { Ok(c) } + /// # Errors + /// When the operation fails. pub fn server_enable_0rtt( &mut self, anti_replay: &AntiReplay, @@ -435,6 +443,8 @@ impl Connection { .server_enable_0rtt(self.tps.clone(), anti_replay, zero_rtt_checker) } + /// # Errors + /// When the operation fails. pub fn server_enable_ech( &mut self, config: u8, @@ -446,10 +456,13 @@ impl Connection { } /// Get the active ECH configuration, which is empty if ECH is disabled. + #[must_use] pub fn ech_config(&self) -> &[u8] { self.crypto.ech_config() } + /// # Errors + /// When the operation fails. pub fn client_enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { self.crypto.client_enable_ech(ech_config_list) } @@ -467,8 +480,9 @@ impl Connection { } /// Get the original destination connection id for this connection. This - /// will always be present for Role::Client but not if Role::Server is in - /// State::Init. + /// will always be present for `Role::Client` but not if `Role::Server` is in + /// `State::Init`. 
+ #[must_use] pub fn odcid(&self) -> Option<&ConnectionId> { self.original_destination_cid.as_ref() } @@ -477,8 +491,9 @@ impl Connection { /// This only sets transport parameters without dealing with other aspects of /// setting the value. /// + /// # Errors + /// When the transport parameter is invalid. /// # Panics - /// /// This panics if the transport parameter is known to this crate. pub fn set_local_tparam(&self, tp: TransportParameterId, value: TransportParameter) -> Res<()> { #[cfg(not(test))] @@ -501,9 +516,9 @@ impl Connection { /// Retry. pub(crate) fn set_retry_cids( &mut self, - odcid: ConnectionId, + odcid: &ConnectionId, remote_cid: ConnectionId, - retry_cid: ConnectionId, + retry_cid: &ConnectionId, ) { debug_assert_eq!(self.role, Role::Server); qtrace!( @@ -532,12 +547,16 @@ impl Connection { /// Set ALPN preferences. Strings that appear earlier in the list are given /// higher preference. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn set_alpn(&mut self, protocols: &[impl AsRef]) -> Res<()> { self.crypto.tls.set_alpn(protocols)?; Ok(()) } /// Enable a set of ciphers. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn set_ciphers(&mut self, ciphers: &[Cipher]) -> Res<()> { if self.state != State::Init { qerror!([self], "Cannot enable ciphers in state {:?}", self.state); @@ -548,6 +567,8 @@ impl Connection { } /// Enable a set of key exchange groups. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> { if self.state != State::Init { qerror!([self], "Cannot enable groups in state {:?}", self.state); @@ -558,6 +579,8 @@ impl Connection { } /// Set the number of additional key shares to send in the client hello. 
+ /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { if self.state != State::Init { qerror!([self], "Cannot enable groups in state {:?}", self.state); @@ -666,6 +689,8 @@ impl Connection { /// This can only be called once and only on the client. /// After calling the function, it should be possible to attempt 0-RTT /// if the token supports that. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn enable_resumption(&mut self, now: Instant, token: impl AsRef<[u8]>) -> Res<()> { if self.state != State::Init { qerror!([self], "set token in state {:?}", self.state); @@ -682,8 +707,9 @@ impl Connection { ); let mut dec = Decoder::from(token.as_ref()); - let version = - Version::try_from(dec.decode_uint(4).ok_or(Error::InvalidResumptionToken)? as u32)?; + let version = Version::try_from(u32::try_from( + dec.decode_uint(4).ok_or(Error::InvalidResumptionToken)?, + )?)?; qtrace!([self], " version {:?}", version); if !self.conn_params.get_versions().all().contains(&version) { return Err(Error::DisabledVersion); @@ -731,13 +757,15 @@ impl Connection { Ok(()) } - pub(crate) fn set_validation(&mut self, validation: Rc>) { + pub(crate) fn set_validation(&mut self, validation: &Rc>) { qtrace!([self], "Enabling NEW_TOKEN"); assert_eq!(self.role, Role::Server); - self.address_validation = AddressValidationInfo::Server(Rc::downgrade(&validation)); + self.address_validation = AddressValidationInfo::Server(Rc::downgrade(validation)); } - /// Send a TLS session ticket AND a NEW_TOKEN frame (if possible). + /// Send a TLS session ticket AND a `NEW_TOKEN` frame (if possible). + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. 
pub fn send_ticket(&mut self, now: Instant, extra: &[u8]) -> Res<()> { if self.role == Role::Client { return Err(Error::WrongRole); @@ -773,15 +801,19 @@ impl Connection { } } + #[must_use] pub fn tls_info(&self) -> Option<&SecretAgentInfo> { self.crypto.tls.info() } + /// # Errors + /// When there is no information to obtain. pub fn tls_preinfo(&self) -> Res { Ok(self.crypto.tls.preinfo()?) } /// Get the peer's certificate chain and other info. + #[must_use] pub fn peer_certificate(&self) -> Option { self.crypto.tls.peer_certificate() } @@ -801,26 +833,31 @@ impl Connection { } /// Get the role of the connection. + #[must_use] pub fn role(&self) -> Role { self.role } /// Get the state of the connection. + #[must_use] pub fn state(&self) -> &State { &self.state } /// The QUIC version in use. + #[must_use] pub fn version(&self) -> Version { self.version } /// Get the 0-RTT state of the connection. + #[must_use] pub fn zero_rtt_state(&self) -> ZeroRttState { self.zero_rtt_state } /// Get a snapshot of collected statistics. + #[must_use] pub fn stats(&self) -> Stats { let mut v = self.stats.borrow().clone(); if let Some(p) = self.paths.primary_fallible() { @@ -887,7 +924,7 @@ impl Connection { res } - /// For use with process_input(). Errors there can be ignored, but this + /// For use with `process_input()`. Errors there can be ignored, but this /// needs to ensure that the state is updated. fn absorb_error(&mut self, now: Instant, res: Res) -> Option { self.capture_error(None, now, 0, res).ok() @@ -1233,6 +1270,7 @@ impl Connection { /// Perform any processing that we might have to do on packets prior to /// attempting to remove protection. + #[allow(clippy::too_many_lines)] // Yeah, it's a work in progress. 
fn preprocess_packet( &mut self, packet: &PublicPacket, @@ -1345,17 +1383,17 @@ impl Connection { } State::WaitInitial => PreprocessResult::Continue, State::WaitVersion | State::Handshaking | State::Connected | State::Confirmed => { - if !self.cid_manager.is_valid(packet.dcid()) { - self.stats - .borrow_mut() - .pkt_dropped(format!("Invalid DCID {:?}", packet.dcid())); - PreprocessResult::Next - } else { + if self.cid_manager.is_valid(packet.dcid()) { if self.role == Role::Server && packet.packet_type() == PacketType::Handshake { // Server has received a Handshake packet -> discard Initial keys and states self.discard_keys(PacketNumberSpace::Initial, now); } PreprocessResult::Continue + } else { + self.stats + .borrow_mut() + .pkt_dropped(format!("Invalid DCID {:?}", packet.dcid())); + PreprocessResult::Next } } State::Closing { .. } => { @@ -1375,7 +1413,7 @@ impl Connection { Ok(res) } - /// After a Initial, Handshake, ZeroRtt, or Short packet is successfully processed. + /// After a Initial, Handshake, `ZeroRtt`, or Short packet is successfully processed. fn postprocess_packet( &mut self, path: &PathRef, @@ -1614,7 +1652,7 @@ impl Connection { } } - /// After an error, a permanent path is needed to send the CONNECTION_CLOSE. + /// After an error, a permanent path is needed to send the `CONNECTION_CLOSE`. /// This attempts to ensure that this exists. As the connection is now /// temporary, there is no reason to do anything special here. fn ensure_error_path(&mut self, path: &PathRef, packet: &PublicPacket, now: Instant) { @@ -1813,7 +1851,7 @@ impl Connection { State::Closing { .. } | State::Draining { .. 
} | State::Closed(_) => { if let Some(details) = self.state_signaling.close_frame() { let path = Rc::clone(details.path()); - let res = self.output_close(details); + let res = self.output_close(&details); self.capture_error(Some(path), now, 0, res) } else { Ok(SendOption::default()) @@ -1890,7 +1928,7 @@ impl Connection { } } - fn output_close(&mut self, close: ClosingFrame) -> Res { + fn output_close(&mut self, close: &ClosingFrame) -> Res { let mut encoder = Encoder::with_capacity(256); let grease_quic_bit = self.can_grease_quic_bit(); let version = self.version(); @@ -1930,7 +1968,7 @@ impl Connection { }; sanitized .as_ref() - .unwrap_or(&close) + .unwrap_or(close) .write_frame(&mut builder); encoder = builder.build(tx)?; } @@ -1944,11 +1982,11 @@ impl Connection { &mut self, builder: &mut PacketBuilder, tokens: &mut Vec, - ) -> Res<()> { + ) { let stats = &mut self.stats.borrow_mut(); let frame_stats = &mut stats.frame_tx; if self.role == Role::Server { - if let Some(t) = self.state_signaling.write_done(builder)? { + if let Some(t) = self.state_signaling.write_done(builder) { tokens.push(t); frame_stats.handshake_done += 1; } @@ -1957,7 +1995,7 @@ impl Connection { self.streams .write_frames(TransmissionPriority::Critical, builder, tokens, frame_stats); if builder.is_full() { - return Ok(()); + return; } self.streams.write_frames( @@ -1967,36 +2005,35 @@ impl Connection { frame_stats, ); if builder.is_full() { - return Ok(()); + return; } // NEW_CONNECTION_ID, RETIRE_CONNECTION_ID, and ACK_FREQUENCY. 
- self.cid_manager - .write_frames(builder, tokens, frame_stats)?; + self.cid_manager.write_frames(builder, tokens, frame_stats); if builder.is_full() { - return Ok(()); + return; } self.paths.write_frames(builder, tokens, frame_stats); if builder.is_full() { - return Ok(()); + return; } self.streams .write_frames(TransmissionPriority::High, builder, tokens, frame_stats); if builder.is_full() { - return Ok(()); + return; } self.streams .write_frames(TransmissionPriority::Normal, builder, tokens, frame_stats); if builder.is_full() { - return Ok(()); + return; } // Datagrams are best-effort and unreliable. Let streams starve them for now. self.quic_datagrams.write_frames(builder, tokens, stats); if builder.is_full() { - return Ok(()); + return; } let frame_stats = &mut stats.frame_tx; @@ -2007,13 +2044,13 @@ impl Connection { builder, tokens, frame_stats, - )?; + ); if builder.is_full() { - return Ok(()); + return; } - self.new_token.write_frames(builder, tokens, frame_stats)?; + self.new_token.write_frames(builder, tokens, frame_stats); if builder.is_full() { - return Ok(()); + return; } self.streams @@ -2025,8 +2062,6 @@ impl Connection { w.write_frames(builder); } } - - Ok(()) } // Maybe send a probe. Return true if the packet was ack-eliciting. @@ -2087,7 +2122,7 @@ impl Connection { profile: &SendProfile, builder: &mut PacketBuilder, now: Instant, - ) -> Res<(Vec, bool, bool)> { + ) -> (Vec, bool, bool) { let mut tokens = Vec::new(); let primary = path.borrow().is_primary(); let mut ack_eliciting = false; @@ -2123,16 +2158,15 @@ impl Connection { if profile.ack_only(space) { // If we are CC limited we can only send acks! 
- return Ok((tokens, false, false)); + return (tokens, false, false); } if primary { if space == PacketNumberSpace::ApplicationData { - self.write_appdata_frames(builder, &mut tokens)?; + self.write_appdata_frames(builder, &mut tokens); } else { let stats = &mut self.stats.borrow_mut().frame_tx; - self.crypto - .write_frame(space, builder, &mut tokens, stats)?; + self.crypto.write_frame(space, builder, &mut tokens, stats); } } @@ -2156,11 +2190,12 @@ impl Connection { }; stats.all += tokens.len(); - Ok((tokens, ack_eliciting, padded)) + (tokens, ack_eliciting, padded) } /// Build a datagram, possibly from multiple packets (for different PN /// spaces) and each containing 1+ frames. + #[allow(clippy::too_many_lines)] // Yeah, that's just the way it is. fn output_path(&mut self, path: &PathRef, now: Instant) -> Res { let mut initial_sent = None; let mut needs_padding = false; @@ -2215,7 +2250,7 @@ impl Connection { // Add frames to the packet. let payload_start = builder.len(); let (tokens, ack_eliciting, padded) = - self.write_frames(path, *space, &profile, &mut builder, now)?; + self.write_frames(path, *space, &profile, &mut builder, now); if builder.packet_empty() { // Nothing to include in this packet. encoder = builder.abort(); @@ -2304,6 +2339,8 @@ impl Connection { } } + /// # Errors + /// When connection state is not valid. 
pub fn initiate_key_update(&mut self) -> Res<()> { if self.state == State::Confirmed { let la = self @@ -2317,6 +2354,7 @@ impl Connection { } #[cfg(test)] + #[must_use] pub fn get_epochs(&self) -> (Option, Option) { self.crypto.states.get_epochs() } @@ -2375,6 +2413,7 @@ impl Connection { ); } + #[must_use] pub fn is_stream_id_allowed(&self, stream_id: StreamId) -> bool { self.streams.is_stream_id_allowed(stream_id) } @@ -2590,7 +2629,7 @@ impl Connection { HandshakeState::Authenticated(_) | HandshakeState::InProgress => (), HandshakeState::AuthenticationPending => { if !was_authentication_pending { - self.events.authentication_needed() + self.events.authentication_needed(); } } HandshakeState::EchFallbackAuthenticationPending(public_name) => self @@ -2627,6 +2666,7 @@ impl Connection { Ok(()) } + #[allow(clippy::too_many_lines)] // Yep, but it's a nice big match, which is basically lots of little functions. fn input_frame( &mut self, path: &PathRef, @@ -2644,7 +2684,7 @@ impl Connection { if frame.is_stream() { return self .streams - .input_frame(frame, &mut self.stats.borrow_mut().frame_rx); + .input_frame(&frame, &mut self.stats.borrow_mut().frame_rx); } match frame { Frame::Padding => { @@ -3009,11 +3049,10 @@ impl Connection { Ok(()) } - /// Set the SendOrder of a stream. Re-enqueues to keep the ordering correct + /// Set the `SendOrder` of a stream. Re-enqueues to keep the ordering correct /// /// # Errors - /// - /// Returns InvalidStreamId if the stream id doesn't exist + /// When the stream does not exist. pub fn stream_sendorder( &mut self, stream_id: StreamId, @@ -3025,16 +3064,21 @@ impl Connection { /// Set the Fairness of a stream /// /// # Errors - /// - /// Returns InvalidStreamId if the stream id doesn't exist + /// When the stream does not exist. pub fn stream_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { self.streams.set_fairness(stream_id, fairness) } + /// # Errors + /// When the stream does not exist. 
pub fn send_stream_stats(&self, stream_id: StreamId) -> Res { - self.streams.get_send_stream(stream_id).map(|s| s.stats()) + self.streams + .get_send_stream(stream_id) + .map(SendStream::stats) } + /// # Errors + /// When the stream does not exist. pub fn recv_stream_stats(&mut self, stream_id: StreamId) -> Res { let stream = self.streams.get_recv_stream_mut(stream_id)?; @@ -3054,8 +3098,8 @@ impl Connection { self.streams.get_send_stream_mut(stream_id)?.send(data) } - /// Send all data or nothing on a stream. May cause DATA_BLOCKED or - /// STREAM_DATA_BLOCKED frames to be sent. + /// Send all data or nothing on a stream. May cause `DATA_BLOCKED` or + /// `STREAM_DATA_BLOCKED` frames to be sent. /// Returns true if data was successfully sent, otherwise false. /// /// # Errors @@ -3079,20 +3123,26 @@ impl Connection { val.map(|v| v == data.len()) } - /// Bytes that stream_send() is guaranteed to accept for sending. + /// Bytes that `stream_send()` is guaranteed to accept for sending. /// i.e. that will not be blocked by flow credits or send buffer max /// capacity. + /// # Errors + /// When the stream ID is invalid. pub fn stream_avail_send_space(&self, stream_id: StreamId) -> Res { Ok(self.streams.get_send_stream(stream_id)?.avail()) } /// Close the stream. Enqueued data will be sent. + /// # Errors + /// When the stream ID is invalid. pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> { self.streams.get_send_stream_mut(stream_id)?.close(); Ok(()) } /// Abandon transmission of in-flight and future stream data. + /// # Errors + /// When the stream ID is invalid. pub fn stream_reset_send(&mut self, stream_id: StreamId, err: AppError) -> Res<()> { self.streams.get_send_stream_mut(stream_id)?.reset(err); Ok(()) @@ -3113,6 +3163,8 @@ impl Connection { } /// Application is no longer interested in this stream. + /// # Errors + /// When the stream ID is invalid. 
pub fn stream_stop_sending(&mut self, stream_id: StreamId, err: AppError) -> Res<()> { let stream = self.streams.get_recv_stream_mut(stream_id)?; @@ -3146,6 +3198,7 @@ impl Connection { self.streams.keep_alive(stream_id, keep) } + #[must_use] pub fn remote_datagram_size(&self) -> u64 { self.quic_datagrams.remote_datagram_size() } @@ -3154,9 +3207,10 @@ impl Connection { /// The value will change over time depending on the encoded size of the /// packet number, ack frames, etc. /// - /// # Error - /// + /// # Errors /// The function returns `NotAvailable` if datagrams are not enabled. + /// # Panics + /// Basically never, because that unwrap won't fail. pub fn max_datagram_size(&self) -> Res { let max_dgram_size = self.quic_datagrams.remote_datagram_size(); if max_dgram_size == 0 { @@ -3197,7 +3251,7 @@ impl Connection { /// Queue a datagram for sending. /// - /// # Error + /// # Errors /// /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. The funcion does not check if the @@ -3207,7 +3261,6 @@ impl Connection { /// to check the estimated max datagram size and to use smaller datagrams. /// `max_datagram_size` is just a current estimate and will change over /// time depending on the encoded size of the packet number, ack frames, etc. - pub fn send_datagram(&mut self, buf: &[u8], id: impl Into) -> Res<()> { self.quic_datagrams .add_datagram(buf, id.into(), &mut self.stats.borrow_mut()) diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index 48aba4303b..bfa78a8688 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -41,7 +41,7 @@ pub enum PreferredAddressConfig { Address(PreferredAddress), } -/// ConnectionParameters use for setting intitial value for QUIC parameters. +/// `ConnectionParameters` use for setting intitial value for QUIC parameters. 
/// This collects configuration like initial limits, protocol version, and /// congestion control algorithm. #[derive(Debug, Clone)] @@ -108,6 +108,7 @@ impl Default for ConnectionParameters { } impl ConnectionParameters { + #[must_use] pub fn get_versions(&self) -> &VersionConfig { &self.versions } @@ -120,29 +121,35 @@ impl ConnectionParameters { /// versions that should be enabled. This list should contain the initial /// version and be in order of preference, with more preferred versions /// before less preferred. + #[must_use] pub fn versions(mut self, initial: Version, all: Vec) -> Self { self.versions = VersionConfig::new(initial, all); self } + #[must_use] pub fn get_cc_algorithm(&self) -> CongestionControlAlgorithm { self.cc_algorithm } + #[must_use] pub fn cc_algorithm(mut self, v: CongestionControlAlgorithm) -> Self { self.cc_algorithm = v; self } + #[must_use] pub fn get_max_data(&self) -> u64 { self.max_data } + #[must_use] pub fn max_data(mut self, v: u64) -> Self { self.max_data = v; self } + #[must_use] pub fn get_max_streams(&self, stream_type: StreamType) -> u64 { match stream_type { StreamType::BiDi => self.max_streams_bidi, @@ -153,6 +160,7 @@ impl ConnectionParameters { /// # Panics /// /// If v > 2^60 (the maximum allowed by the protocol). + #[must_use] pub fn max_streams(mut self, stream_type: StreamType, v: u64) -> Self { assert!(v <= (1 << 60), "max_streams is too large"); match stream_type { @@ -171,6 +179,7 @@ impl ConnectionParameters { /// # Panics /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination. + #[must_use] pub fn get_max_stream_data(&self, stream_type: StreamType, remote: bool) -> u64 { match (stream_type, remote) { (StreamType::BiDi, false) => self.max_stream_data_bidi_local, @@ -188,6 +197,7 @@ impl ConnectionParameters { /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination /// or if v >= 62 (the maximum allowed by the protocol). 
+ #[must_use] pub fn max_stream_data(mut self, stream_type: StreamType, remote: bool, v: u64) -> Self { assert!(v < (1 << 62), "max stream data is too large"); match (stream_type, remote) { @@ -208,26 +218,31 @@ impl ConnectionParameters { } /// Set a preferred address (which only has an effect for a server). + #[must_use] pub fn preferred_address(mut self, preferred: PreferredAddress) -> Self { self.preferred_address = PreferredAddressConfig::Address(preferred); self } /// Disable the use of preferred addresses. + #[must_use] pub fn disable_preferred_address(mut self) -> Self { self.preferred_address = PreferredAddressConfig::Disabled; self } + #[must_use] pub fn get_preferred_address(&self) -> &PreferredAddressConfig { &self.preferred_address } + #[must_use] pub fn ack_ratio(mut self, ack_ratio: u8) -> Self { self.ack_ratio = ack_ratio; self } + #[must_use] pub fn get_ack_ratio(&self) -> u8 { self.ack_ratio } @@ -235,45 +250,54 @@ impl ConnectionParameters { /// # Panics /// /// If `timeout` is 2^62 milliseconds or more. + #[must_use] pub fn idle_timeout(mut self, timeout: Duration) -> Self { assert!(timeout.as_millis() < (1 << 62), "idle timeout is too long"); self.idle_timeout = timeout; self } + #[must_use] pub fn get_idle_timeout(&self) -> Duration { self.idle_timeout } + #[must_use] pub fn get_datagram_size(&self) -> u64 { self.datagram_size } + #[must_use] pub fn datagram_size(mut self, v: u64) -> Self { self.datagram_size = v; self } + #[must_use] pub fn get_outgoing_datagram_queue(&self) -> usize { self.outgoing_datagram_queue } + #[must_use] pub fn outgoing_datagram_queue(mut self, v: usize) -> Self { // The max queue length must be at least 1. self.outgoing_datagram_queue = max(v, 1); self } + #[must_use] pub fn get_incoming_datagram_queue(&self) -> usize { self.incoming_datagram_queue } + #[must_use] pub fn incoming_datagram_queue(mut self, v: usize) -> Self { // The max queue length must be at least 1. 
self.incoming_datagram_queue = max(v, 1); self } + #[must_use] pub fn get_fast_pto(&self) -> u8 { self.fast_pto } @@ -293,39 +317,50 @@ impl ConnectionParameters { /// # Panics /// /// A value of 0 is invalid and will cause a panic. + #[must_use] pub fn fast_pto(mut self, scale: u8) -> Self { assert_ne!(scale, 0); self.fast_pto = scale; self } + #[must_use] pub fn is_fuzzing(&self) -> bool { self.fuzzing } + #[must_use] pub fn fuzzing(mut self, enable: bool) -> Self { self.fuzzing = enable; self } + #[must_use] pub fn is_greasing(&self) -> bool { self.grease } + #[must_use] pub fn grease(mut self, grease: bool) -> Self { self.grease = grease; self } + #[must_use] pub fn pacing_enabled(&self) -> bool { self.pacing } + #[must_use] pub fn pacing(mut self, pacing: bool) -> Self { self.pacing = pacing; self } + /// # Errors + /// When a connection ID cannot be obtained. + /// # Panics + /// Only when this code includes a transport parameter that is invalid. pub fn create_transport_parameter( &self, role: Role, diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index 9afb42174f..9789151d3f 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -21,7 +21,7 @@ use crate::{ packet::PacketBuilder, path::PathRef, recovery::RecoveryToken, - ConnectionError, Error, Res, + ConnectionError, Error, }; #[derive(Clone, Debug, PartialEq, Eq)] @@ -66,6 +66,7 @@ impl State { ) } + #[must_use] pub fn error(&self) -> Option<&ConnectionError> { if let Self::Closing { error, .. } | Self::Draining { error, .. } | Self::Closed(error) = self @@ -184,13 +185,13 @@ impl ClosingFrame { } } -/// `StateSignaling` manages whether we need to send HANDSHAKE_DONE and CONNECTION_CLOSE. +/// `StateSignaling` manages whether we need to send `HANDSHAKE_DONE` and `CONNECTION_CLOSE`. 
/// Valid state transitions are: -/// * Idle -> HandshakeDone: at the server when the handshake completes -/// * HandshakeDone -> Idle: when a HANDSHAKE_DONE frame is sent +/// * Idle -> `HandshakeDone`: at the server when the handshake completes +/// * `HandshakeDone` -> Idle: when a `HANDSHAKE_DONE` frame is sent /// * Idle/HandshakeDone -> Closing/Draining: when closing or draining -/// * Closing/Draining -> CloseSent: after sending CONNECTION_CLOSE -/// * CloseSent -> Closing: any time a new CONNECTION_CLOSE is needed +/// * Closing/Draining -> `CloseSent`: after sending `CONNECTION_CLOSE` +/// * `CloseSent` -> Closing: any time a new `CONNECTION_CLOSE` is needed /// * -> Reset: from any state in case of a stateless reset #[derive(Debug, Clone)] pub enum StateSignaling { @@ -214,13 +215,13 @@ impl StateSignaling { *self = Self::HandshakeDone; } - pub fn write_done(&mut self, builder: &mut PacketBuilder) -> Res> { + pub fn write_done(&mut self, builder: &mut PacketBuilder) -> Option { if matches!(self, Self::HandshakeDone) && builder.remaining() >= 1 { *self = Self::Idle; builder.encode_varint(FRAME_TYPE_HANDSHAKE_DONE); - Ok(Some(RecoveryToken::HandshakeDone)) + Some(RecoveryToken::HandshakeDone) } else { - Ok(None) + None } } diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/fuzzing.rs index 5425e1a16e..b3efc26cc9 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/fuzzing.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] #![cfg(feature = "fuzzing")] diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 52077c8e88..d08b6590f3 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -347,7 +347,7 @@ fn reorder_05rtt_with_0rtt() { let mut server = default_server(); let validation = AddressValidation::new(now(), ValidateAddress::NoToken).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); let mut now = connect_with_rtt(&mut client, &mut server, now(), RTT); // Include RTT in sending the ticket or the ticket age reported by the diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index c33726917a..641802249b 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -310,28 +310,20 @@ fn idle_caching() { server.process_input(&dgram.unwrap(), middle); assert_eq!(server.stats().frame_rx.ping, ping_before_s + 1); let mut tokens = Vec::new(); - server - .crypto - .streams - .write_frame( - PacketNumberSpace::Initial, - &mut builder, - &mut tokens, - &mut FrameStats::default(), - ) - .unwrap(); + server.crypto.streams.write_frame( + PacketNumberSpace::Initial, + &mut builder, + &mut tokens, + &mut FrameStats::default(), + ); assert_eq!(tokens.len(), 1); tokens.clear(); - server - .crypto - .streams - .write_frame( - PacketNumberSpace::Initial, - &mut builder, - &mut tokens, - &mut FrameStats::default(), - ) - .unwrap(); + server.crypto.streams.write_frame( + PacketNumberSpace::Initial, + &mut builder, + &mut tokens, + &mut FrameStats::default(), + ); assert!(tokens.is_empty()); let dgram = server.process_output(middle).dgram(); diff --git a/neqo-transport/src/connection/tests/mod.rs 
b/neqo-transport/src/connection/tests/mod.rs index 5470c18000..afe01affbf 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -278,7 +278,7 @@ fn exchange_ticket( ) -> ResumptionToken { let validation = AddressValidation::new(now, ValidateAddress::NoToken).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); server.send_ticket(now, &[]).expect("can send ticket"); let ticket = server.process_output(now).dgram(); assert!(ticket.is_some()); diff --git a/neqo-transport/src/connection/tests/priority.rs b/neqo-transport/src/connection/tests/priority.rs index 1f86aa22e5..b7cc9a0af2 100644 --- a/neqo-transport/src/connection/tests/priority.rs +++ b/neqo-transport/src/connection/tests/priority.rs @@ -370,7 +370,7 @@ fn low() { let validation = Rc::new(RefCell::new( AddressValidation::new(now, ValidateAddress::Never).unwrap(), )); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); connect(&mut client, &mut server); let id = server.stream_create(StreamType::UniDi).unwrap(); diff --git a/neqo-transport/src/connection/tests/resumption.rs b/neqo-transport/src/connection/tests/resumption.rs index a8c45a9f06..c9187226d3 100644 --- a/neqo-transport/src/connection/tests/resumption.rs +++ b/neqo-transport/src/connection/tests/resumption.rs @@ -50,7 +50,7 @@ fn remember_smoothed_rtt() { // wants to acknowledge; so the ticket will include an ACK frame too. 
let validation = AddressValidation::new(now, ValidateAddress::NoToken).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); server.send_ticket(now, &[]).expect("can send ticket"); let ticket = server.process_output(now).dgram(); assert!(ticket.is_some()); @@ -84,7 +84,7 @@ fn address_validation_token_resume() { let mut server = default_server(); let validation = AddressValidation::new(now(), ValidateAddress::Always).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); let mut now = connect_with_rtt(&mut client, &mut server, now(), RTT); let token = exchange_ticket(&mut client, &mut server, now); @@ -155,7 +155,7 @@ fn two_tickets_with_new_token() { let mut server = default_server(); let validation = AddressValidation::new(now(), ValidateAddress::Always).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); connect(&mut client, &mut server); // Send two tickets with tokens and then bundle those into a packet. diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index f6cc7c0e2f..3f708e7bf3 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -101,10 +101,10 @@ impl Crypto { version, protocols, tls: agent, - streams: Default::default(), + streams: CryptoStreams::default(), states: CryptoStates { fuzzing, - ..Default::default() + ..CryptoStates::default() }, }) } @@ -239,14 +239,14 @@ impl Crypto { /// Returns true if new handshake keys were installed. 
pub fn install_keys(&mut self, role: Role) -> Res { - if !self.tls.state().is_final() { + if self.tls.state().is_final() { + Ok(false) + } else { let installed_hs = self.install_handshake_keys()?; if role == Role::Server { self.maybe_install_application_write_key(self.version)?; } Ok(installed_hs) - } else { - Ok(false) } } @@ -274,7 +274,7 @@ impl Crypto { fn maybe_install_application_write_key(&mut self, version: Version) -> Res<()> { qtrace!([self], "Attempt to install application write key"); if let Some(secret) = self.tls.write_secret(TLS_EPOCH_APPLICATION_DATA) { - self.states.set_application_write_key(version, secret)?; + self.states.set_application_write_key(version, &secret)?; qdebug!([self], "Application write key installed"); } Ok(()) @@ -290,7 +290,7 @@ impl Crypto { .read_secret(TLS_EPOCH_APPLICATION_DATA) .ok_or(Error::InternalError)?; self.states - .set_application_read_key(version, read_secret, expire_0rtt)?; + .set_application_read_key(version, &read_secret, expire_0rtt)?; qdebug!([self], "application read keys installed"); Ok(()) } @@ -313,8 +313,8 @@ impl Crypto { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { - self.streams.write_frame(space, builder, tokens, stats) + ) { + self.streams.write_frame(space, builder, tokens, stats); } pub fn acked(&mut self, token: &CryptoRecoveryToken) { @@ -767,7 +767,7 @@ impl CryptoDxAppData { pub fn new( version: Version, dir: CryptoDxDirection, - secret: SymKey, + secret: &SymKey, cipher: Cipher, fuzzing: bool, ) -> Res { @@ -776,12 +776,12 @@ impl CryptoDxAppData { version, dir, TLS_EPOCH_APPLICATION_DATA, - &secret, + secret, cipher, fuzzing, ), cipher, - next_secret: Self::update_secret(cipher, &secret)?, + next_secret: Self::update_secret(cipher, secret)?, fuzzing, }) } @@ -1111,7 +1111,7 @@ impl CryptoStates { }); } - pub fn set_application_write_key(&mut self, version: Version, secret: SymKey) -> Res<()> { + pub fn set_application_write_key(&mut self, version: 
Version, secret: &SymKey) -> Res<()> { debug_assert!(self.app_write.is_none()); debug_assert_ne!(self.cipher, 0); let mut app = CryptoDxAppData::new( @@ -1134,7 +1134,7 @@ impl CryptoStates { pub fn set_application_read_key( &mut self, version: Version, - secret: SymKey, + secret: &SymKey, expire_0rtt: Instant, ) -> Res<()> { debug_assert!(self.app_write.is_some(), "should have write keys installed"); @@ -1530,14 +1530,14 @@ impl CryptoStreams { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { let cs = self.get_mut(space).unwrap(); if let Some((offset, data)) = cs.tx.next_bytes() { let mut header_len = 1 + Encoder::varint_len(offset) + 1; // Don't bother if there isn't room for the header and some data. if builder.remaining() < header_len + 1 { - return Ok(()); + return; } // Calculate length of data based on the minimum of: // - available data @@ -1561,7 +1561,6 @@ impl CryptoStreams { })); stats.crypto += 1; } - Ok(()) } } diff --git a/neqo-transport/src/fc.rs b/neqo-transport/src/fc.rs index a219ca7e8d..8cd8e10f35 100644 --- a/neqo-transport/src/fc.rs +++ b/neqo-transport/src/fc.rs @@ -249,7 +249,7 @@ where } } - /// This function is called when STREAM_DATA_BLOCKED frame is received. + /// This function is called when `STREAM_DATA_BLOCKED` frame is received. /// The flow control will try to send an update if possible. 
pub fn send_flowc_update(&mut self) { if self.retired + self.max_active > self.max_allowed { diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index f3d567ac7c..a3af801925 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -78,6 +78,7 @@ impl CloseError { } } + #[must_use] pub fn code(&self) -> u64 { match self { Self::Transport(c) | Self::Application(c) => *c, @@ -303,7 +304,7 @@ impl<'a> Frame<'a> { ) } - /// Converts AckRanges as encoded in a ACK frame (see -transport + /// Converts `AckRanges` as encoded in a ACK frame (see -transport /// 19.3.1) into ranges of acked packets (end, start), inclusive of /// start and end values. pub fn decode_ack_frame( @@ -387,6 +388,7 @@ impl<'a> Frame<'a> { } } + #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match statement. pub fn decode(dec: &mut Decoder<'a>) -> Res { /// Maximum ACK Range Count in ACK Frame /// @@ -430,7 +432,7 @@ impl<'a> Frame<'a> { } })?; let fa = dv(dec)?; - let mut arr: Vec = Vec::with_capacity(nr as usize); + let mut arr: Vec = Vec::with_capacity(usize::try_from(nr)?); for _ in 0..nr { let ar = AckRange { gap: dv(dec)?, diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index 2b5ad57579..ffa696e6e5 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -4,8 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![warn(clippy::pedantic)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. 
use neqo_common::qinfo; use neqo_crypto::Error as CryptoError; @@ -133,6 +133,7 @@ pub enum Error { } impl Error { + #[must_use] pub fn code(&self) -> TransportError { match self { Self::NoError @@ -209,6 +210,7 @@ pub enum ConnectionError { } impl ConnectionError { + #[must_use] pub fn app_code(&self) -> Option { match self { Self::Application(e) => Some(*e), diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 7e19beba5f..71ce37501f 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -172,11 +172,12 @@ impl PacketBuilder { } /// Start building a long header packet. - /// For an Initial packet you will need to call initial_token(), + /// For an Initial packet you will need to call `initial_token()`, /// even if the token is empty. /// /// See `short()` for more on how to handle this in cases where there is no space. #[allow(clippy::reversed_empty_ranges)] // For initializing an empty range. + #[allow(clippy::similar_names)] // For dcid and scid, which are fine here. pub fn long( mut encoder: Encoder, pt: PacketType, @@ -315,6 +316,7 @@ impl PacketBuilder { self.pn = pn; } + #[allow(clippy::cast_possible_truncation)] // Nope. fn write_len(&mut self, expansion: usize) { let len = self.encoder.len() - (self.offsets.len + 2) + expansion; self.encoder.as_mut()[self.offsets.len] = 0x40 | ((len >> 8) & 0x3f) as u8; @@ -410,6 +412,7 @@ impl PacketBuilder { /// As this is a simple packet, this is just an associated function. /// As Retry is odd (it has to be constructed with leading bytes), /// this returns a [`Vec`] rather than building on an encoder. + #[allow(clippy::similar_names)] // scid and dcid are fine here. pub fn retry( version: Version, dcid: &[u8], @@ -441,6 +444,7 @@ impl PacketBuilder { } /// Make a Version Negotiation packet. + #[allow(clippy::similar_names)] // scid and dcid are fine here. 
pub fn version_negotiation( dcid: &[u8], scid: &[u8], @@ -492,7 +496,7 @@ impl From for Encoder { } } -/// PublicPacket holds information from packets that is public only. This allows for +/// `PublicPacket` holds information from packets that is public only. This allows for /// processing of packets prior to decryption. pub struct PublicPacket<'a> { /// The packet type. @@ -552,6 +556,7 @@ impl<'a> PublicPacket<'a> { /// Decode the common parts of a packet. This provides minimal parsing and validation. /// Returns a tuple of a `PublicPacket` and a slice with any remainder from the datagram. + #[allow(clippy::similar_names)] // For dcid and scid, which are fine. pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> { let mut decoder = Decoder::new(data); let first = Self::opt(decoder.decode_byte())?; @@ -1366,8 +1371,12 @@ mod tests { #[test] fn build_vn() { fixture_init(); - let mut vn = - PacketBuilder::version_negotiation(SERVER_CID, CLIENT_CID, 0x0a0a0a0a, &Version::all()); + let mut vn = PacketBuilder::version_negotiation( + SERVER_CID, + CLIENT_CID, + 0x0a0a_0a0a, + &Version::all(), + ); // Erase randomness from greasing... assert_eq!(vn.len(), SAMPLE_VN.len()); vn[0] &= 0x80; @@ -1380,8 +1389,12 @@ mod tests { #[test] fn vn_do_not_repeat_client_grease() { fixture_init(); - let vn = - PacketBuilder::version_negotiation(SERVER_CID, CLIENT_CID, 0x0a0a0a0a, &Version::all()); + let vn = PacketBuilder::version_negotiation( + SERVER_CID, + CLIENT_CID, + 0x0a0a_0a0a, + &Version::all(), + ); assert_ne!(&vn[SAMPLE_VN.len() - 4..], &[0x0a, 0x0a, 0x0a, 0x0a]); } diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 4246e6ed1c..897763d7de 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -156,7 +156,7 @@ impl Paths { /// Get a reference to the primary path. Use this prior to handshake completion. 
pub fn primary_fallible(&self) -> Option { - self.primary.as_ref().map(Rc::clone) + self.primary.clone() } /// Returns true if the path is not permanent. @@ -341,7 +341,7 @@ impl Paths { None } }) - .or_else(|| self.primary.as_ref().map(Rc::clone)) + .or_else(|| self.primary.clone()) } /// A `PATH_RESPONSE` was received. diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 434395fd23..f6d3f4e1e2 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -38,6 +38,7 @@ use crate::{ pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHandler) { qlog.add_event_data(|| { let remote = tph.remote(); + #[allow(clippy::cast_possible_truncation)] // Nope. let ev_data = EventData::TransportParametersSet( qlog::events::quic::TransportParametersSet { owner: None, @@ -206,7 +207,7 @@ pub fn packet_sent( let mut frames = SmallVec::new(); while d.remaining() > 0 { if let Ok(f) = Frame::decode(&mut d) { - frames.push(frame_to_qlogframe(&f)) + frames.push(frame_to_qlogframe(&f)); } else { qinfo!("qlog: invalid frame"); break; @@ -300,7 +301,7 @@ pub fn packet_received( while d.remaining() > 0 { if let Ok(f) = Frame::decode(&mut d) { - frames.push(frame_to_qlogframe(&f)) + frames.push(frame_to_qlogframe(&f)); } else { qinfo!("qlog: invalid frame"); break; @@ -355,6 +356,7 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { let mut pacing_rate: Option = None; for metric in updated_metrics { + #[allow(clippy::cast_precision_loss)] // Nought to do here. match metric { QlogMetric::MinRtt(v) => min_rtt = Some(v.as_secs_f32() * 1000.0), QlogMetric::SmoothedRtt(v) => smoothed_rtt = Some(v.as_secs_f32() * 1000.0), @@ -391,6 +393,8 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { // Helper functions +#[allow(clippy::too_many_lines)] // Yeah, but it's a nice match. +#[allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] // No choice here. 
fn frame_to_qlogframe(frame: &Frame) -> QuicFrame { match frame { Frame::Padding => QuicFrame::Padding, diff --git a/neqo-transport/src/quic_datagrams.rs b/neqo-transport/src/quic_datagrams.rs index 07f3594768..43046afafe 100644 --- a/neqo-transport/src/quic_datagrams.rs +++ b/neqo-transport/src/quic_datagrams.rs @@ -103,7 +103,7 @@ impl QuicDatagrams { /// This function tries to write a datagram frame into a packet. /// If the frame does not fit into the packet, the datagram will - /// be dropped and a DatagramLost event will be posted. + /// be dropped and a `DatagramLost` event will be posted. pub fn write_frames( &mut self, builder: &mut PacketBuilder, diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index 06ca59685d..d68da02689 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -34,6 +34,7 @@ use crate::{ const RX_STREAM_DATA_WINDOW: u64 = 0x10_0000; // 1MiB // Export as usize for consistency with SEND_BUFFER_SIZE +#[allow(clippy::cast_possible_truncation)] // Yeah, nope. pub const RECV_BUFFER_SIZE: usize = RX_STREAM_DATA_WINDOW as usize; #[derive(Debug, Default)] @@ -301,8 +302,10 @@ impl RxStreamOrderer { false } }) - .map(|(_, data_len)| data_len as usize) - .sum() + // Accumulate, but saturate at usize::MAX. + .fold(0, |acc: usize, (_, data_len)| { + acc.saturating_add(usize::try_from(data_len).unwrap_or(usize::MAX)) + }) } /// Bytes read by the application. @@ -314,7 +317,7 @@ impl RxStreamOrderer { self.received } - /// Data bytes buffered. Could be more than bytes_readable if there are + /// Data bytes buffered. Could be more than `bytes_readable` if there are /// ranges missing. 
fn buffered(&self) -> u64 { self.data_ranges @@ -1444,8 +1447,8 @@ mod tests { let mut buf = vec![0u8; RECV_BUFFER_SIZE + 100]; // Make it overlarge assert!(!s.has_frames_to_write()); - s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE]) - .unwrap(); + let big_buf = vec![0; RECV_BUFFER_SIZE]; + s.inbound_stream_frame(false, 0, &big_buf).unwrap(); assert!(!s.has_frames_to_write()); assert_eq!(s.read(&mut buf).unwrap(), (RECV_BUFFER_SIZE, false)); assert!(!s.data_ready()); @@ -1476,8 +1479,8 @@ mod tests { fn stream_max_stream_data() { let mut s = create_stream(1024 * RX_STREAM_DATA_WINDOW); assert!(!s.has_frames_to_write()); - s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE]) - .unwrap(); + let big_buf = vec![0; RECV_BUFFER_SIZE]; + s.inbound_stream_frame(false, 0, &big_buf).unwrap(); s.inbound_stream_frame(false, RX_STREAM_DATA_WINDOW, &[1; 1]) .unwrap_err(); } @@ -1520,9 +1523,10 @@ mod tests { #[test] fn no_stream_flowc_event_after_exiting_recv() { let mut s = create_stream(1024 * RX_STREAM_DATA_WINDOW); - s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE]) - .unwrap(); - let mut buf = [0; RECV_BUFFER_SIZE]; + let mut buf = vec![0; RECV_BUFFER_SIZE]; + // Write from buf at first. + s.inbound_stream_frame(false, 0, &buf).unwrap(); + // Then read into it. 
s.read(&mut buf).unwrap(); assert!(s.has_frames_to_write()); s.inbound_stream_frame(true, RX_STREAM_DATA_WINDOW, &[]) @@ -1634,7 +1638,7 @@ mod tests { assert_eq!(fc.retired(), retired); } - /// Test consuming the flow control in RecvStreamState::Recv + /// Test consuming the flow control in `RecvStreamState::Recv` #[test] fn fc_state_recv_1() { const SW: u64 = 1024; @@ -1651,7 +1655,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 4, 0); } - /// Test consuming the flow control in RecvStreamState::Recv + /// Test consuming the flow control in `RecvStreamState::Recv` /// with multiple streams #[test] fn fc_state_recv_2() { @@ -1678,7 +1682,7 @@ mod tests { check_fc(s2.fc().unwrap(), SW / 4, 0); } - /// Test retiring the flow control in RecvStreamState::Recv + /// Test retiring the flow control in `RecvStreamState::Recv` /// with multiple streams #[test] fn fc_state_recv_3() { @@ -1730,7 +1734,7 @@ mod tests { check_fc(s2.fc().unwrap(), SW / 4, SW / 4); } - /// Test consuming the flow control in RecvStreamState::Recv - duplicate data + /// Test consuming the flow control in `RecvStreamState::Recv` - duplicate data #[test] fn fc_state_recv_4() { const SW: u64 = 1024; @@ -1753,7 +1757,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 4, 0); } - /// Test consuming the flow control in RecvStreamState::Recv - filling a gap in the + /// Test consuming the flow control in `RecvStreamState::Recv` - filling a gap in the /// data stream. #[test] fn fc_state_recv_5() { @@ -1774,7 +1778,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 4, 0); } - /// Test consuming the flow control in RecvStreamState::Recv - receiving frame past + /// Test consuming the flow control in `RecvStreamState::Recv` - receiving frame past /// the flow control will cause an error. 
#[test] fn fc_state_recv_6() { @@ -1859,7 +1863,7 @@ mod tests { assert_eq!(stats.max_stream_data, 1); } - /// Test flow control in RecvStreamState::SizeKnown + /// Test flow control in `RecvStreamState::SizeKnown` #[test] fn fc_state_size_known() { const SW: u64 = 1024; @@ -1916,7 +1920,7 @@ mod tests { assert!(s.fc().is_none()); } - /// Test flow control in RecvStreamState::DataRecvd + /// Test flow control in `RecvStreamState::DataRecvd` #[test] fn fc_state_data_recv() { const SW: u64 = 1024; @@ -1961,7 +1965,7 @@ mod tests { assert!(s.fc().is_none()); } - /// Test flow control in RecvStreamState::DataRead + /// Test flow control in `RecvStreamState::DataRead` #[test] fn fc_state_data_read() { const SW: u64 = 1024; @@ -1999,7 +2003,7 @@ mod tests { assert!(s.fc().is_none()); } - /// Test flow control in RecvStreamState::AbortReading and final size is known + /// Test flow control in `RecvStreamState::AbortReading` and final size is known #[test] fn fc_state_abort_reading_1() { const SW: u64 = 1024; @@ -2041,7 +2045,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 2, SW / 2); } - /// Test flow control in RecvStreamState::AbortReading and final size is unknown + /// Test flow control in `RecvStreamState::AbortReading` and final size is unknown #[test] fn fc_state_abort_reading_2() { const SW: u64 = 1024; @@ -2099,7 +2103,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 2 + 20, SW / 2 + 20); } - /// Test flow control in RecvStreamState::WaitForReset + /// Test flow control in `RecvStreamState::WaitForReset` #[test] fn fc_state_wait_for_reset() { const SW: u64 = 1024; diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index f2acf2c113..cfc1026160 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -167,7 +167,7 @@ impl RangeTracker { } /// Find the first unmarked range. If all are contiguous, this will return - /// (highest_offset(), None). + /// (`highest_offset()`, None). 
fn first_unmarked_range(&mut self) -> (u64, Option) { if let Some(first_unmarked) = self.first_unmarked { return first_unmarked; @@ -491,7 +491,7 @@ impl TxBuffer { Self::default() } - /// Attempt to add some or all of the passed-in buffer to the TxBuffer. + /// Attempt to add some or all of the passed-in buffer to the `TxBuffer`. pub fn send(&mut self, buf: &[u8]) -> usize { let can_buffer = min(SEND_BUFFER_SIZE - self.buffered(), buf.len()); if can_buffer > 0 { @@ -534,7 +534,7 @@ impl TxBuffer { } pub fn mark_as_sent(&mut self, offset: u64, len: usize) { - self.ranges.mark_sent(offset, len) + self.ranges.mark_sent(offset, len); } pub fn mark_as_acked(&mut self, offset: u64, len: usize) { @@ -1255,9 +1255,9 @@ impl SendStream { if atomic { self.send_blocked_if_space_needed(buf.len()); return Ok(0); - } else { - &buf[..self.avail()] } + + &buf[..self.avail()] } else { buf }; @@ -1420,25 +1420,19 @@ impl OrderGroup { } pub fn insert(&mut self, stream_id: StreamId) { - match self.vec.binary_search(&stream_id) { - Ok(_) => { - // element already in vector @ `pos` - panic!("Duplicate stream_id {}", stream_id) - } - Err(pos) => self.vec.insert(pos, stream_id), - } + let Err(pos) = self.vec.binary_search(&stream_id) else { + // element already in vector @ `pos` + panic!("Duplicate stream_id {}", stream_id); + }; + self.vec.insert(pos, stream_id); } pub fn remove(&mut self, stream_id: StreamId) { - match self.vec.binary_search(&stream_id) { - Ok(pos) => { - self.vec.remove(pos); - } - Err(_) => { - // element already in vector @ `pos` - panic!("Missing stream_id {}", stream_id) - } - } + let Ok(pos) = self.vec.binary_search(&stream_id) else { + // element already in vector @ `pos` + panic!("Missing stream_id {}", stream_id); + }; + self.vec.remove(pos); } } @@ -1698,9 +1692,9 @@ impl SendStreams { for stream_id in stream_ids { let stream = self.map.get_mut(&stream_id).unwrap(); if let Some(order) = stream.sendorder() { - qtrace!(" {} ({})", stream_id, order) + qtrace!(" 
{} ({})", stream_id, order); } else { - qtrace!(" None") + qtrace!(" None"); } if !stream.write_frames_with_early_return(priority, builder, tokens, stats) { break; @@ -1709,7 +1703,7 @@ impl SendStreams { } pub fn update_initial_limit(&mut self, remote: &TransportParameters) { - for (id, ss) in self.map.iter_mut() { + for (id, ss) in &mut self.map { let limit = if id.is_bidi() { assert!(!id.is_remote_initiated(Role::Client)); remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE) @@ -1784,7 +1778,7 @@ mod tests { assert_eq!(rt.acked_from_zero(), 400); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// SSS SSSAAASSS /// + AAAAAAAAA @@ -1807,7 +1801,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// SSS SSS AAA /// + AAAAAAAAA @@ -1828,7 +1822,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// AASSS AAAA /// + AAAAAAAAA @@ -1850,7 +1844,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// SSS /// + AAAA @@ -1871,7 +1865,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// AAAAAASSS /// + AAA @@ -1893,7 +1887,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// AAA AAA AAA /// + AAAAAAA @@ -1913,7 +1907,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. 
+ /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// AAA AAA /// + AAA @@ -1932,7 +1926,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// SSSSSSSS /// + AAAA @@ -1952,7 +1946,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_acked correctly handles all paths. + /// Check that `marked_acked` correctly handles all paths. /// ```ignore /// SSS /// + AAA @@ -1973,7 +1967,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_sent correctly handles all paths. + /// Check that `marked_sent` correctly handles all paths. /// ```ignore /// AAA AAA SSS /// + SSSSSSSSSSSS @@ -1998,7 +1992,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_sent correctly handles all paths. + /// Check that `marked_sent` correctly handles all paths. /// ```ignore /// AAASS AAA S SSSS /// + SSSSSSSSSSSSS @@ -2025,7 +2019,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_sent correctly handles all paths. + /// Check that `marked_sent` correctly handles all paths. /// ```ignore /// AAA AAA /// + SSSS @@ -2048,7 +2042,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_sent correctly handles all paths. + /// Check that `marked_sent` correctly handles all paths. /// ```ignore /// SSS AAA SS /// + SSSSSSSS @@ -2070,7 +2064,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_sent correctly handles all paths. + /// Check that `marked_sent` correctly handles all paths. /// ```ignore /// AAA /// + SSSSSS @@ -2089,7 +2083,7 @@ mod tests { assert_eq!(rt, canon); } - /// Check that marked_sent correctly handles all paths. + /// Check that `marked_sent` correctly handles all paths. 
/// ```ignore /// SSSSS /// + SSS @@ -2213,14 +2207,15 @@ mod tests { assert_eq!(txb.avail(), SEND_BUFFER_SIZE); // Fill the buffer - assert_eq!(txb.send(&[1; SEND_BUFFER_SIZE * 2]), SEND_BUFFER_SIZE); + let big_buf = vec![1; SEND_BUFFER_SIZE * 2]; + assert_eq!(txb.send(&big_buf), SEND_BUFFER_SIZE); assert!(matches!(txb.next_bytes(), Some((0, x)) if x.len()==SEND_BUFFER_SIZE && x.iter().all(|ch| *ch == 1))); // Mark almost all as sent. Get what's left let one_byte_from_end = SEND_BUFFER_SIZE as u64 - 1; - txb.mark_as_sent(0, one_byte_from_end as usize); + txb.mark_as_sent(0, usize::try_from(one_byte_from_end).unwrap()); assert!(matches!(txb.next_bytes(), Some((start, x)) if x.len() == 1 && start == one_byte_from_end @@ -2249,7 +2244,7 @@ mod tests { // Contig acked range at start means it can be removed from buffer // Impl of vecdeque should now result in a split buffer when more data // is sent - txb.mark_as_acked(0, five_bytes_from_end as usize); + txb.mark_as_acked(0, usize::try_from(five_bytes_from_end).unwrap()); assert_eq!(txb.send(&[2; 30]), 30); // Just get 5 even though there is more assert!(matches!(txb.next_bytes(), @@ -2275,7 +2270,8 @@ mod tests { assert_eq!(txb.avail(), SEND_BUFFER_SIZE); // Fill the buffer - assert_eq!(txb.send(&[1; SEND_BUFFER_SIZE * 2]), SEND_BUFFER_SIZE); + let big_buf = vec![1; SEND_BUFFER_SIZE * 2]; + assert_eq!(txb.send(&big_buf), SEND_BUFFER_SIZE); assert!(matches!(txb.next_bytes(), Some((0, x)) if x.len()==SEND_BUFFER_SIZE && x.iter().all(|ch| *ch == 1))); @@ -2283,7 +2279,7 @@ mod tests { // As above let forty_bytes_from_end = SEND_BUFFER_SIZE as u64 - 40; - txb.mark_as_acked(0, forty_bytes_from_end as usize); + txb.mark_as_acked(0, usize::try_from(forty_bytes_from_end).unwrap()); assert!(matches!(txb.next_bytes(), Some((start, x)) if x.len() == 40 && start == forty_bytes_from_end @@ -2311,7 +2307,7 @@ mod tests { // Ack entire first slice and into second slice let ten_bytes_past_end = SEND_BUFFER_SIZE as u64 + 10; - 
txb.mark_as_acked(0, ten_bytes_past_end as usize); + txb.mark_as_acked(0, usize::try_from(ten_bytes_past_end).unwrap()); // Get up to marked range A assert!(matches!(txb.next_bytes(), @@ -2349,22 +2345,23 @@ mod tests { } // Should hit stream flow control limit before filling up send buffer - let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap(); + let big_buf = vec![4; SEND_BUFFER_SIZE + 100]; + let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap(); assert_eq!(res, 1024 - 100); // should do nothing, max stream data already 1024 s.set_max_stream_data(1024); - let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap(); + let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap(); assert_eq!(res, 0); // should now hit the conn flow control (4096) s.set_max_stream_data(1_048_576); - let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap(); + let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap(); assert_eq!(res, 3072); // should now hit the tx buffer size conn_fc.borrow_mut().update(SEND_BUFFER_SIZE as u64); - let res = s.send(&[4; SEND_BUFFER_SIZE + 100]).unwrap(); + let res = s.send(&big_buf).unwrap(); assert_eq!(res, SEND_BUFFER_SIZE - 4096); // TODO(agrover@mozilla.com): test ooo acks somehow @@ -2435,10 +2432,8 @@ mod tests { // tx buffer size. 
assert_eq!(s.avail(), SEND_BUFFER_SIZE - 4); - assert_eq!( - s.send(&[b'a'; SEND_BUFFER_SIZE]).unwrap(), - SEND_BUFFER_SIZE - 4 - ); + let big_buf = vec![b'a'; SEND_BUFFER_SIZE]; + assert_eq!(s.send(&big_buf).unwrap(), SEND_BUFFER_SIZE - 4); // No event because still blocked by tx buffer full s.set_max_stream_data(2_000_000_000); diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 12a7d2f9e0..8f5170af6e 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -43,7 +43,7 @@ pub enum InitialResult { Retry(Vec), } -/// MIN_INITIAL_PACKET_SIZE is the smallest packet that can be used to establish +/// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; /// The size of timer buckets. This is higher than the actual timer granularity @@ -195,6 +195,8 @@ impl Server { /// OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; connection /// IDs produced by the manager cannot be zero-length. + /// # Errors + /// When address validation state cannot be created. pub fn new( now: Instant, certs: &[impl AsRef], @@ -240,6 +242,8 @@ impl Server { self.ciphers = Vec::from(ciphers.as_ref()); } + /// # Errors + /// When the configuration is invalid. 
pub fn enable_ech( &mut self, config: u8, @@ -251,6 +255,7 @@ impl Server { Ok(()) } + #[must_use] pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } @@ -262,7 +267,7 @@ impl Server { fn process_connection( &mut self, - c: StateRef, + c: &StateRef, dgram: Option<&Datagram>, now: Instant, ) -> Option { @@ -271,24 +276,24 @@ impl Server { match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); - self.waiting.push_back(Rc::clone(&c)); + self.waiting.push_back(Rc::clone(c)); } Output::Callback(delay) => { let next = now + delay; if next != c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); - self.remove_timer(&c); + self.remove_timer(c); c.borrow_mut().last_timer = next; - self.timers.add(next, Rc::clone(&c)); + self.timers.add(next, Rc::clone(c)); } } Output::None => { - self.remove_timer(&c); + self.remove_timer(c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); - self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); + self.active.insert(ActiveConnectionRef { c: Rc::clone(c) }); } if *c.borrow().state() > State::Handshaking { @@ -302,13 +307,13 @@ impl Server { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() - .retain(|_, v| !Rc::ptr_eq(v, &c)); + .retain(|_, v| !Rc::ptr_eq(v, c)); } out.dgram() } fn connection(&self, cid: ConnectionIdRef) -> Option { - self.connections.borrow().get(&cid[..]).map(Rc::clone) + self.connections.borrow().get(&cid[..]).cloned() } fn handle_initial( @@ -387,7 +392,7 @@ impl Server { attempt_key ); let c = Rc::clone(c); - self.process_connection(c, Some(dgram), now) + self.process_connection(&c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } @@ -395,9 +400,9 @@ impl Server { fn create_qlog_trace(&self, odcid: ConnectionIdRef<'_>) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { - let mut qlog_path = 
qlog_dir.to_path_buf(); + let mut qlog_path = qlog_dir.clone(); - qlog_path.push(format!("{}.qlog", odcid)); + qlog_path.push(format!("{odcid}.qlog")); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. @@ -456,9 +461,9 @@ impl Server { } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. - c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); + c.set_retry_cids(&odcid, initial.src_cid, &initial.dst_cid); } - c.set_validation(Rc::clone(&self.address_validation)); + c.set_validation(&self.address_validation); c.set_qlog(self.create_qlog_trace(attempt_key.odcid.as_cid_ref())); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) @@ -505,10 +510,10 @@ impl Server { last_timer: now, active_attempt: Some(attempt_key.clone()), })); - cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); + cid_mgr.borrow_mut().set_connection(&c); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); - self.process_connection(c, Some(dgram), now) + self.process_connection(&c, Some(dgram), now) } Err(e) => { qwarn!([self], "Unable to create connection"); @@ -517,7 +522,7 @@ impl Server { &mut self.create_qlog_trace(attempt_key.odcid.as_cid_ref()), self.conn_params.get_versions().all(), initial.version.wire_version(), - ) + ); } None } @@ -544,7 +549,7 @@ impl Server { attempt_key ); let c = Rc::clone(c); - self.process_connection(c, Some(dgram), now) + self.process_connection(&c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None @@ -564,7 +569,7 @@ impl Server { // Finding an existing connection. Should be the most common case. 
if let Some(c) = self.connection(packet.dcid()) { - return self.process_connection(c, Some(dgram), now); + return self.process_connection(&c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { @@ -637,13 +642,13 @@ impl Server { fn process_next_output(&mut self, now: Instant) -> Option { qtrace!([self], "No packet to send, look at waiting connections"); while let Some(c) = self.waiting.pop_front() { - if let Some(d) = self.process_connection(c, None, now) { + if let Some(d) = self.process_connection(&c, None, now) { return Some(d); } } qtrace!([self], "No packet to send still, run timers"); while let Some(c) = self.timers.take_next(now) { - if let Some(d) = self.process_connection(c, None, now) { + if let Some(d) = self.process_connection(&c, None, now) { return Some(d); } } @@ -684,7 +689,7 @@ impl Server { mem::take(&mut self.active).into_iter().collect() } - pub fn add_to_waiting(&mut self, c: ActiveConnectionRef) { + pub fn add_to_waiting(&mut self, c: &ActiveConnectionRef) { self.waiting.push_back(c.connection()); } } @@ -695,6 +700,7 @@ pub struct ActiveConnectionRef { } impl ActiveConnectionRef { + #[must_use] pub fn borrow(&self) -> impl Deref + '_ { std::cell::Ref::map(self.c.borrow(), |c| &c.c) } @@ -703,6 +709,7 @@ impl ActiveConnectionRef { std::cell::RefMut::map(self.c.borrow_mut(), |c| &mut c.c) } + #[must_use] pub fn connection(&self) -> StateRef { Rc::clone(&self.c) } @@ -731,13 +738,13 @@ struct ServerConnectionIdGenerator { } impl ServerConnectionIdGenerator { - pub fn set_connection(&mut self, c: StateRef) { + pub fn set_connection(&mut self, c: &StateRef) { let saved = std::mem::replace(&mut self.saved_cids, Vec::with_capacity(0)); for cid in saved { qtrace!("ServerConnectionIdGenerator inserting saved cid {}", cid); - self.insert_cid(cid, Rc::clone(&c)); + self.insert_cid(cid, Rc::clone(c)); } - self.c = Rc::downgrade(&c); + self.c = Rc::downgrade(c); } fn insert_cid(&mut self, cid: ConnectionId, rc: StateRef) { diff --git 
a/neqo-transport/src/stream_id.rs b/neqo-transport/src/stream_id.rs index f3b07b86a8..8dbe2dcfbc 100644 --- a/neqo-transport/src/stream_id.rs +++ b/neqo-transport/src/stream_id.rs @@ -20,10 +20,12 @@ pub enum StreamType { pub struct StreamId(u64); impl StreamId { + #[must_use] pub const fn new(id: u64) -> Self { Self(id) } + #[must_use] pub fn init(stream_type: StreamType, role: Role) -> Self { let type_val = match stream_type { StreamType::BiDi => 0, @@ -32,18 +34,22 @@ impl StreamId { Self(type_val + Self::role_bit(role)) } + #[must_use] pub fn as_u64(self) -> u64 { self.0 } + #[must_use] pub fn is_bidi(self) -> bool { self.as_u64() & 0x02 == 0 } + #[must_use] pub fn is_uni(self) -> bool { !self.is_bidi() } + #[must_use] pub fn stream_type(self) -> StreamType { if self.is_bidi() { StreamType::BiDi @@ -52,14 +58,17 @@ impl StreamId { } } + #[must_use] pub fn is_client_initiated(self) -> bool { self.as_u64() & 0x01 == 0 } + #[must_use] pub fn is_server_initiated(self) -> bool { !self.is_client_initiated() } + #[must_use] pub fn role(self) -> Role { if self.is_client_initiated() { Role::Client @@ -68,6 +77,7 @@ impl StreamId { } } + #[must_use] pub fn is_self_initiated(self, my_role: Role) -> bool { match my_role { Role::Client if self.is_client_initiated() => true, @@ -76,14 +86,17 @@ impl StreamId { } } + #[must_use] pub fn is_remote_initiated(self, my_role: Role) -> bool { !self.is_self_initiated(my_role) } + #[must_use] pub fn is_send_only(self, my_role: Role) -> bool { self.is_uni() && self.is_self_initiated(my_role) } + #[must_use] pub fn is_recv_only(self, my_role: Role) -> bool { self.is_uni() && self.is_remote_initiated(my_role) } @@ -93,6 +106,7 @@ impl StreamId { } /// This returns a bit that is shared by all streams created by this role. 
+ #[must_use] pub fn role_bit(role: Role) -> u64 { match role { Role::Server => 1, diff --git a/neqo-transport/src/streams.rs b/neqo-transport/src/streams.rs index 7cbb29ce02..d8662afa3b 100644 --- a/neqo-transport/src/streams.rs +++ b/neqo-transport/src/streams.rs @@ -95,6 +95,7 @@ impl Streams { } } + #[must_use] pub fn is_stream_id_allowed(&self, stream_id: StreamId) -> bool { self.remote_stream_limits[stream_id.stream_type()].is_allowed(stream_id) } @@ -118,7 +119,9 @@ impl Streams { self.local_stream_limits = LocalStreamLimits::new(self.role); } - pub fn input_frame(&mut self, frame: Frame, stats: &mut FrameStats) -> Res<()> { + /// # Errors + /// When the frame is invalid. + pub fn input_frame(&mut self, frame: &Frame, stats: &mut FrameStats) -> Res<()> { match frame { Frame::ResetStream { stream_id, @@ -126,8 +129,8 @@ impl Streams { final_size, } => { stats.reset_stream += 1; - if let (_, Some(rs)) = self.obtain_stream(stream_id)? { - rs.reset(application_error_code, final_size)?; + if let (_, Some(rs)) = self.obtain_stream(*stream_id)? { + rs.reset(*application_error_code, *final_size)?; } } Frame::StopSending { @@ -136,9 +139,9 @@ impl Streams { } => { stats.stop_sending += 1; self.events - .send_stream_stop_sending(stream_id, application_error_code); - if let (Some(ss), _) = self.obtain_stream(stream_id)? { - ss.reset(application_error_code); + .send_stream_stop_sending(*stream_id, *application_error_code); + if let (Some(ss), _) = self.obtain_stream(*stream_id)? { + ss.reset(*application_error_code); } } Frame::Stream { @@ -149,13 +152,13 @@ impl Streams { .. } => { stats.stream += 1; - if let (_, Some(rs)) = self.obtain_stream(stream_id)? { - rs.inbound_stream_frame(fin, offset, data)?; + if let (_, Some(rs)) = self.obtain_stream(*stream_id)? 
{ + rs.inbound_stream_frame(*fin, *offset, data)?; } } Frame::MaxData { maximum_data } => { stats.max_data += 1; - self.handle_max_data(maximum_data); + self.handle_max_data(*maximum_data); } Frame::MaxStreamData { stream_id, @@ -163,12 +166,12 @@ impl Streams { } => { qtrace!( "Stream {} Received MaxStreamData {}", - stream_id, - maximum_stream_data + *stream_id, + *maximum_stream_data ); stats.max_stream_data += 1; - if let (Some(ss), _) = self.obtain_stream(stream_id)? { - ss.set_max_stream_data(maximum_stream_data); + if let (Some(ss), _) = self.obtain_stream(*stream_id)? { + ss.set_max_stream_data(*maximum_stream_data); } } Frame::MaxStreams { @@ -176,7 +179,7 @@ impl Streams { maximum_streams, } => { stats.max_streams += 1; - self.handle_max_streams(stream_type, maximum_streams); + self.handle_max_streams(*stream_type, *maximum_streams); } Frame::DataBlocked { data_limit } => { // Should never happen since we set data limit to max @@ -193,7 +196,7 @@ impl Streams { return Err(Error::StreamStateError); } - if let (_, Some(rs)) = self.obtain_stream(stream_id)? { + if let (_, Some(rs)) = self.obtain_stream(*stream_id)? { rs.send_flowc_update(); } } @@ -401,6 +404,8 @@ impl Streams { /// Get or make a stream, and implicitly open additional streams as /// indicated by its stream id. + /// # Errors + /// When the stream cannot be created due to stream limits. pub fn obtain_stream( &mut self, stream_id: StreamId, @@ -412,14 +417,20 @@ impl Streams { )) } + /// # Errors + /// When the stream does not exist. pub fn set_sendorder(&mut self, stream_id: StreamId, sendorder: Option) -> Res<()> { self.send.set_sendorder(stream_id, sendorder) } + /// # Errors + /// When the stream does not exist. pub fn set_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { self.send.set_fairness(stream_id, fairness) } + /// # Errors + /// When a stream cannot be created, which might be temporary. 
pub fn stream_create(&mut self, st: StreamType) -> Res { match self.local_stream_limits.take_stream_id(st) { None => Err(Error::StreamLimitError), @@ -525,18 +536,26 @@ impl Streams { } } + /// # Errors + /// When the stream does not exist. pub fn get_send_stream_mut(&mut self, stream_id: StreamId) -> Res<&mut SendStream> { self.send.get_mut(stream_id) } + /// # Errors + /// When the stream does not exist. pub fn get_send_stream(&self, stream_id: StreamId) -> Res<&SendStream> { self.send.get(stream_id) } + /// # Errors + /// When the stream does not exist. pub fn get_recv_stream_mut(&mut self, stream_id: StreamId) -> Res<&mut RecvStream> { self.recv.get_mut(stream_id) } + /// # Errors + /// When the stream does not exist. pub fn keep_alive(&mut self, stream_id: StreamId, keep: bool) -> Res<()> { self.recv.keep_alive(stream_id, keep) } diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 638e3adf89..509c96a65b 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -88,6 +88,8 @@ impl PreferredAddress { } /// A generic version of `new()` for testing. + /// # Panics + /// When the addresses are the wrong type. #[must_use] #[cfg(test)] pub fn new_any(v4: Option, v6: Option) -> Self { @@ -231,7 +233,7 @@ impl TransportParameter { if v == 0 { Err(Error::TransportParameterError) } else { - Ok(v as WireVersion) + Ok(WireVersion::try_from(v)?) } } @@ -353,6 +355,9 @@ impl TransportParameters { } // Get an integer type or a default. + /// # Panics + /// When the transport parameter isn't recognized as being an integer. + #[must_use] pub fn get_integer(&self, tp: TransportParameterId) -> u64 { let default = match tp { IDLE_TIMEOUT @@ -378,6 +383,8 @@ impl TransportParameters { } // Set an integer type or a default. + /// # Panics + /// When the transport parameter isn't recognized as being an integer. 
pub fn set_integer(&mut self, tp: TransportParameterId, value: u64) { match tp { IDLE_TIMEOUT @@ -399,6 +406,9 @@ impl TransportParameters { } } + /// # Panics + /// When the transport parameter isn't recognized as containing bytes. + #[must_use] pub fn get_bytes(&self, tp: TransportParameterId) -> Option<&[u8]> { match tp { ORIGINAL_DESTINATION_CONNECTION_ID @@ -415,6 +425,8 @@ impl TransportParameters { } } + /// # Panics + /// When the transport parameter isn't recognized as containing bytes. pub fn set_bytes(&mut self, tp: TransportParameterId, value: Vec) { match tp { ORIGINAL_DESTINATION_CONNECTION_ID @@ -427,6 +439,8 @@ impl TransportParameters { } } + /// # Panics + /// When the transport parameter isn't recognized as being empty. pub fn set_empty(&mut self, tp: TransportParameterId) { match tp { DISABLE_MIGRATION | GREASE_QUIC_BIT => { @@ -437,11 +451,14 @@ impl TransportParameters { } /// Set version information. + /// # Panics + /// Never. But rust doesn't know that. pub fn set_versions(&mut self, role: Role, versions: &VersionConfig) { let rbuf = random::<4>(); let mut other = Vec::with_capacity(versions.all().len() + 1); let mut dec = Decoder::new(&rbuf); - let grease = (dec.decode_uint(4).unwrap() as u32) & 0xf0f0_f0f0 | 0x0a0a_0a0a; + let grease = + (u32::try_from(dec.decode_uint(4).unwrap()).unwrap()) & 0xf0f0_f0f0 | 0x0a0a_0a0a; other.push(grease); for &v in versions.all() { if role == Role::Client && !versions.initial().is_compatible(v) { @@ -467,6 +484,10 @@ impl TransportParameters { } } + /// # Panics + /// When the indicated transport parameter is present but NOT empty. + /// This should not happen if the parsing code in `TransportParameter::decode` is correct. 
+ #[must_use] pub fn get_empty(&self, tipe: TransportParameterId) -> bool { match self.params.get(&tipe) { None => false, @@ -568,6 +589,7 @@ pub struct TransportParametersHandler { } impl TransportParametersHandler { + #[must_use] pub fn new(role: Role, versions: VersionConfig) -> Self { let mut local = TransportParameters::default(); local.set_versions(role, &versions); @@ -588,6 +610,10 @@ impl TransportParametersHandler { self.local.set_versions(self.role, &self.versions); } + /// # Panics + /// When this function is called before the peer has provided transport parameters. + /// Do not call this function if you are not also able to send data. + #[must_use] pub fn remote(&self) -> &TransportParameters { match (self.remote.as_ref(), self.remote_0rtt.as_ref()) { (Some(tp), _) | (_, Some(tp)) => tp, @@ -596,6 +622,7 @@ impl TransportParametersHandler { } /// Get the version as set (or as determined by a compatible upgrade). + #[must_use] pub fn version(&self) -> Version { self.versions.initial() } @@ -843,7 +870,7 @@ mod tests { /// This takes a `TransportParameter::PreferredAddress` that has been mutilated. /// It then encodes it, working from the knowledge that the `encode` function /// doesn't care about validity, and decodes it. The result should be failure. - fn assert_invalid_spa(spa: TransportParameter) { + fn assert_invalid_spa(spa: &TransportParameter) { let mut enc = Encoder::new(); spa.encode(&mut enc, PREFERRED_ADDRESS); assert_eq!( @@ -853,40 +880,40 @@ mod tests { } /// This is for those rare mutations that are acceptable. 
- fn assert_valid_spa(spa: TransportParameter) { + fn assert_valid_spa(spa: &TransportParameter) { let mut enc = Encoder::new(); spa.encode(&mut enc, PREFERRED_ADDRESS); let mut dec = enc.as_decoder(); let (id, decoded) = TransportParameter::decode(&mut dec).unwrap().unwrap(); assert_eq!(id, PREFERRED_ADDRESS); - assert_eq!(decoded, spa); + assert_eq!(&decoded, spa); } #[test] fn preferred_address_zero_address() { // Either port being zero is bad. - assert_invalid_spa(mutate_spa(|v4, _, _| { + assert_invalid_spa(&mutate_spa(|v4, _, _| { v4.as_mut().unwrap().set_port(0); })); - assert_invalid_spa(mutate_spa(|_, v6, _| { + assert_invalid_spa(&mutate_spa(|_, v6, _| { v6.as_mut().unwrap().set_port(0); })); // Either IP being zero is bad. - assert_invalid_spa(mutate_spa(|v4, _, _| { + assert_invalid_spa(&mutate_spa(|v4, _, _| { v4.as_mut().unwrap().set_ip(Ipv4Addr::from(0)); })); - assert_invalid_spa(mutate_spa(|_, v6, _| { + assert_invalid_spa(&mutate_spa(|_, v6, _| { v6.as_mut().unwrap().set_ip(Ipv6Addr::from(0)); })); // Either address being absent is OK. - assert_valid_spa(mutate_spa(|v4, _, _| { + assert_valid_spa(&mutate_spa(|v4, _, _| { *v4 = None; })); - assert_valid_spa(mutate_spa(|_, v6, _| { + assert_valid_spa(&mutate_spa(|_, v6, _| { *v6 = None; })); // Both addresses being absent is bad. 
- assert_invalid_spa(mutate_spa(|v4, v6, _| { + assert_invalid_spa(&mutate_spa(|v4, v6, _| { *v4 = None; *v6 = None; })); @@ -894,10 +921,10 @@ mod tests { #[test] fn preferred_address_bad_cid() { - assert_invalid_spa(mutate_spa(|_, _, cid| { + assert_invalid_spa(&mutate_spa(|_, _, cid| { *cid = ConnectionId::from(&[]); })); - assert_invalid_spa(mutate_spa(|_, _, cid| { + assert_invalid_spa(&mutate_spa(|_, _, cid| { *cid = ConnectionId::from(&[0x0c; 21]); })); } @@ -975,7 +1002,6 @@ mod tests { #[test] fn compatible_0rtt_integers() { - let mut tps_a = TransportParameters::default(); const INTEGER_KEYS: &[TransportParameterId] = &[ INITIAL_MAX_DATA, INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, @@ -987,6 +1013,8 @@ mod tests { MIN_ACK_DELAY, MAX_DATAGRAM_FRAME_SIZE, ]; + + let mut tps_a = TransportParameters::default(); for i in INTEGER_KEYS { tps_a.set(*i, TransportParameter::Integer(12)); } diff --git a/neqo-transport/src/version.rs b/neqo-transport/src/version.rs index 13db0bf024..16d394532d 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -23,6 +23,7 @@ pub enum Version { } impl Version { + #[must_use] pub const fn wire_version(self) -> WireVersion { match self { Self::Version2 => 0x6b33_43cf, @@ -94,6 +95,7 @@ impl Version { } /// Determine if `self` can be upgraded to `other` compatibly. + #[must_use] pub fn is_compatible(self, other: Self) -> bool { self == other || matches!( @@ -102,6 +104,7 @@ impl Version { ) } + #[must_use] pub fn all() -> Vec { vec![ Self::Version2, @@ -176,15 +179,20 @@ pub struct VersionConfig { } impl VersionConfig { + /// # Panics + /// When `all` does not include `initial`. 
+ #[must_use] pub fn new(initial: Version, all: Vec) -> Self { assert!(all.contains(&initial)); Self { initial, all } } + #[must_use] pub fn initial(&self) -> Version { self.initial } + #[must_use] pub fn all(&self) -> &[Version] { &self.all } diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index a43f91e3fe..2349092d9e 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] #![allow(unused)] diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 4cbf57f405..6f8aa393af 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![warn(clippy::pedantic)] mod common; @@ -133,6 +132,7 @@ fn reorder_server_initial() { } /// Overflow the crypto buffer. +#[allow(clippy::similar_names)] // For ..._scid and ..._dcid, which are fine. #[test] fn overflow_crypto() { let mut client = new_client( diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index d7a537159b..a0a404a89a 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] use std::{ops::Range, time::Duration}; diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 93759c7df9..7245337aa1 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -4,7 +4,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index d6c9c2df95..1858d47128 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] mod common; diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 18bdb114be..538bd7c094 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -18,5 +18,3 @@ neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } qlog = "0.12" -[features] -deny-warnings = [] diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 923aa76c15..e676366e32 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -4,13 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. use std::{ cell::{OnceCell, RefCell}, cmp::max, convert::TryFrom, + fmt::Display, io::{Cursor, Result, Write}, mem, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, @@ -55,7 +56,7 @@ pub const ANTI_REPLAY_WINDOW: Duration = Duration::from_millis(10); fn earlier() -> Instant { // Note: It is only OK to have a different base time for each thread because our tests are // single-threaded. 
- thread_local!(static EARLIER: OnceCell = OnceCell::new()); + thread_local!(static EARLIER: OnceCell = const { OnceCell::new() }); fixture_init(); EARLIER.with(|b| *b.get_or_init(Instant::now)) } @@ -363,7 +364,7 @@ pub fn split_datagram(d: &Datagram) -> (Datagram, Option) { ) } -#[derive(Clone)] +#[derive(Clone, Default)] pub struct SharedVec { buf: Arc>>>, } @@ -377,9 +378,9 @@ impl Write for SharedVec { } } -impl ToString for SharedVec { - fn to_string(&self) -> String { - String::from_utf8(self.buf.lock().unwrap().clone().into_inner()).unwrap() +impl Display for SharedVec { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&String::from_utf8(self.buf.lock().unwrap().clone().into_inner()).unwrap()) } } @@ -395,9 +396,7 @@ pub fn new_neqo_qlog() -> (NeqoQlog, SharedVec) { let mut trace = new_trace(Role::Client); // Set reference time to 0.0 for testing. trace.common_fields.as_mut().unwrap().reference_time = Some(0.0); - let buf = SharedVec { - buf: Arc::default(), - }; + let buf = SharedVec::default(); let contents = buf.clone(); let streamer = QlogStreamer::new( qlog::QLOG_VERSION.to_string(), From 0018873ed2b0b3e6946b1e66e6e3d3aa50b3f9b3 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 14 Feb 2024 12:00:12 +0200 Subject: [PATCH 168/321] ci: Benchmark without a qlog (#1648) * ci: Benchmark without a qlog To remove those overheads from the profile. * Tweaks to workflow * Try and use cargo flamegraph * LD_LIBRARY_PATH * Use explicit perf command * Export more perf data * Hack * Finalize * Remove hack, define `bench` feature in `Cargo.toml` * Don't run tests with feature `bench` * See if a separate artifact can be loaded into profiler.firefox.com * Done * Benchmark a proper transfer * Do more * Build before prep * Retry * Set server db. Unprep on workflow cancel. 
* Again * Again * pid * bg * true * killall * Re-enable all * Properly cache results * Finalize --------- Signed-off-by: Lars Eggert --- .github/workflows/bench.yml | 117 ++++++++++++++++++++++++++++-------- .github/workflows/check.yml | 10 +-- test-fixture/Cargo.toml | 2 + test-fixture/src/lib.rs | 7 ++- 4 files changed, 105 insertions(+), 31 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index a29abd39a4..a1ea61cf1e 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -3,6 +3,7 @@ on: workflow_call: env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true + CARGO_PROFILE_RELEASE_DEBUG: true CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 TOOLCHAIN: nightly @@ -22,53 +23,93 @@ jobs: toolchain: $TOOLCHAIN components: rustfmt - - name: Configure Rust - run: echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment" >> "$GITHUB_ENV" - - name: Install sccache uses: mozilla-actions/sccache-action@v0.0.4 - - name: Enable sccache + - name: Configure Rust run: | + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment" >> "$GITHUB_ENV" echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + cargo install flamegraph - name: Checkout uses: actions/checkout@v4 + - name: Fetch NSS and NSPR + run: | + hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" + hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR" + echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" + echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" + env: + NSS_DIR: ${{ github.workspace }}/nss + NSPR_DIR: ${{ github.workspace }}/nspr + - name: Build - run: cargo +$TOOLCHAIN bench --features ci,bench --no-run + run: | + cargo +$TOOLCHAIN bench --features bench --no-run + cargo +$TOOLCHAIN build --release --bin neqo-client --bin neqo-server + echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" + + - name: Download criterion results + uses: actions/cache/restore@v4 + with: + 
path: ./target/criterion + key: neqo-${{ runner.os }}-main-criterion + + - name: Download github-action-benchmark results + uses: actions/cache/restore@v4 + with: + path: ./cache + key: neqo-${{ runner.os }}-main-github-action-benchmark # Disable turboboost, hyperthreading and use performance governor. - name: Prepare machine - run: sudo /root/bin/prep.sh + run: | + sudo /root/bin/prep.sh + echo "PERF_CMD=record -o perf.data -F997 --call-graph dwarf,16384 -g" >> "$GITHUB_ENV" # Pin the benchmark run to core 0 and run all benchmarks at elevated priority. - - name: Benchmark + - name: Run cargo bench run: | - nice -n -20 taskset -c 0 \ - cargo +$TOOLCHAIN bench --features ci,bench | tee output.txt + taskset -c 0 nice -n -20 \ + cargo +$TOOLCHAIN bench --features bench | tee output.txt # Pin the transfer benchmark to core 0 and run it at elevated priority inside perf. - - name: Perf transfer benchmark + # Work around https://github.com/flamegraph-rs/flamegraph/issues/248 by passing explicit perf arguments. + - name: Profile cargo bench transfer + run: | + taskset -c 0 nice -n -20 \ + cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ + --bench --exact "Run multiple transfers with varying seeds" + + - name: Profile client/server transfer run: | - nice -n -20 taskset -c 0 \ - perf record -F997 --call-graph=lbr -o perf.data \ - cargo +$TOOLCHAIN bench --features ci,bench --bench transfer + { mkdir server; \ + cd server; \ + taskset -c 0 nice -n -20 \ + cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" \ + --bin neqo-server -- --db ../test-fixture/db $HOST:4433 || true; } & + mkdir client; \ + cd client; \ + time taskset -c 1 nice -n -20 \ + cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" \ + --bin neqo-client -- --output-dir . 
https://$HOST:4433/$SIZE + killall -INT neqo-server + cd ${{ github.workspace }} + [ "$(wc -c < client/"$SIZE")" -eq "$SIZE" ] || exit 1 + env: + HOST: localhost + SIZE: 1073741824 # 1 GB # Re-enable turboboost, hyperthreading and use powersave governor. - name: Restore machine run: sudo /root/bin/unprep.sh - if: success() || failure() - - - name: Download previous benchmark results - uses: actions/cache@v4 - with: - path: ./cache - key: ${{ runner.os }}-benchmark + if: success() || failure() || cancelled() # TODO: Wait for this action to be allowlisted. And then figure out how to only upload - # benchmark data when the main branch is being updated. + # benchmark data when the main branch is being updated (e.g., if: ${{ github.ref == "refs/heads/main" }}) # - name: Store current benchmark results # uses: benchmark-action/github-action-benchmark@v1 # with: @@ -80,14 +121,38 @@ jobs: # comment-on-alert: true # summary-always: true - - name: Convert perf data + - name: Convert for profiler.firefox.com run: | - perf script -i perf.data -F +pid | zstd > perf.ff.data.zst - zstd perf.data + perf script -i perf.data -F +pid > transfer.perf & + perf script -i client/perf.data -F +pid > client.perf & + perf script -i server/perf.data -F +pid > server.perf & + wait + mv flamegraph.svg transfer.svg + mv client/flamegraph.svg client.svg + mv server/flamegraph.svg server.svg + rm neqo.svg + + - name: Upload criterion results + if: github.ref == 'refs/heads/main' + uses: actions/cache/save@v4 + with: + path: ./target/criterion + key: neqo-${{ runner.os }}-main-criterion + + - name: Upload github-action-benchmark results + if: github.ref == 'refs/heads/main' + uses: actions/cache/save@v4 + with: + path: ./cache + key: neqo-${{ runner.os }}-main-github-action-benchmark - name: Archive perf data uses: actions/upload-artifact@v4 with: name: ${{ github.head_ref || github.ref_name }}-perf - path: "*.zst" - compression-level: 0 + path: | + *.svg + *.perf + output.txt + target/criterion 
+ compression-level: 9 diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 138d7482d0..dec4e9897d 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -135,14 +135,16 @@ jobs: LIB_DIR: ${{ matrix.type == 'release' && 'Release' || 'Debug' }} - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --all-targets --features ci,bench --no-fail-fast --lcov --output-path lcov.info + run: | + cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --all-targets --features ci --no-fail-fast --lcov --output-path lcov.info + cargo +${{ matrix.rust-toolchain }} bench --features bench --no-run - name: Run client/server transfer run: | - cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --features ci,bench --bin neqo-client --bin neqo-server - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --features ci,bench --bin neqo-server -- $HOST:4433 & + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-server -- $HOST:4433 & PID=$! - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --features ci,bench --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-client -- --output-dir . 
https://$HOST:4433/$SIZE kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 538bd7c094..bb3ab14953 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -18,3 +18,5 @@ neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } qlog = "0.12" +[features] +bench = [] diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index e676366e32..aa0b3ea371 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -393,10 +393,15 @@ impl Display for SharedVec { /// Panics if the log cannot be created. #[must_use] pub fn new_neqo_qlog() -> (NeqoQlog, SharedVec) { + let buf = SharedVec::default(); + + if cfg!(feature = "bench") { + return (NeqoQlog::disabled(), buf); + } + let mut trace = new_trace(Role::Client); // Set reference time to 0.0 for testing. trace.common_fields.as_mut().unwrap().reference_time = Some(0.0); - let buf = SharedVec::default(); let contents = buf.clone(); let streamer = QlogStreamer::new( qlog::QLOG_VERSION.to_string(), From d55db5e6eda74f98f6165591965b5631fb224a5f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 14 Feb 2024 12:00:48 +0200 Subject: [PATCH 169/321] chore: Declutter, `clippy.toml` -> `.clippy.toml` (#1654) --- clippy.toml => .clippy.toml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename clippy.toml => .clippy.toml (100%) diff --git a/clippy.toml b/.clippy.toml similarity index 100% rename from clippy.toml rename to .clippy.toml From 6145d04649ce4885eb4b7eaaec189ea014bceb53 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 15 Feb 2024 12:20:20 +0200 Subject: [PATCH 170/321] ci: Use `lbr` to generate a perf call graph (#1655) For @jesup, to see if this is better than `dwarf` --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index a1ea61cf1e..4e4bd2da96 100644 --- 
a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -68,7 +68,7 @@ jobs: - name: Prepare machine run: | sudo /root/bin/prep.sh - echo "PERF_CMD=record -o perf.data -F997 --call-graph dwarf,16384 -g" >> "$GITHUB_ENV" + echo "PERF_CMD=record -o perf.data -F997 --call-graph lbr -g" >> "$GITHUB_ENV" # Pin the benchmark run to core 0 and run all benchmarks at elevated priority. - name: Run cargo bench From 484730eea70e41e5b63396df3fe212e66789adfa Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 15 Feb 2024 14:05:45 +0200 Subject: [PATCH 171/321] ci: Fix cache updates (#1659) * ci: Fix cache updates * Remove IDs --- .github/workflows/bench.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 4e4bd2da96..866b4ff62f 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -56,13 +56,15 @@ jobs: uses: actions/cache/restore@v4 with: path: ./target/criterion - key: neqo-${{ runner.os }}-main-criterion + key: neqo-${{ runner.os }}-${{ github.run_id}} + restore-keys: neqo-${{ runner.os }}-main-criterion - name: Download github-action-benchmark results uses: actions/cache/restore@v4 with: path: ./cache - key: neqo-${{ runner.os }}-main-github-action-benchmark + key: neqo-${{ runner.os}}-${{ github.run_id}} + restore-keys: neqo-${{ runner.os }}-main-action-benchmark # Disable turboboost, hyperthreading and use performance governor. 
- name: Prepare machine @@ -144,7 +146,7 @@ jobs: uses: actions/cache/save@v4 with: path: ./cache - key: neqo-${{ runner.os }}-main-github-action-benchmark + key: neqo-${{ runner.os }}-main-action-benchmark - name: Archive perf data uses: actions/upload-artifact@v4 From 6c3a719f88a74451c14326b7cb1de2240a2febd3 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 15 Feb 2024 23:26:58 +1100 Subject: [PATCH 172/321] Track sent packets in a Vec --- neqo-transport/src/cc/classic_cc.rs | 4 +- neqo-transport/src/cc/mod.rs | 2 +- neqo-transport/src/cc/tests/cubic.rs | 2 +- neqo-transport/src/cc/tests/new_reno.rs | 2 +- neqo-transport/src/connection/mod.rs | 8 +- neqo-transport/src/path.rs | 4 +- neqo-transport/src/qlog.rs | 2 +- .../src/{recovery.rs => recovery/mod.rs} | 180 +++------ neqo-transport/src/recovery/sent.rs | 361 ++++++++++++++++++ neqo-transport/src/recovery/token.rs | 63 +++ neqo-transport/src/sender.rs | 2 +- neqo-transport/src/tracking.rs | 108 ------ 12 files changed, 484 insertions(+), 254 deletions(-) rename neqo-transport/src/{recovery.rs => recovery/mod.rs} (91%) create mode 100644 neqo-transport/src/recovery/sent.rs create mode 100644 neqo-transport/src/recovery/token.rs diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 6f4a01d795..baf98cca46 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -18,9 +18,9 @@ use crate::{ cc::MAX_DATAGRAM_SIZE, packet::PacketNumber, qlog::{self, QlogMetric}, + recovery::SentPacket, rtt::RttEstimate, sender::PACING_BURST_SIZE, - tracking::SentPacket, }; #[rustfmt::skip] // to keep `::` and thus prevent conflict with `crate::qlog` use ::qlog::events::{quic::CongestionStateUpdated, EventData}; @@ -555,8 +555,8 @@ mod tests { CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, }, packet::{PacketNumber, PacketType}, + recovery::SentPacket, rtt::RttEstimate, - tracking::SentPacket, }; const PTO: Duration 
= Duration::from_millis(100); diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index a1a43bd157..9d3989b6bc 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -15,7 +15,7 @@ use std::{ use neqo_common::qlog::NeqoQlog; -use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; +use crate::{path::PATH_MTU_V6, recovery::SentPacket, rtt::RttEstimate, Error}; mod classic_cc; mod cubic; diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index 0c82e47817..1861cdff46 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -25,8 +25,8 @@ use crate::{ CongestionControl, MAX_DATAGRAM_SIZE, MAX_DATAGRAM_SIZE_F64, }, packet::PacketType, + recovery::SentPacket, rtt::RttEstimate, - tracking::SentPacket, }; const RTT: Duration = Duration::from_millis(100); diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index a73844a755..0736f398fb 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -17,8 +17,8 @@ use crate::{ MAX_DATAGRAM_SIZE, }, packet::PacketType, + recovery::SentPacket, rtt::RttEstimate, - tracking::SentPacket, }; const PTO: Duration = Duration::from_millis(100); diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 749cf315d3..1e53447007 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -45,7 +45,7 @@ use crate::{ path::{Path, PathRef, Paths}, qlog, quic_datagrams::{DatagramTracking, QuicDatagrams}, - recovery::{LossRecovery, RecoveryToken, SendProfile}, + recovery::{LossRecovery, RecoveryToken, SendProfile, SentPacket}, recv_stream::RecvStreamStats, rtt::GRANULARITY, send_stream::SendStream, @@ -56,7 +56,7 @@ use crate::{ self, TransportParameter, TransportParameterId, TransportParameters, TransportParametersHandler, }, - tracking::{AckTracker, 
PacketNumberSpace, SentPacket}, + tracking::{AckTracker, PacketNumberSpace}, version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; @@ -2884,12 +2884,12 @@ impl Connection { fn handle_ack( &mut self, space: PacketNumberSpace, - largest_acknowledged: u64, + largest_acknowledged: PacketNumber, ack_ranges: R, ack_delay: u64, now: Instant, ) where - R: IntoIterator> + Debug, + R: IntoIterator> + Debug, R::IntoIter: ExactSizeIterator, { qinfo!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 897763d7de..782fe896c0 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -26,11 +26,11 @@ use crate::{ cid::{ConnectionId, ConnectionIdRef, ConnectionIdStore, RemoteConnectionIdEntry}, frame::{FRAME_TYPE_PATH_CHALLENGE, FRAME_TYPE_PATH_RESPONSE, FRAME_TYPE_RETIRE_CONNECTION_ID}, packet::PacketBuilder, - recovery::RecoveryToken, + recovery::{RecoveryToken, SentPacket}, rtt::RttEstimate, sender::PacketSender, stats::FrameStats, - tracking::{PacketNumberSpace, SentPacket}, + tracking::PacketNumberSpace, Stats, }; diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index f6d3f4e1e2..b59df0e37c 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -29,9 +29,9 @@ use crate::{ frame::{CloseError, Frame}, packet::{DecryptedPacket, PacketNumber, PacketType, PublicPacket}, path::PathRef, + recovery::SentPacket, stream_id::StreamType as NeqoStreamType, tparams::{self, TransportParametersHandler}, - tracking::SentPacket, version::{Version, VersionConfig, WireVersion}, }; diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery/mod.rs similarity index 91% rename from neqo-transport/src/recovery.rs rename to neqo-transport/src/recovery/mod.rs index ec1d7e4a3d..7f362e4104 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery/mod.rs @@ -6,33 +6,29 @@ // Tracking of sent packets 
and detecting their loss. -#![deny(clippy::pedantic)] +mod sent; +mod token; use std::{ cmp::{max, min}, - collections::BTreeMap, convert::TryFrom, - mem, ops::RangeInclusive, time::{Duration, Instant}, }; use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn}; +pub use sent::SentPacket; +use sent::SentPackets; use smallvec::{smallvec, SmallVec}; +pub use token::{RecoveryToken, StreamRecoveryToken}; use crate::{ - ackrate::AckRate, - cid::ConnectionIdEntry, - crypto::CryptoRecoveryToken, packet::PacketNumber, path::{Path, PathRef}, qlog::{self, QlogMetric}, - quic_datagrams::DatagramTracking, rtt::RttEstimate, - send_stream::SendStreamRecoveryToken, stats::{Stats, StatsCell}, - stream_id::{StreamId, StreamType}, - tracking::{AckToken, PacketNumberSpace, PacketNumberSpaceSet, SentPacket}, + tracking::{PacketNumberSpace, PacketNumberSpaceSet}, }; pub(crate) const PACKET_THRESHOLD: u64 = 3; @@ -51,54 +47,6 @@ pub(crate) const MIN_OUTSTANDING_UNACK: usize = 16; /// The scale we use for the fast PTO feature. pub const FAST_PTO_SCALE: u8 = 100; -#[derive(Debug, Clone)] -#[allow(clippy::module_name_repetitions)] -pub enum StreamRecoveryToken { - Stream(SendStreamRecoveryToken), - ResetStream { - stream_id: StreamId, - }, - StopSending { - stream_id: StreamId, - }, - - MaxData(u64), - DataBlocked(u64), - - MaxStreamData { - stream_id: StreamId, - max_data: u64, - }, - StreamDataBlocked { - stream_id: StreamId, - limit: u64, - }, - - MaxStreams { - stream_type: StreamType, - max_streams: u64, - }, - StreamsBlocked { - stream_type: StreamType, - limit: u64, - }, -} - -#[derive(Debug, Clone)] -#[allow(clippy::module_name_repetitions)] -pub enum RecoveryToken { - Stream(StreamRecoveryToken), - Ack(AckToken), - Crypto(CryptoRecoveryToken), - HandshakeDone, - KeepAlive, // Special PING. 
- NewToken(usize), - NewConnectionId(ConnectionIdEntry<[u8; 16]>), - RetireConnectionId(u64), - AckFrequency(AckRate), - Datagram(DatagramTracking), -} - /// `SendProfile` tells a sender how to send packets. #[derive(Debug)] pub struct SendProfile { @@ -183,7 +131,8 @@ pub(crate) struct LossRecoverySpace { /// This might be less than the number of ACK-eliciting packets, /// because PTO packets don't count. in_flight_outstanding: usize, - sent_packets: BTreeMap, + /// The packets that we have sent and are tracking. + sent_packets: SentPackets, /// The time that the first out-of-order packet was sent. /// This is `None` if there were no out-of-order packets detected. /// When set to `Some(T)`, time-based loss detection should be enabled. @@ -198,7 +147,7 @@ impl LossRecoverySpace { largest_acked_sent_time: None, last_ack_eliciting: None, in_flight_outstanding: 0, - sent_packets: BTreeMap::default(), + sent_packets: SentPackets::default(), first_ooo_time: None, } } @@ -223,9 +172,9 @@ impl LossRecoverySpace { pub fn pto_packets(&mut self, count: usize) -> impl Iterator { self.sent_packets .iter_mut() - .filter_map(|(pn, sent)| { + .filter_map(|sent| { if sent.pto() { - qtrace!("PTO: marking packet {} lost ", pn); + qtrace!("PTO: marking packet {} lost ", sent.pn); Some(&*sent) } else { None @@ -267,7 +216,7 @@ impl LossRecoverySpace { // always. See `LossRecoverySpace::pto_base_time()` for details. self.last_ack_eliciting = Some(sent_packet.time_sent); } - self.sent_packets.insert(sent_packet.pn, sent_packet); + self.sent_packets.track(sent_packet); } /// If we are only sending ACK frames, send a PING frame after 2 PTOs so that @@ -297,46 +246,31 @@ impl LossRecoverySpace { } } - /// Remove all acknowledged packets. + /// Remove all newly acknowledged packets. /// Returns all the acknowledged packets, with the largest packet number first. /// ...and a boolean indicating if any of those packets were ack-eliciting. 
/// This operates more efficiently because it assumes that the input is sorted /// in the order that an ACK frame is (from the top). fn remove_acked(&mut self, acked_ranges: R, stats: &mut Stats) -> (Vec, bool) where - R: IntoIterator>, + R: IntoIterator>, R::IntoIter: ExactSizeIterator, { - let acked_ranges = acked_ranges.into_iter(); - let mut keep = Vec::with_capacity(acked_ranges.len()); - - let mut acked = Vec::new(); let mut eliciting = false; + let mut acked = Vec::new(); for range in acked_ranges { - let first_keep = *range.end() + 1; - if let Some((&first, _)) = self.sent_packets.range(range).next() { - let mut tail = self.sent_packets.split_off(&first); - if let Some((&next, _)) = tail.range(first_keep..).next() { - keep.push(tail.split_off(&next)); - } - for (_, p) in tail.into_iter().rev() { - self.remove_packet(&p); - eliciting |= p.ack_eliciting(); - if p.lost() { - stats.late_ack += 1; - } - if p.pto_fired() { - stats.pto_ack += 1; - } - acked.push(p); - } - } + acked.extend(self.sent_packets.take_range(range)); } - - for mut k in keep.into_iter().rev() { - self.sent_packets.append(&mut k); + for p in &acked { + self.remove_packet(p); + eliciting |= p.ack_eliciting(); + if p.lost() { + stats.late_ack += 1; + } + if p.pto_fired() { + stats.pto_ack += 1; + } } - (acked, eliciting) } @@ -345,12 +279,12 @@ impl LossRecoverySpace { /// and when keys are dropped. fn remove_ignored(&mut self) -> impl Iterator { self.in_flight_outstanding = 0; - mem::take(&mut self.sent_packets).into_values() + std::mem::take(&mut self.sent_packets).drain_all() } /// Remove the primary path marking on any packets this is tracking. fn migrate(&mut self) { - for pkt in self.sent_packets.values_mut() { + for pkt in self.sent_packets.iter_mut() { pkt.clear_primary_path(); } } @@ -361,23 +295,8 @@ impl LossRecoverySpace { /// might remove all in-flight packets and stop sending probes. #[allow(clippy::option_if_let_else)] // Hard enough to read as-is. 
fn remove_old_lost(&mut self, now: Instant, cd: Duration) { - let mut it = self.sent_packets.iter(); - // If the first item is not expired, do nothing. - if it.next().map_or(false, |(_, p)| p.expired(now, cd)) { - // Find the index of the first unexpired packet. - let to_remove = if let Some(first_keep) = - it.find_map(|(i, p)| if p.expired(now, cd) { None } else { Some(*i) }) - { - // Some packets haven't expired, so keep those. - let keep = self.sent_packets.split_off(&first_keep); - mem::replace(&mut self.sent_packets, keep) - } else { - // All packets are expired. - mem::take(&mut self.sent_packets) - }; - for (_, p) in to_remove { - self.remove_packet(&p); - } + for p in self.sent_packets.remove_expired(now, cd) { + self.remove_packet(&p); } } @@ -404,27 +323,24 @@ impl LossRecoverySpace { let largest_acked = self.largest_acked; - // Lost for retrans/CC purposes - let mut lost_pns = SmallVec::<[_; 8]>::new(); - - for (pn, packet) in self + for packet in self .sent_packets .iter_mut() // BTreeMap iterates in order of ascending PN - .take_while(|(&k, _)| k < largest_acked.unwrap_or(PacketNumber::MAX)) + .take_while(|p| p.pn < largest_acked.unwrap_or(PacketNumber::MAX)) { // Packets sent before now - loss_delay are deemed lost. 
if packet.time_sent + loss_delay <= now { qtrace!( "lost={}, time sent {:?} is before lost_delay {:?}", - pn, + packet.pn, packet.time_sent, loss_delay ); - } else if largest_acked >= Some(*pn + PACKET_THRESHOLD) { + } else if largest_acked >= Some(packet.pn + PACKET_THRESHOLD) { qtrace!( "lost={}, is >= {} from largest acked {:?}", - pn, + packet.pn, PACKET_THRESHOLD, largest_acked ); @@ -437,11 +353,9 @@ impl LossRecoverySpace { }; if packet.declare_lost(now) { - lost_pns.push(*pn); + lost_packets.push(packet.clone()); } } - - lost_packets.extend(lost_pns.iter().map(|pn| self.sent_packets[pn].clone())); } } @@ -672,13 +586,13 @@ impl LossRecovery { &mut self, primary_path: &PathRef, pn_space: PacketNumberSpace, - largest_acked: u64, + largest_acked: PacketNumber, acked_ranges: R, ack_delay: Duration, now: Instant, ) -> (Vec, Vec) where - R: IntoIterator>, + R: IntoIterator>, R::IntoIter: ExactSizeIterator, { qdebug!( @@ -1035,7 +949,7 @@ mod tests { use crate::{ cc::CongestionControlAlgorithm, cid::{ConnectionId, ConnectionIdEntry}, - packet::PacketType, + packet::{PacketNumber, PacketType}, path::{Path, PathRef}, rtt::RttEstimate, stats::{Stats, StatsCell}, @@ -1062,8 +976,8 @@ mod tests { pub fn on_ack_received( &mut self, pn_space: PacketNumberSpace, - largest_acked: u64, - acked_ranges: Vec>, + largest_acked: PacketNumber, + acked_ranges: Vec>, ack_delay: Duration, now: Instant, ) -> (Vec, Vec) { @@ -1232,8 +1146,8 @@ mod tests { ); } - fn add_sent(lrs: &mut LossRecoverySpace, packet_numbers: &[u64]) { - for &pn in packet_numbers { + fn add_sent(lrs: &mut LossRecoverySpace, max_pn: PacketNumber) { + for pn in 0..=max_pn { lrs.on_packet_sent(SentPacket::new( PacketType::Short, pn, @@ -1245,15 +1159,15 @@ mod tests { } } - fn match_acked(acked: &[SentPacket], expected: &[u64]) { - assert!(acked.iter().map(|p| &p.pn).eq(expected)); + fn match_acked(acked: &[SentPacket], expected: &[PacketNumber]) { + assert_eq!(acked.iter().map(|p| p.pn).collect::>(), expected); 
} #[test] fn remove_acked() { let mut lrs = LossRecoverySpace::new(PacketNumberSpace::ApplicationData); let mut stats = Stats::default(); - add_sent(&mut lrs, &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + add_sent(&mut lrs, 10); let (acked, _) = lrs.remove_acked(vec![], &mut stats); assert!(acked.is_empty()); let (acked, _) = lrs.remove_acked(vec![7..=8, 2..=4], &mut stats); @@ -1261,7 +1175,7 @@ mod tests { let (acked, _) = lrs.remove_acked(vec![8..=11], &mut stats); match_acked(&acked, &[10, 9]); let (acked, _) = lrs.remove_acked(vec![0..=2], &mut stats); - match_acked(&acked, &[1]); + match_acked(&acked, &[1, 0]); let (acked, _) = lrs.remove_acked(vec![5..=6], &mut stats); match_acked(&acked, &[6, 5]); } @@ -1597,7 +1511,7 @@ mod tests { lr.on_packet_sent(SentPacket::new( PacketType::Initial, - 1, + 0, now(), true, Vec::new(), diff --git a/neqo-transport/src/recovery/sent.rs b/neqo-transport/src/recovery/sent.rs new file mode 100644 index 0000000000..0da4bfdc0e --- /dev/null +++ b/neqo-transport/src/recovery/sent.rs @@ -0,0 +1,361 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// A collection for sent packets. + +use std::{ + cmp::min, + convert::TryFrom, + ops::RangeInclusive, + time::{Duration, Instant}, +}; + +use crate::{ + packet::{PacketNumber, PacketType}, + recovery::RecoveryToken, +}; + +#[derive(Debug, Clone)] +pub struct SentPacket { + pub pt: PacketType, + pub pn: PacketNumber, + ack_eliciting: bool, + pub time_sent: Instant, + primary_path: bool, + pub tokens: Vec, + + time_declared_lost: Option, + /// After a PTO, this is true when the packet has been released. 
+ pto: bool, + + pub size: usize, +} + +impl SentPacket { + pub fn new( + pt: PacketType, + pn: PacketNumber, + time_sent: Instant, + ack_eliciting: bool, + tokens: Vec, + size: usize, + ) -> Self { + Self { + pt, + pn, + time_sent, + ack_eliciting, + primary_path: true, + tokens, + time_declared_lost: None, + pto: false, + size, + } + } + + /// Returns `true` if the packet will elicit an ACK. + pub fn ack_eliciting(&self) -> bool { + self.ack_eliciting + } + + /// Returns `true` if the packet was sent on the primary path. + pub fn on_primary_path(&self) -> bool { + self.primary_path + } + + /// Clears the flag that had this packet on the primary path. + /// Used when migrating to clear out state. + pub fn clear_primary_path(&mut self) { + self.primary_path = false; + } + + /// Whether the packet has been declared lost. + pub fn lost(&self) -> bool { + self.time_declared_lost.is_some() + } + + /// Whether accounting for the loss or acknowledgement in the + /// congestion controller is pending. + /// Returns `true` if the packet counts as being "in flight", + /// and has not previously been declared lost. + /// Note that this should count packets that contain only ACK and PADDING, + /// but we don't send PADDING, so we don't track that. + pub fn cc_outstanding(&self) -> bool { + self.ack_eliciting() && self.on_primary_path() && !self.lost() + } + + /// Whether the packet should be tracked as in-flight. + pub fn cc_in_flight(&self) -> bool { + self.ack_eliciting() && self.on_primary_path() + } + + /// Declare the packet as lost. Returns `true` if this is the first time. + pub fn declare_lost(&mut self, now: Instant) -> bool { + if self.lost() { + false + } else { + self.time_declared_lost = Some(now); + true + } + } + + /// Ask whether this tracked packet has been declared lost for long enough + /// that it can be expired and no longer tracked. 
+ pub fn expired(&self, now: Instant, expiration_period: Duration) -> bool { + self.time_declared_lost + .map_or(false, |loss_time| (loss_time + expiration_period) <= now) + } + + /// Whether the packet contents were cleared out after a PTO. + pub fn pto_fired(&self) -> bool { + self.pto + } + + /// On PTO, we need to get the recovery tokens so that we can ensure that + /// the frames we sent can be sent again in the PTO packet(s). Do that just once. + pub fn pto(&mut self) -> bool { + if self.pto || self.lost() { + false + } else { + self.pto = true; + true + } + } +} + +/// A collection for packets that we have sent that haven't been acknowledged. +#[derive(Debug, Default)] +pub struct SentPackets { + /// The collection. + packets: Vec>, + /// The packet number of the first item in the collection. + offset: PacketNumber, + /// The number of `Some` values in the packet. This is cached to keep things squeaky-fast. + len: usize, +} + +impl SentPackets { + pub fn len(&self) -> usize { + self.len + } + + pub fn track(&mut self, packet: SentPacket) { + if self.offset + PacketNumber::try_from(self.packets.len()).unwrap() != packet.pn { + assert_eq!( + self.len, 0, + "packet number skipping only supported for the first packet in a space" + ); + self.offset = packet.pn; + } + self.len += 1; + self.packets.push(Some(packet)); + } + + pub fn iter_mut(&mut self) -> impl Iterator { + self.packets.iter_mut().filter_map(Option::as_mut) + } + + /// Take values from a specified range of packet numbers. + /// Note that this will not remove values unless the iterator is consumed. + /// The values returned will be reversed, so that the most recent packet appears first. + /// This is because ACK frames arrive with ranges starting from the largest acknowledged + /// and we want to match that. 
+ pub fn take_range( + &mut self, + r: RangeInclusive, + ) -> impl Iterator + '_ { + let start = usize::try_from((*r.start()).saturating_sub(self.offset)).unwrap(); + let end = min( + usize::try_from((*r.end() + 1).saturating_sub(self.offset)).unwrap(), + self.packets.len(), + ); + + let len_ref = &mut self.len; + self.packets[start..end] + .iter_mut() + .rev() + .filter_map(Option::take) + .inspect(move |_| { + // Decrement the length for any values that are taken. + *len_ref -= 1; + }) + } + + /// Empty out the packets, but keep the offset. + pub fn drain_all(&mut self) -> impl Iterator { + self.len = 0; + self.offset += u64::try_from(self.packets.len()).unwrap(); + std::mem::take(&mut self.packets).into_iter().flatten() + } + + /// See `LossRecoverySpace::remove_old_lost` for details on `now` and `cd`. + pub fn remove_expired( + &mut self, + now: Instant, + cd: Duration, + ) -> impl Iterator { + let mut count = 0; + // Find the first unexpired packet and only keep from that one onwards. + for (i, p) in self.packets.iter().enumerate() { + if p.as_ref().map_or(false, |p| !p.expired(now, cd)) { + let mut other = self.packets.split_off(i); + self.len -= count; + self.offset += u64::try_from(i).unwrap(); + std::mem::swap(&mut self.packets, &mut other); + return other.into_iter().flatten(); + } + // Count `Some` values that we are removing. 
+ count += usize::from(p.is_some()); + } + + self.len = 0; + self.offset += u64::try_from(self.packets.len()).unwrap(); + std::mem::take(&mut self.packets).into_iter().flatten() + } +} + +#[cfg(test)] +mod tests { + use std::{ + cell::OnceCell, + convert::TryFrom, + time::{Duration, Instant}, + }; + + use super::{SentPacket, SentPackets}; + use crate::packet::{PacketNumber, PacketType}; + + const PACKET_GAP: Duration = Duration::from_secs(1); + fn start_time() -> Instant { + thread_local!(static STARTING_TIME: OnceCell = OnceCell::new()); + STARTING_TIME.with(|t| *t.get_or_init(Instant::now)) + } + + fn pkt(n: u32) -> SentPacket { + SentPacket::new( + PacketType::Short, + PacketNumber::from(n), + start_time() + (PACKET_GAP * n), + true, + Vec::new(), + 100, + ) + } + + fn pkts() -> SentPackets { + let mut pkts = SentPackets::default(); + pkts.track(pkt(0)); + pkts.track(pkt(1)); + pkts.track(pkt(2)); + assert_eq!(pkts.len(), 3); + pkts + } + + trait HasPacketNumber { + fn pn(&self) -> PacketNumber; + } + impl HasPacketNumber for SentPacket { + fn pn(&self) -> PacketNumber { + self.pn + } + } + impl HasPacketNumber for &'_ SentPacket { + fn pn(&self) -> PacketNumber { + self.pn + } + } + impl HasPacketNumber for &'_ mut SentPacket { + fn pn(&self) -> PacketNumber { + self.pn + } + } + + fn remove_one(pkts: &mut SentPackets, idx: PacketNumber) { + assert_eq!(pkts.len(), 3); + let mut it = pkts.take_range(idx..=idx); + assert_eq!(idx, it.next().unwrap().pn()); + assert!(it.next().is_none()); + std::mem::drop(it); + assert_eq!(pkts.len(), 2); + } + + fn assert_zero_and_two<'a, 'b: 'a>( + mut it: impl Iterator + 'a, + ) { + assert_eq!(it.next().unwrap().pn(), 0); + assert_eq!(it.next().unwrap().pn(), 2); + assert!(it.next().is_none()); + } + + #[test] + fn iterate_skipped() { + let mut pkts = pkts(); + for (i, p) in pkts.packets.iter().enumerate() { + assert_eq!(i, usize::try_from(p.as_ref().unwrap().pn).unwrap()); + } + remove_one(&mut pkts, 1); + + // Validate the 
merged result multiple ways. + assert_zero_and_two(pkts.iter_mut()); + + { + // Reverse the expectations here as this iterator reverses its output. + let mut it = pkts.take_range(0..=2); + assert_eq!(it.next().unwrap().pn(), 2); + assert_eq!(it.next().unwrap().pn(), 0); + assert!(it.next().is_none()); + }; + + // The None values are still there in this case, so offset is 0. + assert_eq!(pkts.offset, 0); + assert_eq!(pkts.packets.len(), 3); + assert_eq!(pkts.len(), 0); + } + + #[test] + fn drain() { + let mut pkts = pkts(); + remove_one(&mut pkts, 1); + + assert_zero_and_two(pkts.drain_all()); + assert_eq!(pkts.offset, 3); + assert_eq!(pkts.len(), 0); + } + + #[test] + fn remove_expired() { + let mut pkts = pkts(); + remove_one(&mut pkts, 0); + + for p in pkts.iter_mut() { + p.declare_lost(p.time_sent); // just to keep things simple. + } + + // Expire up to pkt(1). + let mut it = pkts.remove_expired(start_time() + PACKET_GAP, Duration::new(0, 0)); + assert_eq!(it.next().unwrap().pn(), 1); + assert!(it.next().is_none()); + std::mem::drop(it); + + assert_eq!(pkts.offset, 2); + assert_eq!(pkts.len(), 1); + } + + #[test] + #[should_panic(expected = "packet number skipping only supported for the first packet")] + fn skipped_not_ok() { + let mut pkts = pkts(); + pkts.track(pkt(4)); + } + + #[test] + fn first_skipped_ok() { + let mut pkts = SentPackets::default(); + pkts.track(pkt(4)); // This is fine. + assert_eq!(pkts.offset, 4); + assert_eq!(pkts.len(), 1); + } +} diff --git a/neqo-transport/src/recovery/token.rs b/neqo-transport/src/recovery/token.rs new file mode 100644 index 0000000000..93f84268cd --- /dev/null +++ b/neqo-transport/src/recovery/token.rs @@ -0,0 +1,63 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use crate::{ + ackrate::AckRate, + cid::ConnectionIdEntry, + crypto::CryptoRecoveryToken, + quic_datagrams::DatagramTracking, + send_stream::SendStreamRecoveryToken, + stream_id::{StreamId, StreamType}, + tracking::AckToken, +}; + +#[derive(Debug, Clone)] +#[allow(clippy::module_name_repetitions)] +pub enum StreamRecoveryToken { + Stream(SendStreamRecoveryToken), + ResetStream { + stream_id: StreamId, + }, + StopSending { + stream_id: StreamId, + }, + + MaxData(u64), + DataBlocked(u64), + + MaxStreamData { + stream_id: StreamId, + max_data: u64, + }, + StreamDataBlocked { + stream_id: StreamId, + limit: u64, + }, + + MaxStreams { + stream_type: StreamType, + max_streams: u64, + }, + StreamsBlocked { + stream_type: StreamType, + limit: u64, + }, +} + +#[derive(Debug, Clone)] +#[allow(clippy::module_name_repetitions)] +pub enum RecoveryToken { + Stream(StreamRecoveryToken), + Ack(AckToken), + Crypto(CryptoRecoveryToken), + HandshakeDone, + KeepAlive, // Special PING. + NewToken(usize), + NewConnectionId(ConnectionIdEntry<[u8; 16]>), + RetireConnectionId(u64), + AckFrequency(AckRate), + Datagram(DatagramTracking), +} diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 9a00dfc7a7..0f75f5543a 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -18,8 +18,8 @@ use neqo_common::qlog::NeqoQlog; use crate::{ cc::{ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno}, pace::Pacer, + recovery::SentPacket, rtt::RttEstimate, - tracking::SentPacket, }; /// The number of packets we allow to burst from the pacer. 
diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 012c895a18..81dda30154 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -133,114 +133,6 @@ impl std::fmt::Debug for PacketNumberSpaceSet { } } -#[derive(Debug, Clone)] -pub struct SentPacket { - pub pt: PacketType, - pub pn: PacketNumber, - ack_eliciting: bool, - pub time_sent: Instant, - primary_path: bool, - pub tokens: Vec, - - time_declared_lost: Option, - /// After a PTO, this is true when the packet has been released. - pto: bool, - - pub size: usize, -} - -impl SentPacket { - pub fn new( - pt: PacketType, - pn: PacketNumber, - time_sent: Instant, - ack_eliciting: bool, - tokens: Vec, - size: usize, - ) -> Self { - Self { - pt, - pn, - time_sent, - ack_eliciting, - primary_path: true, - tokens, - time_declared_lost: None, - pto: false, - size, - } - } - - /// Returns `true` if the packet will elicit an ACK. - pub fn ack_eliciting(&self) -> bool { - self.ack_eliciting - } - - /// Returns `true` if the packet was sent on the primary path. - pub fn on_primary_path(&self) -> bool { - self.primary_path - } - - /// Clears the flag that had this packet on the primary path. - /// Used when migrating to clear out state. - pub fn clear_primary_path(&mut self) { - self.primary_path = false; - } - - /// Whether the packet has been declared lost. - pub fn lost(&self) -> bool { - self.time_declared_lost.is_some() - } - - /// Whether accounting for the loss or acknowledgement in the - /// congestion controller is pending. - /// Returns `true` if the packet counts as being "in flight", - /// and has not previously been declared lost. - /// Note that this should count packets that contain only ACK and PADDING, - /// but we don't send PADDING, so we don't track that. - pub fn cc_outstanding(&self) -> bool { - self.ack_eliciting() && self.on_primary_path() && !self.lost() - } - - /// Whether the packet should be tracked as in-flight. 
- pub fn cc_in_flight(&self) -> bool { - self.ack_eliciting() && self.on_primary_path() - } - - /// Declare the packet as lost. Returns `true` if this is the first time. - pub fn declare_lost(&mut self, now: Instant) -> bool { - if self.lost() { - false - } else { - self.time_declared_lost = Some(now); - true - } - } - - /// Ask whether this tracked packet has been declared lost for long enough - /// that it can be expired and no longer tracked. - pub fn expired(&self, now: Instant, expiration_period: Duration) -> bool { - self.time_declared_lost - .map_or(false, |loss_time| (loss_time + expiration_period) <= now) - } - - /// Whether the packet contents were cleared out after a PTO. - pub fn pto_fired(&self) -> bool { - self.pto - } - - /// On PTO, we need to get the recovery tokens so that we can ensure that - /// the frames we sent can be sent again in the PTO packet(s). Do that just once. - pub fn pto(&mut self) -> bool { - if self.pto || self.lost() { - false - } else { - self.pto = true; - true - } - } -} - impl std::fmt::Display for PacketNumberSpace { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.write_str(match self { From 5e24ead002c754ebcf6a1b32dbe52f75ce05e5e3 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 16 Feb 2024 00:09:30 +1100 Subject: [PATCH 173/321] Try a bit harder --- neqo-transport/src/recovery/mod.rs | 21 +++++----- neqo-transport/src/recovery/sent.rs | 60 ++++++++++++++++++++--------- 2 files changed, 54 insertions(+), 27 deletions(-) diff --git a/neqo-transport/src/recovery/mod.rs b/neqo-transport/src/recovery/mod.rs index 7f362e4104..41f3995f07 100644 --- a/neqo-transport/src/recovery/mod.rs +++ b/neqo-transport/src/recovery/mod.rs @@ -174,7 +174,7 @@ impl LossRecoverySpace { .iter_mut() .filter_map(|sent| { if sent.pto() { - qtrace!("PTO: marking packet {} lost ", sent.pn); + qtrace!("PTO: marking packet {} lost ", sent.pn()); Some(&*sent) } else { None @@ -259,7 +259,7 @@ impl LossRecoverySpace { let mut 
eliciting = false; let mut acked = Vec::new(); for range in acked_ranges { - acked.extend(self.sent_packets.take_range(range)); + self.sent_packets.take_range(range, &mut acked); } for p in &acked { self.remove_packet(p); @@ -327,20 +327,20 @@ impl LossRecoverySpace { .sent_packets .iter_mut() // BTreeMap iterates in order of ascending PN - .take_while(|p| p.pn < largest_acked.unwrap_or(PacketNumber::MAX)) + .take_while(|p| p.pn() < largest_acked.unwrap_or(PacketNumber::MAX)) { // Packets sent before now - loss_delay are deemed lost. if packet.time_sent + loss_delay <= now { qtrace!( "lost={}, time sent {:?} is before lost_delay {:?}", - packet.pn, + packet.pn(), packet.time_sent, loss_delay ); - } else if largest_acked >= Some(packet.pn + PACKET_THRESHOLD) { + } else if largest_acked >= Some(packet.pn() + PACKET_THRESHOLD) { qtrace!( "lost={}, is >= {} from largest acked {:?}", - packet.pn, + packet.pn(), PACKET_THRESHOLD, largest_acked ); @@ -546,7 +546,7 @@ impl LossRecovery { pub fn on_packet_sent(&mut self, path: &PathRef, mut sent_packet: SentPacket) { let pn_space = PacketNumberSpace::from(sent_packet.pt); - qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn); + qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn()); if let Some(space) = self.spaces.get_mut(pn_space) { path.borrow_mut().packet_sent(&mut sent_packet); space.on_packet_sent(sent_packet); @@ -555,7 +555,7 @@ impl LossRecovery { [self], "ignoring {}-{} from dropped space", pn_space, - sent_packet.pn + sent_packet.pn() ); } } @@ -1160,7 +1160,10 @@ mod tests { } fn match_acked(acked: &[SentPacket], expected: &[PacketNumber]) { - assert_eq!(acked.iter().map(|p| p.pn).collect::>(), expected); + assert_eq!( + acked.iter().map(SentPacket::pn).collect::>(), + expected + ); } #[test] diff --git a/neqo-transport/src/recovery/sent.rs b/neqo-transport/src/recovery/sent.rs index 0da4bfdc0e..4c1d811b12 100644 --- a/neqo-transport/src/recovery/sent.rs +++ 
b/neqo-transport/src/recovery/sent.rs @@ -56,6 +56,11 @@ impl SentPacket { } } + /// The number of the packet. + pub fn pn(&self) -> PacketNumber { + self.pn + } + /// Returns `true` if the packet will elicit an ACK. pub fn ack_eliciting(&self) -> bool { self.ack_eliciting @@ -155,7 +160,7 @@ impl SentPackets { } pub fn iter_mut(&mut self) -> impl Iterator { - self.packets.iter_mut().filter_map(Option::as_mut) + self.packets.iter_mut().flatten() } /// Take values from a specified range of packet numbers. @@ -163,25 +168,40 @@ impl SentPackets { /// The values returned will be reversed, so that the most recent packet appears first. /// This is because ACK frames arrive with ranges starting from the largest acknowledged /// and we want to match that. - pub fn take_range( - &mut self, - r: RangeInclusive, - ) -> impl Iterator + '_ { + pub fn take_range(&mut self, r: RangeInclusive, store: &mut Vec) { let start = usize::try_from((*r.start()).saturating_sub(self.offset)).unwrap(); let end = min( usize::try_from((*r.end() + 1).saturating_sub(self.offset)).unwrap(), self.packets.len(), ); - let len_ref = &mut self.len; - self.packets[start..end] - .iter_mut() - .rev() - .filter_map(Option::take) - .inspect(move |_| { - // Decrement the length for any values that are taken. - *len_ref -= 1; - }) + let before = store.len(); + if self.packets[..start].iter().all(Option::is_none) { + // If there are extra empty slots, split those off too. + let extra = self.packets[end..] 
+ .iter() + .take_while(|&p| p.is_none()) + .count(); + self.offset += u64::try_from(end + extra).unwrap(); + let mut other = self.packets.split_off(end + extra); + std::mem::swap(&mut self.packets, &mut other); + store.extend( + other + .into_iter() + .rev() + .skip(extra) + .take(end - start) + .flatten(), + ); + } else { + store.extend( + self.packets[start..end] + .iter_mut() + .rev() + .filter_map(Option::take), + ); + } + self.len -= store.len() - before; } /// Empty out the packets, but keep the offset. @@ -275,7 +295,9 @@ mod tests { fn remove_one(pkts: &mut SentPackets, idx: PacketNumber) { assert_eq!(pkts.len(), 3); - let mut it = pkts.take_range(idx..=idx); + let mut store = Vec::new(); + pkts.take_range(idx..=idx, &mut store); + let mut it = store.into_iter(); assert_eq!(idx, it.next().unwrap().pn()); assert!(it.next().is_none()); std::mem::drop(it); @@ -303,15 +325,17 @@ mod tests { { // Reverse the expectations here as this iterator reverses its output. - let mut it = pkts.take_range(0..=2); + let mut store = Vec::new(); + pkts.take_range(0..=2, &mut store); + let mut it = store.into_iter(); assert_eq!(it.next().unwrap().pn(), 2); assert_eq!(it.next().unwrap().pn(), 0); assert!(it.next().is_none()); }; // The None values are still there in this case, so offset is 0. 
- assert_eq!(pkts.offset, 0); - assert_eq!(pkts.packets.len(), 3); + assert_eq!(pkts.offset, 3); + assert_eq!(pkts.packets.len(), 0); assert_eq!(pkts.len(), 0); } From 5074583553e77fcacd609e70e4bcc454b9960c49 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 15 Feb 2024 16:19:11 +0200 Subject: [PATCH 174/321] ci: Use composite actions, and enable rolling builds (#1660) * ci: Use composite actions, and enable rolling builds * Fix name of action * Fix harder * Fix names, again * Need to checkout first * Specify shell * Rename inputs * Use inputs correctly * Fix parameters * Again * Fix * Retry `hg clone` since it's often flaky * Try and fix caching --- .github/actions/nss/action.yml | 21 +++++++++++++ .github/actions/rust/action.yml | 30 ++++++++++++++++++ .github/workflows/bench.yml | 56 ++++++++++++++------------------- .github/workflows/check.yml | 31 ++++++------------ 4 files changed, 84 insertions(+), 54 deletions(-) create mode 100644 .github/actions/nss/action.yml create mode 100644 .github/actions/rust/action.yml diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml new file mode 100644 index 0000000000..1964ecdc37 --- /dev/null +++ b/.github/actions/nss/action.yml @@ -0,0 +1,21 @@ +name: Checkout NSS and NSPR + +runs: + using: composite + steps: + - name: Fetch NSS and NSPR + shell: bash + run: | + for i in {1..$RETRIES}; do + hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" && break || sleep $DELAY && false + done + for i in {1..$RETRIES}; do + hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR" && break || sleep $DELAY && false + done + echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" + echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" + env: + NSS_DIR: ${{ github.workspace }}/nss + NSPR_DIR: ${{ github.workspace }}/nspr + RETRIES: 10 + DELAY: 10 diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml new file mode 100644 index 0000000000..b489029fa9 --- /dev/null +++ b/.github/actions/rust/action.yml @@ 
-0,0 +1,30 @@ +name: Install Rust +description: Install Rust and sccache + +inputs: + version: + description: 'Rust toolchain version to install' + required: true + default: 'stable' + components: + description: 'Rust components to install' + required: false + default: '' + +runs: + using: composite + steps: + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ inputs.version }} + components: ${{ inputs.components }} + + - name: Use sccache + uses: mozilla-actions/sccache-action@v0.0.4 + + - name: Enable sscache + shell: bash + run: | + echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 866b4ff62f..2d7c8933c4 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -17,34 +17,22 @@ jobs: shell: bash steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Install Rust - uses: dtolnay/rust-toolchain@master + uses: ./.github/actions/rust with: - toolchain: $TOOLCHAIN + version: $TOOLCHAIN components: rustfmt - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.4 - - name: Configure Rust run: | echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment" >> "$GITHUB_ENV" - echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" - echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" cargo install flamegraph - - name: Checkout - uses: actions/checkout@v4 - - name: Fetch NSS and NSPR - run: | - hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" - hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR" - echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" - echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" - env: - NSS_DIR: ${{ github.workspace }}/nss - NSPR_DIR: ${{ github.workspace }}/nspr + uses: ./.github/actions/nss - name: Build run: | @@ -53,18 +41,20 @@ jobs: echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" - name: Download criterion results + id: 
criterion-cache uses: actions/cache/restore@v4 with: path: ./target/criterion - key: neqo-${{ runner.os }}-${{ github.run_id}} - restore-keys: neqo-${{ runner.os }}-main-criterion + key: criterion-${{ runner.os }}-${{ hashFiles('./target/criterion/**.json') }} + restore-keys: criterion-${{ runner.os }}- - - name: Download github-action-benchmark results - uses: actions/cache/restore@v4 - with: - path: ./cache - key: neqo-${{ runner.os}}-${{ github.run_id}} - restore-keys: neqo-${{ runner.os }}-main-action-benchmark + # - name: Download github-action-benchmark results + # id: benchmark-cache + # uses: actions/cache/restore@v4 + # with: + # path: ./cache + # key: action-benchmark-${{ runner.os}}-${{ hashFiles('./cache/**.TODO') }} + # restore-keys: action-benchmark-${{ runner.os }}- # Disable turboboost, hyperthreading and use performance governor. - name: Prepare machine @@ -139,14 +129,14 @@ jobs: uses: actions/cache/save@v4 with: path: ./target/criterion - key: neqo-${{ runner.os }}-main-criterion + key: ${{ steps.criterion-cache.outputs.cache-primary-key }} - - name: Upload github-action-benchmark results - if: github.ref == 'refs/heads/main' - uses: actions/cache/save@v4 - with: - path: ./cache - key: neqo-${{ runner.os }}-main-action-benchmark + # - name: Upload github-action-benchmark results + # if: github.ref == 'refs/heads/main' + # uses: actions/cache/save@v4 + # with: + # path: ./cache + # key: ${{ steps.benchmark-cache.outputs.cache-primary-key }} - name: Archive perf data uses: actions/upload-artifact@v4 diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index dec4e9897d..613c2e9104 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -10,6 +10,10 @@ env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + jobs: check: name: Build & test @@ -33,20 +37,15 @@ jobs: shell: bash steps: + - name: Checkout + uses: 
actions/checkout@v4 + - name: Install Rust - uses: dtolnay/rust-toolchain@master + uses: ./.github/actions/rust with: - toolchain: ${{ matrix.rust-toolchain }} + version: ${{ matrix.rust-toolchain }} components: rustfmt, clippy, llvm-tools-preview - - name: Use sccache - uses: mozilla-actions/sccache-action@v0.0.4 - - - name: Enable sscache - run: | - echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" - echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" - - name: Install dependencies (Linux) if: runner.os == 'Linux' env: @@ -98,23 +97,13 @@ jobs: - name: Install Rust tools run: cargo +${{ matrix.rust-toolchain }} binstall --no-confirm cargo-llvm-cov cargo-nextest - - name: Checkout - uses: actions/checkout@v4 - # This step might be removed if the distro included a recent enough # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. # (neqo-crypto/build.rs would also need to query pkg-config to get the # right build flags rather than building NSS.) # Clone from the main hg repo, because the GitHub mirror can be out of date. 
- name: Fetch NSS and NSPR - run: | - hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" - hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR" - echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" - echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" - env: - NSS_DIR: ${{ github.workspace }}/nss - NSPR_DIR: ${{ github.workspace }}/nspr + uses: ./.github/actions/nss - name: Set up NSS/NSPR build environment (Windows) if: runner.os == 'Windows' From 4f91c07ac7aa6dc8a5cf12c6a7b6386333a634a3 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 16 Feb 2024 09:05:50 +1100 Subject: [PATCH 175/321] VecDeque might be faster --- neqo-transport/src/recovery/sent.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/neqo-transport/src/recovery/sent.rs b/neqo-transport/src/recovery/sent.rs index 4c1d811b12..320acf1c16 100644 --- a/neqo-transport/src/recovery/sent.rs +++ b/neqo-transport/src/recovery/sent.rs @@ -8,6 +8,7 @@ use std::{ cmp::min, + collections::VecDeque, convert::TryFrom, ops::RangeInclusive, time::{Duration, Instant}, @@ -135,7 +136,7 @@ impl SentPacket { #[derive(Debug, Default)] pub struct SentPackets { /// The collection. - packets: Vec>, + packets: VecDeque>, /// The packet number of the first item in the collection. offset: PacketNumber, /// The number of `Some` values in the packet. This is cached to keep things squeaky-fast. @@ -156,7 +157,7 @@ impl SentPackets { self.offset = packet.pn; } self.len += 1; - self.packets.push(Some(packet)); + self.packets.push_back(Some(packet)); } pub fn iter_mut(&mut self) -> impl Iterator { @@ -176,10 +177,11 @@ impl SentPackets { ); let before = store.len(); - if self.packets[..start].iter().all(Option::is_none) { + if self.packets.range(..start).all(Option::is_none) { // If there are extra empty slots, split those off too. - let extra = self.packets[end..] - .iter() + let extra = self + .packets + .range(end..) 
.take_while(|&p| p.is_none()) .count(); self.offset += u64::try_from(end + extra).unwrap(); @@ -195,8 +197,8 @@ impl SentPackets { ); } else { store.extend( - self.packets[start..end] - .iter_mut() + self.packets + .range_mut(start..end) .rev() .filter_map(Option::take), ); From e4e45be543640e29f10bde670ba77626d49324a4 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 19 Feb 2024 17:55:33 +1100 Subject: [PATCH 176/321] Double down on making SentPacket fields private --- neqo-transport/src/cc/classic_cc.rs | 56 ++++++++++++------------- neqo-transport/src/cc/tests/new_reno.rs | 4 +- neqo-transport/src/connection/mod.rs | 6 +-- neqo-transport/src/path.rs | 4 +- neqo-transport/src/qlog.rs | 9 +++- neqo-transport/src/recovery/mod.rs | 18 ++++---- neqo-transport/src/recovery/sent.rs | 40 ++++++++++++++---- neqo-transport/src/sender.rs | 2 +- 8 files changed, 85 insertions(+), 54 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index baf98cca46..01849a5643 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -168,8 +168,8 @@ impl CongestionControl for ClassicCongestionControl { qinfo!( "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}", self, - pkt.pn, - pkt.size, + pkt.pn(), + pkt.len(), i32::from(!pkt.cc_outstanding()), i32::from(pkt.lost()), rtt_est, @@ -177,11 +177,11 @@ impl CongestionControl for ClassicCongestionControl { if !pkt.cc_outstanding() { continue; } - if pkt.pn < self.first_app_limited { + if pkt.pn() < self.first_app_limited { is_app_limited = false; } - assert!(self.bytes_in_flight >= pkt.size); - self.bytes_in_flight -= pkt.size; + assert!(self.bytes_in_flight >= pkt.len()); + self.bytes_in_flight -= pkt.len(); if !self.after_recovery_start(pkt) { // Do not increase congestion window for packets sent before @@ -194,7 +194,7 @@ impl CongestionControl for ClassicCongestionControl { qlog::metrics_updated(&mut self.qlog, 
&[QlogMetric::InRecovery(false)]); } - new_acked += pkt.size; + new_acked += pkt.len(); } if is_app_limited { @@ -269,11 +269,11 @@ impl CongestionControl for ClassicCongestionControl { qinfo!( "packet_lost this={:p}, pn={}, ps={}", self, - pkt.pn, - pkt.size + pkt.pn(), + pkt.len() ); - assert!(self.bytes_in_flight >= pkt.size); - self.bytes_in_flight -= pkt.size; + assert!(self.bytes_in_flight >= pkt.len()); + self.bytes_in_flight -= pkt.len(); } qlog::metrics_updated( &mut self.qlog, @@ -299,13 +299,13 @@ impl CongestionControl for ClassicCongestionControl { fn discard(&mut self, pkt: &SentPacket) { if pkt.cc_outstanding() { - assert!(self.bytes_in_flight >= pkt.size); - self.bytes_in_flight -= pkt.size; + assert!(self.bytes_in_flight >= pkt.len()); + self.bytes_in_flight -= pkt.len(); qlog::metrics_updated( &mut self.qlog, &[QlogMetric::BytesInFlight(self.bytes_in_flight)], ); - qtrace!([self], "Ignore pkt with size {}", pkt.size); + qtrace!([self], "Ignore pkt with size {}", pkt.len()); } } @@ -320,7 +320,7 @@ impl CongestionControl for ClassicCongestionControl { fn on_packet_sent(&mut self, pkt: &SentPacket) { // Record the recovery time and exit any transient state. if self.state.transient() { - self.recovery_start = Some(pkt.pn); + self.recovery_start = Some(pkt.pn()); self.state.update(); } @@ -332,15 +332,15 @@ impl CongestionControl for ClassicCongestionControl { // window. Assume that all in-flight packets up to this one are NOT app-limited. // However, subsequent packets might be app-limited. Set `first_app_limited` to the // next packet number. 
- self.first_app_limited = pkt.pn + 1; + self.first_app_limited = pkt.pn() + 1; } - self.bytes_in_flight += pkt.size; + self.bytes_in_flight += pkt.len(); qinfo!( "packet_sent this={:p}, pn={}, ps={}", self, - pkt.pn, - pkt.size + pkt.pn(), + pkt.len() ); qlog::metrics_updated( &mut self.qlog, @@ -439,20 +439,20 @@ impl ClassicCongestionControl { let cutoff = max(first_rtt_sample_time, prev_largest_acked_sent); for p in lost_packets .iter() - .skip_while(|p| Some(p.time_sent) < cutoff) + .skip_while(|p| Some(p.time_sent()) < cutoff) { - if p.pn != last_pn + 1 { + if p.pn() != last_pn + 1 { // Not a contiguous range of lost packets, start over. start = None; } - last_pn = p.pn; + last_pn = p.pn(); if !p.cc_in_flight() { // Not interesting, keep looking. continue; } if let Some(t) = start { let elapsed = p - .time_sent + .time_sent() .checked_duration_since(t) .expect("time is monotonic"); if elapsed > pc_period { @@ -467,7 +467,7 @@ impl ClassicCongestionControl { return true; } } else { - start = Some(p.time_sent); + start = Some(p.time_sent()); } } false @@ -481,7 +481,7 @@ impl ClassicCongestionControl { // state and update the variable `self.recovery_start`. Before the // first recovery, all packets were sent after the recovery event, // allowing to reduce the cwnd on congestion events. - !self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn >= pn) + !self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn() >= pn) } /// Handle a congestion event. 
@@ -916,12 +916,12 @@ mod tests { fn persistent_congestion_ack_eliciting() { let mut lost = make_lost(&[1, PERSISTENT_CONG_THRESH + 2]); lost[0] = SentPacket::new( - lost[0].pt, - lost[0].pn, - lost[0].time_sent, + lost[0].packet_type(), + lost[0].pn(), + lost[0].time_sent(), false, Vec::new(), - lost[0].size, + lost[0].len(), ); assert!(!persistent_congestion_by_pto( ClassicCongestionControl::new(NewReno::default()), diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index 0736f398fb..3f3fdd9e07 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -126,14 +126,14 @@ fn issue_876() { // and ack it. cwnd increases slightly cc.on_packets_acked(&sent_packets[6..], &RTT_ESTIMATE, time_now); - assert_eq!(cc.acked_bytes(), sent_packets[6].size); + assert_eq!(cc.acked_bytes(), sent_packets[6].len()); cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 5 * MAX_DATAGRAM_SIZE - 2); // Packet from before is lost. Should not hurt cwnd. cc.on_packets_lost(Some(time_now), None, PTO, &sent_packets[1..2]); assert!(!cc.recovery_packet()); - assert_eq!(cc.acked_bytes(), sent_packets[6].size); + assert_eq!(cc.acked_bytes(), sent_packets[6].len()); cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 4 * MAX_DATAGRAM_SIZE); } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 1e53447007..979c164800 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -2329,7 +2329,7 @@ impl Connection { packets.len(), mtu ); - initial.size += mtu - packets.len(); + initial.add_padding(mtu - packets.len()); packets.resize(mtu, 0); } self.loss_recovery.on_packet_sent(path, initial); @@ -2848,7 +2848,7 @@ impl Connection { /// to retransmit the frame as needed. 
fn handle_lost_packets(&mut self, lost_packets: &[SentPacket]) { for lost in lost_packets { - for token in &lost.tokens { + for token in lost.tokens() { qdebug!([self], "Lost: {:?}", token); match token { RecoveryToken::Ack(_) => {} @@ -2903,7 +2903,7 @@ impl Connection { now, ); for acked in acked_packets { - for token in &acked.tokens { + for token in acked.tokens() { match token { RecoveryToken::Stream(stream_token) => self.streams.acked(stream_token), RecoveryToken::Ack(at) => self.acks.acked(at), diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 782fe896c0..06259e08c9 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -945,12 +945,12 @@ impl Path { qinfo!( [self], "discarding a packet without an RTT estimate; guessing RTT={:?}", - now - sent.time_sent + now - sent.time_sent() ); stats.rtt_init_guess = true; self.rtt.update( &mut self.qlog, - now - sent.time_sent, + now - sent.time_sent(), Duration::new(0, 0), false, now, diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index b59df0e37c..bb75dc3899 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -261,8 +261,13 @@ pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { qlog.add_event_with_stream(|stream| { for pkt in pkts { - let header = - PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), Some(pkt.pn), None, None, None); + let header = PacketHeader::with_type( + to_qlog_pkt_type(pkt.packet_type()), + Some(pkt.pn()), + None, + None, + None, + ); let ev_data = EventData::PacketLost(PacketLost { header: Some(header), diff --git a/neqo-transport/src/recovery/mod.rs b/neqo-transport/src/recovery/mod.rs index 41f3995f07..2aec479b5f 100644 --- a/neqo-transport/src/recovery/mod.rs +++ b/neqo-transport/src/recovery/mod.rs @@ -207,14 +207,14 @@ impl LossRecoverySpace { pub fn on_packet_sent(&mut self, sent_packet: SentPacket) { if 
sent_packet.ack_eliciting() { - self.last_ack_eliciting = Some(sent_packet.time_sent); + self.last_ack_eliciting = Some(sent_packet.time_sent()); self.in_flight_outstanding += 1; } else if self.space != PacketNumberSpace::ApplicationData && self.last_ack_eliciting.is_none() { // For Initial and Handshake spaces, make sure that we have a PTO baseline // always. See `LossRecoverySpace::pto_base_time()` for details. - self.last_ack_eliciting = Some(sent_packet.time_sent); + self.last_ack_eliciting = Some(sent_packet.time_sent()); } self.sent_packets.track(sent_packet); } @@ -330,11 +330,11 @@ impl LossRecoverySpace { .take_while(|p| p.pn() < largest_acked.unwrap_or(PacketNumber::MAX)) { // Packets sent before now - loss_delay are deemed lost. - if packet.time_sent + loss_delay <= now { + if packet.time_sent() + loss_delay <= now { qtrace!( "lost={}, time sent {:?} is before lost_delay {:?}", packet.pn(), - packet.time_sent, + packet.time_sent(), loss_delay ); } else if largest_acked >= Some(packet.pn() + PACKET_THRESHOLD) { @@ -346,7 +346,7 @@ impl LossRecoverySpace { ); } else { if largest_acked.is_some() { - self.first_ooo_time = Some(packet.time_sent); + self.first_ooo_time = Some(packet.time_sent()); } // No more packets can be declared lost after this one. break; @@ -545,7 +545,7 @@ impl LossRecovery { } pub fn on_packet_sent(&mut self, path: &PathRef, mut sent_packet: SentPacket) { - let pn_space = PacketNumberSpace::from(sent_packet.pt); + let pn_space = PacketNumberSpace::from(sent_packet.packet_type()); qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn()); if let Some(space) = self.spaces.get_mut(pn_space) { path.borrow_mut().packet_sent(&mut sent_packet); @@ -622,11 +622,11 @@ impl LossRecovery { // If the largest acknowledged is newly acked and any newly acked // packet was ack-eliciting, update the RTT. 
(-recovery 5.1) let largest_acked_pkt = acked_packets.first().expect("must be there"); - space.largest_acked_sent_time = Some(largest_acked_pkt.time_sent); + space.largest_acked_sent_time = Some(largest_acked_pkt.time_sent()); if any_ack_eliciting && largest_acked_pkt.on_primary_path() { self.rtt_sample( primary_path.borrow_mut().rtt_mut(), - largest_acked_pkt.time_sent, + largest_acked_pkt.time_sent(), now, ack_delay, ); @@ -1413,7 +1413,7 @@ mod tests { PacketType::Short, ] { let sent_pkt = SentPacket::new(*sp, 1, pn_time(3), true, Vec::new(), ON_SENT_SIZE); - let pn_space = PacketNumberSpace::from(sent_pkt.pt); + let pn_space = PacketNumberSpace::from(sent_pkt.packet_type()); lr.on_packet_sent(sent_pkt); lr.on_ack_received(pn_space, 1, vec![1..=1], Duration::from_secs(0), pn_time(3)); let mut lost = Vec::new(); diff --git a/neqo-transport/src/recovery/sent.rs b/neqo-transport/src/recovery/sent.rs index 320acf1c16..33d77b0ff7 100644 --- a/neqo-transport/src/recovery/sent.rs +++ b/neqo-transport/src/recovery/sent.rs @@ -21,18 +21,18 @@ use crate::{ #[derive(Debug, Clone)] pub struct SentPacket { - pub pt: PacketType, - pub pn: PacketNumber, + pt: PacketType, + pn: PacketNumber, ack_eliciting: bool, - pub time_sent: Instant, + time_sent: Instant, primary_path: bool, - pub tokens: Vec, + tokens: Vec, time_declared_lost: Option, /// After a PTO, this is true when the packet has been released. pto: bool, - pub size: usize, + len: usize, } impl SentPacket { @@ -42,7 +42,7 @@ impl SentPacket { time_sent: Instant, ack_eliciting: bool, tokens: Vec, - size: usize, + len: usize, ) -> Self { Self { pt, @@ -53,15 +53,25 @@ impl SentPacket { tokens, time_declared_lost: None, pto: false, - size, + len, } } + /// The type of this packet. + pub fn packet_type(&self) -> PacketType { + self.pt + } + /// The number of the packet. pub fn pn(&self) -> PacketNumber { self.pn } + /// The time that this packet was sent. 
+ pub fn time_sent(&self) -> Instant { + self.time_sent + } + /// Returns `true` if the packet will elicit an ACK. pub fn ack_eliciting(&self) -> bool { self.ack_eliciting @@ -72,12 +82,28 @@ impl SentPacket { self.primary_path } + /// The length of the packet that was sent. + pub fn len(&self) -> usize { + self.len + } + + /// Access the recovery tokens that this holds. + pub fn tokens(&self) -> &[RecoveryToken] { + &self.tokens + } + /// Clears the flag that had this packet on the primary path. /// Used when migrating to clear out state. pub fn clear_primary_path(&mut self) { self.primary_path = false; } + /// For Initial packets, it is possible that the packet builder needs to amend the length. + pub fn add_padding(&mut self, padding: usize) { + debug_assert_eq!(self.pt, PacketType::Initial); + self.len += padding; + } + /// Whether the packet has been declared lost. pub fn lost(&self) -> bool { self.time_declared_lost.is_some() diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 0f75f5543a..0ea18e5279 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -109,7 +109,7 @@ impl PacketSender { pub fn on_packet_sent(&mut self, pkt: &SentPacket, rtt: Duration) { self.pacer - .spend(pkt.time_sent, rtt, self.cc.cwnd(), pkt.size); + .spend(pkt.time_sent(), rtt, self.cc.cwnd(), pkt.len()); self.cc.on_packet_sent(pkt); } From f7a9de9e789a5866cb16a6b44564cd0f89b39926 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 20 Feb 2024 23:46:31 +0100 Subject: [PATCH 177/321] fix(http3): typos (#1669) --- neqo-http3/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index 4113009eb3..1b7f163019 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -565,7 +565,7 @@ trait HttpRecvStreamEvents: RecvStreamEvents { trait SendStream: Stream { /// # Errors /// - /// Error my occur during sending data, e.g. protocol error, etc. 
+ /// Error may occur during sending data, e.g. protocol error, etc. fn send(&mut self, conn: &mut Connection) -> Res<()>; fn has_data_to_send(&self) -> bool; fn stream_writable(&self); @@ -577,17 +577,17 @@ trait SendStream: Stream { /// # Errors /// - /// Error my occur during sending data, e.g. protocol error, etc. + /// Error may occur during sending data, e.g. protocol error, etc. fn send_data(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res; /// # Errors /// - /// It may happen that the transport stream is already close. This is unlikely. + /// It may happen that the transport stream is already closed. This is unlikely. fn close(&mut self, conn: &mut Connection) -> Res<()>; /// # Errors /// - /// It may happen that the transport stream is already close. This is unlikely. + /// It may happen that the transport stream is already closed. This is unlikely. fn close_with_message( &mut self, _conn: &mut Connection, @@ -606,7 +606,7 @@ trait SendStream: Stream { /// # Errors /// - /// It may happen that the transport stream is already close. This is unlikely. + /// It may happen that the transport stream is already closed. This is unlikely. fn send_data_atomic(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<()> { Err(Error::InvalidStreamId) } From 3024cbb181809e15f5d903216c7ce36ed0e8c556 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 21 Feb 2024 00:06:06 +0100 Subject: [PATCH 178/321] refactor: remove dead fairness and sendorder fn on SendStream (#1668) Removes `fn` `set_sendorder` and `set_fairness` on `SendStream` trait. The `SendStream` trait is not publicly exposed. Neither `set_sendorder` nor `set_fairness` is called within `neqo-http3`. 
Co-authored-by: Martin Thomson --- .../extended_connect/webtransport_session.rs | 12 +----------- .../extended_connect/webtransport_streams.rs | 10 ---------- neqo-http3/src/lib.rs | 4 ---- neqo-http3/src/send_message.rs | 12 +----------- 4 files changed, 2 insertions(+), 36 deletions(-) diff --git a/neqo-http3/src/features/extended_connect/webtransport_session.rs b/neqo-http3/src/features/extended_connect/webtransport_session.rs index bc33acb67c..5e89225956 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_session.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_session.rs @@ -8,7 +8,7 @@ use std::{cell::RefCell, collections::BTreeSet, mem, rc::Rc}; use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; use neqo_qpack::{QPackDecoder, QPackEncoder}; -use neqo_transport::{streams::SendOrder, Connection, DatagramTracking, StreamId}; +use neqo_transport::{Connection, DatagramTracking, StreamId}; use super::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}; use crate::{ @@ -486,16 +486,6 @@ impl SendStream for Rc> { self.borrow_mut().has_data_to_send() } - fn set_sendorder(&mut self, _conn: &mut Connection, _sendorder: Option) -> Res<()> { - // Not relevant on session - Ok(()) - } - - fn set_fairness(&mut self, _conn: &mut Connection, _fairness: bool) -> Res<()> { - // Not relevant on session - Ok(()) - } - fn stream_writable(&self) {} fn done(&self) -> bool { diff --git a/neqo-http3/src/features/extended_connect/webtransport_streams.rs b/neqo-http3/src/features/extended_connect/webtransport_streams.rs index 84dcd20618..cdc692b8d7 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_streams.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_streams.rs @@ -215,16 +215,6 @@ impl SendStream for WebTransportSendStream { } } - fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option) -> Res<()> { - conn.stream_sendorder(self.stream_id, sendorder) - .map_err(|_| 
crate::Error::InvalidStreamId) - } - - fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()> { - conn.stream_fairness(self.stream_id, fairness) - .map_err(|_| crate::Error::InvalidStreamId) - } - fn handle_stop_sending(&mut self, close_type: CloseType) { self.set_done(close_type); } diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index 1b7f163019..aa62e599b5 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -570,10 +570,6 @@ trait SendStream: Stream { fn has_data_to_send(&self) -> bool; fn stream_writable(&self); fn done(&self) -> bool; - #[allow(dead_code)] // https://github.com/mozilla/neqo/issues/1651 - fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option) -> Res<()>; - #[allow(dead_code)] // https://github.com/mozilla/neqo/issues/1651 - fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()>; /// # Errors /// diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index 4d37dcc37f..c50e3e056a 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -8,7 +8,7 @@ use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; use neqo_qpack::encoder::QPackEncoder; -use neqo_transport::{streams::SendOrder, Connection, StreamId}; +use neqo_transport::{Connection, StreamId}; use crate::{ frames::HFrame, @@ -270,16 +270,6 @@ impl SendStream for SendMessage { self.stream.has_buffered_data() } - fn set_sendorder(&mut self, _conn: &mut Connection, _sendorder: Option) -> Res<()> { - // Not relevant for SendMessage - Ok(()) - } - - fn set_fairness(&mut self, _conn: &mut Connection, _fairness: bool) -> Res<()> { - // Not relevant for SendMessage - Ok(()) - } - fn close(&mut self, conn: &mut Connection) -> Res<()> { self.state.fin()?; if !self.stream.has_buffered_data() { From 01620144236cdf91a6c018aadfdec97c982442fa Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 21 Feb 
2024 22:46:21 +0100 Subject: [PATCH 179/321] chore: remove neqo-interop (#1672) `neqo-interop` is replaced by `neqo-client` and `neqo-server` and thus unused. --- Cargo.toml | 1 - neqo-http3/src/connection_client.rs | 7 - neqo-interop/Cargo.toml | 19 - neqo-interop/src/main.rs | 935 ---------------------------- 4 files changed, 962 deletions(-) delete mode 100644 neqo-interop/Cargo.toml delete mode 100644 neqo-interop/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index 972c3febb4..46de261a41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,6 @@ members = [ "neqo-server", "neqo-qpack", "neqo-transport", - "neqo-interop", "test-fixture", ] diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 9711e094ea..4afe29f22b 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -894,13 +894,6 @@ impl Http3Client { self.process_http3(now); } - /// This should not be used because it gives access to functionalities that may disrupt the - /// proper functioning of the HTTP/3 session. - /// Only used by `neqo-interop`. - pub fn conn(&mut self) -> &mut Connection { - &mut self.conn - } - /// Process HTTP3 layer. /// When `process_output`, `process_input`, or `process` is called we must call this function /// as well. The functions calls `Http3Client::check_connection_events` to handle events from diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml deleted file mode 100644 index 227d480707..0000000000 --- a/neqo-interop/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "neqo-interop" -description = "A QUIC interop client." 
-authors.workspace = true -homepage.workspace = true -repository.workspace = true -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true - -[dependencies] -# neqo-interop is not used in Firefox, so we can be liberal with dependency versions -clap = { version = "4.4", features = ["derive"] } -neqo-common = { path = "./../neqo-common" } -neqo-crypto = { path = "./../neqo-crypto" } -neqo-http3 = { path = "./../neqo-http3" } -neqo-qpack = { path = "./../neqo-qpack" } -neqo-transport = { path = "./../neqo-transport" } diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs deleted file mode 100644 index 74c70477fb..0000000000 --- a/neqo-interop/src/main.rs +++ /dev/null @@ -1,935 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![warn(clippy::pedantic)] - -use std::{ - cell::RefCell, - cmp::min, - collections::HashSet, - mem, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs, UdpSocket}, - rc::Rc, - sync::OnceLock, -}; -// use std::path::PathBuf; -use std::{ - str::FromStr, - string::ParseError, - thread, - time::{Duration, Instant}, -}; - -use clap::Parser; -use neqo_common::{event::Provider, hex, Datagram, IpTos}; -use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; -use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; -use neqo_transport::{ - Connection, ConnectionError, ConnectionEvent, ConnectionParameters, EmptyConnectionIdGenerator, - Error, Output, State, StreamId, StreamType, -}; - -#[derive(Debug, Parser, Clone)] -#[command(author, version, about, long_about = None)] -struct Args { - #[arg(short = 'p', long)] - // Peers to include - include: Vec, - - #[arg(short = 'P', long)] - exclude: Vec, - - #[arg(short = 't', long)] - include_tests: Vec, - - #[arg(short = 'T', long)] - 
exclude_tests: Vec, - - #[arg(long, default_value = "5")] - timeout: u64, -} - -trait Handler { - fn handle(&mut self, client: &mut Connection) -> bool; - fn rewrite_out(&mut self, _dgram: &Datagram) -> Option { - None - } -} - -fn emit_datagram(socket: &UdpSocket, d: &Datagram) { - let sent = socket.send(&d[..]).expect("Error sending datagram"); - if sent != d.len() { - eprintln!("Unable to send all {} bytes of datagram", d.len()); - } -} - -static TEST_TIMEOUT: OnceLock = OnceLock::new(); -struct Timer { - end: Instant, -} -impl Timer { - pub fn new() -> Self { - Self { - end: Instant::now() + *TEST_TIMEOUT.get_or_init(|| Duration::from_secs(5)), - } - } - - pub fn set_timeout(t: Duration) { - TEST_TIMEOUT - .set(t) - .expect("failed to set a timeout because one was already set"); - } - - pub fn check(&self) -> Result { - if let Some(d) = self.end.checked_duration_since(Instant::now()) { - if d.as_nanos() > 0 { - Ok(d) - } else { - Err(String::from("Timed out")) - } - } else { - Err(String::from("Timed out")) - } - } -} - -fn process_loop( - nctx: &NetworkCtx, - client: &mut Connection, - handler: &mut dyn Handler, -) -> Result { - let buf = &mut [0u8; 2048]; - let timer = Timer::new(); - - loop { - if let State::Closed(..) 
= client.state() { - return Ok(client.state().clone()); - } - - loop { - let output = client.process_output(Instant::now()); - match output { - Output::Datagram(dgram) => { - let dgram = handler.rewrite_out(&dgram).unwrap_or(dgram); - emit_datagram(&nctx.socket, &dgram); - } - Output::Callback(duration) => { - let delay = min(timer.check()?, duration); - nctx.socket.set_read_timeout(Some(delay)).unwrap(); - break; - } - Output::None => { - return Ok(client.state().clone()); - } - } - } - - if !handler.handle(client) { - return Ok(client.state().clone()); - } - - let sz = match nctx.socket.recv(&mut buf[..]) { - Ok(sz) => sz, - Err(e) => { - return Err(String::from(match e.kind() { - std::io::ErrorKind::WouldBlock => continue, - _ => "Read error", - })); - } - }; - - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let received = Datagram::new( - nctx.remote_addr, - nctx.local_addr, - IpTos::default(), - None, - &buf[..sz], - ); - client.process_input(&received, Instant::now()); - } - } -} - -struct PreConnectHandler {} -impl Handler for PreConnectHandler { - fn handle(&mut self, client: &mut Connection) -> bool { - let authentication_needed = |e| matches!(e, ConnectionEvent::AuthenticationNeeded); - if client.events().any(authentication_needed) { - client.authenticated(AuthenticationStatus::Ok, Instant::now()); - } - !matches!(client.state(), State::Connected | State::Closing { .. }) - } -} - -// HTTP/0.9 IMPLEMENTATION -#[derive(Default)] -struct H9Handler { - rbytes: usize, - rsfin: bool, - streams: HashSet, -} - -// This is a bit fancier than actually needed. 
-impl Handler for H9Handler { - fn handle(&mut self, client: &mut Connection) -> bool { - let mut data = vec![0; 4000]; - while let Some(event) = client.next_event() { - eprintln!("Event: {event:?}"); - match event { - ConnectionEvent::RecvStreamReadable { stream_id } => { - if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {stream_id}"); - return false; - } - - let (sz, fin) = client - .stream_recv(stream_id, &mut data) - .expect("Read should succeed"); - data.truncate(sz); - eprintln!("Length={sz}"); - self.rbytes += sz; - if fin { - eprintln!(""); - client.close(Instant::now(), 0, "kthxbye!"); - self.rsfin = true; - return false; - } - } - ConnectionEvent::SendStreamWritable { stream_id } => { - eprintln!("stream {stream_id} writable"); - } - _ => { - eprintln!("Unexpected event {event:?}"); - } - } - } - - true - } -} - -// HTTP/3 IMPLEMENTATION -#[derive(Debug)] -struct Headers { - pub h: Vec
, -} - -// dragana: this is a very stupid parser. -// headers should be in form "[(something1, something2), (something3, something4)]" -impl FromStr for Headers { - type Err = ParseError; - - fn from_str(s: &str) -> Result { - let mut res = Self { h: Vec::new() }; - let h1: Vec<&str> = s - .trim_matches(|p| p == '[' || p == ']') - .split(')') - .collect(); - - for h in h1 { - let h2: Vec<&str> = h - .trim_matches(|p| p == ',') - .trim() - .trim_matches(|p| p == '(' || p == ')') - .split(',') - .collect(); - - if h2.len() == 2 { - res.h.push(Header::new(h2[0].trim(), h2[1].trim())); - } - } - - Ok(res) - } -} - -struct H3Handler { - streams: HashSet, - h3: Http3Client, - host: String, - path: String, - token: Option, -} - -// TODO(ekr@rtfm.com): Figure out how to merge this. -fn process_loop_h3( - nctx: &NetworkCtx, - handler: &mut H3Handler, - connect: bool, - close: bool, -) -> Result { - let buf = &mut [0u8; 2048]; - let timer = Timer::new(); - - loop { - if let State::Closed(..) = handler.h3.conn().state() { - return Ok(handler.h3.conn().state().clone()); - } - - if connect { - if let Http3State::Connected = handler.h3.state() { - return Ok(handler.h3.conn().state().clone()); - } - } - - loop { - let output = handler.h3.conn().process_output(Instant::now()); - match output { - Output::Datagram(dgram) => emit_datagram(&nctx.socket, &dgram), - Output::Callback(duration) => { - let delay = min(timer.check()?, duration); - nctx.socket.set_read_timeout(Some(delay)).unwrap(); - break; - } - Output::None => { - return Ok(handler.h3.conn().state().clone()); - } - } - } - if !handler.handle(close) { - return Ok(handler.h3.conn().state().clone()); - } - - let sz = match nctx.socket.recv(&mut buf[..]) { - Ok(sz) => sz, - Err(e) => { - return Err(String::from(match e.kind() { - std::io::ErrorKind::WouldBlock => continue, - _ => "Read error", - })); - } - }; - - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let 
received = Datagram::new( - nctx.remote_addr, - nctx.local_addr, - IpTos::default(), - None, - &buf[..sz], - ); - handler.h3.process_input(&received, Instant::now()); - } - } -} - -// This is a bit fancier than actually needed. -impl H3Handler { - fn handle(&mut self, close: bool) -> bool { - let mut data = vec![0; 4000]; - while let Some(event) = self.h3.next_event() { - match event { - Http3ClientEvent::HeaderReady { - stream_id, - headers, - fin, - .. - } => { - if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {stream_id}"); - return false; - } - - eprintln!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); - } - Http3ClientEvent::DataReadable { stream_id } => { - if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {stream_id}"); - return false; - } - - let (_sz, fin) = self - .h3 - .read_data(Instant::now(), stream_id, &mut data) - .expect("Read should succeed"); - if let Ok(txt) = String::from_utf8(data.clone()) { - eprintln!("READ[{stream_id}]: {txt}"); - } else { - eprintln!("READ[{}]: 0x{}", stream_id, hex(&data)); - } - if fin { - eprintln!(""); - if close { - self.h3.close(Instant::now(), 0, "kthxbye!"); - } - return false; - } - } - Http3ClientEvent::ResumptionToken(token) => { - self.token = Some(token); - } - _ => {} - } - } - - true - } -} - -struct Peer { - label: &'static str, - host: &'static str, - port: u16, -} - -impl Peer { - fn addr(&self) -> SocketAddr { - self.to_socket_addrs() - .expect("Remote address error") - .next() - .expect("No remote addresses") - } - - fn bind(&self) -> SocketAddr { - match self.addr() { - SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0), - SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), - } - } -} - -impl ToSocketAddrs for Peer { - type Iter = ::std::vec::IntoIter; - fn to_socket_addrs(&self) -> ::std::io::Result { - // This is idiotic. There is no path from hostname: String to IpAddr. 
- // And no means of controlling name resolution either. - std::fmt::format(format_args!("{}:{}", self.host, self.port)).to_socket_addrs() - } -} - -#[allow(clippy::upper_case_acronyms)] -#[derive(Debug, PartialEq)] -enum Test { - Connect, - H9, - H3, - VN, - R, - Z, - D, -} - -impl Test { - fn alpn(&self) -> Vec { - match self { - Self::H3 | Self::R | Self::Z | Self::D => vec![String::from("h3-28")], - _ => vec![String::from("hq-28")], - } - } - - fn label(&self) -> String { - String::from(match self { - Self::Connect => "connect", - Self::H9 => "h9", - Self::H3 => "h3", - Self::VN => "vn", - Self::R => "r", - Self::Z => "z", - Self::D => "d", - }) - } - - fn letters(&self) -> Vec { - match self { - Self::Connect => vec!['H'], - Self::H9 => vec!['D', 'C'], - Self::H3 => vec!['3', 'C', 'D'], - Self::VN => vec!['V'], - Self::R => vec!['R'], - Self::Z => vec!['Z'], - Self::D => vec!['d'], - } - } -} - -struct NetworkCtx { - local_addr: SocketAddr, - remote_addr: SocketAddr, - socket: UdpSocket, -} - -fn test_connect(nctx: &NetworkCtx, test: &Test, peer: &Peer) -> Result { - let mut client = Connection::new_client( - peer.host, - &test.alpn(), - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - nctx.local_addr, - nctx.remote_addr, - ConnectionParameters::default(), - Instant::now(), - ) - .expect("must succeed"); - // Temporary here to help out the type inference engine - let mut h = PreConnectHandler {}; - let res = process_loop(nctx, &mut client, &mut h); - - let st = match res { - Ok(st) => st, - Err(e) => { - return Err(format!("ERROR: {e}")); - } - }; - - if st.connected() { - Ok(client) - } else { - Err(format!("{st:?}")) - } -} - -fn test_h9(nctx: &NetworkCtx, client: &mut Connection) -> Result<(), String> { - let client_stream_id = client.stream_create(StreamType::BiDi).unwrap(); - let request: String = "GET /10\r\n".to_string(); - client - .stream_send(client_stream_id, request.as_bytes()) - .unwrap(); - let mut hc = H9Handler::default(); - 
hc.streams.insert(client_stream_id); - let res = process_loop(nctx, client, &mut hc); - - if let Err(e) = res { - return Err(format!("ERROR: {e}")); - } - if hc.rbytes == 0 { - return Err(String::from("Empty response")); - } - if !hc.rsfin { - return Err(String::from("No FIN")); - } - Ok(()) -} - -fn connect_h3(nctx: &NetworkCtx, peer: &Peer, client: Connection) -> Result { - let mut hc = H3Handler { - streams: HashSet::new(), - h3: Http3Client::new_with_conn( - client, - Http3Parameters::default() - .max_table_size_encoder(16384) - .max_table_size_decoder(16384) - .max_blocked_streams(10) - .max_concurrent_push_streams(10), - ), - host: String::from(peer.host), - path: String::from("/"), - token: None, - }; - - if let Err(e) = process_loop_h3(nctx, &mut hc, true, false) { - return Err(format!("ERROR: {e}")); - } - Ok(hc) -} - -fn test_h3(nctx: &NetworkCtx, peer: &Peer, client: Connection, test: &Test) -> Result<(), String> { - let mut hc = connect_h3(nctx, peer, client)?; - - let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[], - Priority::default(), - ) - .unwrap(); - hc.h3.stream_close_send(client_stream_id).unwrap(); - - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, *test != Test::D) { - return Err(format!("ERROR: {e}")); - } - - if *test == Test::D { - // Send another request, when the first one was send we probably did not have the peer's - // qpack parameter. 
- let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[Header::new("something1", "something2")], - Priority::default(), - ) - .unwrap(); - hc.h3.stream_close_send(client_stream_id).unwrap(); - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {e}")); - } - - if hc.h3.qpack_decoder_stats().dynamic_table_references == 0 { - return Err("ERROR: qpack decoder does not use the dynamic table.".into()); - } - if hc.h3.qpack_encoder_stats().dynamic_table_references == 0 { - return Err("ERROR: qpack encoder does not use the dynamic table.".into()); - } - } - - Ok(()) -} - -// Return true if 0RTT was negotiated. -fn test_h3_rz( - nctx: &NetworkCtx, - peer: &Peer, - client: Connection, - test: &Test, -) -> Result<(), String> { - let mut hc = connect_h3(nctx, peer, client)?; - - // Exchange some data to get http3 control streams and a resumption token. - let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[], - Priority::default(), - ) - .unwrap(); - hc.h3.stream_close_send(client_stream_id).unwrap(); - - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {e}")); - } - - // get resumption ticket - let res_token = hc.token.ok_or("ERROR: no resumption token")?; - - let handler = Http3Client::new( - peer.host, - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - nctx.local_addr, - nctx.remote_addr, - Http3Parameters::default() - .max_table_size_encoder(16384) - .max_table_size_decoder(16384) - .max_blocked_streams(10) - .max_concurrent_push_streams(0), - Instant::now(), - ); - if handler.is_err() { - return Err(String::from("ERROR: creating a client failed")); - } - - let mut hc = H3Handler { - streams: HashSet::new(), - h3: handler.unwrap(), - host: String::from(peer.host), - path: String::from("/"), - 
token: None, - }; - - hc.h3.enable_resumption(Instant::now(), res_token).unwrap(); - - if *test == Test::Z { - println!("Test 0RTT"); - if Http3State::ZeroRtt != hc.h3.state() { - return Err("ERROR: zerortt not negotiated".into()); - } - - // SendH3 data during 0rtt - let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[], - Priority::default(), - ) - .unwrap(); - mem::drop(hc.h3.stream_close_send(client_stream_id)); - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {e}")); - } - - let recvd_0rtt_reject = |e| e == Http3ClientEvent::ZeroRttRejected; - if hc.h3.events().any(recvd_0rtt_reject) { - return Err("ERROR: 0RTT rejected".into()); - } - } else { - println!("Test resumption"); - if let Err(e) = process_loop_h3(nctx, &mut hc, true, true) { - return Err(format!("ERROR: {e}")); - } - } - - if !hc.h3.conn().stats().resumed { - return Err("ERROR: resumption failed".into()); - } - Ok(()) -} - -struct VnHandler {} - -impl Handler for VnHandler { - fn handle(&mut self, client: &mut Connection) -> bool { - !matches!(client.state(), State::Connected | State::Closing { .. 
}) - } - - fn rewrite_out(&mut self, d: &Datagram) -> Option { - let mut payload = d[..].to_vec(); - payload[1] = 0x1a; - Some(Datagram::new( - d.source(), - d.destination(), - d.tos(), - d.ttl(), - payload, - )) - } -} - -fn test_vn(nctx: &NetworkCtx, peer: &Peer) -> Connection { - let mut client = Connection::new_client( - peer.host, - &["hq-28"], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - nctx.local_addr, - nctx.remote_addr, - ConnectionParameters::default(), - Instant::now(), - ) - .expect("must succeed"); - // Temporary here to help out the type inference engine - let mut h = VnHandler {}; - let _res = process_loop(nctx, &mut client, &mut h); - client -} - -fn run_test<'t>(peer: &Peer, test: &'t Test) -> (&'t Test, String) { - let socket = UdpSocket::bind(peer.bind()).expect("Unable to bind UDP socket"); - socket.connect(peer).expect("Unable to connect UDP socket"); - - let local_addr = socket.local_addr().expect("Socket local address not bound"); - let remote_addr = peer.addr(); - - let nctx = NetworkCtx { - local_addr, - remote_addr, - socket, - }; - - if let Test::VN = test { - let client = test_vn(&nctx, peer); - return match client.state() { - State::Closed(ConnectionError::Transport(Error::VersionNegotiation)) => { - (test, String::from("OK")) - } - _ => (test, format!("ERROR: Wrong state {:?}", client.state())), - }; - } - - let mut client = match test_connect(&nctx, test, peer) { - Ok(client) => client, - Err(e) => return (test, e), - }; - - let res = match test { - Test::Connect => { - return (test, String::from("OK")); - } - Test::H9 => test_h9(&nctx, &mut client), - Test::H3 | Test::D => test_h3(&nctx, peer, client, test), - Test::VN => unimplemented!(), - Test::R | Test::Z => test_h3_rz(&nctx, peer, client, test), - }; - - if let Err(e) = res { - return (test, e); - } - - (test, String::from("OK")) -} - -fn run_peer(args: &Args, peer: &'static Peer) -> Vec<(&'static Test, String)> { - let mut results: Vec<(&'static Test, 
String)> = Vec::new(); - - eprintln!("Running tests for {}", peer.label); - - let mut children = Vec::new(); - - for test in &TESTS { - if !args.include_tests.is_empty() && !args.include_tests.contains(&test.label()) { - continue; - } - if args.exclude_tests.contains(&test.label()) { - continue; - } - - let child = thread::spawn(move || run_test(peer, test)); - children.push((test, child)); - } - - for child in children { - if let Ok(e) = child.1.join() { - eprintln!("Test complete {:?}, {:?}", child.0, e); - results.push(e); - } else { - eprintln!("Thread crashed {:?}", child.0); - results.push((child.0, String::from("CRASHED"))); - } - } - - eprintln!("Tests for {} complete {:?}", peer.label, results); - results -} - -const PEERS: &[Peer] = &[ - Peer { - label: "quiche", - host: "quic.tech", - port: 4433, - }, - Peer { - label: "quiche2", - host: "quic.tech", - port: 8443, - }, - Peer { - label: "quiche3", - host: "quic.tech", - port: 8444, - }, - Peer { - label: "quant", - host: "quant.eggert.org", - port: 4433, - }, - Peer { - label: "quicly", - host: "quic.examp1e.net", - port: 443, - }, - Peer { - label: "quicly2", - host: "quic.examp1e.net", - port: 4433, - }, - Peer { - label: "local", - host: "127.0.0.1", - port: 4433, - }, - Peer { - label: "applequic", - host: "[2a00:79e1:abc:301:fca8:166e:525f:9b5c]", - port: 4433, - }, - Peer { - label: "f5", - host: "f5quic.com", - port: 4433, - }, - Peer { - label: "msft", - host: "quic.westus.cloudapp.azure.com", - port: 443, - }, - Peer { - label: "mvfst", - host: "fb.mvfst.net", - port: 443, - }, - Peer { - label: "google", - host: "quic.rocks", - port: 4433, - }, - Peer { - label: "ngtcp2", - host: "nghttp2.org", - port: 4433, - }, - Peer { - label: "picoquic", - host: "test.privateoctopus.com", - port: 4433, - }, - Peer { - label: "ats", - host: "quic.ogre.com", - port: 4433, - }, - Peer { - label: "cloudflare", - host: "www.cloudflare.com", - port: 443, - }, - Peer { - label: "litespeed", - host: 
"http3-test.litespeedtech.com", - port: 4433, - }, -]; - -const TESTS: [Test; 7] = [ - Test::Connect, - Test::H9, - Test::H3, - Test::VN, - Test::R, - Test::Z, - Test::D, -]; - -fn main() { - let args = Args::parse(); - init(); - Timer::set_timeout(Duration::from_secs(args.timeout)); - - let mut children = Vec::new(); - - // Start all the children. - for peer in PEERS { - if !args.include.is_empty() && !args.include.contains(&String::from(peer.label)) { - continue; - } - if args.exclude.contains(&String::from(peer.label)) { - continue; - } - - let at = args.clone(); - let child = thread::spawn(move || run_peer(&at, peer)); - children.push((peer, child)); - } - - // Now wait for them. - for child in children { - let res = child.1.join().unwrap(); - let mut all_letters = HashSet::new(); - for r in &res { - for l in r.0.letters() { - if r.1 == "OK" { - all_letters.insert(l); - } - } - } - let mut letter_str = String::new(); - for l in &['V', 'H', 'D', 'C', 'R', 'Z', 'S', '3'] { - if all_letters.contains(l) { - letter_str.push(*l); - } - } - println!("{}: {} -> {:?}", child.0.label, letter_str, res); - } -} From 9b6215364d7c2f91d328b9b81acb68b29218cd78 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 21 Feb 2024 23:17:12 +0100 Subject: [PATCH 180/321] feat: update to Rust 2021 edition (#1671) * feat: update to Rust 2021 edition See transition docs: https://doc.rust-lang.org/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html The described `cargo fix --edition` does not yield any suggestions. 
* fix: clippy --------- Co-authored-by: Martin Thomson --- Cargo.toml | 2 +- neqo-client/src/main.rs | 4 ++-- neqo-crypto/build.rs | 2 +- neqo-http3/src/connection_client.rs | 8 ++++---- neqo-transport/src/addr_valid.rs | 4 ++-- neqo-transport/src/cid.rs | 3 +-- neqo-transport/src/send_stream.rs | 4 ++-- 7 files changed, 13 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 46de261a41..213f03e3fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ homepage = "https://github.com/mozilla/neqo/" repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] version = "0.7.1" -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" # Don't increase beyond what Firefox is currently using: # https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index f94ff16837..73a3f146a3 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -664,7 +664,7 @@ impl<'a> URLHandler<'a> { false } Err(e) => { - panic!("Can't create stream {}", e); + panic!("Can't create stream {e}"); } } } @@ -1190,7 +1190,7 @@ mod old { false } Err(e) => { - panic!("Error creating stream {:?}", e); + panic!("Error creating stream {e:?}"); } } } diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index 57981416ef..e5e8595e82 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -125,7 +125,7 @@ fn nss_dir() -> PathBuf { } dir }; - assert!(dir.is_dir(), "NSS_DIR {:?} doesn't exist", dir); + assert!(dir.is_dir(), "NSS_DIR {dir:?} doesn't exist"); // Note that this returns a relative path because UNC // paths on windows cause certain tools to explode. 
dir diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 4afe29f22b..b4557065cc 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -3946,7 +3946,7 @@ mod tests { ); } x => { - panic!("event {:?}", x); + panic!("event {x:?}"); } } @@ -3992,7 +3992,7 @@ mod tests { assert!(fin); } x => { - panic!("event {:?}", x); + panic!("event {x:?}"); } } // Stream should now be closed and gone @@ -4065,7 +4065,7 @@ mod tests { assert_eq!(stream_id, request_stream_id); } x => { - panic!("event {:?}", x); + panic!("event {x:?}"); } } } @@ -4129,7 +4129,7 @@ mod tests { assert!(!interim); recv_header = true; } else { - panic!("event {:?}", e); + panic!("event {e:?}"); } } assert!(recv_header); diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index d1181be825..aed796192e 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -230,7 +230,7 @@ impl AddressValidation { qinfo!("AddressValidation: valid Retry token for {}", cid); AddressValidationResult::ValidRetry(cid) } else { - panic!("AddressValidation: Retry token with small CID {}", cid); + panic!("AddressValidation: Retry token with small CID {cid}"); } } else if cid.is_empty() { // An empty connection ID means NEW_TOKEN. @@ -242,7 +242,7 @@ impl AddressValidation { AddressValidationResult::Pass } } else { - panic!("AddressValidation: NEW_TOKEN token with CID {}", cid); + panic!("AddressValidation: NEW_TOKEN token with CID {cid}"); } } else { // From here on, we have a token that we couldn't decrypt. 
diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index e876eba16d..615726160a 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -603,8 +603,7 @@ mod tests { let cid = ConnectionId::generate_initial(); assert!( matches!(cid.len(), 8..=MAX_CONNECTION_ID_LEN), - "connection ID length {:?}", - cid, + "connection ID length {cid:?}", ); } } diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index cfc1026160..afbcca7128 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -1422,7 +1422,7 @@ impl OrderGroup { pub fn insert(&mut self, stream_id: StreamId) { let Err(pos) = self.vec.binary_search(&stream_id) else { // element already in vector @ `pos` - panic!("Duplicate stream_id {}", stream_id); + panic!("Duplicate stream_id {stream_id}"); }; self.vec.insert(pos, stream_id); } @@ -1430,7 +1430,7 @@ impl OrderGroup { pub fn remove(&mut self, stream_id: StreamId) { let Ok(pos) = self.vec.binary_search(&stream_id) else { // element already in vector @ `pos` - panic!("Missing stream_id {}", stream_id); + panic!("Missing stream_id {stream_id}"); }; self.vec.remove(pos); } From 649994e1af4eca259575f6474d1a86d8627a38e8 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 22 Feb 2024 12:05:15 +1100 Subject: [PATCH 181/321] Extra clippy (#1664) * Add some more doc-stuff * Get rid of redundant imports No more use of `use super::*` in tests. Avoid importing things that are in the rust prelude. 
--- neqo-client/src/main.rs | 3 +- neqo-common/src/event.rs | 2 +- neqo-common/src/tos.rs | 2 +- neqo-crypto/src/agentio.rs | 1 - neqo-crypto/src/time.rs | 1 - neqo-http3/src/qlog.rs | 5 +- neqo-transport/src/cid.rs | 4 +- neqo-transport/src/connection/tests/close.rs | 2 +- .../src/connection/tests/handshake.rs | 4 +- neqo-transport/src/connection/tests/idle.rs | 2 +- neqo-transport/src/connection/tests/keys.rs | 2 +- .../src/connection/tests/migration.rs | 1 - neqo-transport/src/connection/tests/mod.rs | 2 +- .../src/connection/tests/priority.rs | 2 +- .../src/connection/tests/resumption.rs | 2 +- neqo-transport/src/connection/tests/vn.rs | 2 +- .../src/connection/tests/zerortt.rs | 2 +- neqo-transport/src/events.rs | 5 +- neqo-transport/src/frame.rs | 6 +- neqo-transport/src/packet/mod.rs | 9 ++- neqo-transport/src/qlog.rs | 1 - neqo-transport/src/recv_stream.rs | 37 ++++++++++-- neqo-transport/src/send_stream.rs | 57 +++++++++++++++++-- neqo-transport/src/tparams.rs | 19 ++++++- neqo-transport/tests/common/mod.rs | 2 +- neqo-transport/tests/conn_vectors.rs | 2 +- neqo-transport/tests/connection.rs | 2 +- neqo-transport/tests/retry.rs | 2 +- 28 files changed, 134 insertions(+), 47 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 73a3f146a3..3aede9e545 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -35,8 +35,7 @@ use neqo_crypto::{ init, AuthenticationStatus, Cipher, ResumptionToken, }; use neqo_http3::{ - self, Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, - Priority, + Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, Priority, }; use neqo_transport::{ CongestionControlAlgorithm, Connection, ConnectionId, ConnectionParameters, diff --git a/neqo-common/src/event.rs b/neqo-common/src/event.rs index 26052b7571..ea8d491822 100644 --- a/neqo-common/src/event.rs +++ b/neqo-common/src/event.rs @@ -4,7 +4,7 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use std::{iter::Iterator, marker::PhantomData}; +use std::marker::PhantomData; /// An event provider is able to generate a stream of events. pub trait Provider { diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs index aa360d1d53..80e073a1e4 100644 --- a/neqo-common/src/tos.rs +++ b/neqo-common/src/tos.rs @@ -202,7 +202,7 @@ impl Default for IpTos { #[cfg(test)] mod tests { - use super::*; + use crate::{IpTos, IpTosDscp, IpTosEcn}; #[test] fn iptosecn_into_u8() { diff --git a/neqo-crypto/src/agentio.rs b/neqo-crypto/src/agentio.rs index 1b0cf11ba7..0477796f96 100644 --- a/neqo-crypto/src/agentio.rs +++ b/neqo-crypto/src/agentio.rs @@ -12,7 +12,6 @@ use std::{ os::raw::{c_uint, c_void}, pin::Pin, ptr::{null, null_mut}, - vec::Vec, }; use neqo_common::{hex, hex_with_len, qtrace}; diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index 2159dd7804..db0573d126 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -7,7 +7,6 @@ #![allow(clippy::upper_case_acronyms)] use std::{ - boxed::Box, convert::{TryFrom, TryInto}, ops::Deref, os::raw::c_void, diff --git a/neqo-http3/src/qlog.rs b/neqo-http3/src/qlog.rs index c3a13fd19f..c304d4584f 100644 --- a/neqo-http3/src/qlog.rs +++ b/neqo-http3/src/qlog.rs @@ -10,10 +10,7 @@ use std::convert::TryFrom; use neqo_common::qlog::NeqoQlog; use neqo_transport::StreamId; -use qlog::{ - self, - events::{DataRecipient, EventData}, -}; +use qlog::events::{DataRecipient, EventData}; pub fn h3_data_moved_up(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) { qlog.add_event_data(|| { diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 615726160a..dfc3354159 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -10,7 +10,7 @@ use std::{ borrow::Borrow, cell::{Ref, RefCell}, cmp::{max, min}, - convert::{AsRef, TryFrom}, + convert::TryFrom, ops::Deref, rc::Rc, }; @@ 
-594,7 +594,7 @@ impl ConnectionIdManager { mod tests { use test_fixture::fixture_init; - use super::*; + use crate::{cid::MAX_CONNECTION_ID_LEN, ConnectionId}; #[test] fn generate_initial_cid() { diff --git a/neqo-transport/src/connection/tests/close.rs b/neqo-transport/src/connection/tests/close.rs index f45e77e549..5351dd0d5c 100644 --- a/neqo-transport/src/connection/tests/close.rs +++ b/neqo-transport/src/connection/tests/close.rs @@ -6,7 +6,7 @@ use std::time::Duration; -use test_fixture::{self, datagram, now}; +use test_fixture::{datagram, now}; use super::{ super::{Connection, Output, State}, diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index d08b6590f3..375118be70 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -18,8 +18,8 @@ use neqo_crypto::{ constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, }; use test_fixture::{ - self, assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, - split_datagram, DEFAULT_ADDR, + assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, split_datagram, + DEFAULT_ADDR, }; use super::{ diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index 641802249b..5d01131541 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -10,7 +10,7 @@ use std::{ }; use neqo_common::{qtrace, Encoder}; -use test_fixture::{self, now, split_datagram}; +use test_fixture::{now, split_datagram}; use super::{ super::{Connection, ConnectionParameters, IdleTimeout, Output, State}, diff --git a/neqo-transport/src/connection/tests/keys.rs b/neqo-transport/src/connection/tests/keys.rs index c247bba670..847b253284 100644 --- a/neqo-transport/src/connection/tests/keys.rs +++ b/neqo-transport/src/connection/tests/keys.rs @@ -7,7 +7,7 @@ use 
std::mem; use neqo_common::{qdebug, Datagram}; -use test_fixture::{self, now}; +use test_fixture::now; use super::{ super::{ diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 7a47ec4156..09a25faa28 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -13,7 +13,6 @@ use std::{ use neqo_common::{Datagram, Decoder}; use test_fixture::{ - self, assertions::{assert_v4_path, assert_v6_path}, fixture_init, new_neqo_qlog, now, DEFAULT_ADDR, DEFAULT_ADDR_V4, }; diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index afe01affbf..a996702947 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -18,7 +18,7 @@ use std::{ use enum_map::enum_map; use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; -use test_fixture::{self, fixture_init, new_neqo_qlog, now, DEFAULT_ADDR}; +use test_fixture::{fixture_init, new_neqo_qlog, now, DEFAULT_ADDR}; use super::{Connection, ConnectionError, ConnectionId, Output, State}; use crate::{ diff --git a/neqo-transport/src/connection/tests/priority.rs b/neqo-transport/src/connection/tests/priority.rs index b7cc9a0af2..079ba93b9f 100644 --- a/neqo-transport/src/connection/tests/priority.rs +++ b/neqo-transport/src/connection/tests/priority.rs @@ -7,7 +7,7 @@ use std::{cell::RefCell, mem, rc::Rc}; use neqo_common::event::Provider; -use test_fixture::{self, now}; +use test_fixture::now; use super::{ super::{Connection, Error, Output}, diff --git a/neqo-transport/src/connection/tests/resumption.rs b/neqo-transport/src/connection/tests/resumption.rs index c9187226d3..7410e76ef8 100644 --- a/neqo-transport/src/connection/tests/resumption.rs +++ b/neqo-transport/src/connection/tests/resumption.rs @@ -6,7 +6,7 @@ use 
std::{cell::RefCell, mem, rc::Rc, time::Duration}; -use test_fixture::{self, assertions, now}; +use test_fixture::{assertions, now}; use super::{ connect, connect_with_rtt, default_client, default_server, exchange_ticket, get_tokens, diff --git a/neqo-transport/src/connection/tests/vn.rs b/neqo-transport/src/connection/tests/vn.rs index 22f15c991c..93872a94f4 100644 --- a/neqo-transport/src/connection/tests/vn.rs +++ b/neqo-transport/src/connection/tests/vn.rs @@ -7,7 +7,7 @@ use std::{mem, time::Duration}; use neqo_common::{event::Provider, Decoder, Encoder}; -use test_fixture::{self, assertions, datagram, now}; +use test_fixture::{assertions, datagram, now}; use super::{ super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState}, diff --git a/neqo-transport/src/connection/tests/zerortt.rs b/neqo-transport/src/connection/tests/zerortt.rs index 0aa5573c98..b5e5f0d758 100644 --- a/neqo-transport/src/connection/tests/zerortt.rs +++ b/neqo-transport/src/connection/tests/zerortt.rs @@ -8,7 +8,7 @@ use std::{cell::RefCell, rc::Rc}; use neqo_common::event::Provider; use neqo_crypto::{AllowZeroRtt, AntiReplay}; -use test_fixture::{self, assertions, now}; +use test_fixture::{assertions, now}; use super::{ super::Connection, connect, default_client, default_server, exchange_ticket, new_server, diff --git a/neqo-transport/src/events.rs b/neqo-transport/src/events.rs index 88a85250ee..8331f2ba3c 100644 --- a/neqo-transport/src/events.rs +++ b/neqo-transport/src/events.rs @@ -254,8 +254,9 @@ impl EventProvider for ConnectionEvents { #[cfg(test)] mod tests { - use super::*; - use crate::{ConnectionError, Error}; + use neqo_common::event::Provider; + + use crate::{ConnectionError, ConnectionEvent, ConnectionEvents, Error, State, StreamId}; #[test] fn event_culling() { diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index a3af801925..4e58ad5ab7 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -617,7 +617,11 @@ 
impl<'a> Frame<'a> { mod tests { use neqo_common::{Decoder, Encoder}; - use super::*; + use crate::{ + cid::MAX_CONNECTION_ID_LEN, + frame::{AckRange, Frame, FRAME_TYPE_ACK}, + CloseError, Error, StreamId, StreamType, + }; fn just_dec(f: &Frame, s: &str) { let encoded = Encoder::from_hex(s); diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 71ce37501f..a6e60fe4de 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -9,7 +9,6 @@ use std::{ cmp::min, convert::TryFrom, fmt, - iter::ExactSizeIterator, ops::{Deref, DerefMut, Range}, time::Instant, }; @@ -873,10 +872,14 @@ mod tests { use neqo_common::Encoder; use test_fixture::{fixture_init, now}; - use super::*; use crate::{ + cid::MAX_CONNECTION_ID_LEN, crypto::{CryptoDxState, CryptoStates}, - EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version, + packet::{ + PacketBuilder, PacketType, PublicPacket, PACKET_BIT_FIXED_QUIC, PACKET_BIT_LONG, + PACKET_BIT_SPIN, + }, + ConnectionId, EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version, }; const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index f6d3f4e1e2..949b9c452b 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -9,7 +9,6 @@ use std::{ convert::TryFrom, ops::{Deref, RangeInclusive}, - string::String, time::Duration, }; diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index d68da02689..f6063dd80d 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -131,6 +131,7 @@ pub struct RxStreamOrderer { } impl RxStreamOrderer { + #[must_use] pub fn new() -> Self { Self::default() } @@ -138,6 +139,9 @@ impl RxStreamOrderer { /// Process an incoming stream frame off the wire. 
This may result in data /// being available to upper layers if frame is not out of order (ooo) or /// if the frame fills a gap. + /// # Panics + /// Only when `u64` values cannot be converted to `usize`, which only + /// happens on 32-bit machines that hold far too much data at the same time. pub fn inbound_frame(&mut self, mut new_start: u64, mut new_data: &[u8]) { qtrace!("Inbound data offset={} len={}", new_start, new_data.len()); @@ -276,6 +280,7 @@ impl RxStreamOrderer { } /// Are any bytes readable? + #[must_use] pub fn data_ready(&self) -> bool { self.data_ranges .keys() @@ -309,10 +314,12 @@ impl RxStreamOrderer { } /// Bytes read by the application. + #[must_use] pub fn retired(&self) -> u64 { self.retired } + #[must_use] pub fn received(&self) -> u64 { self.received } @@ -591,6 +598,7 @@ impl RecvStream { self.state = new_state; } + #[must_use] pub fn stats(&self) -> RecvStreamStats { match &self.state { RecvStreamState::Recv { recv_buf, .. } @@ -625,6 +633,11 @@ impl RecvStream { } } + /// # Errors + /// When the incoming data violates flow control limits. + /// # Panics + /// Only when `u64` values are so big that they can't fit in a `usize`, which + /// only happens on a 32-bit machine that has far too much unread data. pub fn inbound_stream_frame(&mut self, fin: bool, offset: u64, data: &[u8]) -> Res<()> { // We should post a DataReadable event only once when we change from no-data-ready to // data-ready. Therefore remember the state before processing a new frame. @@ -694,6 +707,8 @@ impl RecvStream { Ok(()) } + /// # Errors + /// When the reset occurs at an invalid point. 
pub fn reset(&mut self, application_error_code: AppError, final_size: u64) -> Res<()> { self.state.flow_control_consume_data(final_size, true)?; match &mut self.state { @@ -776,6 +791,7 @@ impl RecvStream { } } + #[must_use] pub fn is_terminal(&self) -> bool { matches!( self.state, @@ -795,8 +811,8 @@ impl RecvStream { } /// # Errors - /// /// `NoMoreData` if data and fin bit were previously read by the application. + #[allow(clippy::missing_panics_doc)] // with a >16 exabyte packet on a 128-bit machine, maybe pub fn read(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { let data_recvd_state = matches!(self.state, RecvStreamState::DataRecvd { .. }); match &mut self.state { @@ -970,6 +986,7 @@ impl RecvStream { } #[cfg(test)] + #[must_use] pub fn has_frames_to_write(&self) -> bool { if let RecvStreamState::Recv { fc, .. } = &self.state { fc.frame_needed() @@ -979,6 +996,7 @@ impl RecvStream { } #[cfg(test)] + #[must_use] pub fn fc(&self) -> Option<&ReceiverFlowControl> { match &self.state { RecvStreamState::Recv { fc, .. } @@ -993,11 +1011,18 @@ impl RecvStream { #[cfg(test)] mod tests { - use std::ops::Range; - - use neqo_common::Encoder; - - use super::*; + use std::{cell::RefCell, convert::TryFrom, ops::Range, rc::Rc}; + + use neqo_common::{qtrace, Encoder}; + + use super::RecvStream; + use crate::{ + fc::ReceiverFlowControl, + packet::PacketBuilder, + recv_stream::{RxStreamOrderer, RX_STREAM_DATA_WINDOW}, + stats::FrameStats, + ConnectionEvents, Error, StreamId, RECV_BUFFER_SIZE, + }; const SESSION_WINDOW: usize = 1024; diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index afbcca7128..3d65a7d6c3 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -224,6 +224,7 @@ impl RangeTracker { /// The only tricky parts are making sure that we maintain `self.acked`, /// which is the first acknowledged range. 
And making sure that we don't create /// ranges of the same type that are adjacent; these need to be merged. + #[allow(clippy::missing_panics_doc)] // with a >16 exabyte packet on a 128-bit machine, maybe pub fn mark_acked(&mut self, new_off: u64, new_len: usize) { let end = new_off + u64::try_from(new_len).unwrap(); let new_off = max(self.acked, new_off); @@ -321,6 +322,7 @@ impl RangeTracker { /// + SS /// = SSSSSS /// ``` + #[allow(clippy::missing_panics_doc)] // not possible pub fn mark_sent(&mut self, mut new_off: u64, new_len: usize) { let new_end = new_off + u64::try_from(new_len).unwrap(); new_off = max(self.acked, new_off); @@ -474,6 +476,9 @@ impl RangeTracker { } /// Unmark all sent ranges. + /// # Panics + /// On 32-bit machines where far too much is sent before calling this. + /// Note that this should not be called for handshakes, which should never exceed that limit. pub fn unmark_sent(&mut self) { self.unmark_range(0, usize::try_from(self.highest_offset()).unwrap()); } @@ -487,6 +492,7 @@ pub struct TxBuffer { } impl TxBuffer { + #[must_use] pub fn new() -> Self { Self::default() } @@ -496,11 +502,12 @@ impl TxBuffer { let can_buffer = min(SEND_BUFFER_SIZE - self.buffered(), buf.len()); if can_buffer > 0 { self.send_buf.extend(&buf[..can_buffer]); - assert!(self.send_buf.len() <= SEND_BUFFER_SIZE); + debug_assert!(self.send_buf.len() <= SEND_BUFFER_SIZE); } can_buffer } + #[allow(clippy::missing_panics_doc)] // These are not possible. pub fn next_bytes(&mut self) -> Option<(u64, &[u8])> { let (start, maybe_len) = self.ranges.first_unmarked_range(); @@ -537,6 +544,7 @@ impl TxBuffer { self.ranges.mark_sent(offset, len); } + #[allow(clippy::missing_panics_doc)] // Not possible here. 
pub fn mark_as_acked(&mut self, offset: u64, len: usize) { let prev_retired = self.retired(); self.ranges.mark_acked(offset, len); @@ -560,6 +568,7 @@ impl TxBuffer { self.ranges.unmark_sent(); } + #[must_use] pub fn retired(&self) -> u64 { self.ranges.acked_from_zero() } @@ -788,6 +797,7 @@ impl SendStream { self.fair = make_fair; } + #[must_use] pub fn is_fair(&self) -> bool { self.fair } @@ -801,6 +811,7 @@ impl SendStream { self.retransmission_priority = retransmission; } + #[must_use] pub fn sendorder(&self) -> Option { self.sendorder } @@ -810,6 +821,7 @@ impl SendStream { } /// If all data has been buffered or written, how much was sent. + #[must_use] pub fn final_size(&self) -> Option { match &self.state { SendStreamState::DataSent { send_buf, .. } => Some(send_buf.used()), @@ -818,10 +830,13 @@ impl SendStream { } } + #[must_use] pub fn stats(&self) -> SendStreamStats { SendStreamStats::new(self.bytes_written(), self.bytes_sent, self.bytes_acked()) } + #[must_use] + #[allow(clippy::missing_panics_doc)] // not possible pub fn bytes_written(&self) -> u64 { match &self.state { SendStreamState::Send { send_buf, .. } | SendStreamState::DataSent { send_buf, .. } => { @@ -844,6 +859,7 @@ impl SendStream { } } + #[must_use] pub fn bytes_acked(&self) -> u64 { match &self.state { SendStreamState::Send { send_buf, .. } | SendStreamState::DataSent { send_buf, .. } => { @@ -933,6 +949,7 @@ impl SendStream { } /// Maybe write a `STREAM` frame. 
+ #[allow(clippy::missing_panics_doc)] // not possible pub fn write_stream_frame( &mut self, priority: TransmissionPriority, @@ -1095,6 +1112,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn mark_as_sent(&mut self, offset: u64, len: usize, fin: bool) { self.bytes_sent = max(self.bytes_sent, offset + u64::try_from(len).unwrap()); @@ -1110,6 +1128,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn mark_as_acked(&mut self, offset: u64, len: usize, fin: bool) { match self.state { SendStreamState::Send { @@ -1147,6 +1166,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn mark_as_lost(&mut self, offset: u64, len: usize, fin: bool) { self.retransmission_offset = max( self.retransmission_offset, @@ -1175,6 +1195,7 @@ impl SendStream { /// Bytes sendable on stream. Constrained by stream credit available, /// connection credit available, and space in the tx buffer. + #[must_use] pub fn avail(&self) -> usize { if let SendStreamState::Ready { fc, conn_fc } | SendStreamState::Send { fc, conn_fc, .. } = &self.state @@ -1200,6 +1221,7 @@ impl SendStream { } } + #[must_use] pub fn is_terminal(&self) -> bool { matches!( self.state, @@ -1207,10 +1229,14 @@ impl SendStream { ) } + /// # Errors + /// When `buf` is empty or when the stream is already closed. pub fn send(&mut self, buf: &[u8]) -> Res { self.send_internal(buf, false) } + /// # Errors + /// When `buf` is empty or when the stream is already closed. pub fn send_atomic(&mut self, buf: &[u8]) -> Res { self.send_internal(buf, true) } @@ -1302,6 +1328,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn reset(&mut self, err: AppError) { match &self.state { SendStreamState::Ready { fc, .. 
} => { @@ -1396,6 +1423,7 @@ impl OrderGroup { } } + #[must_use] pub fn stream_ids(&self) -> &Vec { &self.vec } @@ -1419,6 +1447,8 @@ impl OrderGroup { next } + /// # Panics + /// If the stream ID is already present. pub fn insert(&mut self, stream_id: StreamId) { let Err(pos) = self.vec.binary_search(&stream_id) else { // element already in vector @ `pos` @@ -1427,6 +1457,8 @@ impl OrderGroup { self.vec.insert(pos, stream_id); } + /// # Panics + /// If the stream ID is not present. pub fn remove(&mut self, stream_id: StreamId) { let Ok(pos) = self.vec.binary_search(&stream_id) else { // element already in vector @ `pos` @@ -1734,10 +1766,23 @@ pub struct SendStreamRecoveryToken { #[cfg(test)] mod tests { - use neqo_common::{event::Provider, hex_with_len, qtrace}; - - use super::*; - use crate::events::ConnectionEvent; + use std::{cell::RefCell, collections::VecDeque, convert::TryFrom, rc::Rc}; + + use neqo_common::{event::Provider, hex_with_len, qtrace, Encoder}; + + use super::SendStreamRecoveryToken; + use crate::{ + connection::{RetransmissionPriority, TransmissionPriority}, + events::ConnectionEvent, + fc::SenderFlowControl, + packet::PacketBuilder, + recovery::{RecoveryToken, StreamRecoveryToken}, + send_stream::{ + RangeState, RangeTracker, SendStream, SendStreamState, SendStreams, TxBuffer, + }, + stats::FrameStats, + ConnectionEvents, StreamId, SEND_BUFFER_SIZE, + }; fn connection_fc(limit: u64) -> Rc>> { Rc::new(RefCell::new(SenderFlowControl::new((), limit))) @@ -2210,7 +2255,7 @@ mod tests { let big_buf = vec![1; SEND_BUFFER_SIZE * 2]; assert_eq!(txb.send(&big_buf), SEND_BUFFER_SIZE); assert!(matches!(txb.next_bytes(), - Some((0, x)) if x.len()==SEND_BUFFER_SIZE + Some((0, x)) if x.len() == SEND_BUFFER_SIZE && x.iter().all(|ch| *ch == 1))); // Mark almost all as sent. 
Get what's left diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index 509c96a65b..d530ba8972 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -776,7 +776,24 @@ where #[cfg(test)] #[allow(unused_variables)] mod tests { - use super::*; + use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}; + + use neqo_common::{Decoder, Encoder}; + + use super::PreferredAddress; + use crate::{ + tparams::{ + TransportParameter, TransportParameterId, TransportParameters, + ACTIVE_CONNECTION_ID_LIMIT, IDLE_TIMEOUT, INITIAL_MAX_DATA, INITIAL_MAX_STREAMS_BIDI, + INITIAL_MAX_STREAMS_UNI, INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, + INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, INITIAL_MAX_STREAM_DATA_UNI, + INITIAL_SOURCE_CONNECTION_ID, MAX_ACK_DELAY, MAX_DATAGRAM_FRAME_SIZE, + MAX_UDP_PAYLOAD_SIZE, MIN_ACK_DELAY, ORIGINAL_DESTINATION_CONNECTION_ID, + PREFERRED_ADDRESS, RETRY_SOURCE_CONNECTION_ID, STATELESS_RESET_TOKEN, + VERSION_INFORMATION, + }, + ConnectionId, Error, Version, + }; #[test] fn basic_tps() { diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index 2349092d9e..0e1a06a329 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -20,7 +20,7 @@ use neqo_transport::{ server::{ActiveConnectionRef, Server, ValidateAddress}, Connection, ConnectionEvent, ConnectionParameters, State, }; -use test_fixture::{self, default_client, now, CountingConnectionIdGenerator}; +use test_fixture::{default_client, now, CountingConnectionIdGenerator}; /// Create a server. This is different than the one in the fixture, which is a single connection. 
pub fn new_server(params: ConnectionParameters) -> Server { diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index 91dbbf31cc..0cdd318fce 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -13,7 +13,7 @@ use std::{cell::RefCell, rc::Rc}; use neqo_transport::{ Connection, ConnectionParameters, RandomConnectionIdGenerator, State, Version, }; -use test_fixture::{self, datagram, now}; +use test_fixture::{datagram, now}; const INITIAL_PACKET_V2: &[u8] = &[ 0xd7, 0x6b, 0x33, 0x43, 0xcf, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00, diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 6f8aa393af..e29154d888 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -15,7 +15,7 @@ use common::{ }; use neqo_common::{Datagram, Decoder, Encoder, Role}; use neqo_transport::{ConnectionError, ConnectionParameters, Error, State, Version}; -use test_fixture::{self, default_client, default_server, new_client, now, split_datagram}; +use test_fixture::{default_client, default_server, new_client, now, split_datagram}; #[test] fn connect() { diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 7245337aa1..6f21c9f688 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -23,7 +23,7 @@ use common::{ use neqo_common::{hex_with_len, qdebug, qtrace, Datagram, Encoder, Role}; use neqo_crypto::AuthenticationStatus; use neqo_transport::{server::ValidateAddress, ConnectionError, Error, State, StreamType}; -use test_fixture::{self, assertions, datagram, default_client, now, split_datagram}; +use test_fixture::{assertions, datagram, default_client, now, split_datagram}; #[test] fn retry_basic() { From 5d80d09383aa3bd8612cd3b3571c661dbbe7c39e Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 22 Feb 2024 02:05:32 +0100 Subject: [PATCH 182/321] refactor: derive default 
for unit enum variants (#1667) For unit enum default variants, derive `Default` and use the `#[default]` attribute. See https://doc.rust-lang.org/std/default/trait.Default.html#enums for details. Co-authored-by: Martin Thomson --- neqo-http3/src/lib.rs | 9 ++------- neqo-qpack/src/reader.rs | 17 ++++++++--------- neqo-transport/src/send_stream.rs | 9 ++------- neqo-transport/src/version.rs | 9 ++------- 4 files changed, 14 insertions(+), 30 deletions(-) diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index aa62e599b5..e16be709df 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -433,20 +433,15 @@ pub enum Http3StreamType { } #[must_use] -#[derive(PartialEq, Eq, Debug)] +#[derive(Default, PartialEq, Eq, Debug)] enum ReceiveOutput { + #[default] NoOutput, ControlFrames(Vec), UnblockedStreams(Vec), NewStream(NewStreamType), } -impl Default for ReceiveOutput { - fn default() -> Self { - Self::NoOutput - } -} - trait Stream: Debug { fn stream_type(&self) -> Http3StreamType; } diff --git a/neqo-qpack/src/reader.rs b/neqo-qpack/src/reader.rs index ff9c42b246..22ff24ebee 100644 --- a/neqo-qpack/src/reader.rs +++ b/neqo-qpack/src/reader.rs @@ -223,20 +223,19 @@ impl IntReader { } } -#[derive(Debug)] +#[derive(Debug, Default)] enum LiteralReaderState { + #[default] ReadHuffman, - ReadLength { reader: IntReader }, - ReadLiteral { offset: usize }, + ReadLength { + reader: IntReader, + }, + ReadLiteral { + offset: usize, + }, Done, } -impl Default for LiteralReaderState { - fn default() -> Self { - Self::ReadHuffman - } -} - /// This is decoder of a literal with a prefix: /// 1) ignores `prefix_len` bits of the first byte, /// 2) reads "huffman bit" diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 3d65a7d6c3..78c3596785 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -111,7 +111,7 @@ impl Add for TransmissionPriority { /// If data is lost, this determines the priority 
that applies to retransmissions /// of that data. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub enum RetransmissionPriority { /// Prioritize retransmission at a fixed priority. /// With this, it is possible to prioritize retransmissions lower than transmissions. @@ -123,18 +123,13 @@ pub enum RetransmissionPriority { Same, /// Increase the priority of retransmissions (the default). /// Retransmissions of `Critical` or `Important` aren't elevated at all. + #[default] Higher, /// Increase the priority of retransmissions a lot. /// This is useful for streams that are particularly exposed to head-of-line blocking. MuchHigher, } -impl Default for RetransmissionPriority { - fn default() -> Self { - Self::Higher - } -} - #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum RangeState { Sent, diff --git a/neqo-transport/src/version.rs b/neqo-transport/src/version.rs index 16d394532d..cf75c9bd01 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -12,9 +12,10 @@ use crate::{Error, Res}; pub type WireVersion = u32; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] pub enum Version { Version2, + #[default] Version1, Draft29, Draft30, @@ -124,12 +125,6 @@ impl Version { } } -impl Default for Version { - fn default() -> Self { - Self::Version1 - } -} - impl TryFrom for Version { type Error = Error; From 644761a13e2e35fa7e1651d14e8032f22fa15363 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 26 Feb 2024 00:41:26 +0100 Subject: [PATCH 183/321] feat(.github): add merge_group trigger to workflows (#1674) Enables workflows to run on pull requests within the merge queue. 
--- .github/workflows/check.yml | 3 +++ .github/workflows/qns.yml | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 613c2e9104..2bedb649ad 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -6,6 +6,9 @@ on: pull_request: branches: ["main"] paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index ea9c7fb041..00c8595f1a 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -9,6 +9,11 @@ on: paths: - 'qns/**' - '.github/workflows/qns.yml' + merge_group: + branches: ["main"] + paths: + - 'qns/**' + - '.github/workflows/qns.yml' jobs: docker-image: runs-on: ubuntu-latest From 2ac86b4ca30db305cb30f592267098d4206f117b Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 26 Feb 2024 17:48:13 +1100 Subject: [PATCH 184/321] Benchmark tweaks (#1663) * Benchmark tweaks 1. Use NSS Release 2. Add workflow_dispatch 3. 
Move some env values up in the file * YAML corrections Co-authored-by: Lars Eggert Signed-off-by: Martin Thomson * Build NSS optimized --------- Signed-off-by: Martin Thomson Co-authored-by: Lars Eggert --- .github/workflows/bench.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 2d7c8933c4..6458eb541a 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,12 +1,15 @@ name: Bench on: workflow_call: + workflow_dispatch: env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_PROFILE_RELEASE_DEBUG: true CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 TOOLCHAIN: nightly + RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment + PERF_CMD: record -o perf.data -F997 --call-graph lbr -g jobs: bench: @@ -15,6 +18,8 @@ jobs: defaults: run: shell: bash + env: + LD_LIBRARY_PATH: ${{ github.workspace }}/dist/Release/lib steps: - name: Checkout @@ -27,18 +32,16 @@ jobs: components: rustfmt - name: Configure Rust - run: | - echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment" >> "$GITHUB_ENV" - cargo install flamegraph + run: cargo install flamegraph - name: Fetch NSS and NSPR uses: ./.github/actions/nss - name: Build run: | + $NSS_DIR/build.sh -g -Ddisable_tests=1 -o cargo +$TOOLCHAIN bench --features bench --no-run cargo +$TOOLCHAIN build --release --bin neqo-client --bin neqo-server - echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" - name: Download criterion results id: criterion-cache @@ -58,9 +61,7 @@ jobs: # Disable turboboost, hyperthreading and use performance governor. - name: Prepare machine - run: | - sudo /root/bin/prep.sh - echo "PERF_CMD=record -o perf.data -F997 --call-graph lbr -g" >> "$GITHUB_ENV" + run: sudo /root/bin/prep.sh # Pin the benchmark run to core 0 and run all benchmarks at elevated priority. 
- name: Run cargo bench From fa8ce91b775aff58faebf61cc6b7a12f560b001c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 26 Feb 2024 07:54:19 +0100 Subject: [PATCH 185/321] refactor(client,server): use quinn_udp for I/O (#1604) * Initial commit * refactor(server): replace mio with tokio * Move ready logic into fn * Extend expect docs * Restrict tokio features * Only process datagram once * Remove superfluous pub * fmt * Fix send path * Fix receive path * Instantiate socket state once * Fix busy loop * Have neqo-client use quinn-udp * Add TODO * Await writable * Unify tx and rx * Introduce wrapper type Socket * Move bind to common * Check if datagram was sent as a whole and avoid allocation * Make into_data pub(crate) * Refactor send * Reference issue * Remove debugs * Fix test * Reduce diff * Reduce diff * Pin quinn-udp to rev * Address clippy lints * fmt * fmt * clippy * clippy * Pass None ttl Not yet supported by quinn-udp. * Debug race condition on Windows * Debug windows failure * Have receiver use random port * Test with Ect1 instead of Ce Windows does not allow setting Ce: > Your application isn't allowed to specify the Congestion Encountered (CE) code point when sending datagrams. The send will return with error WSAEINVAL. https://learn.microsoft.com/en-us/windows/win32/winsock/winsock-ecn * Revert "Debug windows failure" This reverts commit e9ac36cb557a2edd5be3654b37202199ca4634ef. * Revert "Debug race condition on Windows" This reverts commit 6f330d353eabaae5615d24aa3a7bcbd1549e2f26. 
* Fold tos_tx * Add reason to clippy lint ignore * fix: include quinn-udp IPv4-mapped IPv6 patch https://github.com/quinn-rs/quinn/pull/1765 --------- Co-authored-by: Lars Eggert --- neqo-client/Cargo.toml | 2 +- neqo-client/src/main.rs | 95 ++++------------------ neqo-common/Cargo.toml | 3 + neqo-common/src/datagram.rs | 5 ++ neqo-common/src/lib.rs | 2 + neqo-common/src/tos.rs | 35 ++++++++ neqo-common/src/udp.rs | 154 ++++++++++++++++++++++++++++++++++++ neqo-server/Cargo.toml | 2 +- neqo-server/src/main.rs | 81 ++++--------------- 9 files changed, 233 insertions(+), 146 deletions(-) create mode 100644 neqo-common/src/udp.rs diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 08fe2f8fcd..e9b48221aa 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -15,7 +15,7 @@ clap = { version = "4.4", features = ["derive"] } futures = "0.3" hex = "0.4" log = { version = "0.4", default-features = false } -neqo-common = { path = "./../neqo-common" } +neqo-common = { path = "./../neqo-common", features = ["udp"] } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 3aede9e545..7584697a47 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -22,13 +22,12 @@ use std::{ }; use clap::Parser; -use common::IpTos; use futures::{ future::{select, Either}, FutureExt, TryFutureExt, }; use neqo_common::{ - self as common, event::Provider, hex, qdebug, qinfo, qlog::NeqoQlog, Datagram, Role, + self as common, event::Provider, hex, qdebug, qinfo, qlog::NeqoQlog, udp, Datagram, Role, }; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -42,7 +41,7 @@ use neqo_transport::{ EmptyConnectionIdGenerator, Error as TransportError, StreamId, StreamType, Version, }; use qlog::{events::EventImportance, streamer::QlogStreamer}; -use 
tokio::{net::UdpSocket, time::Sleep}; +use tokio::time::Sleep; use url::{Origin, Url}; #[derive(Debug)] @@ -351,21 +350,6 @@ impl QuicParameters { } } -async fn emit_datagram(socket: &UdpSocket, out_dgram: Datagram) -> Result<(), io::Error> { - let sent = match socket.send_to(&out_dgram, &out_dgram.destination()).await { - Ok(res) => res, - Err(ref err) if err.kind() != io::ErrorKind::WouldBlock => { - eprintln!("UDP send error: {err:?}"); - 0 - } - Err(e) => return Err(e), - }; - if sent != out_dgram.len() { - eprintln!("Unable to send all {} bytes of datagram", out_dgram.len()); - } - Ok(()) -} - fn get_output_file( url: &Url, output_dir: &Option, @@ -415,7 +399,7 @@ enum Ready { // Wait for the socket to be readable or the timeout to fire. async fn ready( - socket: &UdpSocket, + socket: &udp::Socket, mut timeout: Option<&mut Pin>>, ) -> Result { let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket); @@ -426,43 +410,6 @@ async fn ready( select(socket_ready, timeout_ready).await.factor_first().0 } -fn read_dgram( - socket: &UdpSocket, - local_address: &SocketAddr, -) -> Result, io::Error> { - let buf = &mut [0u8; 2048]; - let (sz, remote_addr) = match socket.try_recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == io::ErrorKind::WouldBlock - || err.kind() == io::ErrorKind::Interrupted => - { - return Ok(None) - } - Err(err) => { - eprintln!("UDP recv error: {err:?}"); - return Err(err); - } - Ok(res) => res, - }; - - if sz == buf.len() { - eprintln!("Might have received more than {} bytes", buf.len()); - } - - if sz == 0 { - eprintln!("zero length datagram received?"); - Ok(None) - } else { - Ok(Some(Datagram::new( - remote_addr, - *local_address, - IpTos::default(), - None, - &buf[..sz], - ))) - } -} - trait StreamHandler { fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
); fn process_data_readable( @@ -817,7 +764,7 @@ fn to_headers(values: &[impl AsRef]) -> Vec
{ struct ClientRunner<'a> { local_addr: SocketAddr, - socket: &'a UdpSocket, + socket: &'a udp::Socket, client: Http3Client, handler: Handler<'a>, timeout: Option>>, @@ -827,7 +774,7 @@ struct ClientRunner<'a> { impl<'a> ClientRunner<'a> { fn new( args: &'a mut Args, - socket: &'a UdpSocket, + socket: &'a udp::Socket, local_addr: SocketAddr, remote_addr: SocketAddr, hostname: &str, @@ -880,7 +827,7 @@ impl<'a> ClientRunner<'a> { match ready(self.socket, self.timeout.as_mut()).await? { Ready::Socket => loop { - let dgram = read_dgram(self.socket, &self.local_addr)?; + let dgram = self.socket.recv(&self.local_addr)?; if dgram.is_none() { break; } @@ -915,7 +862,8 @@ impl<'a> ClientRunner<'a> { loop { match self.client.process(dgram.take(), Instant::now()) { Output::Datagram(dgram) => { - emit_datagram(self.socket, dgram).await?; + self.socket.writable().await?; + self.socket.send(dgram)?; } Output::Callback(new_timeout) => { qinfo!("Setting timeout of {:?}", new_timeout); @@ -1051,16 +999,7 @@ async fn main() -> Res<()> { SocketAddr::V6(..) 
=> SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), }; - let socket = match std::net::UdpSocket::bind(local_addr) { - Err(e) => { - eprintln!("Unable to bind UDP socket: {e}"); - exit(1) - } - Ok(s) => s, - }; - socket.set_nonblocking(true)?; - let socket = UdpSocket::from_std(socket)?; - + let socket = udp::Socket::bind(local_addr)?; let real_local = socket.local_addr().unwrap(); println!( "{} Client connecting: {:?} -> {:?}", @@ -1125,17 +1064,16 @@ mod old { time::Instant, }; - use neqo_common::{event::Provider, qdebug, qinfo, Datagram}; + use neqo_common::{event::Provider, qdebug, qinfo, udp, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, StreamType, }; - use tokio::{net::UdpSocket, time::Sleep}; + use tokio::time::Sleep; use url::Url; - use super::{get_output_file, qlog_new, read_dgram, ready, Args, KeyUpdateState, Ready, Res}; - use crate::emit_datagram; + use super::{get_output_file, qlog_new, ready, Args, KeyUpdateState, Ready, Res}; struct HandlerOld<'b> { streams: HashMap>, @@ -1321,7 +1259,7 @@ mod old { pub struct ClientRunner<'a> { local_addr: SocketAddr, - socket: &'a UdpSocket, + socket: &'a udp::Socket, client: Connection, handler: HandlerOld<'a>, timeout: Option>>, @@ -1331,7 +1269,7 @@ mod old { impl<'a> ClientRunner<'a> { pub fn new( args: &'a Args, - socket: &'a UdpSocket, + socket: &'a udp::Socket, local_addr: SocketAddr, remote_addr: SocketAddr, origin: &str, @@ -1394,7 +1332,7 @@ mod old { match ready(self.socket, self.timeout.as_mut()).await? 
{ Ready::Socket => loop { - let dgram = read_dgram(self.socket, &self.local_addr)?; + let dgram = self.socket.recv(&self.local_addr)?; if dgram.is_none() { break; } @@ -1430,7 +1368,8 @@ mod old { loop { match self.client.process(dgram.take(), Instant::now()) { Output::Datagram(dgram) => { - emit_datagram(self.socket, dgram).await?; + self.socket.writable().await?; + self.socket.send(dgram)?; } Output::Callback(new_timeout) => { qinfo!("Setting timeout of {:?}", new_timeout); diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 25122e9b87..7017ff9600 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -15,13 +15,16 @@ enum-map = "2.7" env_logger = { version = "0.10", default-features = false } log = { version = "0.4", default-features = false } qlog = "0.12" +quinn-udp = { git = "https://github.com/quinn-rs/quinn/", rev = "a947962131aba8a6521253d03cc948b20098a2d6", optional = true } time = { version = "0.3", features = ["formatting"] } +tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"], optional = true } [dev-dependencies] test-fixture = { path = "../test-fixture" } [features] ci = [] +udp = ["dep:quinn-udp", "dep:tokio"] [target."cfg(windows)".dependencies.winapi] version = "0.3" diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index 1729c8ed8d..d6ed43bde1 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -53,6 +53,11 @@ impl Datagram { pub fn ttl(&self) -> Option { self.ttl } + + #[must_use] + pub(crate) fn into_data(self) -> Vec { + self.d + } } impl Deref for Datagram { diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index ee97408a41..7b7bf6a163 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -17,6 +17,8 @@ pub mod log; pub mod qlog; pub mod timer; pub mod tos; +#[cfg(feature = "udp")] +pub mod udp; use std::fmt::Write; diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs index 80e073a1e4..3610f72750 
100644 --- a/neqo-common/src/tos.rs +++ b/neqo-common/src/tos.rs @@ -46,6 +46,12 @@ impl From for IpTosEcn { } } +impl From for IpTosEcn { + fn from(value: IpTos) -> Self { + IpTosEcn::from(value.0 & 0x3) + } +} + /// Diffserv Codepoints, mapped to the upper six bits of the TOS field. /// #[derive(Copy, Clone, PartialEq, Eq, Enum, Default, Debug)] @@ -159,6 +165,12 @@ impl From for IpTosDscp { } } +impl From for IpTosDscp { + fn from(value: IpTos) -> Self { + IpTosDscp::from(value.0 & 0xfc) + } +} + /// The type-of-service field in an IP packet. #[allow(clippy::module_name_repetitions)] #[derive(Copy, Clone, PartialEq, Eq)] @@ -169,22 +181,37 @@ impl From for IpTos { Self(u8::from(v)) } } + impl From for IpTos { fn from(v: IpTosDscp) -> Self { Self(u8::from(v)) } } + impl From<(IpTosDscp, IpTosEcn)> for IpTos { fn from(v: (IpTosDscp, IpTosEcn)) -> Self { Self(u8::from(v.0) | u8::from(v.1)) } } + +impl From<(IpTosEcn, IpTosDscp)> for IpTos { + fn from(v: (IpTosEcn, IpTosDscp)) -> Self { + Self(u8::from(v.0) | u8::from(v.1)) + } +} + impl From for u8 { fn from(v: IpTos) -> Self { v.0 } } +impl From for IpTos { + fn from(v: u8) -> Self { + Self(v) + } +} + impl Debug for IpTos { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("IpTos") @@ -287,4 +314,12 @@ mod tests { let iptos_dscp: IpTos = dscp.into(); assert_eq!(u8::from(iptos_dscp), dscp as u8); } + + #[test] + fn u8_to_iptos() { + let tos = 0x8b; + let iptos: IpTos = (IpTosEcn::Ce, IpTosDscp::Af41).into(); + assert_eq!(tos, u8::from(iptos)); + assert_eq!(IpTos::from(tos), iptos); + } } diff --git a/neqo-common/src/udp.rs b/neqo-common/src/udp.rs new file mode 100644 index 0000000000..7ad0b97625 --- /dev/null +++ b/neqo-common/src/udp.rs @@ -0,0 +1,154 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(clippy::missing_errors_doc)] // Functions simply delegate to tokio and quinn-udp. +#![allow(clippy::missing_panics_doc)] // Functions simply delegate to tokio and quinn-udp. + +use std::{ + io::{self, IoSliceMut}, + net::{SocketAddr, ToSocketAddrs}, + slice, +}; + +use quinn_udp::{EcnCodepoint, RecvMeta, Transmit, UdpSocketState}; +use tokio::io::Interest; + +use crate::{Datagram, IpTos}; + +pub struct Socket { + socket: tokio::net::UdpSocket, + state: UdpSocketState, +} + +impl Socket { + /// Calls [`std::net::UdpSocket::bind`] and instantiates [`quinn_udp::UdpSocketState`]. + pub fn bind(addr: A) -> Result { + let socket = std::net::UdpSocket::bind(addr)?; + + Ok(Self { + state: quinn_udp::UdpSocketState::new((&socket).into())?, + socket: tokio::net::UdpSocket::from_std(socket)?, + }) + } + + /// See [`tokio::net::UdpSocket::local_addr`]. + pub fn local_addr(&self) -> io::Result { + self.socket.local_addr() + } + + /// See [`tokio::net::UdpSocket::writable`]. + pub async fn writable(&self) -> Result<(), io::Error> { + self.socket.writable().await + } + + /// See [`tokio::net::UdpSocket::readable`]. + pub async fn readable(&self) -> Result<(), io::Error> { + self.socket.readable().await + } + + /// Send the UDP datagram on the specified socket. + pub fn send(&self, d: Datagram) -> io::Result { + let transmit = Transmit { + destination: d.destination(), + ecn: EcnCodepoint::from_bits(Into::::into(d.tos())), + contents: d.into_data().into(), + segment_size: None, + src_ip: None, + }; + + let n = self.socket.try_io(Interest::WRITABLE, || { + self.state + .send((&self.socket).into(), slice::from_ref(&transmit)) + })?; + + assert_eq!(n, 1, "only passed one slice"); + + Ok(n) + } + + /// Receive a UDP datagram on the specified socket. 
+ pub fn recv(&self, local_address: &SocketAddr) -> Result, io::Error> { + let mut buf = [0; u16::MAX as usize]; + + let mut meta = RecvMeta::default(); + + match self.socket.try_io(Interest::READABLE, || { + self.state.recv( + (&self.socket).into(), + &mut [IoSliceMut::new(&mut buf)], + slice::from_mut(&mut meta), + ) + }) { + Ok(n) => { + assert_eq!(n, 1, "only passed one slice"); + } + Err(ref err) + if err.kind() == io::ErrorKind::WouldBlock + || err.kind() == io::ErrorKind::Interrupted => + { + return Ok(None) + } + Err(err) => { + return Err(err); + } + }; + + if meta.len == 0 { + eprintln!("zero length datagram received?"); + return Ok(None); + } + + if meta.len == buf.len() { + eprintln!("Might have received more than {} bytes", buf.len()); + } + + Ok(Some(Datagram::new( + meta.addr, + *local_address, + meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(), + None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749 + &buf[..meta.len], + ))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{IpTos, IpTosDscp, IpTosEcn}; + + #[tokio::test] + async fn datagram_tos() -> Result<(), io::Error> { + let sender = Socket::bind("127.0.0.1:0")?; + let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); + let receiver = Socket::bind(receiver_addr)?; + + let datagram = Datagram::new( + sender.local_addr()?, + receiver.local_addr()?, + IpTos::from((IpTosDscp::Le, IpTosEcn::Ect1)), + None, + "Hello, world!".as_bytes().to_vec(), + ); + + sender.writable().await?; + sender.send(datagram.clone())?; + + receiver.readable().await?; + let received_datagram = receiver + .recv(&receiver_addr) + .expect("receive to succeed") + .expect("receive to yield datagram"); + + // Assert that the ECN is correct. 
+ assert_eq!( + IpTosEcn::from(datagram.tos()), + IpTosEcn::from(received_datagram.tos()) + ); + + Ok(()) + } +} diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 7a83685c9f..b2c36ed21f 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -14,7 +14,7 @@ license.workspace = true clap = { version = "4.4", features = ["derive"] } futures = "0.3" log = { version = "0.4", default-features = false } -neqo-common = { path = "./../neqo-common" } +neqo-common = { path = "./../neqo-common", features = ["udp"] } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 0c07cb61b7..66450ef5d6 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -27,7 +27,7 @@ use futures::{ future::{select, select_all, Either}, FutureExt, }; -use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; +use neqo_common::{hex, qinfo, qwarn, udp, Datagram, Header}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, @@ -40,7 +40,7 @@ use neqo_transport::{ ConnectionIdGenerator, ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, Version, }; -use tokio::{net::UdpSocket, time::Sleep}; +use tokio::time::Sleep; use crate::old_https::Http09Server; @@ -305,21 +305,6 @@ impl QuicParameters { } } -async fn emit_packet(socket: &mut UdpSocket, out_dgram: Datagram) { - let sent = match socket.send_to(&out_dgram, &out_dgram.destination()).await { - Err(ref err) => { - if err.kind() != io::ErrorKind::WouldBlock || err.kind() == io::ErrorKind::Interrupted { - eprintln!("UDP send error: {err:?}"); - } - 0 - } - Ok(res) => res, - }; - if sent != out_dgram.len() { - eprintln!("Unable to send all {} bytes of datagram", out_dgram.len()); - } -} - fn qns_read_response(filename: &str) -> 
Option> { let mut file_path = PathBuf::from("/www"); file_path.push(filename.trim_matches(|p| p == '/')); @@ -578,48 +563,11 @@ impl HttpServer for SimpleServer { } } -fn read_dgram( - socket: &mut UdpSocket, - local_address: &SocketAddr, -) -> Result, io::Error> { - let buf = &mut [0u8; 2048]; - let (sz, remote_addr) = match socket.try_recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == io::ErrorKind::WouldBlock - || err.kind() == io::ErrorKind::Interrupted => - { - return Ok(None) - } - Err(err) => { - eprintln!("UDP recv error: {err:?}"); - return Err(err); - } - Ok(res) => res, - }; - - if sz == buf.len() { - eprintln!("Might have received more than {} bytes", buf.len()); - } - - if sz == 0 { - eprintln!("zero length datagram received?"); - Ok(None) - } else { - Ok(Some(Datagram::new( - remote_addr, - *local_address, - IpTos::default(), - None, - &buf[..sz], - ))) - } -} - struct ServersRunner { args: Args, server: Box, timeout: Option>>, - sockets: Vec<(SocketAddr, UdpSocket)>, + sockets: Vec<(SocketAddr, udp::Socket)>, } impl ServersRunner { @@ -632,11 +580,11 @@ impl ServersRunner { let sockets = hosts .into_iter() .map(|host| { - let socket = std::net::UdpSocket::bind(host)?; + let socket = udp::Socket::bind(host)?; let local_addr = socket.local_addr()?; println!("Server waiting for connection on: {local_addr:?}"); - socket.set_nonblocking(true)?; - Ok((host, UdpSocket::from_std(socket)?)) + + Ok((host, socket)) }) .collect::>()?; let server = Self::create_server(&args); @@ -683,7 +631,7 @@ impl ServersRunner { } /// Tries to find a socket, but then just falls back to sending from the first. 
- fn find_socket(&mut self, addr: SocketAddr) -> &mut UdpSocket { + fn find_socket(&mut self, addr: SocketAddr) -> &mut udp::Socket { let ((_host, first_socket), rest) = self.sockets.split_first_mut().unwrap(); rest.iter_mut() .map(|(_host, socket)| socket) @@ -696,12 +644,13 @@ impl ServersRunner { .unwrap_or(first_socket) } - async fn process(&mut self, mut dgram: Option<&Datagram>) { + async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { loop { match self.server.process(dgram.take(), self.args.now()) { Output::Datagram(dgram) => { let socket = self.find_socket(dgram.source()); - emit_packet(socket, dgram).await; + socket.writable().await?; + socket.send(dgram)?; } Output::Callback(new_timeout) => { qinfo!("Setting timeout of {:?}", new_timeout); @@ -709,11 +658,11 @@ impl ServersRunner { break; } Output::None => { - qdebug!("Output::None"); break; } } } + Ok(()) } // Wait for any of the sockets to be readable or the timeout to fire. @@ -740,20 +689,20 @@ impl ServersRunner { match self.ready().await? { Ready::Socket(inx) => loop { let (host, socket) = self.sockets.get_mut(inx).unwrap(); - let dgram = read_dgram(socket, host)?; + let dgram = socket.recv(host)?; if dgram.is_none() { break; } - self.process(dgram.as_ref()).await; + self.process(dgram.as_ref()).await?; }, Ready::Timeout => { self.timeout = None; - self.process(None).await; + self.process(None).await?; } } self.server.process_events(&self.args, self.args.now()); - self.process(None).await; + self.process(None).await?; } } } From 8fb0652d58cddf8d50bb4cb80af3320c15424484 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 26 Feb 2024 10:34:26 +0200 Subject: [PATCH 186/321] ci: Give QNS workflow permissions to upload packages (#1677) * ci: Give QNS workflow permissions to upload packages Fixes #1666 * Delay codecov notifications until all data is uploaded. Hopefully eliminates the "-0.01%" issue... 
* Update .codecov.yml Signed-off-by: Lars Eggert --------- Signed-off-by: Lars Eggert --- .codecov.yml | 8 ++++++++ .github/workflows/qns.yml | 2 ++ 2 files changed, 10 insertions(+) diff --git a/.codecov.yml b/.codecov.yml index d55ee5edd6..12facb68da 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -3,3 +3,11 @@ ignore: - "neqo-client" - "neqo-interop" - "neqo-server" + +# Do not notify until at least three results have been uploaded from the CI pipeline. +# (This corresponds to the three main platforms we support: Linux, macOS, and Windows.) +codecov: + notify: + after_n_builds: 3 +comment: + after_n_builds: 3 diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 00c8595f1a..edbd2824b9 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -17,6 +17,8 @@ on: jobs: docker-image: runs-on: ubuntu-latest + permissions: + packages: write steps: - name: Set up QEMU uses: docker/setup-qemu-action@v3 From 733df9a714a5e7e58b8f45397c15c5482bd49ef9 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 26 Feb 2024 10:58:11 +0100 Subject: [PATCH 187/321] fix(client): wait for resumption token (#1676) The QUIC Interop testcase `resumption` requires a client to download two files on two consecutive connections, establishing the second connection with the resumption token of the first. Previously the client would close the first connection once the first file is downloaded. There is a race condition where the first file might be finished downloading before receiving the resumption token from the server. With this commit, the client will wait for both (1) the file being downloaded and (2) receiving the resumption token from the server. 
--- neqo-client/src/main.rs | 74 ++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 42 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 7584697a47..17cae92da1 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -1170,12 +1170,12 @@ mod old { Ok(()) } - fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res { + fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> { let mut maybe_maybe_out_file = self.streams.get_mut(&stream_id); match &mut maybe_maybe_out_file { None => { println!("Data on unexpected stream: {stream_id}"); - return Ok(false); + return Ok(()); } Some(maybe_out_file) => { let fin_recvd = Self::read_from_stream( @@ -1191,25 +1191,15 @@ mod old { } self.streams.remove(&stream_id); self.download_urls(client); - if self.streams.is_empty() && self.url_queue.is_empty() { - return Ok(false); - } } } } - Ok(true) - } - - /// Just in case we didn't get a resumption token event, this - /// iterates through events until one is found. - fn get_token(&mut self, client: &mut Connection) { - for event in client.events() { - if let ConnectionEvent::ResumptionToken(token) = event { - self.token = Some(token); - } - } + Ok(()) } + /// Handle events on the connection. + /// + /// Returns `Ok(true)` when done, i.e. url queue is empty and streams are closed. fn handle(&mut self, client: &mut Connection) -> Res { while let Some(event) = client.next_event() { match event { @@ -1217,11 +1207,7 @@ mod old { client.authenticated(AuthenticationStatus::Ok, Instant::now()); } ConnectionEvent::RecvStreamReadable { stream_id } => { - if !self.read(client, stream_id)? 
{ - self.get_token(client); - client.close(Instant::now(), 0, "kthxbye!"); - return Ok(false); - }; + self.read(client, stream_id)?; } ConnectionEvent::SendStreamWritable { stream_id } => { println!("stream {stream_id} writable"); @@ -1253,7 +1239,12 @@ mod old { } } - Ok(true) + if self.streams.is_empty() && self.url_queue.is_empty() { + // Handler is done. + return Ok(true); + } + + Ok(false) } } @@ -1324,12 +1315,29 @@ mod old { pub async fn run(mut self) -> Res> { loop { - if !self.handler.handle(&mut self.client)? { - break; + let handler_done = self.handler.handle(&mut self.client)?; + + match (handler_done, self.args.resume, self.handler.token.is_some()) { + // Handler isn't done. Continue. + (false, _, _) => {}, + // Handler done. Resumption token needed but not present. Continue. + (true, true, false) => { + qdebug!("Handler done. Waiting for resumption token."); + } + // Handler is done, no resumption token needed. Close. + (true, false, _) | + // Handler is done, resumption token needed and present. Close. + (true, true, true) => { + self.client.close(Instant::now(), 0, "kthxbye!"); + } } self.process(None).await?; + if let State::Closed(..) = self.client.state() { + return Ok(self.handler.token.take()); + } + match ready(self.socket, self.timeout.as_mut()).await? { Ready::Socket => loop { let dgram = self.socket.recv(&self.local_addr)?; @@ -1343,25 +1351,7 @@ mod old { self.timeout = None; } } - - if let State::Closed(..) = self.client.state() { - break; - } } - - let token = if self.args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. 
- self.handler - .token - .take() - .or_else(|| self.client.take_resumption_token(Instant::now())) - } else { - None - }; - - Ok(token) } async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { From 6209134e91aa062ab847dafb7336d6cf771ec8c8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 27 Feb 2024 03:19:23 +0200 Subject: [PATCH 188/321] chore: Set workspace resolver to version 2 (#1679) Avoids this warning during build: ``` warning: virtual workspace defaulting to `resolver = "1"` despite one or more workspace members being on edition 2021 which implies `resolver = "2"` note: to keep the current resolver, specify `workspace.resolver = "1"` in the workspace root's manifest note: to use the edition 2021 resolver, specify `workspace.resolver = "2"` in the workspace root's manifest note: for more details see https://doc.rust-lang.org/cargo/reference/resolver.html#resolver-versions ``` --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 213f03e3fd..bec8fa61a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "neqo-transport", "test-fixture", ] +resolver = "2" [workspace.package] homepage = "https://github.com/mozilla/neqo/" From e19a4cb3ed7f9daee9a16b70f6d10c3f86b22824 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 27 Feb 2024 10:18:43 +0200 Subject: [PATCH 189/321] ci: Generate unique (and valid) artifact names (#1686) --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 6458eb541a..7d4220c8dc 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -142,7 +142,7 @@ jobs: - name: Archive perf data uses: actions/upload-artifact@v4 with: - name: ${{ github.head_ref || github.ref_name }}-perf + name: ${{ github.event.repository.name }}-${{ github.sha }} path: | *.svg *.perf From a9ed8d5c788b3bfccb7782cd6b41ac33fc87ff6f Mon Sep 17 00:00:00 
2001 From: Lars Eggert Date: Tue, 27 Feb 2024 13:05:05 +0200 Subject: [PATCH 190/321] chore: Fix clippy nightly issues (#1680) --- neqo-client/src/main.rs | 1 - neqo-common/src/codec.rs | 2 +- neqo-common/src/hrtime.rs | 1 - neqo-common/src/qlog.rs | 3 +-- neqo-common/src/timer.rs | 1 - neqo-crypto/src/aead.rs | 1 - neqo-crypto/src/agent.rs | 1 - neqo-crypto/src/agentio.rs | 1 - neqo-crypto/src/cert.rs | 5 +---- neqo-crypto/src/ech.rs | 1 - neqo-crypto/src/ext.rs | 1 - neqo-crypto/src/hkdf.rs | 1 - neqo-crypto/src/hp.rs | 1 - neqo-crypto/src/lib.rs | 2 +- neqo-crypto/src/p11.rs | 1 - neqo-crypto/src/replay.rs | 1 - neqo-crypto/src/time.rs | 6 +----- neqo-http3/src/connection_client.rs | 3 +-- neqo-http3/src/control_stream_local.rs | 5 +---- .../extended_connect/tests/webtransport/datagrams.rs | 2 -- neqo-http3/src/frames/reader.rs | 2 +- neqo-http3/src/frames/wtframe.rs | 2 -- neqo-http3/src/headers_checks.rs | 2 -- neqo-http3/src/priority.rs | 2 +- neqo-http3/src/push_controller.rs | 1 - neqo-http3/src/qlog.rs | 2 -- neqo-http3/src/recv_message.rs | 2 +- neqo-http3/src/server_events.rs | 1 - neqo-qpack/src/decoder.rs | 4 +--- neqo-qpack/src/encoder.rs | 5 +---- neqo-qpack/src/huffman.rs | 2 -- neqo-qpack/src/huffman_decode_helper.rs | 2 +- neqo-qpack/src/qpack_send_buf.rs | 2 +- neqo-qpack/src/reader.rs | 2 +- neqo-qpack/src/table.rs | 2 +- neqo-server/src/main.rs | 1 - neqo-transport/src/ackrate.rs | 2 +- neqo-transport/src/addr_valid.rs | 1 - neqo-transport/src/cc/classic_cc.rs | 5 +---- neqo-transport/src/cc/cubic.rs | 1 - neqo-transport/src/cc/tests/cubic.rs | 1 - neqo-transport/src/cid.rs | 1 - neqo-transport/src/connection/mod.rs | 1 - neqo-transport/src/connection/params.rs | 2 +- neqo-transport/src/connection/tests/cc.rs | 2 +- neqo-transport/src/connection/tests/datagram.rs | 2 +- neqo-transport/src/connection/tests/handshake.rs | 1 - neqo-transport/src/connection/tests/mod.rs | 1 - neqo-transport/src/connection/tests/stream.rs | 2 +- 
neqo-transport/src/crypto.rs | 1 - neqo-transport/src/fc.rs | 1 - neqo-transport/src/frame.rs | 2 +- neqo-transport/src/pace.rs | 1 - neqo-transport/src/packet/mod.rs | 1 - neqo-transport/src/path.rs | 1 - neqo-transport/src/qlog.rs | 1 - neqo-transport/src/quic_datagrams.rs | 2 +- neqo-transport/src/recovery.rs | 2 -- neqo-transport/src/recv_stream.rs | 3 +-- neqo-transport/src/send_stream.rs | 3 +-- neqo-transport/src/tparams.rs | 1 - neqo-transport/src/tracking.rs | 1 - neqo-transport/src/version.rs | 2 -- neqo-transport/tests/common/mod.rs | 2 +- neqo-transport/tests/connection.rs | 2 -- neqo-transport/tests/retry.rs | 1 - neqo-transport/tests/server.rs | 2 +- test-fixture/src/assertions.rs | 5 +---- test-fixture/src/lib.rs | 1 - test-fixture/src/sim/delay.rs | 1 - test-fixture/src/sim/mod.rs | 1 - test-fixture/src/sim/taildrop.rs | 1 - 72 files changed, 29 insertions(+), 105 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 17cae92da1..f465d7c206 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -9,7 +9,6 @@ use std::{ cell::RefCell, collections::{HashMap, VecDeque}, - convert::TryFrom, fmt::{self, Display}, fs::{create_dir_all, File, OpenOptions}, io::{self, Write}, diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 620c669ae6..7fea2f71ab 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::{convert::TryFrom, fmt::Debug}; +use std::fmt::Debug; use crate::hex_with_len; diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index d43fa882f7..e70b5f0ffb 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, - convert::TryFrom, rc::{Rc, Weak}, time::Duration, }; diff --git a/neqo-common/src/qlog.rs b/neqo-common/src/qlog.rs index 3da8350990..c67ce62afe 100644 --- a/neqo-common/src/qlog.rs +++ b/neqo-common/src/qlog.rs @@ -12,8 +12,7 @@ use std::{ }; use qlog::{ - self, streamer::QlogStreamer, CommonFields, Configuration, TraceSeq, VantagePoint, - VantagePointType, + streamer::QlogStreamer, CommonFields, Configuration, TraceSeq, VantagePoint, VantagePointType, }; use crate::Role; diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index 6708c70963..a413252e08 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -5,7 +5,6 @@ // except according to those terms. use std::{ - convert::TryFrom, mem, time::{Duration, Instant}, }; diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index a2f009a403..bf7d7fe9d7 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -5,7 +5,6 @@ // except according to those terms. 
use std::{ - convert::{TryFrom, TryInto}, fmt, ops::{Deref, DerefMut}, os::raw::{c_char, c_uint}, diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 85fc496841..c48656284f 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, - convert::TryFrom, ffi::{CStr, CString}, mem::{self, MaybeUninit}, ops::{Deref, DerefMut}, diff --git a/neqo-crypto/src/agentio.rs b/neqo-crypto/src/agentio.rs index 0477796f96..7c57a0ef45 100644 --- a/neqo-crypto/src/agentio.rs +++ b/neqo-crypto/src/agentio.rs @@ -6,7 +6,6 @@ use std::{ cmp::min, - convert::{TryFrom, TryInto}, fmt, mem, ops::Deref, os::raw::{c_uint, c_void}, diff --git a/neqo-crypto/src/cert.rs b/neqo-crypto/src/cert.rs index 2c16380ee0..f6a68fe934 100644 --- a/neqo-crypto/src/cert.rs +++ b/neqo-crypto/src/cert.rs @@ -4,10 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{ - convert::TryFrom, - ptr::{addr_of, NonNull}, -}; +use std::ptr::{addr_of, NonNull}; use neqo_common::qerror; diff --git a/neqo-crypto/src/ech.rs b/neqo-crypto/src/ech.rs index 6f9a3ba4ce..4ff2cda7e8 100644 --- a/neqo-crypto/src/ech.rs +++ b/neqo-crypto/src/ech.rs @@ -5,7 +5,6 @@ // except according to those terms. use std::{ - convert::TryFrom, ffi::CString, os::raw::{c_char, c_uint}, ptr::{addr_of_mut, null_mut}, diff --git a/neqo-crypto/src/ext.rs b/neqo-crypto/src/ext.rs index d9f3195051..02ee6340c1 100644 --- a/neqo-crypto/src/ext.rs +++ b/neqo-crypto/src/ext.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, - convert::TryFrom, os::raw::{c_uint, c_void}, pin::Pin, rc::Rc, diff --git a/neqo-crypto/src/hkdf.rs b/neqo-crypto/src/hkdf.rs index 5d918ec13a..3706be6c3b 100644 --- a/neqo-crypto/src/hkdf.rs +++ b/neqo-crypto/src/hkdf.rs @@ -5,7 +5,6 @@ // except according to those terms. 
use std::{ - convert::TryFrom, os::raw::{c_char, c_uint}, ptr::null_mut, }; diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index 27d9a6dc79..ab064c794e 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, - convert::TryFrom, fmt::{self, Debug}, os::raw::{c_char, c_int, c_uint}, ptr::{addr_of_mut, null, null_mut}, diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index d092842705..c64a2c7ec3 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -32,7 +32,7 @@ pub mod selfencrypt; mod ssl; mod time; -use std::{convert::TryFrom, ffi::CString, path::PathBuf, ptr::null, sync::OnceLock}; +use std::{ffi::CString, path::PathBuf, ptr::null, sync::OnceLock}; #[cfg(not(feature = "fuzzing"))] pub use self::aead::RealAead as Aead; diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index 4c22b3ec20..5552882e2e 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -11,7 +11,6 @@ use std::{ cell::RefCell, - convert::TryFrom, mem, ops::{Deref, DerefMut}, os::raw::{c_int, c_uint}, diff --git a/neqo-crypto/src/replay.rs b/neqo-crypto/src/replay.rs index d4d3677f5c..5fd6fd1250 100644 --- a/neqo-crypto/src/replay.rs +++ b/neqo-crypto/src/replay.rs @@ -5,7 +5,6 @@ // except according to those terms. 
use std::{ - convert::{TryFrom, TryInto}, ops::{Deref, DerefMut}, os::raw::c_uint, ptr::null_mut, diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index db0573d126..0e59c4f5e2 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -7,7 +7,6 @@ #![allow(clippy::upper_case_acronyms)] use std::{ - convert::{TryFrom, TryInto}, ops::Deref, os::raw::c_void, pin::Pin, @@ -214,10 +213,7 @@ impl Default for TimeHolder { #[cfg(test)] mod test { - use std::{ - convert::{TryFrom, TryInto}, - time::{Duration, Instant}, - }; + use std::time::{Duration, Instant}; use super::{get_base, init, Interval, PRTime, Time}; use crate::err::Res; diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index b4557065cc..6317751f37 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, - convert::TryFrom, fmt::{Debug, Display}, mem, net::SocketAddr, @@ -1289,7 +1288,7 @@ impl EventProvider for Http3Client { #[cfg(test)] mod tests { - use std::{convert::TryFrom, mem, time::Duration}; + use std::{mem, time::Duration}; use neqo_common::{event::Provider, qtrace, Datagram, Decoder, Encoder}; use neqo_crypto::{AllowZeroRtt, AntiReplay, ResumptionToken}; diff --git a/neqo-http3/src/control_stream_local.rs b/neqo-http3/src/control_stream_local.rs index 62676ee391..2f336c63a4 100644 --- a/neqo-http3/src/control_stream_local.rs +++ b/neqo-http3/src/control_stream_local.rs @@ -4,10 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::{ - collections::{HashMap, VecDeque}, - convert::TryFrom, -}; +use std::collections::{HashMap, VecDeque}; use neqo_common::{qtrace, Encoder}; use neqo_transport::{Connection, StreamId, StreamType}; diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs index 1c58596dd3..27b7d2b2f2 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::convert::TryFrom; - use neqo_common::Encoder; use neqo_transport::Error as TransportError; diff --git a/neqo-http3/src/frames/reader.rs b/neqo-http3/src/frames/reader.rs index 5017c666a4..1a086683cf 100644 --- a/neqo-http3/src/frames/reader.rs +++ b/neqo-http3/src/frames/reader.rs @@ -6,7 +6,7 @@ #![allow(clippy::module_name_repetitions)] -use std::{convert::TryFrom, fmt::Debug}; +use std::fmt::Debug; use neqo_common::{ hex_with_len, qtrace, Decoder, IncrementalDecoderBuffer, IncrementalDecoderIgnore, diff --git a/neqo-http3/src/frames/wtframe.rs b/neqo-http3/src/frames/wtframe.rs index deb7a026a0..20e9b81936 100644 --- a/neqo-http3/src/frames/wtframe.rs +++ b/neqo-http3/src/frames/wtframe.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::convert::TryFrom; - use neqo_common::{Decoder, Encoder}; use crate::{frames::reader::FrameDecoder, Error, Res}; diff --git a/neqo-http3/src/headers_checks.rs b/neqo-http3/src/headers_checks.rs index 9bf661c8fe..2dbf43cd32 100644 --- a/neqo-http3/src/headers_checks.rs +++ b/neqo-http3/src/headers_checks.rs @@ -6,8 +6,6 @@ #![allow(clippy::unused_unit)] // see https://github.com/Lymia/enumset/issues/44 -use std::convert::TryFrom; - use enumset::{enum_set, EnumSet, EnumSetType}; use neqo_common::Header; diff --git a/neqo-http3/src/priority.rs b/neqo-http3/src/priority.rs index f2651d3bb5..efd3694003 100644 --- a/neqo-http3/src/priority.rs +++ b/neqo-http3/src/priority.rs @@ -1,4 +1,4 @@ -use std::{convert::TryFrom, fmt}; +use std::fmt; use neqo_transport::StreamId; use sfv::{BareItem, Item, ListEntry, Parser}; diff --git a/neqo-http3/src/push_controller.rs b/neqo-http3/src/push_controller.rs index c4591991ae..bdba254087 100644 --- a/neqo-http3/src/push_controller.rs +++ b/neqo-http3/src/push_controller.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, collections::VecDeque, - convert::TryFrom, fmt::{Debug, Display}, mem, rc::Rc, diff --git a/neqo-http3/src/qlog.rs b/neqo-http3/src/qlog.rs index c304d4584f..81f9245a3c 100644 --- a/neqo-http3/src/qlog.rs +++ b/neqo-http3/src/qlog.rs @@ -6,8 +6,6 @@ // Functions that handle capturing QLOG traces. -use std::convert::TryFrom; - use neqo_common::qlog::NeqoQlog; use neqo_transport::StreamId; use qlog::events::{DataRecipient, EventData}; diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index 6feb017cbb..be58b7e47c 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::{cell::RefCell, cmp::min, collections::VecDeque, convert::TryFrom, fmt::Debug, rc::Rc}; +use std::{cell::RefCell, cmp::min, collections::VecDeque, fmt::Debug, rc::Rc}; use neqo_common::{qdebug, qinfo, qtrace, Header}; use neqo_qpack::decoder::QPackDecoder; diff --git a/neqo-http3/src/server_events.rs b/neqo-http3/src/server_events.rs index 4be48363df..a85ece0bfb 100644 --- a/neqo-http3/src/server_events.rs +++ b/neqo-http3/src/server_events.rs @@ -9,7 +9,6 @@ use std::{ cell::RefCell, collections::VecDeque, - convert::TryFrom, ops::{Deref, DerefMut}, rc::Rc, }; diff --git a/neqo-qpack/src/decoder.rs b/neqo-qpack/src/decoder.rs index 2119db0256..b2cfb6629a 100644 --- a/neqo-qpack/src/decoder.rs +++ b/neqo-qpack/src/decoder.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::convert::TryFrom; - use neqo_common::{qdebug, Header}; use neqo_transport::{Connection, StreamId}; @@ -287,7 +285,7 @@ fn map_error(err: &Error) -> Error { #[cfg(test)] mod tests { - use std::{convert::TryFrom, mem}; + use std::mem; use neqo_common::Header; use neqo_transport::{StreamId, StreamType}; diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index c7921ee2c0..c90570ccdc 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -4,10 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{ - collections::{HashMap, HashSet, VecDeque}, - convert::TryFrom, -}; +use std::collections::{HashMap, HashSet, VecDeque}; use neqo_common::{qdebug, qerror, qlog::NeqoQlog, qtrace, Header}; use neqo_transport::{Connection, Error as TransportError, StreamId}; diff --git a/neqo-qpack/src/huffman.rs b/neqo-qpack/src/huffman.rs index ee53a4e041..30bb880438 100644 --- a/neqo-qpack/src/huffman.rs +++ b/neqo-qpack/src/huffman.rs @@ -4,8 +4,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use std::convert::TryFrom; - use crate::{ huffman_decode_helper::{huffman_decoder_root, HuffmanDecoderNode}, huffman_table::HUFFMAN_TABLE, diff --git a/neqo-qpack/src/huffman_decode_helper.rs b/neqo-qpack/src/huffman_decode_helper.rs index 1ce4485b0f..939312ab22 100644 --- a/neqo-qpack/src/huffman_decode_helper.rs +++ b/neqo-qpack/src/huffman_decode_helper.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{convert::TryFrom, sync::OnceLock}; +use std::sync::OnceLock; use crate::huffman_table::HUFFMAN_TABLE; diff --git a/neqo-qpack/src/qpack_send_buf.rs b/neqo-qpack/src/qpack_send_buf.rs index a443859081..c0b8d7af1b 100644 --- a/neqo-qpack/src/qpack_send_buf.rs +++ b/neqo-qpack/src/qpack_send_buf.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{convert::TryFrom, ops::Deref}; +use std::ops::Deref; use neqo_common::Encoder; diff --git a/neqo-qpack/src/reader.rs b/neqo-qpack/src/reader.rs index 22ff24ebee..0173ed7888 100644 --- a/neqo-qpack/src/reader.rs +++ b/neqo-qpack/src/reader.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{convert::TryInto, mem, str}; +use std::{mem, str}; use neqo_common::{qdebug, qerror}; use neqo_transport::{Connection, StreamId}; diff --git a/neqo-qpack/src/table.rs b/neqo-qpack/src/table.rs index 7ce8572542..517e98db09 100644 --- a/neqo-qpack/src/table.rs +++ b/neqo-qpack/src/table.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::{collections::VecDeque, convert::TryFrom}; +use std::collections::VecDeque; use neqo_common::qtrace; diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 66450ef5d6..691b367b73 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -10,7 +10,6 @@ use std::{ cell::RefCell, cmp::min, collections::HashMap, - convert::TryFrom, fmt::{self, Display}, fs::OpenOptions, io::{self, Read}, diff --git a/neqo-transport/src/ackrate.rs b/neqo-transport/src/ackrate.rs index cf68f9021f..d61120408d 100644 --- a/neqo-transport/src/ackrate.rs +++ b/neqo-transport/src/ackrate.rs @@ -7,7 +7,7 @@ // Management of the peer's ack rate. #![deny(clippy::pedantic)] -use std::{cmp::max, convert::TryFrom, time::Duration}; +use std::{cmp::max, time::Duration}; use neqo_common::qtrace; diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index aed796192e..9b079d211f 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -7,7 +7,6 @@ // This file implements functions necessary for address validation. 
use std::{ - convert::TryFrom, net::{IpAddr, SocketAddr}, time::{Duration, Instant}, }; diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 6f4a01d795..a7de8fc20a 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -536,10 +536,7 @@ impl ClassicCongestionControl { #[cfg(test)] mod tests { - use std::{ - convert::TryFrom, - time::{Duration, Instant}, - }; + use std::time::{Duration, Instant}; use neqo_common::qinfo; use test_fixture::now; diff --git a/neqo-transport/src/cc/cubic.rs b/neqo-transport/src/cc/cubic.rs index c04a29b443..ca6c1177ac 100644 --- a/neqo-transport/src/cc/cubic.rs +++ b/neqo-transport/src/cc/cubic.rs @@ -7,7 +7,6 @@ #![deny(clippy::pedantic)] use std::{ - convert::TryFrom, fmt::{self, Display}, time::{Duration, Instant}, }; diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index 0c82e47817..2e0200fd6d 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -8,7 +8,6 @@ #![allow(clippy::cast_sign_loss)] use std::{ - convert::TryFrom, ops::Sub, time::{Duration, Instant}, }; diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index dfc3354159..a381883d85 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -10,7 +10,6 @@ use std::{ borrow::Borrow, cell::{Ref, RefCell}, cmp::{max, min}, - convert::TryFrom, ops::Deref, rc::Rc, }; diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 749cf315d3..8cbead3812 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -9,7 +9,6 @@ use std::{ cell::RefCell, cmp::{max, min}, - convert::TryFrom, fmt::{self, Debug}, mem, net::{IpAddr, SocketAddr}, diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index bfa78a8688..72d1efa3ee 100644 --- a/neqo-transport/src/connection/params.rs +++ 
b/neqo-transport/src/connection/params.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{cmp::max, convert::TryFrom, time::Duration}; +use std::{cmp::max, time::Duration}; pub use crate::recovery::FAST_PTO_SCALE; use crate::{ diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index b3467ea67c..1536fec8f7 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{convert::TryFrom, mem, time::Duration}; +use std::{mem, time::Duration}; use neqo_common::{qdebug, qinfo, Datagram}; diff --git a/neqo-transport/src/connection/tests/datagram.rs b/neqo-transport/src/connection/tests/datagram.rs index 5b7b8dc0b4..ade8c753be 100644 --- a/neqo-transport/src/connection/tests/datagram.rs +++ b/neqo-transport/src/connection/tests/datagram.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::{cell::RefCell, convert::TryFrom, rc::Rc}; +use std::{cell::RefCell, rc::Rc}; use neqo_common::event::Provider; use test_fixture::now; diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 375118be70..af0352ce90 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, - convert::TryFrom, mem, net::{IpAddr, Ipv6Addr, SocketAddr}, rc::Rc, diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index a996702947..1e19b43456 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -9,7 +9,6 @@ use std::{ cell::RefCell, cmp::min, - convert::TryFrom, mem, rc::Rc, time::{Duration, Instant}, diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index 586a537b9d..f469866d50 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{cmp::max, collections::HashMap, convert::TryFrom, mem}; +use std::{cmp::max, collections::HashMap, mem}; use neqo_common::{event::Provider, qdebug}; use test_fixture::now; diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 3f708e7bf3..9840eaa1e1 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -8,7 +8,6 @@ use std::{ cell::RefCell, cmp::{max, min}, collections::HashMap, - convert::TryFrom, mem, ops::{Index, IndexMut, Range}, rc::Rc, diff --git a/neqo-transport/src/fc.rs b/neqo-transport/src/fc.rs index 8cd8e10f35..5ddfce6463 100644 --- a/neqo-transport/src/fc.rs +++ b/neqo-transport/src/fc.rs @@ -8,7 +8,6 @@ // into flow control frames needing to be sent to the remote. 
use std::{ - convert::TryFrom, fmt::Debug, ops::{Deref, DerefMut, Index, IndexMut}, }; diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 4e58ad5ab7..b3bb024a2c 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -6,7 +6,7 @@ // Directly relating to QUIC frames. -use std::{convert::TryFrom, ops::RangeInclusive}; +use std::ops::RangeInclusive; use neqo_common::{qtrace, Decoder}; diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index e5214c1bc8..2850c781a3 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -9,7 +9,6 @@ use std::{ cmp::min, - convert::TryFrom, fmt::{Debug, Display}, time::{Duration, Instant}, }; diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index a6e60fe4de..8458f69779 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -7,7 +7,6 @@ // Encoding and decoding packets off the wire. use std::{ cmp::min, - convert::TryFrom, fmt, ops::{Deref, DerefMut, Range}, time::Instant, diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 897763d7de..aca54d98e4 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -9,7 +9,6 @@ use std::{ cell::RefCell, - convert::TryFrom, fmt::{self, Display}, mem, net::{IpAddr, SocketAddr}, diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 949b9c452b..2572966104 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -7,7 +7,6 @@ // Functions that handle capturing QLOG traces. 
use std::{ - convert::TryFrom, ops::{Deref, RangeInclusive}, time::Duration, }; diff --git a/neqo-transport/src/quic_datagrams.rs b/neqo-transport/src/quic_datagrams.rs index 43046afafe..d7c4769e31 100644 --- a/neqo-transport/src/quic_datagrams.rs +++ b/neqo-transport/src/quic_datagrams.rs @@ -6,7 +6,7 @@ // https://datatracker.ietf.org/doc/html/draft-ietf-quic-datagram -use std::{cmp::min, collections::VecDeque, convert::TryFrom}; +use std::{cmp::min, collections::VecDeque}; use neqo_common::Encoder; diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index ec1d7e4a3d..e580c644fc 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -11,7 +11,6 @@ use std::{ cmp::{max, min}, collections::BTreeMap, - convert::TryFrom, mem, ops::RangeInclusive, time::{Duration, Instant}, @@ -1020,7 +1019,6 @@ impl ::std::fmt::Display for LossRecovery { mod tests { use std::{ cell::RefCell, - convert::TryInto, ops::{Deref, DerefMut, RangeInclusive}, rc::Rc, time::{Duration, Instant}, diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index f6063dd80d..5da80d6004 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -11,7 +11,6 @@ use std::{ cell::RefCell, cmp::max, collections::BTreeMap, - convert::TryFrom, mem, rc::{Rc, Weak}, }; @@ -1011,7 +1010,7 @@ impl RecvStream { #[cfg(test)] mod tests { - use std::{cell::RefCell, convert::TryFrom, ops::Range, rc::Rc}; + use std::{cell::RefCell, ops::Range, rc::Rc}; use neqo_common::{qtrace, Encoder}; diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 78c3596785..8771ec7765 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -10,7 +10,6 @@ use std::{ cell::RefCell, cmp::{max, min, Ordering}, collections::{btree_map::Entry, BTreeMap, VecDeque}, - convert::TryFrom, hash::{Hash, Hasher}, mem, ops::Add, @@ -1761,7 +1760,7 @@ pub struct 
SendStreamRecoveryToken { #[cfg(test)] mod tests { - use std::{cell::RefCell, collections::VecDeque, convert::TryFrom, rc::Rc}; + use std::{cell::RefCell, collections::VecDeque, rc::Rc}; use neqo_common::{event::Provider, hex_with_len, qtrace, Encoder}; diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index d530ba8972..eada56cc4c 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -9,7 +9,6 @@ use std::{ cell::RefCell, collections::HashMap, - convert::TryFrom, net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, rc::Rc, }; diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 012c895a18..e69f77380d 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -11,7 +11,6 @@ use std::{ cmp::min, collections::VecDeque, - convert::TryFrom, ops::{Index, IndexMut}, time::{Duration, Instant}, }; diff --git a/neqo-transport/src/version.rs b/neqo-transport/src/version.rs index cf75c9bd01..eee598fdd0 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::convert::TryFrom; - use neqo_common::qdebug; use crate::{Error, Res}; diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index 0e1a06a329..ffe51077c7 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -7,7 +7,7 @@ #![warn(clippy::pedantic)] #![allow(unused)] -use std::{cell::RefCell, convert::TryFrom, mem, ops::Range, rc::Rc}; +use std::{cell::RefCell, mem, ops::Range, rc::Rc}; use neqo_common::{event::Provider, hex_with_len, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{ diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index e29154d888..a5fe8e1544 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -8,8 +8,6 @@ mod common; -use std::convert::TryFrom; - use common::{ apply_header_protection, decode_initial_header, initial_aead_and_hp, remove_header_protection, }; diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 6f21c9f688..ffb89b1e8c 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -10,7 +10,6 @@ mod common; use std::{ - convert::TryFrom, mem, net::{IpAddr, Ipv4Addr, SocketAddr}, time::Duration, diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index 1858d47128..75cce6cadd 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -8,7 +8,7 @@ mod common; -use std::{cell::RefCell, convert::TryFrom, mem, net::SocketAddr, rc::Rc, time::Duration}; +use std::{cell::RefCell, mem, net::SocketAddr, rc::Rc, time::Duration}; use common::{ apply_header_protection, connect, connected_server, decode_initial_header, default_server, diff --git a/test-fixture/src/assertions.rs b/test-fixture/src/assertions.rs index dd6d0330ef..191c81f7ab 100644 --- a/test-fixture/src/assertions.rs +++ b/test-fixture/src/assertions.rs @@ -4,10 +4,7 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use std::{ - convert::{TryFrom, TryInto}, - net::SocketAddr, -}; +use std::net::SocketAddr; use neqo_common::{Datagram, Decoder}; use neqo_transport::{version::WireVersion, Version}; diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index aa0b3ea371..d0c47cd026 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -10,7 +10,6 @@ use std::{ cell::{OnceCell, RefCell}, cmp::max, - convert::TryFrom, fmt::Display, io::{Cursor, Result, Write}, mem, diff --git a/test-fixture/src/sim/delay.rs b/test-fixture/src/sim/delay.rs index e66e65f9d8..c8de66758c 100644 --- a/test-fixture/src/sim/delay.rs +++ b/test-fixture/src/sim/delay.rs @@ -8,7 +8,6 @@ use std::{ collections::BTreeMap, - convert::TryFrom, fmt::{self, Debug}, ops::Range, time::{Duration, Instant}, diff --git a/test-fixture/src/sim/mod.rs b/test-fixture/src/sim/mod.rs index cbea621f1b..9cf43b4b67 100644 --- a/test-fixture/src/sim/mod.rs +++ b/test-fixture/src/sim/mod.rs @@ -14,7 +14,6 @@ mod taildrop; use std::{ cell::RefCell, cmp::min, - convert::TryFrom, fmt::Debug, ops::{Deref, DerefMut}, rc::Rc, diff --git a/test-fixture/src/sim/taildrop.rs b/test-fixture/src/sim/taildrop.rs index c23dae10c6..fc093e461d 100644 --- a/test-fixture/src/sim/taildrop.rs +++ b/test-fixture/src/sim/taildrop.rs @@ -9,7 +9,6 @@ use std::{ cmp::max, collections::VecDeque, - convert::TryFrom, fmt::{self, Debug}, time::{Duration, Instant}, }; From 7f6fc8bdf60d026c87980c9fe6974166fc2455c5 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 27 Feb 2024 13:37:57 +0200 Subject: [PATCH 191/321] chore: Simplify `CODEOWNERS` (#1687) Fixes #1685 --- .github/CODEOWNERS | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 42caa3deee..9fd92c3c70 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1 @@ * @KershawChang @martinthomson @larseggert -/docker/ @martinthomson 
-/hooks/ @martinthomson -/neqo-crypto/ @martinthomson -/neqo-http3/ @KershawChang -/neqo-qpack/ @KershawChang -/qns/ @martinthomson From ad027cfe2901dc98b3dae20574349f52621c5622 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 29 Feb 2024 17:47:16 +0100 Subject: [PATCH 192/321] fix(tests/cc): compare cwnd before and after congestion (#1694) The `cc_slow_start_to_cong_avoidance_recovery_period` test sends two flights of data from client to server. In the second flight the first packet is dropped, which is thus not included in the second ACK of the server to the client. Due to the dropped packet, the client is expected to move into recovery state. But the test only checks the largest acknowledged packet. It does not assert the state change. Making `ClassicCongestionControl::on_congestion_event` a no-op does not fail the test. With this commit, the test asserts that the congestion window decreases, thus presuming a state change. --- neqo-transport/src/connection/tests/cc.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index 1536fec8f7..b708bc421d 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -71,6 +71,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { client.stats().frame_rx.largest_acknowledged, flight1_largest ); + let cwnd_before_cong = cwnd(&client); // Client: send more let (mut c_tx_dgrams, mut now) = fill_cwnd(&mut client, stream_id, now); @@ -93,6 +94,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { client.stats().frame_rx.largest_acknowledged, flight2_largest ); + assert!(cwnd(&client) < cwnd_before_cong); } #[test] From a2d525b3c37badbe890f4141f945164f357325c7 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 3 Mar 2024 09:09:03 +0100 Subject: [PATCH 193/321] fix(udp): pre-allocate receive buffer (#1707) Instead of allocating a new receive buffer on each call to 
`neqo_common::udp::Socket::recv`, preallocate a long-lived buffer in `Socket::bind` and reuse this buffer on each read. --- neqo-client/src/main.rs | 14 +++++++------- neqo-common/src/udp.rs | 19 +++++++++++-------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index f465d7c206..868a86ddd7 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -763,7 +763,7 @@ fn to_headers(values: &[impl AsRef]) -> Vec
{ struct ClientRunner<'a> { local_addr: SocketAddr, - socket: &'a udp::Socket, + socket: &'a mut udp::Socket, client: Http3Client, handler: Handler<'a>, timeout: Option>>, @@ -773,7 +773,7 @@ struct ClientRunner<'a> { impl<'a> ClientRunner<'a> { fn new( args: &'a mut Args, - socket: &'a udp::Socket, + socket: &'a mut udp::Socket, local_addr: SocketAddr, remote_addr: SocketAddr, hostname: &str, @@ -998,7 +998,7 @@ async fn main() -> Res<()> { SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), }; - let socket = udp::Socket::bind(local_addr)?; + let mut socket = udp::Socket::bind(local_addr)?; let real_local = socket.local_addr().unwrap(); println!( "{} Client connecting: {:?} -> {:?}", @@ -1022,7 +1022,7 @@ async fn main() -> Res<()> { token = if args.use_old_http { old::ClientRunner::new( &args, - &socket, + &mut socket, real_local, remote_addr, &hostname, @@ -1034,7 +1034,7 @@ async fn main() -> Res<()> { } else { ClientRunner::new( &mut args, - &socket, + &mut socket, real_local, remote_addr, &hostname, @@ -1249,7 +1249,7 @@ mod old { pub struct ClientRunner<'a> { local_addr: SocketAddr, - socket: &'a udp::Socket, + socket: &'a mut udp::Socket, client: Connection, handler: HandlerOld<'a>, timeout: Option>>, @@ -1259,7 +1259,7 @@ mod old { impl<'a> ClientRunner<'a> { pub fn new( args: &'a Args, - socket: &'a udp::Socket, + socket: &'a mut udp::Socket, local_addr: SocketAddr, remote_addr: SocketAddr, origin: &str, diff --git a/neqo-common/src/udp.rs b/neqo-common/src/udp.rs index 7ad0b97625..64fc356760 100644 --- a/neqo-common/src/udp.rs +++ b/neqo-common/src/udp.rs @@ -21,6 +21,7 @@ use crate::{Datagram, IpTos}; pub struct Socket { socket: tokio::net::UdpSocket, state: UdpSocketState, + recv_buf: Vec, } impl Socket { @@ -31,6 +32,7 @@ impl Socket { Ok(Self { state: quinn_udp::UdpSocketState::new((&socket).into())?, socket: tokio::net::UdpSocket::from_std(socket)?, + recv_buf: vec![0; u16::MAX as usize], }) } @@ -70,15 +72,13 @@ impl 
Socket { } /// Receive a UDP datagram on the specified socket. - pub fn recv(&self, local_address: &SocketAddr) -> Result, io::Error> { - let mut buf = [0; u16::MAX as usize]; - + pub fn recv(&mut self, local_address: &SocketAddr) -> Result, io::Error> { let mut meta = RecvMeta::default(); match self.socket.try_io(Interest::READABLE, || { self.state.recv( (&self.socket).into(), - &mut [IoSliceMut::new(&mut buf)], + &mut [IoSliceMut::new(&mut self.recv_buf)], slice::from_mut(&mut meta), ) }) { @@ -101,8 +101,11 @@ impl Socket { return Ok(None); } - if meta.len == buf.len() { - eprintln!("Might have received more than {} bytes", buf.len()); + if meta.len == self.recv_buf.len() { + eprintln!( + "Might have received more than {} bytes", + self.recv_buf.len() + ); } Ok(Some(Datagram::new( @@ -110,7 +113,7 @@ impl Socket { *local_address, meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(), None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749 - &buf[..meta.len], + &self.recv_buf[..meta.len], ))) } } @@ -124,7 +127,7 @@ mod tests { async fn datagram_tos() -> Result<(), io::Error> { let sender = Socket::bind("127.0.0.1:0")?; let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); - let receiver = Socket::bind(receiver_addr)?; + let mut receiver = Socket::bind(receiver_addr)?; let datagram = Datagram::new( sender.local_addr()?, From e826698005c627c56e8ebf261c5756948f856828 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 4 Mar 2024 01:01:45 +0200 Subject: [PATCH 194/321] chore: Add the missing license blurb where it was missing (#1702) --- neqo-common/build.rs | 6 ++++++ neqo-crypto/tests/aead.rs | 6 ++++++ neqo-crypto/tests/agent.rs | 6 ++++++ neqo-crypto/tests/ext.rs | 6 ++++++ neqo-crypto/tests/handshake.rs | 6 ++++++ neqo-crypto/tests/hkdf.rs | 6 ++++++ neqo-crypto/tests/hp.rs | 6 ++++++ neqo-crypto/tests/init.rs | 6 ++++++ neqo-crypto/tests/selfencrypt.rs | 6 ++++++ neqo-http3/src/priority.rs | 6 ++++++ 
neqo-http3/src/push_controller.rs | 1 + neqo-transport/benches/range_tracker.rs | 6 ++++++ neqo-transport/benches/rx_stream_orderer.rs | 6 ++++++ neqo-transport/benches/transfer.rs | 6 ++++++ neqo-transport/src/cc/tests/mod.rs | 1 + 15 files changed, 80 insertions(+) diff --git a/neqo-common/build.rs b/neqo-common/build.rs index 0af1a1dbbd..9047b1f5d0 100644 --- a/neqo-common/build.rs +++ b/neqo-common/build.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use std::env; fn main() { diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index 736acc31c2..5cf0034aec 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index bbe0a7a646..85d2e168c8 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![warn(clippy::pedantic)] use std::boxed::Box; diff --git a/neqo-crypto/tests/ext.rs b/neqo-crypto/tests/ext.rs index cb048d7a12..86929d283e 100644 --- a/neqo-crypto/tests/ext.rs +++ b/neqo-crypto/tests/ext.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ #![warn(clippy::pedantic)] use std::{cell::RefCell, rc::Rc}; diff --git a/neqo-crypto/tests/handshake.rs b/neqo-crypto/tests/handshake.rs index 9aa88764c7..28750df1e3 100644 --- a/neqo-crypto/tests/handshake.rs +++ b/neqo-crypto/tests/handshake.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![allow(dead_code)] use std::{mem, time::Instant}; diff --git a/neqo-crypto/tests/hkdf.rs b/neqo-crypto/tests/hkdf.rs index 6dc845e690..f0c8aebe51 100644 --- a/neqo-crypto/tests/hkdf.rs +++ b/neqo-crypto/tests/hkdf.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![warn(clippy::pedantic)] use neqo_crypto::{ diff --git a/neqo-crypto/tests/hp.rs b/neqo-crypto/tests/hp.rs index dbfda8a95d..23db2df36e 100644 --- a/neqo-crypto/tests/hp.rs +++ b/neqo-crypto/tests/hp.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![warn(clippy::pedantic)] use std::mem; diff --git a/neqo-crypto/tests/init.rs b/neqo-crypto/tests/init.rs index 223580584a..6722a0cf31 100644 --- a/neqo-crypto/tests/init.rs +++ b/neqo-crypto/tests/init.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ #![warn(clippy::pedantic)] // This uses external interfaces to neqo_crypto rather than being a module diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 1b145f5420..141d0b9479 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] diff --git a/neqo-http3/src/priority.rs b/neqo-http3/src/priority.rs index efd3694003..76a2cb9a85 100644 --- a/neqo-http3/src/priority.rs +++ b/neqo-http3/src/priority.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use std::fmt; use neqo_transport::StreamId; diff --git a/neqo-http3/src/push_controller.rs b/neqo-http3/src/push_controller.rs index bdba254087..ab6afccdf6 100644 --- a/neqo-http3/src/push_controller.rs +++ b/neqo-http3/src/push_controller.rs @@ -1,3 +1,4 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license // , at your // option. This file may not be copied, modified, or distributed diff --git a/neqo-transport/benches/range_tracker.rs b/neqo-transport/benches/range_tracker.rs index 6115542661..c2f78f4874 100644 --- a/neqo-transport/benches/range_tracker.rs +++ b/neqo-transport/benches/range_tracker.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ use criterion::{criterion_group, criterion_main, Criterion}; // black_box use neqo_transport::send_stream::RangeTracker; diff --git a/neqo-transport/benches/rx_stream_orderer.rs b/neqo-transport/benches/rx_stream_orderer.rs index 03b401ba06..0a1e763e97 100644 --- a/neqo-transport/benches/rx_stream_orderer.rs +++ b/neqo-transport/benches/rx_stream_orderer.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use criterion::{criterion_group, criterion_main, Criterion}; use neqo_transport::recv_stream::RxStreamOrderer; diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs index 59f0264a98..444f738f9c 100644 --- a/neqo-transport/benches/transfer.rs +++ b/neqo-transport/benches/transfer.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use std::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion}; diff --git a/neqo-transport/src/cc/tests/mod.rs b/neqo-transport/src/cc/tests/mod.rs index 238a7ad012..879693fb24 100644 --- a/neqo-transport/src/cc/tests/mod.rs +++ b/neqo-transport/src/cc/tests/mod.rs @@ -1,3 +1,4 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license // , at your // option. 
This file may not be copied, modified, or distributed From 4c3ccfdc4b9170d9bca4553fdccdccf3cf405812 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 4 Mar 2024 02:06:31 +0200 Subject: [PATCH 195/321] chore: Set `default-features=false`, to eliminate some unneeded deps (#1704) * chore: Set `default-features=false`, to eliminate some unneeded deps * More * More --- neqo-client/Cargo.toml | 13 ++++++------- neqo-common/Cargo.toml | 8 ++++---- neqo-crypto/Cargo.toml | 8 ++++---- neqo-http3/Cargo.toml | 10 +++++----- neqo-qpack/Cargo.toml | 4 ++-- neqo-server/Cargo.toml | 11 +++++------ neqo-transport/Cargo.toml | 10 +++++----- test-fixture/Cargo.toml | 2 +- 8 files changed, 32 insertions(+), 34 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index e9b48221aa..6c5ae0b999 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -11,16 +11,15 @@ license.workspace = true [dependencies] # neqo-client is not used in Firefox, so we can be liberal with dependency versions -clap = { version = "4.4", features = ["derive"] } -futures = "0.3" -hex = "0.4" +clap = { version = "4.4", default-features = false, features = ["std", "derive"] } +futures = { version = "0.3", default-features = false } +hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common", features = ["udp"] } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12" -tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } -url = "2.5" - +qlog = { version = "0.12", default-features = false } +tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } +url = { version = "2.5", default-features = false } diff --git a/neqo-common/Cargo.toml 
b/neqo-common/Cargo.toml index 7017ff9600..b915d33474 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -11,13 +11,13 @@ license.workspace = true [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 -enum-map = "2.7" +enum-map = { version = "2.7", default-features = false } env_logger = { version = "0.10", default-features = false } log = { version = "0.4", default-features = false } -qlog = "0.12" +qlog = { version = "0.12", default-features = false } quinn-udp = { git = "https://github.com/quinn-rs/quinn/", rev = "a947962131aba8a6521253d03cc948b20098a2d6", optional = true } -time = { version = "0.3", features = ["formatting"] } -tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"], optional = true } +time = { version = "0.3", default-features = false, features = ["formatting"] } +tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"], optional = true } [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 66cf9ca9f3..26180af896 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -17,10 +17,10 @@ neqo-common = { path = "../neqo-common" } [build-dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 bindgen = { version = "0.69", default-features = false, features = ["runtime"] } -mozbuild = { version = "0.1", optional = true } -serde = "1.0" -serde_derive = "1.0" -toml = "0.5" +mozbuild = { version = "0.1", default-features = false, optional = true } +serde = { version = "1.0", default-features = false } +serde_derive = { version = "1.0", default-features = false } +toml = { version = "0.5", default-features = false } [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index ec4e185826..003f6ff114 100644 --- 
a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -10,16 +10,16 @@ license.workspace = true [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 -enumset = "1.1" +enumset = { version = "1.1", default-features = false } log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12" -sfv = "0.9" -smallvec = "1.11" -url = "2.5" +qlog = { version = "0.12", default-features = false } +sfv = { version = "0.9", default-features = false } +smallvec = { version = "1.11", default-features = false } +url = { version = "2.5", default-features = false } [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index b0111f0dfd..817259bfda 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -14,8 +14,8 @@ log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12" -static_assertions = "1.1" +qlog = { version = "0.12", default-features = false } +static_assertions = { version = "1.1", default-features = false } [dev-dependencies] test-fixture = { path = "../test-fixture" } diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index b2c36ed21f..d482f15e5b 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -11,15 +11,14 @@ license.workspace = true [dependencies] # neqo-server is not used in Firefox, so we can be liberal with dependency versions -clap = { version = "4.4", features = ["derive"] } -futures = "0.3" +clap = { version = "4.4", default-features = false, features = ["std", "derive"] } +futures = { version = "0.3", default-features = false, features = ["alloc"] } log = { version = "0.4", default-features = 
false } neqo-common = { path = "./../neqo-common", features = ["udp"] } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.12" -regex = "1.9" -tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } - +qlog = { version = "0.12", default-features = false } +regex = { version = "1.9", default-features = false } +tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 14141a5ab8..ecab43ed2a 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -10,16 +10,16 @@ license.workspace = true [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 -indexmap = "1.9" +indexmap = { version = "1.9", default-features = false } log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } -qlog = "0.12" -smallvec = "1.11" +qlog = { version = "0.12", default-features = false } +smallvec = { version = "1.11", default-features = false } [dev-dependencies] -criterion = { version = "0.5", features = ["html_reports"] } -enum-map = "2.7" +criterion = { version = "0.5", default-features = false, features = ["html_reports"] } +enum-map = { version = "2.7", default-features = false } test-fixture = { path = "../test-fixture" } [features] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index bb3ab14953..d366e101dc 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -16,7 +16,7 @@ neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -qlog = "0.12" +qlog = { version = "0.12", default-features = false } [features] bench = 
[] From 732a94ba89aff142204ed011bea6fef6672f0d38 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 4 Mar 2024 02:07:46 +0200 Subject: [PATCH 196/321] ci: Fix nits found by https://github.com/rhysd/actionlint (#1705) --- .github/workflows/check.yml | 2 -- .github/workflows/qns.yml | 4 ---- 2 files changed, 6 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 2bedb649ad..42a903ee3b 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -7,8 +7,6 @@ on: branches: ["main"] paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] merge_group: - branches: ["main"] - paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index edbd2824b9..0aeb29b5b2 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -10,10 +10,6 @@ on: - 'qns/**' - '.github/workflows/qns.yml' merge_group: - branches: ["main"] - paths: - - 'qns/**' - - '.github/workflows/qns.yml' jobs: docker-image: runs-on: ubuntu-latest From 06b5f29b3435495e6d46ebb6f3022fdcdaa9fa01 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 4 Mar 2024 09:52:46 +0200 Subject: [PATCH 197/321] chore: Enable `clippy::pedantic` at the workspace level, globally (#1701) * chore: Enable `clippy::pedantic` at the workspace level, globally * Fix issues --------- Signed-off-by: Martin Thomson Co-authored-by: Martin Thomson --- Cargo.toml | 3 +++ neqo-client/Cargo.toml | 3 +++ neqo-client/src/main.rs | 8 +++----- neqo-common/Cargo.toml | 3 +++ neqo-common/src/lib.rs | 1 - neqo-common/src/udp.rs | 2 +- neqo-common/tests/log.rs | 2 -- neqo-crypto/Cargo.toml | 3 +++ neqo-crypto/build.rs | 4 +--- neqo-crypto/src/agent.rs | 2 +- neqo-crypto/src/cert.rs | 2 +- neqo-crypto/src/hp.rs | 2 +- neqo-crypto/src/lib.rs | 1 - neqo-crypto/tests/agent.rs | 4 ---- neqo-crypto/tests/ext.rs | 2 -- neqo-crypto/tests/handshake.rs | 2 ++ neqo-crypto/tests/hkdf.rs | 
2 -- neqo-crypto/tests/hp.rs | 2 -- neqo-crypto/tests/init.rs | 2 -- neqo-crypto/tests/selfencrypt.rs | 1 - neqo-http3/Cargo.toml | 3 +++ neqo-http3/src/client_events.rs | 6 +++--- neqo-http3/src/lib.rs | 1 - neqo-http3/src/server.rs | 4 ++-- neqo-http3/tests/priority.rs | 2 +- neqo-qpack/Cargo.toml | 3 +++ neqo-qpack/src/lib.rs | 1 - neqo-server/Cargo.toml | 3 +++ neqo-server/src/main.rs | 8 +++----- neqo-server/src/old_https.rs | 2 -- neqo-transport/Cargo.toml | 3 +++ neqo-transport/src/ackrate.rs | 1 - neqo-transport/src/addr_valid.rs | 6 +++--- neqo-transport/src/cc/classic_cc.rs | 1 - neqo-transport/src/cc/cubic.rs | 2 -- neqo-transport/src/cc/mod.rs | 1 - neqo-transport/src/cc/new_reno.rs | 1 - neqo-transport/src/cc/tests/new_reno.rs | 1 - neqo-transport/src/cid.rs | 2 +- neqo-transport/src/connection/mod.rs | 6 +++--- neqo-transport/src/connection/tests/fuzzing.rs | 1 - neqo-transport/src/connection/tests/mod.rs | 2 -- neqo-transport/src/events.rs | 4 ++-- neqo-transport/src/lib.rs | 1 - neqo-transport/src/pace.rs | 1 - neqo-transport/src/packet/retry.rs | 2 -- neqo-transport/src/path.rs | 5 ++--- neqo-transport/src/recovery.rs | 2 -- neqo-transport/src/rtt.rs | 2 -- neqo-transport/src/sender.rs | 2 +- neqo-transport/src/server.rs | 2 +- neqo-transport/src/stats.rs | 1 - neqo-transport/src/tracking.rs | 2 -- neqo-transport/tests/common/mod.rs | 1 - neqo-transport/tests/conn_vectors.rs | 2 +- neqo-transport/tests/connection.rs | 2 -- neqo-transport/tests/network.rs | 2 -- neqo-transport/tests/retry.rs | 1 - neqo-transport/tests/server.rs | 4 +--- test-fixture/Cargo.toml | 3 +++ test-fixture/src/lib.rs | 1 - 61 files changed, 61 insertions(+), 90 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index bec8fa61a7..f78cb4b7ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,9 @@ license = "MIT OR Apache-2.0" # https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule rust-version = "1.74.0" +[workspace.lints.clippy] +pedantic = { 
level = "warn", priority = -1 } + [profile.bench] # Inherits from the "release" profile, so just provide overrides here: # https://doc.rust-lang.org/cargo/reference/profiles.html#release diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 6c5ae0b999..643eff4172 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -9,6 +9,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # neqo-client is not used in Firefox, so we can be liberal with dependency versions clap = { version = "4.4", default-features = false, features = ["std", "derive"] } diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 868a86ddd7..604853edb6 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - use std::{ cell::RefCell, collections::{HashMap, VecDeque}, @@ -190,7 +188,7 @@ pub struct Args { #[arg(short = 'c', long, number_of_values = 1)] /// The set of TLS cipher suites to enable. - /// From: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256. + /// From: `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`. ciphers: Vec, #[arg(name = "ech", long, value_parser = |s: &str| hex::decode(s))] @@ -299,11 +297,11 @@ struct QuicParameters { quic_version: Vec, #[arg(long, default_value = "16")] - /// Set the MAX_STREAMS_BIDI limit. + /// Set the `MAX_STREAMS_BIDI` limit. max_streams_bidi: u64, #[arg(long, default_value = "16")] - /// Set the MAX_STREAMS_UNI limit. + /// Set the `MAX_STREAMS_UNI` limit. 
max_streams_uni: u64, #[arg(long = "idle", default_value = "30")] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index b915d33474..5d4d3d0d26 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -9,6 +9,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 enum-map = { version = "2.7", default-features = false } diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index 7b7bf6a163..fe88097983 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. mod codec; diff --git a/neqo-common/src/udp.rs b/neqo-common/src/udp.rs index 64fc356760..4dc62990e4 100644 --- a/neqo-common/src/udp.rs +++ b/neqo-common/src/udp.rs @@ -121,7 +121,7 @@ impl Socket { #[cfg(test)] mod tests { use super::*; - use crate::{IpTos, IpTosDscp, IpTosEcn}; + use crate::{IpTosDscp, IpTosEcn}; #[tokio::test] async fn datagram_tos() -> Result<(), io::Error> { diff --git a/neqo-common/tests/log.rs b/neqo-common/tests/log.rs index 0c286528ed..135a667146 100644 --- a/neqo-common/tests/log.rs +++ b/neqo-common/tests/log.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![warn(clippy::pedantic)] - use neqo_common::{qdebug, qerror, qinfo, qtrace, qwarn}; #[test] diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 26180af896..26ec5ce067 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -9,6 +9,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 log = { version = "0.4", default-features = false } diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index e5e8595e82..86a1681821 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - use std::{ collections::HashMap, env, fs, @@ -36,7 +34,7 @@ struct Bindings { opaque: Vec, /// enumerations that are turned into a module (without this, the enum is /// mapped using the default, which means that the individual values are - /// formed with an underscore as _). + /// formed with an underscore as <`enum_type`>_<`enum_value_name`>). #[serde(default)] enums: Vec, diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index c48656284f..82a6dacd48 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -1015,7 +1015,7 @@ pub enum ZeroRttCheckResult { Accept, /// Reject 0-RTT, but continue the handshake normally. Reject, - /// Send HelloRetryRequest (probably not needed for QUIC). + /// Send `HelloRetryRequest` (probably not needed for QUIC). HelloRetryRequest(Vec), /// Fail the handshake. 
Fail, diff --git a/neqo-crypto/src/cert.rs b/neqo-crypto/src/cert.rs index f6a68fe934..2836b5237c 100644 --- a/neqo-crypto/src/cert.rs +++ b/neqo-crypto/src/cert.rs @@ -21,7 +21,7 @@ use crate::{ pub struct CertificateInfo { certs: CertList, cursor: *const CERTCertListNode, - /// stapled_ocsp_responses and signed_cert_timestamp are properties + /// `stapled_ocsp_responses` and `signed_cert_timestamp` are properties /// associated with each of the certificates. Right now, NSS only /// reports the value for the end-entity certificate (the first). stapled_ocsp_responses: Option>>, diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index ab064c794e..1eba6a9cb5 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -45,7 +45,7 @@ pub enum HpKey { /// track references using `Rc`. `PK11Context` can't be used with `PK11_CloneContext` /// as that is not supported for these contexts. Aes(Rc>), - /// The ChaCha20 mask has to invoke a new PK11_Encrypt every time as it needs to + /// The `ChaCha20` mask has to invoke a new `PK11_Encrypt` every time as it needs to /// change the counter and nonce on each invocation. Chacha(SymKey), } diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index c64a2c7ec3..2ec1b4a3ea 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. #![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code. diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index 85d2e168c8..80bf816930 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -4,10 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![warn(clippy::pedantic)] - -use std::boxed::Box; - use neqo_crypto::{ generate_ech_keys, AuthenticationStatus, Client, Error, HandshakeState, SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker, TLS_AES_128_GCM_SHA256, diff --git a/neqo-crypto/tests/ext.rs b/neqo-crypto/tests/ext.rs index 86929d283e..c8732dd014 100644 --- a/neqo-crypto/tests/ext.rs +++ b/neqo-crypto/tests/ext.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - use std::{cell::RefCell, rc::Rc}; use neqo_crypto::{ diff --git a/neqo-crypto/tests/handshake.rs b/neqo-crypto/tests/handshake.rs index 28750df1e3..3cb31337fd 100644 --- a/neqo-crypto/tests/handshake.rs +++ b/neqo-crypto/tests/handshake.rs @@ -5,6 +5,8 @@ // except according to those terms. #![allow(dead_code)] +#![allow(clippy::missing_panics_doc)] +#![allow(clippy::missing_errors_doc)] use std::{mem, time::Instant}; diff --git a/neqo-crypto/tests/hkdf.rs b/neqo-crypto/tests/hkdf.rs index f0c8aebe51..acb5bbdda8 100644 --- a/neqo-crypto/tests/hkdf.rs +++ b/neqo-crypto/tests/hkdf.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - use neqo_crypto::{ constants::{ Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, diff --git a/neqo-crypto/tests/hp.rs b/neqo-crypto/tests/hp.rs index 23db2df36e..da7df2cc19 100644 --- a/neqo-crypto/tests/hp.rs +++ b/neqo-crypto/tests/hp.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - use std::mem; use neqo_crypto::{ diff --git a/neqo-crypto/tests/init.rs b/neqo-crypto/tests/init.rs index 6722a0cf31..13218cc340 100644 --- a/neqo-crypto/tests/init.rs +++ b/neqo-crypto/tests/init.rs @@ -4,8 +4,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - // This uses external interfaces to neqo_crypto rather than being a module // inside of lib.rs. Because all other code uses the test_fixture module, // they will be calling into the public version of init_db(). Calling into diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 141d0b9479..4c574a3ae9 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] use neqo_crypto::{ diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 003f6ff114..adb137ea15 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -8,6 +8,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 enumset = { version = "1.1", default-features = false } diff --git a/neqo-http3/src/client_events.rs b/neqo-http3/src/client_events.rs index 4b2ebc6c30..61aba8f9f1 100644 --- a/neqo-http3/src/client_events.rs +++ b/neqo-http3/src/client_events.rs @@ -61,7 +61,7 @@ pub enum Http3ClientEvent { error: AppError, local: bool, }, - /// Peer has sent a STOP_SENDING. + /// Peer has sent a `STOP_SENDING`. StopSending { stream_id: StreamId, error: AppError, @@ -83,7 +83,7 @@ pub enum Http3ClientEvent { PushDataReadable { push_id: u64 }, /// A push has been canceled. PushCanceled { push_id: u64 }, - /// A push stream was been reset due to a HttpGeneralProtocol error. + /// A push stream was been reset due to a `HttpGeneralProtocol` error. /// Most common case are malformed response headers. 
PushReset { push_id: u64, error: AppError }, /// New stream can be created @@ -102,7 +102,7 @@ pub enum Http3ClientEvent { GoawayReceived, /// Connection state change. StateChange(Http3State), - /// WebTransport events + /// `WebTransport` events WebTransport(WebTransportEvent), } diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index e16be709df..8272151cc1 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. /*! diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index 71bd1acf9e..1396a4e4cf 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -1271,11 +1271,11 @@ mod tests { while let Some(event) = hconn.next_event() { match event { Http3ServerEvent::Headers { stream, .. } => { - assert!(requests.get(&stream).is_none()); + assert!(!requests.contains_key(&stream)); requests.insert(stream, 0); } Http3ServerEvent::Data { stream, .. } => { - assert!(requests.get(&stream).is_some()); + assert!(requests.contains_key(&stream)); } Http3ServerEvent::DataWritable { .. } | Http3ServerEvent::StreamReset { .. 
} diff --git a/neqo-http3/tests/priority.rs b/neqo-http3/tests/priority.rs index cdec161058..d342082d56 100644 --- a/neqo-http3/tests/priority.rs +++ b/neqo-http3/tests/priority.rs @@ -98,7 +98,7 @@ fn priority_update() { assert_eq!(&headers, expected_headers); assert!(!fin); } - other => panic!("unexpected server event: {:?}", other), + other => panic!("unexpected server event: {other:?}"), } let update_priority = Priority::new(3, false); diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 817259bfda..0138746cfa 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -8,6 +8,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 log = { version = "0.4", default-features = false } diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index 44b9463e55..10ee5df61c 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. pub mod decoder; diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index d482f15e5b..7e6a9f966c 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -9,6 +9,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # neqo-server is not used in Firefox, so we can be liberal with dependency versions clap = { version = "4.4", default-features = false, features = ["std", "derive"] } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 691b367b73..4c82586c3c 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![warn(clippy::pedantic)] - use std::{ cell::RefCell, cmp::min, @@ -140,7 +138,7 @@ struct Args { #[arg(short = 'c', long, number_of_values = 1)] /// The set of TLS cipher suites to enable. - /// From: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256. + /// From: `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`. ciphers: Vec, #[arg(name = "ech", long)] @@ -213,11 +211,11 @@ struct QuicParameters { quic_version: Vec, #[arg(long, default_value = "16")] - /// Set the MAX_STREAMS_BIDI limit. + /// Set the `MAX_STREAMS_BIDI` limit. max_streams_bidi: u64, #[arg(long, default_value = "16")] - /// Set the MAX_STREAMS_UNI limit. + /// Set the `MAX_STREAMS_UNI` limit. max_streams_uni: u64, #[arg(long = "idle", default_value = "30")] diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index d45509d7d0..2417c4790c 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - use std::{ cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, }; diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index ecab43ed2a..21cea4a49f 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -8,6 +8,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 indexmap = { version = "1.9", default-features = false } diff --git a/neqo-transport/src/ackrate.rs b/neqo-transport/src/ackrate.rs index d61120408d..d5923805d9 100644 --- a/neqo-transport/src/ackrate.rs +++ b/neqo-transport/src/ackrate.rs @@ -5,7 +5,6 @@ // except according to those terms. // Management of the peer's ack rate. 
-#![deny(clippy::pedantic)] use std::{cmp::max, time::Duration}; diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index 9b079d211f..f596cfc3cb 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -43,9 +43,9 @@ const MAX_SAVED_TOKENS: usize = 8; pub enum ValidateAddress { /// Require address validation never. Never, - /// Require address validation unless a NEW_TOKEN token is provided. + /// Require address validation unless a `NEW_TOKEN` token is provided. NoToken, - /// Require address validation even if a NEW_TOKEN token is provided. + /// Require address validation even if a `NEW_TOKEN` token is provided. Always, } @@ -400,7 +400,7 @@ impl NewTokenFrameStatus { #[derive(Default)] pub struct NewTokenSender { - /// The unacknowledged NEW_TOKEN frames we are yet to send. + /// The unacknowledged `NEW_TOKEN` frames we are yet to send. tokens: Vec, /// A sequence number that is used to track individual tokens /// by reference (so that recovery tokens can be simple). diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index a7de8fc20a..89be6c4b0f 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -5,7 +5,6 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] use std::{ cmp::{max, min}, diff --git a/neqo-transport/src/cc/cubic.rs b/neqo-transport/src/cc/cubic.rs index ca6c1177ac..058a4c2aa4 100644 --- a/neqo-transport/src/cc/cubic.rs +++ b/neqo-transport/src/cc/cubic.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![deny(clippy::pedantic)] - use std::{ fmt::{self, Display}, time::{Duration, Instant}, diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index a1a43bd157..486d15e67e 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -5,7 +5,6 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] use std::{ fmt::{Debug, Display}, diff --git a/neqo-transport/src/cc/new_reno.rs b/neqo-transport/src/cc/new_reno.rs index e51b3d6cc0..47d0d56f37 100644 --- a/neqo-transport/src/cc/new_reno.rs +++ b/neqo-transport/src/cc/new_reno.rs @@ -5,7 +5,6 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] use std::{ fmt::{self, Display}, diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index a73844a755..4cc20de5a7 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -5,7 +5,6 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] use std::time::Duration; diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index a381883d85..6b3a95eaf0 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -450,7 +450,7 @@ pub struct ConnectionIdManager { limit: usize, /// The next sequence number that will be used for sending `NEW_CONNECTION_ID` frames. next_seqno: u64, - /// Outstanding, but lost NEW_CONNECTION_ID frames will be stored here. + /// Outstanding, but lost `NEW_CONNECTION_ID` frames will be stored here. lost_new_connection_id: Vec>, } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 8cbead3812..671361b559 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -257,7 +257,7 @@ pub struct Connection { /// Some packets were received, but not tracked. 
received_untracked: bool, - /// This is responsible for the QuicDatagrams' handling: + /// This is responsible for the `QuicDatagrams`' handling: /// quic_datagrams: QuicDatagrams, @@ -271,8 +271,8 @@ pub struct Connection { new_token: NewTokenState, stats: StatsCell, qlog: NeqoQlog, - /// A session ticket was received without NEW_TOKEN, - /// this is when that turns into an event without NEW_TOKEN. + /// A session ticket was received without `NEW_TOKEN`, + /// this is when that turns into an event without `NEW_TOKEN`. release_resumption_token_timer: Option, conn_params: ConnectionParameters, hrtime: hrtime::Handle, diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/fuzzing.rs index b3efc26cc9..9924c06fa4 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/fuzzing.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![cfg(feature = "fuzzing")] use neqo_crypto::FIXED_TAG_FUZZING; diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index 1e19b43456..b6ce08f8d1 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![deny(clippy::pedantic)] - use std::{ cell::RefCell, cmp::min, diff --git a/neqo-transport/src/events.rs b/neqo-transport/src/events.rs index 8331f2ba3c..a892e384b9 100644 --- a/neqo-transport/src/events.rs +++ b/neqo-transport/src/events.rs @@ -52,7 +52,7 @@ pub enum ConnectionEvent { stream_id: StreamId, app_error: AppError, }, - /// Peer has sent STOP_SENDING + /// Peer has sent `STOP_SENDING` SendStreamStopSending { stream_id: StreamId, app_error: AppError, @@ -61,7 +61,7 @@ pub enum ConnectionEvent { SendStreamComplete { stream_id: StreamId, }, - /// Peer increased MAX_STREAMS + /// Peer increased `MAX_STREAMS` SendStreamCreatable { stream_type: StreamType, }, diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index ffa696e6e5..be482c466f 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. use neqo_common::qinfo; diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index 2850c781a3..5b88e5c0c4 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -5,7 +5,6 @@ // except according to those terms. // Pacer -#![deny(clippy::pedantic)] use std::{ cmp::min, diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index 004e9de6e7..72036d3b49 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(clippy::pedantic)] - use std::cell::RefCell; use neqo_common::qerror; diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index aca54d98e4..4e8d9958ab 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -4,7 +4,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![deny(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] use std::{ @@ -71,7 +70,7 @@ pub struct Paths { /// Connection IDs that need to be retired. to_retire: Vec, - /// QLog handler. + /// `QLog` handler. qlog: NeqoQlog, } @@ -526,7 +525,7 @@ pub struct Path { /// For a path that is not validated, this is `None`. For a validated /// path, the time that the path was last valid. validated: Option, - /// A path challenge was received and PATH_RESPONSE has not been sent. + /// A path challenge was received and `PATH_RESPONSE` has not been sent. challenge: Option<[u8; 8]>, /// The round trip time estimate for this path. diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index e580c644fc..dbea3aaf57 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -6,8 +6,6 @@ // Tracking of sent packets and detecting their loss. -#![deny(clippy::pedantic)] - use std::{ cmp::{max, min}, collections::BTreeMap, diff --git a/neqo-transport/src/rtt.rs b/neqo-transport/src/rtt.rs index 4b05198bc9..3b2969f689 100644 --- a/neqo-transport/src/rtt.rs +++ b/neqo-transport/src/rtt.rs @@ -6,8 +6,6 @@ // Tracking of sent packets and detecting their loss. -#![deny(clippy::pedantic)] - use std::{ cmp::{max, min}, time::{Duration, Instant}, diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 9a00dfc7a7..3a54851533 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -5,7 +5,7 @@ // except according to those terms. 
// Congestion control -#![deny(clippy::pedantic)] + #![allow(clippy::module_name_repetitions)] use std::{ diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 8f5170af6e..96a6244ef1 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -168,7 +168,7 @@ pub struct Server { /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap, - /// All connections, keyed by ConnectionId. + /// All connections, keyed by `ConnectionId`. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet, diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index d6c7a911f9..9eff503dcf 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -5,7 +5,6 @@ // except according to those terms. // Tracking of some useful statistics. -#![deny(clippy::pedantic)] use std::{ cell::RefCell, diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index e69f77380d..bdd0f250c7 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -6,8 +6,6 @@ // Tracking of received packets and generating acks thereof. -#![deny(clippy::pedantic)] - use std::{ cmp::min, collections::VecDeque, diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index ffe51077c7..faff216eb9 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![warn(clippy::pedantic)] #![allow(unused)] use std::{cell::RefCell, mem, ops::Range, rc::Rc}; diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index 0cdd318fce..f478883075 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -5,7 +5,7 @@ // except according to those terms. // Tests with the test vectors from the spec. -#![deny(clippy::pedantic)] + #![cfg(not(feature = "fuzzing"))] use std::{cell::RefCell, rc::Rc}; diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index a5fe8e1544..0b91fcf306 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - mod common; use common::{ diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index a0a404a89a..27e5a83cd6 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -4,8 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - use std::{ops::Range, time::Duration}; use neqo_transport::{ConnectionError, ConnectionParameters, Error, State}; diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index ffb89b1e8c..e583fcae0f 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] mod common; diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index 75cce6cadd..7388e0fee7 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -4,8 +4,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] - mod common; use std::{cell::RefCell, mem, net::SocketAddr, rc::Rc, time::Duration}; @@ -23,7 +21,7 @@ use neqo_transport::{ Connection, ConnectionError, ConnectionParameters, Error, Output, State, StreamType, Version, }; use test_fixture::{ - self, assertions, datagram, default_client, new_client, now, split_datagram, + assertions, datagram, default_client, new_client, now, split_datagram, CountingConnectionIdGenerator, }; diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index d366e101dc..cc25b7d1bd 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -8,6 +8,9 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[lints] +workspace = true + [dependencies] # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 log = { version = "0.4", default-features = false } diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index d0c47cd026..a6043cd974 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![warn(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. 
use std::{ From 78919a5fc416b1a43e4f6c3278eae63897b9a73a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 5 Mar 2024 08:18:32 +0200 Subject: [PATCH 198/321] ci: Use Swatinem/rust-cache (#1700) * ci: See if Swatinem/rust-cache is faster than mozilla-actions/sccache-action * Install Rust tools before caching * Set shell * Use correct toolchain * Add cargo bin to PATH --- .github/actions/rust/action.yml | 22 ++++++++++++++++------ .github/workflows/bench.yml | 6 +++--- .github/workflows/check.yml | 11 +---------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index b489029fa9..df10b58480 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -20,11 +20,21 @@ runs: toolchain: ${{ inputs.version }} components: ${{ inputs.components }} - - name: Use sccache - uses: mozilla-actions/sccache-action@v0.0.4 + - name: Install cargo-binstall (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: Set-ExecutionPolicy Unrestricted -Scope Process; iex (iwr "https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.ps1").Content - - name: Enable sscache + - name: Install cargo-binstall (Windows) + if: runner.os != 'Windows' shell: bash - run: | - echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" - echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + run: curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + + - name: Install Rust tools + shell: bash + run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph + + - name: Use Rust cache + uses: Swatinem/rust-cache@v2 + with: + cache-all-crates: "true" diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 7d4220c8dc..7d6c793580 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -25,15 +25,15 @@ jobs: - 
name: Checkout uses: actions/checkout@v4 + - name: Set PATH + run: echo "/home/bench/.cargo/bin" >> "${GITHUB_PATH}" + - name: Install Rust uses: ./.github/actions/rust with: version: $TOOLCHAIN components: rustfmt - - name: Configure Rust - run: cargo install flamegraph - - name: Fetch NSS and NSPR uses: ./.github/actions/nss diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 42a903ee3b..8ce5383dbe 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -53,7 +53,6 @@ jobs: DEBIAN_FRONTEND: noninteractive run: | sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld - curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" # In addition to installing dependencies, first make sure System Integrity Protection (SIP) @@ -65,7 +64,7 @@ jobs: if: runner.os == 'MacOS' run: | csrutil status | grep disabled - brew install ninja mercurial cargo-binstall llvm + brew install ninja mercurial llvm echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python # python3 -m pip install gyp-next @@ -76,11 +75,6 @@ jobs: echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - - name: Install dependencies (Windows) - if: runner.os == 'Windows' - shell: pwsh - run: Set-ExecutionPolicy Unrestricted -Scope Process; iex (iwr "https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.ps1").Content - - name: Use MSYS2 environment and install more dependencies (Windows) if: runner.os == 'Windows' run: | @@ -95,9 +89,6 @@ jobs: if: runner.os == 'Windows' uses: ilammy/msvc-dev-cmd@v1 - - name: Install Rust tools - run: cargo +${{ matrix.rust-toolchain }} binstall --no-confirm cargo-llvm-cov cargo-nextest - # This step might be 
removed if the distro included a recent enough # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. # (neqo-crypto/build.rs would also need to query pkg-config to get the From d56e993545b3101077bcccaa440058f949ad9162 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 5 Mar 2024 15:34:41 +0100 Subject: [PATCH 199/321] fix: feature flag Datagram::into_data (#1695) * feat(.github): run cargo clippy on each crate individually > Use cargo-hack to run clippy on each crate individually with its > respective default features only. Can reveal warnings otherwise > hidden given that a plain cargo clippy combines all features of the > workspace. See e.g. https://github.com/mozilla/neqo/pull/1695. * fix: feature flag Datagram::into_data `Datagram::into_data` is used in `neqo_common::udp` only. `neqo_common::udp` is behind the `udp` feature. The feature is not part of the crate's default features. Thus a plain `cargo check` rightly complains with: ``` warning: method `into_data` is never used --> neqo-common/src/datagram.rs:58:19 | 20 | impl Datagram { | ------------- method in this implementation ... 58 | pub(crate) fn into_data(self) -> Vec { | ^^^^^^^^^ | = note: `#[warn(dead_code)]` on by default ``` This commit hides `into_data` behind the `udp` feature as well. 
* Install cargo-hack as part of 'Install Rust tools' --- .github/actions/rust/action.yml | 2 +- .github/workflows/check.yml | 7 ++++++- neqo-common/src/datagram.rs | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index df10b58480..473c326c1a 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -32,7 +32,7 @@ runs: - name: Install Rust tools shell: bash - run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph + run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack - name: Use Rust cache uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 8ce5383dbe..c92114a983 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -142,7 +142,12 @@ jobs: if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy --all-targets -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} + run: | + # Use cargo-hack to run clippy on each crate individually with its + # respective default features only. Can reveal warnings otherwise + # hidden given that a plain cargo clippy combines all features of the + # workspace. See e.g. https://github.com/mozilla/neqo/pull/1695. 
+ cargo +${{ matrix.rust-toolchain }} hack clippy --all-targets -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} if: success() || failure() - name: Check rustdoc links diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index d6ed43bde1..04ba1a45a1 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -54,6 +54,7 @@ impl Datagram { self.ttl } + #[cfg(feature = "udp")] #[must_use] pub(crate) fn into_data(self) -> Vec { self.d From 1ec476ed93bb89539152d575f3184f00cbb40efb Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 5 Mar 2024 15:35:32 +0100 Subject: [PATCH 200/321] refactor(udp): define const for receive buffer size (#1709) --- neqo-common/src/udp.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/neqo-common/src/udp.rs b/neqo-common/src/udp.rs index 4dc62990e4..124d693227 100644 --- a/neqo-common/src/udp.rs +++ b/neqo-common/src/udp.rs @@ -18,6 +18,11 @@ use tokio::io::Interest; use crate::{Datagram, IpTos}; +/// Socket receive buffer size. +/// +/// Allows reading multiple datagrams in a single [`Socket::recv`] call. +const RECV_BUF_SIZE: usize = u16::MAX as usize; + pub struct Socket { socket: tokio::net::UdpSocket, state: UdpSocketState, @@ -32,7 +37,7 @@ impl Socket { Ok(Self { state: quinn_udp::UdpSocketState::new((&socket).into())?, socket: tokio::net::UdpSocket::from_std(socket)?, - recv_buf: vec![0; u16::MAX as usize], + recv_buf: vec![0; RECV_BUF_SIZE], }) } From a56e092cedad14039a8ff1e72155f099dfa489c0 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 5 Mar 2024 15:36:26 +0100 Subject: [PATCH 201/321] fix(udp): handle multiple datagrams on GRO (#1708) * Test multi packet GRO read * fix(udp): handle multiple datagrams through gro Previously `Socket::recv` would at most return a single `Datagram` (i.e. `-> Result, io::Error>`). When supported by the OS, the underlying `quinn-udp` can use both recvMmsg and GRO, each with the ability to return one or more datagrams. 
As of today, `neqo_common::udp` does not make use of recvmmsg, i.e. it only provides a single `IoSliceMut` to write into. That said, that single `IoSliceMut` might contain multiple `Datagram`s through GRO. Previously this would have been provided as a single `Datagram` to the caller of `Socket::recv`. This commit makes sure to handle the case where many `Datagram`s are retrieved via GRO (see `meta.stride` flag). It updates `neqo_common::udp::Socket::recv` and `neqo-server` and `neqo-client` accordingly. * fix: support single gso sendmmsg to result in multiple gro recvmmsg E.g. the case on CI on windows runner. * Reduce diff in client --- neqo-client/src/main.rs | 16 +++++--- neqo-common/src/udp.rs | 82 +++++++++++++++++++++++++++++++++++------ neqo-server/src/main.rs | 8 ++-- 3 files changed, 86 insertions(+), 20 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 604853edb6..34d0626a05 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -824,11 +824,13 @@ impl<'a> ClientRunner<'a> { match ready(self.socket, self.timeout.as_mut()).await? { Ready::Socket => loop { - let dgram = self.socket.recv(&self.local_addr)?; - if dgram.is_none() { + let dgrams = self.socket.recv(&self.local_addr)?; + if dgrams.is_empty() { break; } - self.process(dgram.as_ref()).await?; + for dgram in &dgrams { + self.process(Some(dgram)).await?; + } self.handler.maybe_key_update(&mut self.client)?; }, Ready::Timeout => { @@ -1337,11 +1339,13 @@ mod old { match ready(self.socket, self.timeout.as_mut()).await? 
{ Ready::Socket => loop { - let dgram = self.socket.recv(&self.local_addr)?; - if dgram.is_none() { + let dgrams = self.socket.recv(&self.local_addr)?; + if dgrams.is_empty() { break; } - self.process(dgram.as_ref()).await?; + for dgram in &dgrams { + self.process(Some(dgram)).await?; + } self.handler.maybe_key_update(&mut self.client)?; }, Ready::Timeout => { diff --git a/neqo-common/src/udp.rs b/neqo-common/src/udp.rs index 124d693227..c27b0632ff 100644 --- a/neqo-common/src/udp.rs +++ b/neqo-common/src/udp.rs @@ -77,7 +77,7 @@ impl Socket { } /// Receive a UDP datagram on the specified socket. - pub fn recv(&mut self, local_address: &SocketAddr) -> Result, io::Error> { + pub fn recv(&mut self, local_address: &SocketAddr) -> Result, io::Error> { let mut meta = RecvMeta::default(); match self.socket.try_io(Interest::READABLE, || { @@ -94,7 +94,7 @@ impl Socket { if err.kind() == io::ErrorKind::WouldBlock || err.kind() == io::ErrorKind::Interrupted => { - return Ok(None) + return Ok(vec![]) } Err(err) => { return Err(err); @@ -103,9 +103,8 @@ impl Socket { if meta.len == 0 { eprintln!("zero length datagram received?"); - return Ok(None); + return Ok(vec![]); } - if meta.len == self.recv_buf.len() { eprintln!( "Might have received more than {} bytes", @@ -113,13 +112,18 @@ impl Socket { ); } - Ok(Some(Datagram::new( - meta.addr, - *local_address, - meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(), - None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749 - &self.recv_buf[..meta.len], - ))) + Ok(self.recv_buf[0..meta.len] + .chunks(meta.stride.min(self.recv_buf.len())) + .map(|d| { + Datagram::new( + meta.addr, + *local_address, + meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(), + None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749 + d, + ) + }) + .collect()) } } @@ -149,6 +153,8 @@ mod tests { let received_datagram = receiver .recv(&receiver_addr) .expect("receive to succeed") + .into_iter() + 
.next() .expect("receive to yield datagram"); // Assert that the ECN is correct. @@ -159,4 +165,58 @@ mod tests { Ok(()) } + + /// Expect [`Socket::recv`] to handle multiple [`Datagram`]s on GRO read. + #[tokio::test] + #[cfg_attr(not(any(target_os = "linux", target_os = "windows")), ignore)] + async fn many_datagrams_through_gro() -> Result<(), io::Error> { + const SEGMENT_SIZE: usize = 128; + + let sender = Socket::bind("127.0.0.1:0")?; + let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); + let mut receiver = Socket::bind(receiver_addr)?; + + // `neqo_common::udp::Socket::send` does not yet + // (https://github.com/mozilla/neqo/issues/1693) support GSO. Use + // `quinn_udp` directly. + let max_gso_segments = sender.state.max_gso_segments(); + let msg = vec![0xAB; SEGMENT_SIZE * max_gso_segments]; + let transmit = Transmit { + destination: receiver.local_addr()?, + ecn: EcnCodepoint::from_bits(Into::::into(IpTos::from(( + IpTosDscp::Le, + IpTosEcn::Ect1, + )))), + contents: msg.clone().into(), + segment_size: Some(SEGMENT_SIZE), + src_ip: None, + }; + sender.writable().await?; + let n = sender.socket.try_io(Interest::WRITABLE, || { + sender + .state + .send((&sender.socket).into(), slice::from_ref(&transmit)) + })?; + assert_eq!(n, 1, "only passed one slice"); + + // Allow for one GSO sendmmsg to result in multiple GRO recvmmsg. + let mut num_received = 0; + while num_received < max_gso_segments { + receiver.readable().await?; + receiver + .recv(&receiver_addr) + .expect("receive to succeed") + .into_iter() + .for_each(|d| { + assert_eq!( + SEGMENT_SIZE, + d.len(), + "Expect received datagrams to have same length as sent datagrams." + ); + num_received += 1; + }); + } + + Ok(()) + } } diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 4c82586c3c..819014b331 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -686,11 +686,13 @@ impl ServersRunner { match self.ready().await? 
{ Ready::Socket(inx) => loop { let (host, socket) = self.sockets.get_mut(inx).unwrap(); - let dgram = socket.recv(host)?; - if dgram.is_none() { + let dgrams = socket.recv(host)?; + if dgrams.is_empty() { break; } - self.process(dgram.as_ref()).await?; + for dgram in dgrams { + self.process(Some(&dgram)).await?; + } }, Ready::Timeout => { self.timeout = None; From 0f042a3428297a3956e794f77236ac9a3a7e349e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 6 Mar 2024 02:26:17 +0200 Subject: [PATCH 202/321] ci: Use the git mirrors for NSS and NSPR (#1710) * ci: Use the git mirrors for NSS and NSPR Since the mercurial ones are often flaky. Also build NSS as part of the `nss` action, to reduce replication. * Cache NSS build, and see if a features change speeds up things * Fix path * Apparently shell variables don't work --- .github/actions/nss/action.yml | 18 ++++++++---------- .github/actions/rust/action.yml | 1 + .github/workflows/bench.yml | 5 +---- .github/workflows/check.yml | 27 ++++++++++----------------- qns/Dockerfile | 10 +++++----- 5 files changed, 25 insertions(+), 36 deletions(-) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 1964ecdc37..40d9d65d96 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -1,21 +1,19 @@ -name: Checkout NSS and NSPR +name: Fetch and build NSS runs: using: composite steps: - - name: Fetch NSS and NSPR + - name: Fetch and build NSS shell: bash run: | - for i in {1..$RETRIES}; do - hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" && break || sleep $DELAY && false - done - for i in {1..$RETRIES}; do - hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR" && break || sleep $DELAY && false - done + git clone --depth=1 https://github.com/nss-dev/nspr "$NSPR_DIR" + git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" + $NSS_DIR/build.sh -g -Ddisable_tests=1 -o echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" + echo 
"LD_LIBRARY_PATH=$NSS_DIR/../dist/Release/lib" >> "$GITHUB_ENV" + echo "DYLD_FALLBACK_LIBRARY_PATH=$NSS_DIR/../dist/Release/lib" >> "$GITHUB_ENV" + echo "$NSS_DIR/../dist/Release/lib" >> "$GITHUB_PATH" env: NSS_DIR: ${{ github.workspace }}/nss NSPR_DIR: ${{ github.workspace }}/nspr - RETRIES: 10 - DELAY: 10 diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index 473c326c1a..5f788dc73f 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -38,3 +38,4 @@ runs: uses: Swatinem/rust-cache@v2 with: cache-all-crates: "true" + cache-directories: "${{ github.workspace }}/dist" diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 7d6c793580..21b74ac8a5 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -18,8 +18,6 @@ jobs: defaults: run: shell: bash - env: - LD_LIBRARY_PATH: ${{ github.workspace }}/dist/Release/lib steps: - name: Checkout @@ -34,12 +32,11 @@ jobs: version: $TOOLCHAIN components: rustfmt - - name: Fetch NSS and NSPR + - name: Fetch and build NSS and NSPR uses: ./.github/actions/nss - name: Build run: | - $NSS_DIR/build.sh -g -Ddisable_tests=1 -o cargo +$TOOLCHAIN bench --features bench --no-run cargo +$TOOLCHAIN build --release --bin neqo-client --bin neqo-server diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index c92114a983..6ab21c8cda 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -52,7 +52,7 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run: | - sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld + sudo apt-get install -y --no-install-recommends gyp ninja-build lld echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" # In addition to installing dependencies, first make sure System Integrity Protection (SIP) @@ -64,7 +64,7 @@ jobs: if: runner.os == 'MacOS' run: | csrutil status | grep disabled - brew install ninja mercurial llvm + brew install ninja llvm echo 
"/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python # python3 -m pip install gyp-next @@ -89,14 +89,6 @@ jobs: if: runner.os == 'Windows' uses: ilammy/msvc-dev-cmd@v1 - # This step might be removed if the distro included a recent enough - # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. - # (neqo-crypto/build.rs would also need to query pkg-config to get the - # right build flags rather than building NSS.) - # Clone from the main hg repo, because the GitHub mirror can be out of date. - - name: Fetch NSS and NSPR - uses: ./.github/actions/nss - - name: Set up NSS/NSPR build environment (Windows) if: runner.os == 'Windows' run: | @@ -106,14 +98,15 @@ jobs: # See https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash rm /usr/bin/link.exe + # This step might be removed if the distro included a recent enough + # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. + # (neqo-crypto/build.rs would also need to query pkg-config to get the + # right build flags rather than building NSS.) 
+ - name: Fetch and build NSS and NSPR + uses: ./.github/actions/nss + - name: Build - run: | - cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci,bench - echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_ENV" - echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_ENV" - echo "${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_PATH" - env: - LIB_DIR: ${{ matrix.type == 'release' && 'Release' || 'Debug' }} + run: cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci - name: Run tests and determine coverage run: | diff --git a/qns/Dockerfile b/qns/Dockerfile index ff9cc7c8f9..eed7d3f986 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -4,8 +4,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ curl git mercurial \ build-essential libclang-dev lld \ gyp ninja-build zlib1g-dev python \ - && apt-get autoremove -y && apt-get clean -y \ - && rm -rf /var/lib/apt/lists/* + && apt-get autoremove -y && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* ARG RUST_VERSION=stable @@ -21,8 +21,8 @@ ENV NSS_DIR=/nss \ LD_LIBRARY_PATH=/dist/Release/lib RUN set -eux; \ - hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ - hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" + git clone --depth=1 https://github.com/nss-dev/nspr "$NSPR_DIR"; \ + git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" RUN "$NSS_DIR"/build.sh --static -Ddisable_tests=1 -o @@ -31,7 +31,7 @@ ADD . /neqo RUN set -eux; \ cd /neqo; \ RUSTFLAGS="-g -C link-arg=-fuse-ld=lld" cargo build --release \ - --bin neqo-client --bin neqo-server + --bin neqo-client --bin neqo-server # Copy only binaries to the final image to keep it small. 
From a6bf97925d9bf061802ecfc7e0cf19c00f41d6ca Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 6 Mar 2024 12:40:21 +0200 Subject: [PATCH 203/321] ci: Delay client start during transfer test (#1713) * ci: Delay client start during transfer test Hopefully addresses spurious "An existing connection was forcibly closed by the remote host" issue on Windows CI. * No `killall` on Windows * Minimize diff --- .github/workflows/bench.yml | 2 ++ .github/workflows/check.yml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 21b74ac8a5..79fc1dd9e5 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -81,6 +81,8 @@ jobs: taskset -c 0 nice -n -20 \ cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" \ --bin neqo-server -- --db ../test-fixture/db $HOST:4433 || true; } & + # Give the server time to start. + sleep 3 mkdir client; \ cd client; \ time taskset -c 1 nice -n -20 \ diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 6ab21c8cda..b331550d87 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -118,6 +118,8 @@ jobs: cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-server -- $HOST:4433 & PID=$! + # Give the server time to start. + sleep 3 cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 From 6f407c9d307cc21f48b8041d587094539e6c828e Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 6 Mar 2024 12:21:22 +0100 Subject: [PATCH 204/321] fix(server): enable `regex` crate `unicode-perl` feature (#1714) Enable the `unicode-perl` feature flag on `regex` crate dependency in `neqo-server` crate. 
The following regex makes use of the feature: ``` rust Regex::new(r"GET +/(\S+)(?:\r)?\n").unwrap() ``` Without the feature, the QUIC Interop tests panic with: ``` server | thread 'main' panicked at neqo-server/src/old_https.rs:140:49: server | called `Result::unwrap()` on an `Err` value: Syntax( server | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ server | regex parse error: server | GET +/(\S+)(?:\r)?\n server | ^^ server | error: Unicode-aware Perl class not found (make sure the unicode-perl feature is enabled) server | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ server | ) ``` --- neqo-server/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 7e6a9f966c..203742fa06 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -23,5 +23,5 @@ neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } qlog = { version = "0.12", default-features = false } -regex = { version = "1.9", default-features = false } +regex = { version = "1.9", default-features = false, features = ["unicode-perl"] } tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } From c751b4e45b1647a5a28bcf65f1cabf4b1135140b Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 6 Mar 2024 19:43:34 +0100 Subject: [PATCH 205/321] feat(.github): run QUIC Interop Runner (#1682) * feat(.github): run QUIC Interop Runner Run the QUIC Interop Runner testcases on pull requests when entering the merge queue. * Do not always construct comment * Test for all pull requests to main * Don't build arm64 on pull requests * Use actions/download-artifact * Provide client, server and test argument * Fix action args * Copy action into neqo repo For now until https://github.com/quic-interop/quic-interop-runner/pull/356 is merged. 
* checkout * yml * introduce folder * load image * Only run interop on pull requests * Remove pull_request path filter * Trigger CI * Cache Python deps * Trigger CI to test python cache --- .../actions/quic-interop-runner/action.yml | 111 ++++++++++++++++++ .github/workflows/qns-comment.yml | 54 +++++++++ .github/workflows/qns.yml | 26 +++- 3 files changed, 186 insertions(+), 5 deletions(-) create mode 100644 .github/actions/quic-interop-runner/action.yml create mode 100644 .github/workflows/qns-comment.yml diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml new file mode 100644 index 0000000000..6e79b97cfe --- /dev/null +++ b/.github/actions/quic-interop-runner/action.yml @@ -0,0 +1,111 @@ +name: 'QUIC Interop Runner Action' +description: 'Run the QUIC Interop Runner tests.' +author: 'mxinden' + +inputs: + name: + description: 'Name of the QUIC implementation' + required: true + image: + description: 'Docker image to be tested. Needs to reside either locally, or on some registry.' 
+ required: true + url: + description: 'URL of the QUIC implementation' + required: true + role: + description: 'client/server/both' + required: false + default: 'both' + client: + description: 'client implementations (comma-separated)' + required: false + default: '' + server: + description: 'server implementations (comma-separated)' + required: false + default: '' + test: + description: 'test cases (comma-separatated)' + required: false + default: '' + +runs: + using: "composite" + steps: + - name: Checkout quic-interop/quic-interop-runner repository + uses: actions/checkout@v4 + with: + repository: 'quic-interop/quic-interop-runner' + path: 'quic-interop-runner' + + - name: Enable IPv6 support + run: sudo modprobe ip6table_filter + shell: bash + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:wireshark-dev/stable + sudo apt-get update + sudo apt-get install -y wireshark tshark jq + shell: bash + + - uses: actions/setup-python@v5 + with: + python-version: 3.8 + cache: 'pip' + cache-dependency-path: 'quic-interop-runner/requirements.txt' + + - name: Install Python packages + run: | + cd quic-interop-runner + pip install -U pip + pip install -r requirements.txt + shell: bash + + - name: Run tests + id: test-run + run: | + cd quic-interop-runner + jq --arg key "${{ inputs.name }}" --argjson newEntry '{"image": "${{ inputs.image }}", "url": "${{ inputs.url }}", "role": "${{ inputs.role }}"}' '.[$key] = $newEntry' implementations.json > temp.$$ && mv temp.$$ implementations.json + cat implementations.json + ARGS="--log-dir logs --must-include ${{ inputs.name }}" + if [ -n "${{ inputs.client }}" ]; then + ARGS="$ARGS --client ${{ inputs.client }}" + fi + if [ -n "${{ inputs.server }}" ]; then + ARGS="$ARGS --server ${{ inputs.server }}" + fi + if [ -n "${{ inputs.test }}" ]; then + ARGS="$ARGS --test ${{ inputs.test }}" + fi + python run.py $ARGS 2>&1 | tee summary + shell: bash + + - uses: actions/upload-artifact@v4 + id: artifact-upload-step + 
if: always() + with: + name: logs + path: quic-interop-runner/logs + + # This action might be running off of a fork and would thus not have write + # permissions on the origin repository. In order to allow a separate + # priviledged action to post a comment on a pull request, upload the + # necessary metadata. + - name: store comment-data + shell: bash + if: github.event_name == 'pull_request' + env: + PULL_REQUEST_NUMBER: ${{ github.event.number }} + run: | + mkdir comment-data + mv quic-interop-runner/summary comment-data/summary + echo $PULL_REQUEST_NUMBER > comment-data/pr-number + echo '${{ steps.artifact-upload-step.outputs.artifact-url }}' > comment-data/logs-url + + - name: Upload comment data + uses: actions/upload-artifact@v4 + if: github.event_name == 'pull_request' + with: + name: comment-data + path: ./comment-data diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml new file mode 100644 index 0000000000..83ec69f086 --- /dev/null +++ b/.github/workflows/qns-comment.yml @@ -0,0 +1,54 @@ +# Post test results as pull request comment. +# +# This is done as a separate workflow as it requires write permissions. The +# tests itself might run off of a fork, i.e. an untrusted environment and should +# thus not be granted write permissions. 
+ +name: Comment on the pull request + +on: + workflow_run: + workflows: ["QUIC Network Simulator"] + types: + - completed + +jobs: + comment: + permissions: + pull-requests: write + runs-on: ubuntu-latest + if: > + github.event.workflow_run.event == 'pull_request' + steps: + - name: Download comment-data + uses: actions/download-artifact@v4 + with: + run-id: ${{ github.event.workflow_run.id }} + name: comment-data + github-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Format GitHub comment + run: | + pwd + ls -la + echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment + echo '' >> comment + echo '```' >> comment + cat summary >> comment + echo '```' >> comment + echo '' >> comment + echo 'Download artifacts [here](' >> comment + cat logs-url >> comment + echo ').' >> comment + shell: bash + + - name: Read PR Number + id: pr-number + run: echo "::set-output name=number::$(cat pr-number)" + shell: bash + + - name: Comment PR + uses: thollander/actions-comment-pull-request@v2 + with: + filePath: comment + pr_number: ${{ steps.pr-number.outputs.number }} diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 0aeb29b5b2..2b1deb4be8 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -6,12 +6,10 @@ on: workflow_dispatch: pull_request: branches: ["main"] - paths: - - 'qns/**' - - '.github/workflows/qns.yml' merge_group: + jobs: - docker-image: + quic-network-simulator: runs-on: ubuntu-latest permissions: packages: write @@ -44,6 +42,7 @@ jobs: type=raw,value=latest,enable={{is_default_branch}} - name: Build and push + id: docker_build_and_push uses: docker/build-push-action@v5 with: push: ${{ github.event_name != 'pull_request' }} @@ -53,4 +52,21 @@ jobs: RUST_VERSION=stable cache-from: type=gha cache-to: type=gha,mode=max - platforms: linux/amd64, linux/arm64 + # On pull requests only build amd64 for the sake of CI time. 
+ platforms: ${{ github.event_name == 'pull_request' && 'linux/amd64' || 'linux/amd64, linux/arm64' }} + load: ${{ github.event_name == 'pull_request' }} + + - name: Checkout + uses: actions/checkout@v4 + + - name: Run QUIC Interop tests + if: ${{ github.event_name == 'pull_request' }} + # TODO: Replace once https://github.com/quic-interop/quic-interop-runner/pull/356 is merged. + uses: ./.github/actions/quic-interop-runner + with: + name: 'neqo-latest' + image: ${{ steps.docker_build_and_push.outputs.imageID }} + url: https://github.com/mozilla/neqo + test: handshake + client: neqo-latest,quic-go,ngtcp2,neqo,msquic + server: neqo-latest,quic-go,ngtcp2,neqo,msquic From 1a7e8fe93ef5a82b665b05686205a1f712b29d7b Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 6 Mar 2024 21:25:19 +0100 Subject: [PATCH 206/321] refactor(.github): rename comment workflow (#1715) Rename `Comment on the pull request` to `QUIC Network Simulator Comment`. --- .github/workflows/qns-comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index 83ec69f086..465be3080c 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -4,7 +4,7 @@ # tests itself might run off of a fork, i.e. an untrusted environment and should # thus not be granted write permissions. -name: Comment on the pull request +name: QUIC Network Simulator Comment on: workflow_run: From a59fe0c7b6f2b565e7e51dfaa74abe2696a47cd0 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 7 Mar 2024 19:44:53 +0100 Subject: [PATCH 207/321] fix(github): qns comment on failure only and update existing (#1720) Once the QUIC Network Simulator workflow finished, the QUIC Network Simulator Comment workflow adds the test results as a comment to the corresponding pull request. This commit makes two changes: - Only add a comment on test failure. - In case a comment already exists, update it in place. 
--- .github/workflows/qns-comment.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index 465be3080c..8b897b259a 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -18,7 +18,8 @@ jobs: pull-requests: write runs-on: ubuntu-latest if: > - github.event.workflow_run.event == 'pull_request' + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'failure' steps: - name: Download comment-data uses: actions/download-artifact@v4 @@ -52,3 +53,4 @@ jobs: with: filePath: comment pr_number: ${{ steps.pr-number.outputs.number }} + comment_tag: quic-network-simulator-comment From a456357c42fe5cca2f653d44dde3e74633b3931a Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 7 Mar 2024 21:10:44 +0100 Subject: [PATCH 208/321] fix(client,server): re-enable clap default features (#1721) When default features are disabled, clap does not e.g. generate logic for `--help` or `--version` handling. Instead it errors with the opaque error on anything other than correct command line flags. ``` $ neqo-client --version error: unexpected argument found ``` Instead of removing `default-features = false`, enable them individually. See https://docs.rs/clap/latest/clap/_features/index.html for details. 
--- neqo-client/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 643eff4172..6fa361020e 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # neqo-client is not used in Firefox, so we can be liberal with dependency versions -clap = { version = "4.4", default-features = false, features = ["std", "derive"] } +clap = { version = "4.4", default-features = false, features = ["std", "color", "help", "usage", "error-context", "suggestions", "derive"] } futures = { version = "0.3", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false } diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 203742fa06..2f2162fea0 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # neqo-server is not used in Firefox, so we can be liberal with dependency versions -clap = { version = "4.4", default-features = false, features = ["std", "derive"] } +clap = { version = "4.4", default-features = false, features = ["std", "color", "help", "usage", "error-context", "suggestions", "derive"] } futures = { version = "0.3", default-features = false, features = ["alloc"] } log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common", features = ["udp"] } From 25bd9294d1ee01d9d4cf86ee07b679173cbf5bac Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 8 Mar 2024 00:38:52 +0200 Subject: [PATCH 209/321] ci: Further tweaks to building NSS (#1712) * ci: Further tweaks to building NSS Picks up the comments @martinthomson left on #1710 * Caching the nss build directory does nothing other than increase the cache size * Enable NSS debug symbols also for optimized build * -e * CFLAGS * Don't set `-ggdb3` on Windows * Turn off verbose 
build * Debug why there is a debug build for NSS * Randomize port for transfer test. Build NSS based on cargo build profile. * Assign PORT in bash * Debug Windows * Not like that * Debug * Again * Try fp and NSS_TARGET * Checkout with action * Always also build static libs * --static * More * More * Release build * export * Try * No release build on Windows * No dynamic link on Windows * Doh * Finalize --- .github/actions/nss/action.yml | 50 ++++++++++++++++++++++++++++------ neqo-crypto/build.rs | 15 +++++----- 2 files changed, 50 insertions(+), 15 deletions(-) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 40d9d65d96..23d9bd3f8f 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -1,19 +1,53 @@ name: Fetch and build NSS +description: Fetch and build NSS + +inputs: + type: + description: "Whether to do a debug or release build of NSS" + default: "Release" + +# This step might be removed if the distro included a recent enough +# version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. +# (neqo-crypto/build.rs would also need to query pkg-config to get the +# right build flags rather than building NSS.) 
+# +# Also see https://github.com/mozilla/neqo/issues/1711 runs: using: composite steps: - - name: Fetch and build NSS + - name: Checkout NSPR + uses: actions/checkout@v4 + with: + repository: "nss-dev/nspr" + path: ${{ github.workspace }}/nspr + + - name: Checkout NSS + uses: actions/checkout@v4 + with: + repository: "nss-dev/nss" + path: ${{ github.workspace }}/nss + + - name: Build shell: bash run: | - git clone --depth=1 https://github.com/nss-dev/nspr "$NSPR_DIR" - git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" - $NSS_DIR/build.sh -g -Ddisable_tests=1 -o + if [ "${{ inputs.type }}" != "Debug" ]; then + # We want to do an optimized build for accurate CPU profiling, but + # we also want debug symbols and frame pointers for that, which the normal optimized NSS + # build process doesn't provide. + OPT="-o" + NSS_TARGET=Release + [ "${{ runner.os }}" != "Windows" ] && export CFLAGS="-ggdb3 -fno-omit-frame-pointer" + else + NSS_TARGET=Debug + fi + $NSS_DIR/build.sh -g -Ddisable_tests=1 $OPT --static + echo "NSS_TARGET=$NSS_TARGET" >> "$GITHUB_ENV" + NSS_OUT="$NSS_DIR/../dist/$NSS_TARGET" + echo "LD_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" + echo "DYLD_FALLBACK_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" + echo "$NSS_OUT/lib" >> "$GITHUB_PATH" echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" - echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" - echo "LD_LIBRARY_PATH=$NSS_DIR/../dist/Release/lib" >> "$GITHUB_ENV" - echo "DYLD_FALLBACK_LIBRARY_PATH=$NSS_DIR/../dist/Release/lib" >> "$GITHUB_ENV" - echo "$NSS_DIR/../dist/Release/lib" >> "$GITHUB_PATH" env: NSS_DIR: ${{ github.workspace }}/nss NSPR_DIR: ${{ github.workspace }}/nspr diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index 86a1681821..c4c2a73e75 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -50,9 +50,10 @@ struct Bindings { } fn is_debug() -> bool { - env::var("DEBUG") - .map(|d| d.parse::().unwrap_or(false)) - .unwrap_or(false) + // Check the build profile and not whether debug 
symbols are enabled (i.e., + // `env::var("DEBUG")`), because we enable those for benchmarking/profiling and still want + // to build NSS in release mode. + env::var("PROFILE").unwrap_or_default() == "debug" } // bindgen needs access to libclang. @@ -147,10 +148,10 @@ fn build_nss(dir: PathBuf) { let mut build_nss = vec![ String::from("./build.sh"), String::from("-Ddisable_tests=1"), + // Generate static libraries in addition to shared libraries. + String::from("--static"), ]; - if is_debug() { - build_nss.push(String::from("--static")); - } else { + if !is_debug() { build_nss.push(String::from("-o")); } if let Ok(d) = env::var("NSS_JOBS") { @@ -315,7 +316,7 @@ fn setup_standalone() -> Vec { "cargo:rustc-link-search=native={}", nsslibdir.to_str().unwrap() ); - if is_debug() { + if is_debug() || env::consts::OS == "windows" { static_link(); } else { dynamic_link(); From 1ee64ba69bbfef20e6138d824072d50b509b6c07 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 8 Mar 2024 00:39:58 +0200 Subject: [PATCH 210/321] ci: Hopefully fix the spurious Windows transfer test failure (#1717) And small cosmetic changes --- .github/workflows/check.yml | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index b331550d87..ccea626867 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -80,10 +80,9 @@ jobs: run: | echo "C:\\msys64\\usr\\bin" >> "$GITHUB_PATH" echo "C:\\msys64\\mingw64\\bin" >> "$GITHUB_PATH" - /c/msys64/usr/bin/pacman -S --noconfirm nsinstall lld + /c/msys64/usr/bin/pacman -S --noconfirm nsinstall python3 -m pip install git+https://github.com/nodejs/gyp-next echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" - echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld.exe" >> "$GITHUB_ENV" - name: Set up MSVC build environment (Windows) if: runner.os == 'Windows' @@ -98,10 +97,6 @@ jobs: # See 
https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash rm /usr/bin/link.exe - # This step might be removed if the distro included a recent enough - # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. - # (neqo-crypto/build.rs would also need to query pkg-config to get the - # right build flags rather than building NSS.) - name: Fetch and build NSS and NSPR uses: ./.github/actions/nss @@ -116,22 +111,21 @@ jobs: - name: Run client/server transfer run: | cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-server -- $HOST:4433 & + target/$BUILD_DIR/neqo-server $HOST:4433 & PID=$! - # Give the server time to start. - sleep 3 - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + target/$BUILD_DIR/neqo-client --output-dir . https://$HOST:4433/$SIZE kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: HOST: localhost SIZE: 54321 RUST_LOG: warn + BUILD_DIR: ${{ matrix.type == 'release' && 'release' || 'debug' }} - name: Check formatting run: | if [ "${{ matrix.rust-toolchain }}" != "nightly" ]; then - export CONFIG_PATH="--config-path=$(mktemp)" + CONFIG_PATH="--config-path=$(mktemp)" fi cargo +${{ matrix.rust-toolchain }} fmt --all -- --check $CONFIG_PATH if: success() || failure() From 4a6ec6f5971dc2bbd95cbd9607196c43873246e5 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 8 Mar 2024 01:07:16 +0200 Subject: [PATCH 211/321] ci: Use fp-based perf profiling to reduce `unknown` symbols (#1718) --- .github/workflows/bench.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 79fc1dd9e5..57ad77d722 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -8,8 +8,8 @@ env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 TOOLCHAIN: nightly - RUSTFLAGS: -C 
link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment - PERF_CMD: record -o perf.data -F997 --call-graph lbr -g + RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment, -Cforce-frame-pointers=yes + PERF_CMD: record -o perf.data -F997 --call-graph fp -g jobs: bench: @@ -81,8 +81,6 @@ jobs: taskset -c 0 nice -n -20 \ cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" \ --bin neqo-server -- --db ../test-fixture/db $HOST:4433 || true; } & - # Give the server time to start. - sleep 3 mkdir client; \ cd client; \ time taskset -c 1 nice -n -20 \ From 3476bd0545436cccf8f413c99e4fe0fbc573396d Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 8 Mar 2024 09:08:47 +0100 Subject: [PATCH 212/321] fix(.rustfmt.toml): specify Rust edition 2021 (#1722) * fix(.rustfmt.toml): specify Rust edition 2021 Emacs' popular Rust plugin Rustic [1] uses `rustfmt` to format code. `rustfmt` only picks up the Rust edition from `Cargo.toml` when executed through `cargo fmt` [2] [3]. This commit duplicates the Rust edition in the `.rustfmt.toml` file. Thus all users of `rustfmt` (whether through `cargo fmt` or plain `rustfmt`) will get formatting according to their Rust edition, e.g. helpful when interacting with `async` code. [1]: https://github.com/brotzeit/rustic [2]: https://github.com/brotzeit/rustic?tab=readme-ov-file#edition-2018 [3]: https://github.com/rust-lang/rustfmt?tab=readme-ov-file#rusts-editions * Document keeping edition statements in sync --- .rustfmt.toml | 7 +++++++ Cargo.toml | 1 + 2 files changed, 8 insertions(+) diff --git a/.rustfmt.toml b/.rustfmt.toml index 482732e6a5..60a3e6946a 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,3 +1,10 @@ +# Keep in sync with `Cargo.toml` `edition`. +# +# `rustfmt` envoked not through `cargo fmt` but directly does not pick up Rust +# edition in `Cargo.toml`. Thus duplicate here. See +# https://github.com/mozilla/neqo/pull/1722 for details. 
+edition = "2021" + comment_width=100 wrap_comments=true diff --git a/Cargo.toml b/Cargo.toml index f78cb4b7ba..c4fa2bc4c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ homepage = "https://github.com/mozilla/neqo/" repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] version = "0.7.1" +# Keep in sync with `.rustfmt.toml` `edition`. edition = "2021" license = "MIT OR Apache-2.0" # Don't increase beyond what Firefox is currently using: From 1df959c64c7ae2446b5ab9d6281cc007768e96ac Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 8 Mar 2024 11:53:45 +0200 Subject: [PATCH 213/321] ci: Disable Rust cache (#1719) * ci: Disable Rust cache Check if it makes any difference to build times first. * Update action.yml * Give sccache one more go * Tweaks for sccache * Fix step title * Leave commented-out cache configs in the action, in case we want to re-try --------- Signed-off-by: Lars Eggert --- .github/actions/rust/action.yml | 35 ++++++++++++++++++++++++--------- .github/workflows/check.yml | 12 +++++------ 2 files changed, 32 insertions(+), 15 deletions(-) diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index 5f788dc73f..dce4923678 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -1,14 +1,12 @@ name: Install Rust -description: Install Rust and sccache +description: Install Rust and tools inputs: version: description: 'Rust toolchain version to install' - required: true default: 'stable' components: description: 'Rust components to install' - required: false default: '' runs: @@ -25,7 +23,7 @@ runs: shell: pwsh run: Set-ExecutionPolicy Unrestricted -Scope Process; iex (iwr "https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.ps1").Content - - name: Install cargo-binstall (Windows) + - name: Install cargo-binstall (Linux & MacOS) if: runner.os != 'Windows' shell: bash run: curl -L --proto '=https' --tlsv1.2 -sSf 
https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash @@ -34,8 +32,27 @@ runs: shell: bash run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack - - name: Use Rust cache - uses: Swatinem/rust-cache@v2 - with: - cache-all-crates: "true" - cache-directories: "${{ github.workspace }}/dist" + # sccache slows CI down, so we leave it disabled. + # Leaving the steps below commented out, so we can re-evaluate enabling it later. + # - name: Use sccache + # uses: mozilla-actions/sccache-action@v0.0.4 + + # - name: Enable sscache + # shell: bash + # run: | + # if [ "${{ runner.os }}" = "Windows" ]; then + # echo "CC=sccache cl" >> "$GITHUB_ENV" + # echo "CXX=sccache cl" >> "$GITHUB_ENV" + # else + # echo "CC=sccache cc" >> "$GITHUB_ENV" + # echo "CXX=sccache c++" >> "$GITHUB_ENV" + # fi + # echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + # echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + # echo "CARGO_INCREMENTAL=0" >> "$GITHUB_ENV" + + # Ditto for rust-cache. 
+ # - name: Use Rust cache + # uses: Swatinem/rust-cache@v2 + # with: + # cache-all-crates: "true" diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index ccea626867..5faa67bef9 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -41,12 +41,6 @@ jobs: - name: Checkout uses: actions/checkout@v4 - - name: Install Rust - uses: ./.github/actions/rust - with: - version: ${{ matrix.rust-toolchain }} - components: rustfmt, clippy, llvm-tools-preview - - name: Install dependencies (Linux) if: runner.os == 'Linux' env: @@ -97,6 +91,12 @@ jobs: # See https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash rm /usr/bin/link.exe + - name: Install Rust + uses: ./.github/actions/rust + with: + version: ${{ matrix.rust-toolchain }} + components: rustfmt, clippy, llvm-tools-preview + - name: Fetch and build NSS and NSPR uses: ./.github/actions/nss From 43e2588aa2c9936a66b971fbbf5d51af6fb73c33 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 8 Mar 2024 16:32:06 +0200 Subject: [PATCH 214/321] ci: Try `hg` and then try `git` to download NSPR and NSS (#1726) * ci: Try `hg` and then try `git` to download NSPR and NSS Because on the bencher, `git` now has issues. You can't win. * Install hg * Debug * Debug more * And again * Undebug --- .github/actions/nss/action.yml | 31 +++++++++++++++++++++++-------- .github/workflows/check.yml | 4 ++-- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 23d9bd3f8f..23232ebc13 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -16,17 +16,32 @@ inputs: runs: using: composite steps: + # Ideally, we'd use this. But things are sufficiently flaky that we're better off + # trying both hg and git. Leaving this here in case we want to re-try in the future. 
+ # + # - name: Checkout NSPR + # uses: actions/checkout@v4 + # with: + # repository: "nss-dev/nspr" + # path: ${{ github.workspace }}/nspr + + # - name: Checkout NSS + # uses: actions/checkout@v4 + # with: + # repository: "nss-dev/nss" + # path: ${{ github.workspace }}/nss + - name: Checkout NSPR - uses: actions/checkout@v4 - with: - repository: "nss-dev/nspr" - path: ${{ github.workspace }}/nspr + shell: bash + run: | + hg clone https://hg.mozilla.org/projects/nspr "${{ github.workspace }}/nspr" || \ + git clone --depth=1 https://github.com/nss-dev/nspr "${{ github.workspace }}/nspr" - name: Checkout NSS - uses: actions/checkout@v4 - with: - repository: "nss-dev/nss" - path: ${{ github.workspace }}/nss + shell: bash + run: | + hg clone https://hg.mozilla.org/projects/nss "${{ github.workspace }}/nss" || \ + git clone --depth=1 https://github.com/nss-dev/nss "${{ github.workspace }}/nss" - name: Build shell: bash diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 5faa67bef9..2a110e22b1 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -46,7 +46,7 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run: | - sudo apt-get install -y --no-install-recommends gyp ninja-build lld + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" # In addition to installing dependencies, first make sure System Integrity Protection (SIP) @@ -58,7 +58,7 @@ jobs: if: runner.os == 'MacOS' run: | csrutil status | grep disabled - brew install ninja llvm + brew install ninja mercurial llvm echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python # python3 -m pip install gyp-next From 4494927da456e4a8500dae8993851a9275e6c077 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 11 Mar 2024 07:35:17 +0100 Subject: [PATCH 215/321] refactor: merge neqo-client and neqo-server into neqo-bin (#1724) This commit merges 
the `neqo-client` and `neqo-server` crates into a single new `neqo-bin` crate. In addition it de-duplicates command line argument definitions by introducing a `SharedArgs` `struct`. Future commits will consolidate more shared code into `neqo-bin/src/lib.rs` and its sub-modules. Note that binary names are unchanged. E.g. one can still run the client via `cargo run --bin neqo-client`. See `neqo-bin/Cargo.toml` for binary declarations. --- Cargo.toml | 3 +- {neqo-server => neqo-bin}/Cargo.toml | 16 +- .../src/main.rs => neqo-bin/src/bin/client.rs | 163 +++----------- .../src => neqo-bin/src/bin/server}/main.rs | 212 +++--------------- .../src/bin/server}/old_https.rs | 4 +- neqo-bin/src/lib.rs | 204 +++++++++++++++++ neqo-client/Cargo.toml | 28 --- 7 files changed, 285 insertions(+), 345 deletions(-) rename {neqo-server => neqo-bin}/Cargo.toml (72%) rename neqo-client/src/main.rs => neqo-bin/src/bin/client.rs (88%) rename {neqo-server/src => neqo-bin/src/bin/server}/main.rs (74%) rename {neqo-server/src => neqo-bin/src/bin/server}/old_https.rs (98%) create mode 100644 neqo-bin/src/lib.rs delete mode 100644 neqo-client/Cargo.toml diff --git a/Cargo.toml b/Cargo.toml index c4fa2bc4c8..dec317e396 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,9 @@ [workspace] members = [ - "neqo-client", + "neqo-bin", "neqo-common", "neqo-crypto", "neqo-http3", - "neqo-server", "neqo-qpack", "neqo-transport", "test-fixture", diff --git a/neqo-server/Cargo.toml b/neqo-bin/Cargo.toml similarity index 72% rename from neqo-server/Cargo.toml rename to neqo-bin/Cargo.toml index 2f2162fea0..8b7b48ab86 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "neqo-server" -description = "A basic HTTP3 server." +name = "neqo-bin" +description = "A basic QUIC HTTP/0.9 and HTTP/3 client and server." 
authors.workspace = true homepage.workspace = true repository.workspace = true @@ -9,13 +9,22 @@ edition.workspace = true rust-version.workspace = true license.workspace = true +[[bin]] +name = "neqo-client" +path = "src/bin/client.rs" + +[[bin]] +name = "neqo-server" +path = "src/bin/server/main.rs" + [lints] workspace = true [dependencies] -# neqo-server is not used in Firefox, so we can be liberal with dependency versions +# neqo-bin is not used in Firefox, so we can be liberal with dependency versions clap = { version = "4.4", default-features = false, features = ["std", "color", "help", "usage", "error-context", "suggestions", "derive"] } futures = { version = "0.3", default-features = false, features = ["alloc"] } +hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common", features = ["udp"] } neqo-crypto = { path = "./../neqo-crypto" } @@ -25,3 +34,4 @@ neqo-transport = { path = "./../neqo-transport" } qlog = { version = "0.12", default-features = false } regex = { version = "1.9", default-features = false, features = ["unicode-perl"] } tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } +url = { version = "2.5", default-features = false } diff --git a/neqo-client/src/main.rs b/neqo-bin/src/bin/client.rs similarity index 88% rename from neqo-client/src/main.rs rename to neqo-bin/src/bin/client.rs index 34d0626a05..2f9be1f3d7 100644 --- a/neqo-client/src/main.rs +++ b/neqo-bin/src/bin/client.rs @@ -15,7 +15,7 @@ use std::{ pin::Pin, process::exit, rc::Rc, - time::{Duration, Instant}, + time::Instant, }; use clap::Parser; @@ -34,8 +34,8 @@ use neqo_http3::{ Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, Priority, }; use neqo_transport::{ - CongestionControlAlgorithm, Connection, ConnectionId, ConnectionParameters, - EmptyConnectionIdGenerator, Error as 
TransportError, StreamId, StreamType, Version, + Connection, ConnectionId, EmptyConnectionIdGenerator, Error as TransportError, StreamId, + Version, }; use qlog::{events::EventImportance, streamer::QlogStreamer}; use tokio::time::Sleep; @@ -122,11 +122,8 @@ impl KeyUpdateState { #[command(author, version, about, long_about = None)] #[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. pub struct Args { - #[arg(short = 'a', long, default_value = "h3")] - /// ALPN labels to negotiate. - /// - /// This client still only does HTTP/3 no matter what the ALPN says. - alpn: String, + #[command(flatten)] + shared: neqo_bin::SharedArgs, urls: Vec, @@ -136,22 +133,9 @@ pub struct Args { #[arg(short = 'H', long, number_of_values = 2)] header: Vec, - #[arg(name = "encoder-table-size", long, default_value = "16384")] - max_table_size_encoder: u64, - - #[arg(name = "decoder-table-size", long, default_value = "16384")] - max_table_size_decoder: u64, - - #[arg(name = "max-blocked-streams", short = 'b', long, default_value = "10")] - max_blocked_streams: u16, - #[arg(name = "max-push", short = 'p', long, default_value = "10")] max_concurrent_push_streams: u64, - #[arg(name = "use-old-http", short = 'o', long)] - /// Use http 0.9 instead of HTTP/3 - use_old_http: bool, - #[arg(name = "download-in-series", long)] /// Download resources in series using separate connections. download_in_series: bool, @@ -164,18 +148,10 @@ pub struct Args { /// Output received data to stdout output_read_data: bool, - #[arg(name = "qlog-dir", long)] - /// Enable QLOG logging and QLOG traces to this directory - qlog_dir: Option, - #[arg(name = "output-dir", long)] /// Save contents of fetched URLs to a directory output_dir: Option, - #[arg(name = "qns-test", long)] - /// Enable special behavior for use with QUIC Network Simulator - qns_test: Option, - #[arg(short = 'r', long)] /// Client attempts to resume by making multiple connections to servers. 
/// Requires that 2 or more URLs are listed for each server. @@ -186,19 +162,11 @@ pub struct Args { /// Attempt to initiate a key update immediately after confirming the connection. key_update: bool, - #[arg(short = 'c', long, number_of_values = 1)] - /// The set of TLS cipher suites to enable. - /// From: `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`. - ciphers: Vec, - #[arg(name = "ech", long, value_parser = |s: &str| hex::decode(s))] /// Enable encrypted client hello (ECH). /// This takes an encoded ECH configuration in hexadecimal format. ech: Option>, - #[command(flatten)] - quic_parameters: QuicParameters, - #[arg(name = "ipv4-only", short = '4', long)] /// Connect only over IPv4 ipv4_only: bool, @@ -218,7 +186,8 @@ pub struct Args { impl Args { fn get_ciphers(&self) -> Vec { - self.ciphers + self.shared + .ciphers .iter() .filter_map(|c| match c.as_str() { "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), @@ -230,123 +199,51 @@ impl Args { } fn update_for_tests(&mut self) { - let Some(testcase) = self.qns_test.as_ref() else { + let Some(testcase) = self.shared.qns_test.as_ref() else { return; }; // Only use v1 for most QNS tests. - self.quic_parameters.quic_version = vec![Version::Version1]; + self.shared.quic_parameters.quic_version = vec![Version::Version1]; match testcase.as_str() { // TODO: Add "ecn" when that is ready. 
"http3" => {} "handshake" | "transfer" | "retry" => { - self.use_old_http = true; + self.shared.use_old_http = true; } "zerortt" | "resumption" => { if self.urls.len() < 2 { eprintln!("Warning: resumption tests won't work without >1 URL"); exit(127); } - self.use_old_http = true; + self.shared.use_old_http = true; self.resume = true; } "multiconnect" => { - self.use_old_http = true; + self.shared.use_old_http = true; self.download_in_series = true; } "chacha20" => { - self.use_old_http = true; - self.ciphers.clear(); - self.ciphers + self.shared.use_old_http = true; + self.shared.ciphers.clear(); + self.shared + .ciphers .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); } "keyupdate" => { - self.use_old_http = true; + self.shared.use_old_http = true; self.key_update = true; } "v2" => { - self.use_old_http = true; + self.shared.use_old_http = true; // Use default version set for this test (which allows compatible vneg.) - self.quic_parameters.quic_version.clear(); + self.shared.quic_parameters.quic_version.clear(); } _ => exit(127), } } } -fn from_str(s: &str) -> Res { - let v = u32::from_str_radix(s, 16) - .map_err(|_| ClientError::ArgumentError("versions need to be specified in hex"))?; - Version::try_from(v).map_err(|_| ClientError::ArgumentError("unknown version")) -} - -#[derive(Debug, Parser)] -struct QuicParameters { - #[arg( - short = 'Q', - long, - num_args = 1.., - value_delimiter = ' ', - number_of_values = 1, - value_parser = from_str)] - /// A list of versions to support, in hex. - /// The first is the version to attempt. - /// Adding multiple values adds versions in order of preference. - /// If the first listed version appears in the list twice, the position - /// of the second entry determines the preference order of that version. - quic_version: Vec, - - #[arg(long, default_value = "16")] - /// Set the `MAX_STREAMS_BIDI` limit. - max_streams_bidi: u64, - - #[arg(long, default_value = "16")] - /// Set the `MAX_STREAMS_UNI` limit. 
- max_streams_uni: u64, - - #[arg(long = "idle", default_value = "30")] - /// The idle timeout for connections, in seconds. - idle_timeout: u64, - - #[arg(long = "cc", default_value = "newreno")] - /// The congestion controller to use. - congestion_control: CongestionControlAlgorithm, - - #[arg(long = "pacing")] - /// Whether pacing is enabled. - pacing: bool, -} - -impl QuicParameters { - fn get(&self, alpn: &str) -> ConnectionParameters { - let params = ConnectionParameters::default() - .max_streams(StreamType::BiDi, self.max_streams_bidi) - .max_streams(StreamType::UniDi, self.max_streams_uni) - .idle_timeout(Duration::from_secs(self.idle_timeout)) - .cc_algorithm(self.congestion_control) - .pacing(self.pacing); - - if let Some(&first) = self.quic_version.first() { - let all = if self.quic_version[1..].contains(&first) { - &self.quic_version[1..] - } else { - &self.quic_version - }; - params.versions(first, all.to_vec()) - } else { - let version = match alpn { - "h3" | "hq-interop" => Version::Version1, - "h3-29" | "hq-29" => Version::Draft29, - "h3-30" | "hq-30" => Version::Draft30, - "h3-31" | "hq-31" => Version::Draft31, - "h3-32" | "hq-32" => Version::Draft32, - _ => Version::default(), - }; - params.versions(version, Version::all()) - } - } -} - fn get_output_file( url: &Url, output_dir: &Option, @@ -889,11 +786,11 @@ fn create_http3_client( ) -> Res { let mut transport = Connection::new_client( hostname, - &[&args.alpn], + &[&args.shared.alpn], Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), local_addr, remote_addr, - args.quic_parameters.get(args.alpn.as_str()), + args.shared.quic_parameters.get(args.shared.alpn.as_str()), Instant::now(), )?; let ciphers = args.get_ciphers(); @@ -903,9 +800,9 @@ fn create_http3_client( let mut client = Http3Client::new_with_conn( transport, Http3Parameters::default() - .max_table_size_encoder(args.max_table_size_encoder) - .max_table_size_decoder(args.max_table_size_decoder) - 
.max_blocked_streams(args.max_blocked_streams) + .max_table_size_encoder(args.shared.max_table_size_encoder) + .max_table_size_decoder(args.shared.max_table_size_decoder) + .max_blocked_streams(args.shared.max_blocked_streams) .max_concurrent_push_streams(args.max_concurrent_push_streams), ); @@ -924,7 +821,7 @@ fn create_http3_client( } fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { - if let Some(qlog_dir) = &args.qlog_dir { + if let Some(qlog_dir) = &args.shared.qlog_dir { let mut qlog_path = qlog_dir.clone(); let filename = format!("{hostname}-{cid}.sqlog"); qlog_path.push(filename); @@ -1002,7 +899,7 @@ async fn main() -> Res<()> { let real_local = socket.local_addr().unwrap(); println!( "{} Client connecting: {:?} -> {:?}", - if args.use_old_http { "H9" } else { "H3" }, + if args.shared.use_old_http { "H9" } else { "H3" }, real_local, remote_addr, ); @@ -1019,7 +916,7 @@ async fn main() -> Res<()> { first = false; - token = if args.use_old_http { + token = if args.shared.use_old_http { old::ClientRunner::new( &args, &mut socket, @@ -1266,8 +1163,8 @@ mod old { url_queue: VecDeque, token: Option, ) -> Res> { - let alpn = match args.alpn.as_str() { - "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.alpn.as_str(), + let alpn = match args.shared.alpn.as_str() { + "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.shared.alpn.as_str(), _ => "hq-interop", }; @@ -1277,7 +1174,7 @@ mod old { Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), local_addr, remote_addr, - args.quic_parameters.get(alpn), + args.shared.quic_parameters.get(alpn), Instant::now(), )?; diff --git a/neqo-server/src/main.rs b/neqo-bin/src/bin/server/main.rs similarity index 74% rename from neqo-server/src/main.rs rename to neqo-bin/src/bin/server/main.rs index 819014b331..da8de3831c 100644 --- a/neqo-server/src/main.rs +++ b/neqo-bin/src/bin/server/main.rs @@ -33,9 +33,7 @@ use neqo_http3::{ Error, Http3OrWebTransportStream, Http3Parameters, Http3Server, 
Http3ServerEvent, StreamId, }; use neqo_transport::{ - server::ValidateAddress, tparams::PreferredAddress, CongestionControlAlgorithm, - ConnectionIdGenerator, ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, - Version, + server::ValidateAddress, ConnectionIdGenerator, Output, RandomConnectionIdGenerator, Version, }; use tokio::time::Sleep; @@ -90,19 +88,13 @@ impl std::error::Error for ServerError {} #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] struct Args { + #[command(flatten)] + shared: neqo_bin::SharedArgs, + /// List of IP:port to listen on #[arg(default_value = "[::]:4433")] hosts: Vec, - #[arg(name = "encoder-table-size", long, default_value = "16384")] - max_table_size_encoder: u64, - - #[arg(name = "decoder-table-size", long, default_value = "16384")] - max_table_size_decoder: u64, - - #[arg(short = 'b', long, default_value = "10")] - max_blocked_streams: u16, - #[arg(short = 'd', long, default_value = "./test-fixture/db")] /// NSS database directory. db: PathBuf, @@ -111,36 +103,10 @@ struct Args { /// Name of key from NSS database. key: String, - #[arg(short = 'a', long, default_value = "h3")] - /// ALPN labels to negotiate. - /// - /// This server still only does HTTP3 no matter what the ALPN says. - alpn: String, - - #[arg(name = "qlog-dir", long, value_parser=clap::value_parser!(PathBuf))] - /// Enable QLOG logging and QLOG traces to this directory - qlog_dir: Option, - - #[arg(name = "qns-test", long)] - /// Enable special behavior for use with QUIC Network Simulator - qns_test: Option, - - #[arg(name = "use-old-http", short = 'o', long)] - /// Use http 0.9 instead of HTTP/3 - use_old_http: bool, - - #[command(flatten)] - quic_parameters: QuicParameters, - #[arg(name = "retry", long)] /// Force a retry retry: bool, - #[arg(short = 'c', long, number_of_values = 1)] - /// The set of TLS cipher suites to enable. 
- /// From: `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`. - ciphers: Vec, - #[arg(name = "ech", long)] /// Enable encrypted client hello (ECH). /// This generates a new set of ECH keys when it is invoked. @@ -150,7 +116,8 @@ struct Args { impl Args { fn get_ciphers(&self) -> Vec { - self.ciphers + self.shared + .ciphers .iter() .filter_map(|c| match c.as_str() { "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), @@ -166,13 +133,13 @@ impl Args { .iter() .filter_map(|host| host.to_socket_addrs().ok()) .flatten() - .chain(self.quic_parameters.preferred_address_v4()) - .chain(self.quic_parameters.preferred_address_v6()) + .chain(self.shared.quic_parameters.preferred_address_v4()) + .chain(self.shared.quic_parameters.preferred_address_v6()) .collect() } fn now(&self) -> Instant { - if self.qns_test.is_some() { + if self.shared.qns_test.is_some() { // When NSS starts its anti-replay it blocks any acceptance of 0-RTT for a // single period. This ensures that an attacker that is able to force a // server to reboot is unable to use that to flush the anti-replay buffers @@ -191,117 +158,6 @@ impl Args { } } -fn from_str(s: &str) -> Result { - let v = u32::from_str_radix(s, 16) - .map_err(|_| ServerError::ArgumentError("versions need to be specified in hex"))?; - Version::try_from(v).map_err(|_| ServerError::ArgumentError("unknown version")) -} - -#[derive(Debug, Parser)] -struct QuicParameters { - #[arg( - short = 'Q', - long, - num_args = 1.., - value_delimiter = ' ', - number_of_values = 1, - value_parser = from_str - )] - /// A list of versions to support in order of preference, in hex. - quic_version: Vec, - - #[arg(long, default_value = "16")] - /// Set the `MAX_STREAMS_BIDI` limit. - max_streams_bidi: u64, - - #[arg(long, default_value = "16")] - /// Set the `MAX_STREAMS_UNI` limit. - max_streams_uni: u64, - - #[arg(long = "idle", default_value = "30")] - /// The idle timeout for connections, in seconds. 
- idle_timeout: u64, - - #[arg(long = "cc", default_value = "newreno")] - /// The congestion controller to use. - congestion_control: CongestionControlAlgorithm, - - #[arg(name = "preferred-address-v4", long)] - /// An IPv4 address for the server preferred address. - preferred_address_v4: Option, - - #[arg(name = "preferred-address-v6", long)] - /// An IPv6 address for the server preferred address. - preferred_address_v6: Option, -} - -impl QuicParameters { - fn get_sock_addr(opt: &Option, v: &str, f: F) -> Option - where - F: FnMut(&SocketAddr) -> bool, - { - let addr = opt - .iter() - .filter_map(|spa| spa.to_socket_addrs().ok()) - .flatten() - .find(f); - assert_eq!( - opt.is_some(), - addr.is_some(), - "unable to resolve '{}' to an {} address", - opt.as_ref().unwrap(), - v, - ); - addr - } - - fn preferred_address_v4(&self) -> Option { - Self::get_sock_addr(&self.preferred_address_v4, "IPv4", SocketAddr::is_ipv4) - } - - fn preferred_address_v6(&self) -> Option { - Self::get_sock_addr(&self.preferred_address_v6, "IPv6", SocketAddr::is_ipv6) - } - - fn preferred_address(&self) -> Option { - let v4 = self.preferred_address_v4(); - let v6 = self.preferred_address_v6(); - if v4.is_none() && v6.is_none() { - None - } else { - let v4 = v4.map(|v4| { - let SocketAddr::V4(v4) = v4 else { - unreachable!(); - }; - v4 - }); - let v6 = v6.map(|v6| { - let SocketAddr::V6(v6) = v6 else { - unreachable!(); - }; - v6 - }); - Some(PreferredAddress::new(v4, v6)) - } - } - - fn get(&self) -> ConnectionParameters { - let mut params = ConnectionParameters::default() - .max_streams(StreamType::BiDi, self.max_streams_bidi) - .max_streams(StreamType::UniDi, self.max_streams_uni) - .idle_timeout(Duration::from_secs(self.idle_timeout)) - .cc_algorithm(self.congestion_control); - if let Some(pa) = self.preferred_address() { - params = params.preferred_address(pa); - } - - if let Some(first) = self.quic_version.first() { - params = params.versions(*first, self.quic_version.clone()); - } - 
params - } -} - fn qns_read_response(filename: &str) -> Option> { let mut file_path = PathBuf::from("/www"); file_path.push(filename.trim_matches(|p| p == '/')); @@ -417,14 +273,14 @@ impl SimpleServer { let server = Http3Server::new( args.now(), &[args.key.clone()], - &[args.alpn.clone()], + &[args.shared.alpn.clone()], anti_replay, cid_mgr, Http3Parameters::default() - .connection_parameters(args.quic_parameters.get()) - .max_table_size_encoder(args.max_table_size_encoder) - .max_table_size_decoder(args.max_table_size_decoder) - .max_blocked_streams(args.max_blocked_streams), + .connection_parameters(args.shared.quic_parameters.get(&args.shared.alpn)) + .max_table_size_encoder(args.shared.max_table_size_encoder) + .max_table_size_decoder(args.shared.max_table_size_decoder) + .max_blocked_streams(args.shared.max_blocked_streams), None, ) .expect("We cannot make a server!"); @@ -470,7 +326,7 @@ impl HttpServer for SimpleServer { let mut response = if let Some(path) = headers.iter().find(|&h| h.name() == ":path") { - if args.qns_test.is_some() { + if args.shared.qns_test.is_some() { if let Some(data) = qns_read_response(path.value()) { ResponseData::from(data) } else { @@ -600,15 +456,15 @@ impl ServersRunner { .expect("unable to setup anti-replay"); let cid_mgr = Rc::new(RefCell::new(RandomConnectionIdGenerator::new(10))); - let mut svr: Box = if args.use_old_http { + let mut svr: Box = if args.shared.use_old_http { Box::new( Http09Server::new( args.now(), &[args.key.clone()], - &[args.alpn.clone()], + &[args.shared.alpn.clone()], anti_replay, cid_mgr, - args.quic_parameters.get(), + args.shared.quic_parameters.get(&args.shared.alpn), ) .expect("We cannot make a server!"), ) @@ -616,7 +472,7 @@ impl ServersRunner { Box::new(SimpleServer::new(args, anti_replay, cid_mgr)) }; svr.set_ciphers(&args.get_ciphers()); - svr.set_qlog_dir(args.qlog_dir.clone()); + svr.set_qlog_dir(args.shared.qlog_dir.clone()); if args.retry { svr.validate_address(ValidateAddress::Always); } 
@@ -720,39 +576,41 @@ async fn main() -> Result<(), io::Error> { init_db(args.db.clone()); - if let Some(testcase) = args.qns_test.as_ref() { - if args.quic_parameters.quic_version.is_empty() { + if let Some(testcase) = args.shared.qns_test.as_ref() { + if args.shared.quic_parameters.quic_version.is_empty() { // Quic Interop Runner expects the server to support `Version1` // only. Exceptions are testcases `versionnegotiation` (not yet // implemented) and `v2`. if testcase != "v2" { - args.quic_parameters.quic_version = vec![Version::Version1]; + args.shared.quic_parameters.quic_version = vec![Version::Version1]; } } else { qwarn!("Both -V and --qns-test were set. Ignoring testcase specific versions."); } + // TODO: More options to deduplicate with client? match testcase.as_str() { "http3" => (), "zerortt" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); - args.quic_parameters.max_streams_bidi = 100; + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); + args.shared.quic_parameters.max_streams_bidi = 100; } "handshake" | "transfer" | "resumption" | "multiconnect" | "v2" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); } "chacha20" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); - args.ciphers.clear(); - args.ciphers + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); + args.shared.ciphers.clear(); + args.shared + .ciphers .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); } "retry" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); args.retry = true; } _ => exit(127), diff --git a/neqo-server/src/old_https.rs b/neqo-bin/src/bin/server/old_https.rs similarity index 98% rename from neqo-server/src/old_https.rs rename to 
neqo-bin/src/bin/server/old_https.rs index 2417c4790c..f36c99c484 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-bin/src/bin/server/old_https.rs @@ -136,7 +136,7 @@ impl Http09Server { return; }; - let re = if args.qns_test.is_some() { + let re = if args.shared.qns_test.is_some() { Regex::new(r"GET +/(\S+)(?:\r)?\n").unwrap() } else { Regex::new(r"GET +/(\d+)(?:\r)?\n").unwrap() @@ -150,7 +150,7 @@ impl Http09Server { Some(path) => { let path = path.as_str(); eprintln!("Path = '{path}'"); - if args.qns_test.is_some() { + if args.shared.qns_test.is_some() { qns_read_response(path) } else { let count = path.parse().unwrap(); diff --git a/neqo-bin/src/lib.rs b/neqo-bin/src/lib.rs new file mode 100644 index 0000000000..4fe47d5cbf --- /dev/null +++ b/neqo-bin/src/lib.rs @@ -0,0 +1,204 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{ + fmt::{self, Display}, + net::{SocketAddr, ToSocketAddrs}, + path::PathBuf, + time::Duration, +}; + +use clap::Parser; +use neqo_transport::{ + tparams::PreferredAddress, CongestionControlAlgorithm, ConnectionParameters, StreamType, + Version, +}; + +#[derive(Debug, Parser)] +pub struct SharedArgs { + #[arg(short = 'a', long, default_value = "h3")] + /// ALPN labels to negotiate. + /// + /// This client still only does HTTP/3 no matter what the ALPN says. 
+ pub alpn: String, + + #[arg(name = "qlog-dir", long, value_parser=clap::value_parser!(PathBuf))] + /// Enable QLOG logging and QLOG traces to this directory + pub qlog_dir: Option, + + #[arg(name = "encoder-table-size", long, default_value = "16384")] + pub max_table_size_encoder: u64, + + #[arg(name = "decoder-table-size", long, default_value = "16384")] + pub max_table_size_decoder: u64, + + #[arg(name = "max-blocked-streams", short = 'b', long, default_value = "10")] + pub max_blocked_streams: u16, + + #[arg(short = 'c', long, number_of_values = 1)] + /// The set of TLS cipher suites to enable. + /// From: `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`. + pub ciphers: Vec, + + #[arg(name = "qns-test", long)] + /// Enable special behavior for use with QUIC Network Simulator + pub qns_test: Option, + + #[arg(name = "use-old-http", short = 'o', long)] + /// Use http 0.9 instead of HTTP/3 + pub use_old_http: bool, + + #[command(flatten)] + pub quic_parameters: QuicParameters, +} + +#[derive(Debug, Parser)] +pub struct QuicParameters { + #[arg( + short = 'Q', + long, + num_args = 1.., + value_delimiter = ' ', + number_of_values = 1, + value_parser = from_str)] + /// A list of versions to support, in hex. + /// The first is the version to attempt. + /// Adding multiple values adds versions in order of preference. + /// If the first listed version appears in the list twice, the position + /// of the second entry determines the preference order of that version. + pub quic_version: Vec, + + #[arg(long, default_value = "16")] + /// Set the `MAX_STREAMS_BIDI` limit. + pub max_streams_bidi: u64, + + #[arg(long, default_value = "16")] + /// Set the `MAX_STREAMS_UNI` limit. + pub max_streams_uni: u64, + + #[arg(long = "idle", default_value = "30")] + /// The idle timeout for connections, in seconds. + pub idle_timeout: u64, + + #[arg(long = "cc", default_value = "newreno")] + /// The congestion controller to use. 
+ pub congestion_control: CongestionControlAlgorithm, + + #[arg(long = "pacing")] + /// Whether pacing is enabled. + pub pacing: bool, + + #[arg(name = "preferred-address-v4", long)] + /// An IPv4 address for the server preferred address. + pub preferred_address_v4: Option, + + #[arg(name = "preferred-address-v6", long)] + /// An IPv6 address for the server preferred address. + pub preferred_address_v6: Option, +} + +impl QuicParameters { + fn get_sock_addr(opt: &Option, v: &str, f: F) -> Option + where + F: FnMut(&SocketAddr) -> bool, + { + let addr = opt + .iter() + .filter_map(|spa| spa.to_socket_addrs().ok()) + .flatten() + .find(f); + assert_eq!( + opt.is_some(), + addr.is_some(), + "unable to resolve '{}' to an {} address", + opt.as_ref().unwrap(), + v, + ); + addr + } + + #[must_use] + pub fn preferred_address_v4(&self) -> Option { + Self::get_sock_addr(&self.preferred_address_v4, "IPv4", SocketAddr::is_ipv4) + } + + #[must_use] + pub fn preferred_address_v6(&self) -> Option { + Self::get_sock_addr(&self.preferred_address_v6, "IPv6", SocketAddr::is_ipv6) + } + + #[must_use] + pub fn preferred_address(&self) -> Option { + let v4 = self.preferred_address_v4(); + let v6 = self.preferred_address_v6(); + if v4.is_none() && v6.is_none() { + None + } else { + let v4 = v4.map(|v4| { + let SocketAddr::V4(v4) = v4 else { + unreachable!(); + }; + v4 + }); + let v6 = v6.map(|v6| { + let SocketAddr::V6(v6) = v6 else { + unreachable!(); + }; + v6 + }); + Some(PreferredAddress::new(v4, v6)) + } + } + + #[must_use] + pub fn get(&self, alpn: &str) -> ConnectionParameters { + let params = ConnectionParameters::default() + .max_streams(StreamType::BiDi, self.max_streams_bidi) + .max_streams(StreamType::UniDi, self.max_streams_uni) + .idle_timeout(Duration::from_secs(self.idle_timeout)) + .cc_algorithm(self.congestion_control) + .pacing(self.pacing); + + if let Some(&first) = self.quic_version.first() { + let all = if self.quic_version[1..].contains(&first) { + 
&self.quic_version[1..] + } else { + &self.quic_version + }; + params.versions(first, all.to_vec()) + } else { + let version = match alpn { + "h3" | "hq-interop" => Version::Version1, + "h3-29" | "hq-29" => Version::Draft29, + "h3-30" | "hq-30" => Version::Draft30, + "h3-31" | "hq-31" => Version::Draft31, + "h3-32" | "hq-32" => Version::Draft32, + _ => Version::default(), + }; + params.versions(version, Version::all()) + } + } +} + +fn from_str(s: &str) -> Result { + let v = u32::from_str_radix(s, 16) + .map_err(|_| Error::Argument("versions need to be specified in hex"))?; + Version::try_from(v).map_err(|_| Error::Argument("unknown version")) +} + +#[derive(Debug)] +pub enum Error { + Argument(&'static str), +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Error: {self:?}")?; + Ok(()) + } +} + +impl std::error::Error for Error {} diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml deleted file mode 100644 index 6fa361020e..0000000000 --- a/neqo-client/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "neqo-client" -description = "A basic QUIC HTTP/0.9 and HTTP/3 client." 
-authors.workspace = true -homepage.workspace = true -repository.workspace = true -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true - -[lints] -workspace = true - -[dependencies] -# neqo-client is not used in Firefox, so we can be liberal with dependency versions -clap = { version = "4.4", default-features = false, features = ["std", "color", "help", "usage", "error-context", "suggestions", "derive"] } -futures = { version = "0.3", default-features = false } -hex = { version = "0.4", default-features = false, features = ["std"] } -log = { version = "0.4", default-features = false } -neqo-common = { path = "./../neqo-common", features = ["udp"] } -neqo-crypto = { path = "./../neqo-crypto" } -neqo-http3 = { path = "./../neqo-http3" } -neqo-qpack = { path = "./../neqo-qpack" } -neqo-transport = { path = "./../neqo-transport" } -qlog = { version = "0.12", default-features = false } -tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } -url = { version = "2.5", default-features = false } From c551f49948b678a0fbf34d25cb2ce2004a70cc3d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 10:36:21 +0200 Subject: [PATCH 216/321] ci: Post benchmark results to PR as comment (#1723) * ci: Post benchmark results to PR as comment * Directory does not exist on first run * Increase permissions * Comment from check.yml * Retry * No upload * Remove statement * Again * And again * Proper Markdown formatting * Add link to artifacts * Use hg since git is now failing :-( * Update bench.yml * Retry * Multiline * permissions * Rollback * Permissions --------- Signed-off-by: Lars Eggert --- .github/workflows/bench.yml | 71 +++++++++++++++++++------------------ .github/workflows/check.yml | 2 ++ 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 57ad77d722..176a80deac 100644 --- 
a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -15,6 +15,8 @@ jobs: bench: name: Benchmark runs-on: self-hosted + permissions: + pull-requests: write defaults: run: shell: bash @@ -40,7 +42,7 @@ jobs: cargo +$TOOLCHAIN bench --features bench --no-run cargo +$TOOLCHAIN build --release --bin neqo-client --bin neqo-server - - name: Download criterion results + - name: Download cached main-branch results id: criterion-cache uses: actions/cache/restore@v4 with: @@ -48,14 +50,6 @@ jobs: key: criterion-${{ runner.os }}-${{ hashFiles('./target/criterion/**.json') }} restore-keys: criterion-${{ runner.os }}- - # - name: Download github-action-benchmark results - # id: benchmark-cache - # uses: actions/cache/restore@v4 - # with: - # path: ./cache - # key: action-benchmark-${{ runner.os}}-${{ hashFiles('./cache/**.TODO') }} - # restore-keys: action-benchmark-${{ runner.os }}- - # Disable turboboost, hyperthreading and use performance governor. - name: Prepare machine run: sudo /root/bin/prep.sh @@ -64,15 +58,22 @@ jobs: - name: Run cargo bench run: | taskset -c 0 nice -n -20 \ - cargo +$TOOLCHAIN bench --features bench | tee output.txt + cargo +$TOOLCHAIN bench --features bench | tee results.txt # Pin the transfer benchmark to core 0 and run it at elevated priority inside perf. # Work around https://github.com/flamegraph-rs/flamegraph/issues/248 by passing explicit perf arguments. - name: Profile cargo bench transfer run: | + # This re-runs part of the previous step, and would hence overwrite part of the criterion results. + # Avoid that by shuffling the directories around so this run uses its own results directory. + mv target/criterion target/criterion-bench + mv target/criterion-transfer-profile target/criterion || true taskset -c 0 nice -n -20 \ cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ --bench --exact "Run multiple transfers with varying seeds" + # And now restore the directories. 
+ mv target/criterion target/criterion-transfer-profile + mv target/criterion-bench target/criterion - name: Profile client/server transfer run: | @@ -98,19 +99,6 @@ jobs: run: sudo /root/bin/unprep.sh if: success() || failure() || cancelled() - # TODO: Wait for this action to be allowlisted. And then figure out how to only upload - # benchmark data when the main branch is being updated (e.g., if: ${{ github.ref == "refs/heads/main" }}) - # - name: Store current benchmark results - # uses: benchmark-action/github-action-benchmark@v1 - # with: - # tool: 'cargo' - # output-file-path: output.txt - # external-data-json-path: ./cache/benchmark-data.json - # fail-on-alert: true - # github-token: ${{ secrets.GITHUB_TOKEN }} - # comment-on-alert: true - # summary-always: true - - name: Convert for profiler.firefox.com run: | perf script -i perf.data -F +pid > transfer.perf & @@ -122,27 +110,42 @@ jobs: mv server/flamegraph.svg server.svg rm neqo.svg - - name: Upload criterion results + - name: Cache main-branch results if: github.ref == 'refs/heads/main' uses: actions/cache/save@v4 with: path: ./target/criterion key: ${{ steps.criterion-cache.outputs.cache-primary-key }} - # - name: Upload github-action-benchmark results - # if: github.ref == 'refs/heads/main' - # uses: actions/cache/save@v4 - # with: - # path: ./cache - # key: ${{ steps.benchmark-cache.outputs.cache-primary-key }} - - - name: Archive perf data + - name: Export perf data + id: export uses: actions/upload-artifact@v4 with: name: ${{ github.event.repository.name }}-${{ github.sha }} path: | *.svg *.perf - output.txt - target/criterion + results.* + target/criterion* compression-level: 9 + + - name: Format results as Markdown + id: results + run: | + grep -Ev 'ignored|running \d+ tests|%\)' results.txt |\ + sed -E -e 's/(Performance has regressed.)/:broken_heart: **\1**/gi' \ + -e 's/(Performance has improved.)/:green_heart: **\1**/gi' \ + -e 's/^ +/ * /gi' \ + -e 's/^([a-z0-9].*)$/**\1**/gi' \ + -e 
's/(change:[^%]*%)([^%]*%)(.*)/\1**\2**\3/gi' \ + > results.md + echo '' >> results.md + echo '[:arrow_down: Download full results](${{ steps.export.outputs.artifact-url }})' >> results.md + + - name: "Post results to PR" + uses: thollander/actions-comment-pull-request@v2 + with: + filePath: results.md + pr_number: ${{ github.event.pull_request.number }} + comment_tag: bench-results + diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 2a110e22b1..e96466e2e1 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -156,4 +156,6 @@ jobs: bench: name: "Benchmark" needs: [check] + permissions: + pull-requests: write uses: ./.github/workflows/bench.yml From 3ed8946e7acf1b034b2e7cbac0299ab7ff4e5376 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 16:08:24 +0200 Subject: [PATCH 217/321] ci: Export perf reports from benchmarks (#1731) In addition to flamegraphs and profiler.firefox.com data. --- .github/workflows/bench.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 176a80deac..ddaa606c20 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -110,6 +110,13 @@ jobs: mv server/flamegraph.svg server.svg rm neqo.svg + - name: Generate perf reports + run: | + perf report -i perf.data --no-children --stdio > transfer.perf.txt & + perf report -i client/perf.data --no-children --stdio > client.perf.txt & + perf report -i server/perf.data --no-children --stdio > server.perf.txt & + wait + - name: Cache main-branch results if: github.ref == 'refs/heads/main' uses: actions/cache/save@v4 @@ -125,6 +132,7 @@ jobs: path: | *.svg *.perf + *.txt results.* target/criterion* compression-level: 9 From cbd444187aa032ea373decbeec4f2fc0f97a4247 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 16:10:48 +0200 Subject: [PATCH 218/321] ci: Run `actionlint` on changes to `.github` (#1733) --- 
.github/actionlint-matcher.json | 17 +++++++++++++++++ .github/workflows/actionlint.yml | 26 ++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 .github/actionlint-matcher.json create mode 100644 .github/workflows/actionlint.yml diff --git a/.github/actionlint-matcher.json b/.github/actionlint-matcher.json new file mode 100644 index 0000000000..4613e1617b --- /dev/null +++ b/.github/actionlint-matcher.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "owner": "actionlint", + "pattern": [ + { + "regexp": "^(?:\\x1b\\[\\d+m)?(.+?)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*: (?:\\x1b\\[\\d+m)*(.+?)(?:\\x1b\\[\\d+m)* \\[(.+?)\\]$", + "file": 1, + "line": 2, + "column": 3, + "message": 4, + "code": 5 + } + ] + } + ] +} diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml new file mode 100644 index 0000000000..f11b1b1222 --- /dev/null +++ b/.github/workflows/actionlint.yml @@ -0,0 +1,26 @@ +name: Lint GitHub Actions workflows +on: + push: + branches: ["main"] + paths: [".github"] + pull_request: + branches: ["main"] + paths: [".github"] + merge_group: + +jobs: + actionlint: + runs-on: ubuntu-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v4 + - name: Download actionlint + id: get_actionlint + run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) + - name: Check workflow files + run: | + echo "::add-matcher::.github/actionlint-matcher.json" + ${{ steps.get_actionlint.outputs.executable }} -color + From b4fd4133e6e6b112ec3c4a755d098b084553c74d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 16:38:17 +0200 Subject: [PATCH 219/321] Fix path pattern Signed-off-by: Lars Eggert --- .github/workflows/actionlint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index 
f11b1b1222..1ddcfb4f09 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -2,10 +2,10 @@ name: Lint GitHub Actions workflows on: push: branches: ["main"] - paths: [".github"] + paths: [".github/**"] pull_request: branches: ["main"] - paths: [".github"] + paths: [".github/**"] merge_group: jobs: From 532dcc5fb65756efc43e2cf4b79386e58560d4ae Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 18:17:20 +0200 Subject: [PATCH 220/321] ci: Safe commenting on PRs (#1729) * ci: Safe commenting on PRs This generalizes and refactors what the QNS workflow did. * Test * Add missing file * Finalize * Fixes * While I'm here, make `actionlint` happy * Fix * Can't quote BUILD_TYPE * Can't quote CONFIG_PATH * Condition is already in called action * Proper quoting for backslashes * Windows again * Finalize --- .../actions/pr-comment-data-export/action.yml | 37 +++++++++++++++++++ .github/actions/pr-comment/action.yml | 29 +++++++++++++++ .../actions/quic-interop-runner/action.yml | 31 +++++++--------- .github/workflows/bench-comment.yml | 24 ++++++++++++ .github/workflows/bench.yml | 35 +++++++++--------- .github/workflows/check.yml | 33 +++++++++-------- .github/workflows/qns-comment.yml | 36 ++---------------- 7 files changed, 142 insertions(+), 83 deletions(-) create mode 100644 .github/actions/pr-comment-data-export/action.yml create mode 100644 .github/actions/pr-comment/action.yml create mode 100644 .github/workflows/bench-comment.yml diff --git a/.github/actions/pr-comment-data-export/action.yml b/.github/actions/pr-comment-data-export/action.yml new file mode 100644 index 0000000000..8a8cc50232 --- /dev/null +++ b/.github/actions/pr-comment-data-export/action.yml @@ -0,0 +1,37 @@ +name: 'Export data for PR comment' +description: 'Exports the neccessary data to post a PR comment securely.' + +# This action might be running off of a fork and would thus not have write +# permissions on the origin repository. 
In order to allow a separate +# priviledged action to post a comment on a pull request, upload the +# necessary metadata. + +inputs: + name: + description: 'A unique name for the artifact used for exporting.' + required: true + contents: + description: 'A filename with a comment (in Markdown) to be added to the PR.' + required: true + log-url: + description: 'A URL to a log to be linked from the PR comment.' + required: false + +runs: + using: composite + steps: + - if: github.event_name == 'pull_request' + shell: bash + run: | + mkdir comment-data + cp "${{ inputs.contents }}" comment-data/contents + echo "${{ inputs.name }}" > comment-data/name + echo "${{ inputs.log-url }}" > comment-data/log-url + echo "${{ github.event.number }}" > comment-data/pr-number + + - if: github.event_name == 'pull_request' + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.name }} + path: comment-data + retention-days: 1 diff --git a/.github/actions/pr-comment/action.yml b/.github/actions/pr-comment/action.yml new file mode 100644 index 0000000000..ff46d40310 --- /dev/null +++ b/.github/actions/pr-comment/action.yml @@ -0,0 +1,29 @@ +name: 'Comment on PR' +description: 'Post a PR comment securely.' + +inputs: + name: + description: 'Artifact name to import comment data from.' 
+ required: true + +runs: + using: composite + steps: + - uses: actions/download-artifact@v4 + with: + run-id: ${{ github.event.workflow_run.id }} + name: ${{ inputs.name }} + + - id: pr-number + shell: bash + run: echo "number=$(cat pr-number)" >> "$GITHUB_OUTPUT" + + - shell: bash + run: | + [ -s log-url ] && echo "" >> contents && echo "[:arrow_down: Download logs]($(cat log-url))" >> contents + + - uses: thollander/actions-comment-pull-request@v2 + with: + filePath: contents + pr_number: ${{ steps.pr-number.outputs.number }} + comment_tag: ${{ inputs.name }}-comment diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index 6e79b97cfe..4c2f695ab4 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -88,24 +88,19 @@ runs: name: logs path: quic-interop-runner/logs - # This action might be running off of a fork and would thus not have write - # permissions on the origin repository. In order to allow a separate - # priviledged action to post a comment on a pull request, upload the - # necessary metadata. 
- - name: store comment-data - shell: bash - if: github.event_name == 'pull_request' - env: - PULL_REQUEST_NUMBER: ${{ github.event.number }} + - name: Format GitHub comment run: | - mkdir comment-data - mv quic-interop-runner/summary comment-data/summary - echo $PULL_REQUEST_NUMBER > comment-data/pr-number - echo '${{ steps.artifact-upload-step.outputs.artifact-url }}' > comment-data/logs-url + echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment + echo '' >> comment + echo '```' >> comment + cat quic-interop-runner/summary >> comment + echo '```' >> comment + echo '' >> comment + shell: bash - - name: Upload comment data - uses: actions/upload-artifact@v4 - if: github.event_name == 'pull_request' + - name: Export PR comment data + uses: ./.github/actions/pr-comment-data-export with: - name: comment-data - path: ./comment-data + name: qns + contents: comment + log-url: ${{ steps.artifact-upload-step.outputs.artifact-url }} diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml new file mode 100644 index 0000000000..4eff9ca60a --- /dev/null +++ b/.github/workflows/bench-comment.yml @@ -0,0 +1,24 @@ +# Post test results as pull request comment. +# +# This is done as a separate workflow as it requires write permissions. The +# tests itself might run off of a fork, i.e., an untrusted environment and should +# thus not be granted write permissions. 
+ +name: Benchmark Comment + +on: + workflow_run: + workflows: ["Bench"] + types: + - completed + +jobs: + comment: + permissions: + pull-requests: write + runs-on: ubuntu-latest + if: github.event.workflow_run.event == 'pull_request' + steps: + - uses: ./.github/actions/pr-comment + with: + name: bench diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index ddaa606c20..f19011afe3 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,6 +1,9 @@ name: Bench on: - workflow_call: + workflow_run: + workflows: ["CI"] + types: + - completed workflow_dispatch: env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true @@ -8,7 +11,7 @@ env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 TOOLCHAIN: nightly - RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment, -Cforce-frame-pointers=yes + RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment, -C force-frame-pointers=yes PERF_CMD: record -o perf.data -F997 --call-graph fp -g jobs: @@ -39,8 +42,8 @@ jobs: - name: Build run: | - cargo +$TOOLCHAIN bench --features bench --no-run - cargo +$TOOLCHAIN build --release --bin neqo-client --bin neqo-server + cargo "+$TOOLCHAIN" bench --features bench --no-run + cargo "+$TOOLCHAIN" build --release --bin neqo-client --bin neqo-server - name: Download cached main-branch results id: criterion-cache @@ -58,7 +61,7 @@ jobs: - name: Run cargo bench run: | taskset -c 0 nice -n -20 \ - cargo +$TOOLCHAIN bench --features bench | tee results.txt + cargo "+$TOOLCHAIN" bench --features bench | tee results.txt # Pin the transfer benchmark to core 0 and run it at elevated priority inside perf. # Work around https://github.com/flamegraph-rs/flamegraph/issues/248 by passing explicit perf arguments. 
@@ -69,7 +72,7 @@ jobs: mv target/criterion target/criterion-bench mv target/criterion-transfer-profile target/criterion || true taskset -c 0 nice -n -20 \ - cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ + cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ --bench --exact "Run multiple transfers with varying seeds" # And now restore the directories. mv target/criterion target/criterion-transfer-profile @@ -80,13 +83,13 @@ jobs: { mkdir server; \ cd server; \ taskset -c 0 nice -n -20 \ - cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" \ - --bin neqo-server -- --db ../test-fixture/db $HOST:4433 || true; } & + cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" \ + --bin neqo-server -- --db ../test-fixture/db "$HOST:4433" || true; } & mkdir client; \ cd client; \ time taskset -c 1 nice -n -20 \ - cargo +$TOOLCHAIN flamegraph -v -c "$PERF_CMD" \ - --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" \ + --bin neqo-client -- --output-dir . 
"https://$HOST:4433/$SIZE" killall -INT neqo-server cd ${{ github.workspace }} [ "$(wc -c < client/"$SIZE")" -eq "$SIZE" ] || exit 1 @@ -148,12 +151,10 @@ jobs: -e 's/(change:[^%]*%)([^%]*%)(.*)/\1**\2**\3/gi' \ > results.md echo '' >> results.md - echo '[:arrow_down: Download full results](${{ steps.export.outputs.artifact-url }})' >> results.md - - name: "Post results to PR" - uses: thollander/actions-comment-pull-request@v2 + - name: Export PR comment data + uses: ./.github/actions/pr-comment-data-export with: - filePath: results.md - pr_number: ${{ github.event.pull_request.number }} - comment_tag: bench-results - + name: bench + contents: results.md + log-url: ${{ steps.export.outputs.artifact-url }} diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index e96466e2e1..48b0b15cc2 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -72,8 +72,11 @@ jobs: - name: Use MSYS2 environment and install more dependencies (Windows) if: runner.os == 'Windows' run: | - echo "C:\\msys64\\usr\\bin" >> "$GITHUB_PATH" - echo "C:\\msys64\\mingw64\\bin" >> "$GITHUB_PATH" + # shellcheck disable=SC2028 + { + echo "C:\\msys64\\usr\\bin" + echo "C:\\msys64\\mingw64\\bin" + } >> "$GITHUB_PATH" /c/msys64/usr/bin/pacman -S --noconfirm nsinstall python3 -m pip install git+https://github.com/nodejs/gyp-next echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" @@ -85,9 +88,11 @@ jobs: - name: Set up NSS/NSPR build environment (Windows) if: runner.os == 'Windows' run: | - echo "GYP_MSVS_OVERRIDE_PATH=$VSINSTALLDIR" >> "$GITHUB_ENV" - echo "GYP_MSVS_VERSION=2022" >> "$GITHUB_ENV" - echo "BASH=$SHELL" >> "$GITHUB_ENV" + { + echo "GYP_MSVS_OVERRIDE_PATH=$VSINSTALLDIR" + echo "GYP_MSVS_VERSION=2022" + echo "BASH=$SHELL" + } >> "$GITHUB_ENV" # See https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash rm /usr/bin/link.exe @@ -101,19 +106,23 @@ jobs: uses: ./.github/actions/nss - name: Build - run: cargo +${{ 
matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci + run: | + # shellcheck disable=SC2086 + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci - name: Run tests and determine coverage run: | + # shellcheck disable=SC2086 cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --all-targets --features ci --no-fail-fast --lcov --output-path lcov.info cargo +${{ matrix.rust-toolchain }} bench --features bench --no-run - name: Run client/server transfer run: | + # shellcheck disable=SC2086 cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server - target/$BUILD_DIR/neqo-server $HOST:4433 & + "target/$BUILD_DIR/neqo-server" "$HOST:4433" & PID=$! - target/$BUILD_DIR/neqo-client --output-dir . https://$HOST:4433/$SIZE + "target/$BUILD_DIR/neqo-client" --output-dir . "https://$HOST:4433/$SIZE" kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: @@ -127,6 +136,7 @@ jobs: if [ "${{ matrix.rust-toolchain }}" != "nightly" ]; then CONFIG_PATH="--config-path=$(mktemp)" fi + # shellcheck disable=SC2086 cargo +${{ matrix.rust-toolchain }} fmt --all -- --check $CONFIG_PATH if: success() || failure() @@ -152,10 +162,3 @@ jobs: fail_ci_if_error: false token: ${{ secrets.CODECOV_TOKEN }} if: matrix.type == 'debug' && matrix.rust-toolchain == 'stable' - - bench: - name: "Benchmark" - needs: [check] - permissions: - pull-requests: write - uses: ./.github/workflows/bench.yml diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index 8b897b259a..57a9a151cd 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -1,7 +1,7 @@ # Post test results as pull request comment. # # This is done as a separate workflow as it requires write permissions. The -# tests itself might run off of a fork, i.e. 
an untrusted environment and should +# tests itself might run off of a fork, i.e., an untrusted environment and should # thus not be granted write permissions. name: QUIC Network Simulator Comment @@ -21,36 +21,6 @@ jobs: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'failure' steps: - - name: Download comment-data - uses: actions/download-artifact@v4 + - uses: ./.github/actions/pr-comment with: - run-id: ${{ github.event.workflow_run.id }} - name: comment-data - github-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Format GitHub comment - run: | - pwd - ls -la - echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment - echo '' >> comment - echo '```' >> comment - cat summary >> comment - echo '```' >> comment - echo '' >> comment - echo 'Download artifacts [here](' >> comment - cat logs-url >> comment - echo ').' >> comment - shell: bash - - - name: Read PR Number - id: pr-number - run: echo "::set-output name=number::$(cat pr-number)" - shell: bash - - - name: Comment PR - uses: thollander/actions-comment-pull-request@v2 - with: - filePath: comment - pr_number: ${{ steps.pr-number.outputs.number }} - comment_tag: quic-network-simulator-comment + name: qns From 3ce95813911c1e1b8f9077aaa618a0717e02b7ae Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 19:09:05 +0200 Subject: [PATCH 221/321] Use `GITHUB_TOKEN` --- .github/actions/pr-comment/action.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/actions/pr-comment/action.yml b/.github/actions/pr-comment/action.yml index ff46d40310..9ff0509b9e 100644 --- a/.github/actions/pr-comment/action.yml +++ b/.github/actions/pr-comment/action.yml @@ -13,6 +13,7 @@ runs: with: run-id: ${{ github.event.workflow_run.id }} name: ${{ inputs.name }} + github-token: ${{ secrets.GITHUB_TOKEN }} - id: pr-number shell: bash From 869afeaad0d1fe70c1ed267836f21189132ef04c Mon Sep 17 00:00:00 2001 From: Valentin Gosu 
<1454649+valenting@users.noreply.github.com> Date: Mon, 11 Mar 2024 18:10:15 +0100 Subject: [PATCH 222/321] Priority headers should be set by the application (#1725) Co-authored-by: Lars Eggert --- neqo-http3/src/connection.rs | 3 --- neqo-http3/tests/priority.rs | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index bb2b5a6ce0..287ea2c2af 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -835,9 +835,6 @@ impl Http3Connection { final_headers.push(Header::new(":protocol", conn_type.string())); } - if let Some(priority_header) = request.priority.header() { - final_headers.push(priority_header); - } final_headers.extend_from_slice(request.headers); Ok(final_headers) } diff --git a/neqo-http3/tests/priority.rs b/neqo-http3/tests/priority.rs index d342082d56..77d19e6fcf 100644 --- a/neqo-http3/tests/priority.rs +++ b/neqo-http3/tests/priority.rs @@ -68,7 +68,7 @@ fn priority_update() { Instant::now(), "GET", &("https", "something.com", "/"), - &[], + &[Header::new("priority", "u=4,i")], Priority::new(4, true), ) .unwrap(); @@ -129,7 +129,7 @@ fn priority_update_dont_send_for_cancelled_stream() { Instant::now(), "GET", &("https", "something.com", "/"), - &[], + &[Header::new("priority", "u=5")], Priority::new(5, false), ) .unwrap(); From a8ee7e11cd30b0cfa19915e30f29ad32063f2566 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 19:39:32 +0200 Subject: [PATCH 223/321] Debug --- .github/actions/pr-comment-data-export/action.yml | 2 ++ .github/workflows/bench-comment.yml | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/actions/pr-comment-data-export/action.yml b/.github/actions/pr-comment-data-export/action.yml index 8a8cc50232..642e41b4e3 100644 --- a/.github/actions/pr-comment-data-export/action.yml +++ b/.github/actions/pr-comment-data-export/action.yml @@ -20,6 +20,8 @@ inputs: runs: using: composite steps: + - run: 
echo "${{ github.event_name }}" + shell: bash - if: github.event_name == 'pull_request' shell: bash run: | diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index 4eff9ca60a..3f8ba16de7 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -17,8 +17,9 @@ jobs: permissions: pull-requests: write runs-on: ubuntu-latest - if: github.event.workflow_run.event == 'pull_request' + # if: github.event.workflow_run.event == 'pull_request' steps: + - run: echo ${{ github.event.workflow_run.event }} - uses: ./.github/actions/pr-comment with: name: bench From a103cde57210d76b1241387a9b1716ed7d1f75ae Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 19:47:29 +0200 Subject: [PATCH 224/321] Fix for `.github/workflows/bench-comment.yml` --- .github/workflows/bench-comment.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index 3f8ba16de7..10f8eda131 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -17,8 +17,11 @@ jobs: permissions: pull-requests: write runs-on: ubuntu-latest - # if: github.event.workflow_run.event == 'pull_request' + if: > + github.event.workflow_run.event == 'pull_request' || + github.event.workflow_run.event == 'workflow_run' steps: + - uses: actions/checkout@v4 - run: echo ${{ github.event.workflow_run.event }} - uses: ./.github/actions/pr-comment with: From 15e615c13f6506736ebe10a087686b8139b8415d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 19:55:12 +0200 Subject: [PATCH 225/321] Remove token --- .github/actions/pr-comment/action.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/actions/pr-comment/action.yml b/.github/actions/pr-comment/action.yml index 9ff0509b9e..ff46d40310 100644 --- a/.github/actions/pr-comment/action.yml +++ b/.github/actions/pr-comment/action.yml @@ -13,7 +13,6 @@ runs: with: 
run-id: ${{ github.event.workflow_run.id }} name: ${{ inputs.name }} - github-token: ${{ secrets.GITHUB_TOKEN }} - id: pr-number shell: bash From e76645f2094365f4228b8c3b6ed8cf8694cf8849 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 11 Mar 2024 20:09:16 +0200 Subject: [PATCH 226/321] Also run on `workflow_run` --- .github/actions/pr-comment-data-export/action.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/actions/pr-comment-data-export/action.yml b/.github/actions/pr-comment-data-export/action.yml index 642e41b4e3..dee56f0035 100644 --- a/.github/actions/pr-comment-data-export/action.yml +++ b/.github/actions/pr-comment-data-export/action.yml @@ -20,9 +20,9 @@ inputs: runs: using: composite steps: - - run: echo "${{ github.event_name }}" - shell: bash - - if: github.event_name == 'pull_request' + - if: > + github.event.workflow_run.event == 'pull_request' || + github.event.workflow_run.event == 'workflow_run' shell: bash run: | mkdir comment-data @@ -31,7 +31,9 @@ runs: echo "${{ inputs.log-url }}" > comment-data/log-url echo "${{ github.event.number }}" > comment-data/pr-number - - if: github.event_name == 'pull_request' + - if: > + github.event.workflow_run.event == 'pull_request' || + github.event.workflow_run.event == 'workflow_run' uses: actions/upload-artifact@v4 with: name: ${{ inputs.name }} From d48fbed733763deda5eb06099c5ae78b90f81937 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 08:49:43 +0200 Subject: [PATCH 227/321] fix: Disable generation of criterion HTML graphs (#1728) * fix: Disable generation of criterion HTML graphs Because we pin the benchmark runs to single cores, and criterion hence runs the report generation also on those cores, and based on `top` output it appears as this is parallelized and hence may interfere with the benchmark runs. 
* Try `--noplot` (and patch things so that actually works) * Missed one * Don't post comment, since this is being refactored * Fix merge --------- Signed-off-by: Lars Eggert --- .github/workflows/bench.yml | 4 ++-- neqo-bin/Cargo.toml | 6 ++++++ neqo-common/Cargo.toml | 4 ++++ neqo-crypto/Cargo.toml | 4 ++++ neqo-http3/Cargo.toml | 4 ++++ neqo-qpack/Cargo.toml | 4 ++++ neqo-transport/Cargo.toml | 4 ++++ test-fixture/Cargo.toml | 4 ++++ 8 files changed, 32 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index f19011afe3..e950671f58 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -61,7 +61,7 @@ jobs: - name: Run cargo bench run: | taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" bench --features bench | tee results.txt + cargo "+$TOOLCHAIN" bench --features bench -- --noplot | tee results.txt # Pin the transfer benchmark to core 0 and run it at elevated priority inside perf. # Work around https://github.com/flamegraph-rs/flamegraph/issues/248 by passing explicit perf arguments. @@ -73,7 +73,7 @@ jobs: mv target/criterion-transfer-profile target/criterion || true taskset -c 0 nice -n -20 \ cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ - --bench --exact "Run multiple transfers with varying seeds" + --bench --exact "Run multiple transfers with varying seeds" --noplot # And now restore the directories. 
mv target/criterion target/criterion-transfer-profile mv target/criterion-bench target/criterion diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index 8b7b48ab86..ae8801eb3a 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -12,10 +12,12 @@ license.workspace = true [[bin]] name = "neqo-client" path = "src/bin/client.rs" +bench = false [[bin]] name = "neqo-server" path = "src/bin/server/main.rs" +bench = false [lints] workspace = true @@ -35,3 +37,7 @@ qlog = { version = "0.12", default-features = false } regex = { version = "1.9", default-features = false, features = ["unicode-perl"] } tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = { version = "2.5", default-features = false } + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 5d4d3d0d26..dae8362bfd 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -32,3 +32,7 @@ udp = ["dep:quinn-udp", "dep:tokio"] [target."cfg(windows)".dependencies.winapi] version = "0.3" features = ["timeapi"] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 26ec5ce067..588d084741 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -31,3 +31,7 @@ test-fixture = { path = "../test-fixture" } [features] gecko = ["mozbuild"] fuzzing = [] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index adb137ea15..32e3ae7e35 100644 --- a/neqo-http3/Cargo.toml +++ 
b/neqo-http3/Cargo.toml @@ -29,3 +29,7 @@ test-fixture = { path = "../test-fixture" } [features] fuzzing = ["neqo-transport/fuzzing", "neqo-crypto/fuzzing"] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 0138746cfa..c3e2ab8a66 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -22,3 +22,7 @@ static_assertions = { version = "1.1", default-features = false } [dev-dependencies] test-fixture = { path = "../test-fixture" } + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 21cea4a49f..3da60bdabb 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -29,6 +29,10 @@ test-fixture = { path = "../test-fixture" } bench = [] fuzzing = ["neqo-crypto/fuzzing"] +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false + [[bench]] name = "transfer" harness = false diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index cc25b7d1bd..9de2a24cce 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -23,3 +23,7 @@ qlog = { version = "0.12", default-features = false } [features] bench = [] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false From 58890383147c082693cf3d6378e559cd2aadd9eb Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 11:05:26 +0200 Subject: [PATCH 228/321] ci: Try and fix calling the `bench` workflow (#1734) * ci: Try and fix calling the `bench` workflow * Permissions * 
Fix condition * Fix --- .github/actions/pr-comment-data-export/action.yml | 8 ++------ .github/workflows/bench-comment.yml | 8 ++------ .github/workflows/bench.yml | 7 +------ .github/workflows/check.yml | 5 +++++ 4 files changed, 10 insertions(+), 18 deletions(-) diff --git a/.github/actions/pr-comment-data-export/action.yml b/.github/actions/pr-comment-data-export/action.yml index dee56f0035..8a8cc50232 100644 --- a/.github/actions/pr-comment-data-export/action.yml +++ b/.github/actions/pr-comment-data-export/action.yml @@ -20,9 +20,7 @@ inputs: runs: using: composite steps: - - if: > - github.event.workflow_run.event == 'pull_request' || - github.event.workflow_run.event == 'workflow_run' + - if: github.event_name == 'pull_request' shell: bash run: | mkdir comment-data @@ -31,9 +29,7 @@ runs: echo "${{ inputs.log-url }}" > comment-data/log-url echo "${{ github.event.number }}" > comment-data/pr-number - - if: > - github.event.workflow_run.event == 'pull_request' || - github.event.workflow_run.event == 'workflow_run' + - if: github.event_name == 'pull_request' uses: actions/upload-artifact@v4 with: name: ${{ inputs.name }} diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index 10f8eda131..7bf7f4bba6 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -8,7 +8,7 @@ name: Benchmark Comment on: workflow_run: - workflows: ["Bench"] + workflows: ["CI"] types: - completed @@ -17,12 +17,8 @@ jobs: permissions: pull-requests: write runs-on: ubuntu-latest - if: > - github.event.workflow_run.event == 'pull_request' || - github.event.workflow_run.event == 'workflow_run' + if: github.event.workflow_run.event == 'pull_request' steps: - - uses: actions/checkout@v4 - - run: echo ${{ github.event.workflow_run.event }} - uses: ./.github/actions/pr-comment with: name: bench diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index e950671f58..09941e53f5 100644 --- 
a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,9 +1,6 @@ name: Bench on: - workflow_run: - workflows: ["CI"] - types: - - completed + workflow_call: workflow_dispatch: env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true @@ -18,8 +15,6 @@ jobs: bench: name: Benchmark runs-on: self-hosted - permissions: - pull-requests: write defaults: run: shell: bash diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 48b0b15cc2..10085ffda6 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -162,3 +162,8 @@ jobs: fail_ci_if_error: false token: ${{ secrets.CODECOV_TOKEN }} if: matrix.type == 'debug' && matrix.rust-toolchain == 'stable' + + bench: + name: "Benchmark" + needs: [check] + uses: ./.github/workflows/bench.yml From b0d816a83223f44264f50281b930125414d9830b Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 11:10:03 +0200 Subject: [PATCH 229/321] Checkout --- .github/workflows/bench-comment.yml | 1 + .github/workflows/qns-comment.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index 7bf7f4bba6..19c9488235 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -19,6 +19,7 @@ jobs: runs-on: ubuntu-latest if: github.event.workflow_run.event == 'pull_request' steps: + - uses: actions/checkout@v4 - uses: ./.github/actions/pr-comment with: name: bench diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index 57a9a151cd..28c3e48d1a 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -21,6 +21,7 @@ jobs: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'failure' steps: + - uses: actions/checkout@v4 - uses: ./.github/actions/pr-comment with: name: qns From e6e4f0f3df7e58d3e43819b81fc5a90e81744198 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 
11:14:43 +0200 Subject: [PATCH 230/321] Debug --- .github/workflows/qns-comment.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index 28c3e48d1a..96231abd86 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -17,11 +17,12 @@ jobs: permissions: pull-requests: write runs-on: ubuntu-latest - if: > - github.event.workflow_run.event == 'pull_request' && - github.event.workflow_run.conclusion == 'failure' + # if: > + # github.event.workflow_run.event == 'pull_request' && + # github.event.workflow_run.conclusion == 'failure' + if: github.event.workflow_run.event == 'pull_request' steps: - - uses: actions/checkout@v4 +# - uses: actions/checkout@v4 - uses: ./.github/actions/pr-comment with: name: qns From f9253bd5a340838681b55a4a74d280cff6734caa Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 12 Mar 2024 12:58:47 +0100 Subject: [PATCH 231/321] fix(http3): typos (#1735) --- neqo-http3/src/connection_client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 6317751f37..52572a760d 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -935,12 +935,12 @@ impl Http3Client { /// returned. After that, the application should call the function again if a new UDP packet is /// received and processed or the timer value expires. /// - /// The HTTP/3 neqo implementation drives the HTTP/3 and QUC layers, therefore this function + /// The HTTP/3 neqo implementation drives the HTTP/3 and QUIC layers, therefore this function /// will call both layers: /// - First it calls HTTP/3 layer processing (`process_http3`) to make sure the layer writes /// data to QUIC layer or cancels streams if needed. /// - Then QUIC layer processing is called - [`Connection::process_output`][3]. 
This produces a - /// packet or a timer value. It may also produce ned [`ConnectionEvent`][2]s, e.g. connection + /// packet or a timer value. It may also produce new [`ConnectionEvent`][2]s, e.g. connection /// state-change event. /// - Therefore the HTTP/3 layer processing (`process_http3`) is called again. /// From 7426ccf7b894b15183a3b7dab6e4622a21fa87bb Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 14:21:19 +0200 Subject: [PATCH 232/321] Checkout --- .github/workflows/qns-comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index 96231abd86..bc50ac471b 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -22,7 +22,7 @@ jobs: # github.event.workflow_run.conclusion == 'failure' if: github.event.workflow_run.event == 'pull_request' steps: -# - uses: actions/checkout@v4 + - uses: actions/checkout@v4 - uses: ./.github/actions/pr-comment with: name: qns From 8248c313d8169ca5b99c34d10d8a51f5830ef7d8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 14:34:20 +0200 Subject: [PATCH 233/321] CI -> Bench --- .github/workflows/bench-comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index 19c9488235..50e579302f 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -8,7 +8,7 @@ name: Benchmark Comment on: workflow_run: - workflows: ["CI"] + workflows: ["Bench"] types: - completed From 97c33d442c344e03805ff9304e6d928318ee0866 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 16:24:34 +0200 Subject: [PATCH 234/321] Add `github-token` --- .github/actions/pr-comment/action.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/actions/pr-comment/action.yml b/.github/actions/pr-comment/action.yml index ff46d40310..9ff0509b9e 100644 --- 
a/.github/actions/pr-comment/action.yml +++ b/.github/actions/pr-comment/action.yml @@ -13,6 +13,7 @@ runs: with: run-id: ${{ github.event.workflow_run.id }} name: ${{ inputs.name }} + github-token: ${{ secrets.GITHUB_TOKEN }} - id: pr-number shell: bash From 5c7289070e60ff1b2138ffeafbf2866e389ce391 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 16:48:15 +0200 Subject: [PATCH 235/321] Pass PAT into composite action --- .github/actions/pr-comment/action.yml | 5 ++++- .github/workflows/bench-comment.yml | 1 + .github/workflows/qns-comment.yml | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/actions/pr-comment/action.yml b/.github/actions/pr-comment/action.yml index 9ff0509b9e..b7f9bb12da 100644 --- a/.github/actions/pr-comment/action.yml +++ b/.github/actions/pr-comment/action.yml @@ -5,6 +5,9 @@ inputs: name: description: 'Artifact name to import comment data from.' required: true + token: + description: 'A Github PAT' + required: true runs: using: composite @@ -13,7 +16,7 @@ runs: with: run-id: ${{ github.event.workflow_run.id }} name: ${{ inputs.name }} - github-token: ${{ secrets.GITHUB_TOKEN }} + github-token: ${{ inputs.token }} - id: pr-number shell: bash diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index 50e579302f..f953465713 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -23,3 +23,4 @@ jobs: - uses: ./.github/actions/pr-comment with: name: bench + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index bc50ac471b..f1b29184aa 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -26,3 +26,4 @@ jobs: - uses: ./.github/actions/pr-comment with: name: qns + token: ${{ secrets.GITHUB_TOKEN }} From 17c4175bc40005aff2549d431307bddff3fc2487 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 21:53:19 +0200 Subject: 
[PATCH 236/321] Trigger on `CI` again --- .github/workflows/bench-comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index f953465713..ecda3eb539 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -8,7 +8,7 @@ name: Benchmark Comment on: workflow_run: - workflows: ["Bench"] + workflows: ["CI"] types: - completed From 2ff9742e98bb71786eb2cc7f9dad6d95cf83894e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 12 Mar 2024 22:37:28 +0200 Subject: [PATCH 237/321] Better Markdown output --- .github/workflows/bench.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 09941e53f5..7c7f812341 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -138,13 +138,14 @@ jobs: - name: Format results as Markdown id: results run: | + echo '### Benchmark results' > results.md grep -Ev 'ignored|running \d+ tests|%\)' results.txt |\ sed -E -e 's/(Performance has regressed.)/:broken_heart: **\1**/gi' \ -e 's/(Performance has improved.)/:green_heart: **\1**/gi' \ - -e 's/^ +/ * /gi' \ - -e 's/^([a-z0-9].*)$/**\1**/gi' \ - -e 's/(change:[^%]*%)([^%]*%)(.*)/\1**\2**\3/gi' \ - > results.md + -e 's/^ +/ /gi' \ + -e 's/^([a-z0-9].*)$/* **\1**/gi' \ + -e 's/(change:[^%]*% )([^%]*%)(.*)/\1**\2**\3/gi' \ + >> results.md echo '' >> results.md - name: Export PR comment data From 66d83654b948db07f7a410455a4cb4c7f4241650 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 13 Mar 2024 00:59:53 +0200 Subject: [PATCH 238/321] ci: Add `concurrency` to top-level workflows (#1737) So that a failure stops all CI for a PR. 
--- .github/workflows/actionlint.yml | 4 ++++ .github/workflows/qns.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index 1ddcfb4f09..c0e6de01c0 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -8,6 +8,10 @@ on: paths: [".github/**"] merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + jobs: actionlint: runs-on: ubuntu-latest diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 2b1deb4be8..caadb022df 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -8,6 +8,10 @@ on: branches: ["main"] merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + jobs: quic-network-simulator: runs-on: ubuntu-latest From bc262a53a585c7e5856d7626f95af15692f8aa77 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 13 Mar 2024 01:22:29 +0200 Subject: [PATCH 239/321] feat: Add ability to disable log with feature (#1699) * ci: Disable Rust log for release builds via feature Alternative to #1688 * Skip more things when log is off * Don't set `features = ["release_max_level_off"]`, but leave rest in place --------- Signed-off-by: Lars Eggert --- neqo-common/src/log.rs | 7 ++++++- neqo-transport/src/connection/dump.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index 1b181de56b..c5b89be8a6 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -18,7 +18,7 @@ use env_logger::Builder; macro_rules! 
do_log { (target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ let lvl = $lvl; - if lvl <= ::log::max_level() { + if lvl <= ::log::STATIC_MAX_LEVEL && lvl <= ::log::max_level() { ::log::logger().log( &::log::Record::builder() .args(format_args!($($arg)+)) @@ -52,6 +52,11 @@ fn since_start() -> Duration { pub fn init() { static INIT_ONCE: Once = Once::new(); + + if ::log::STATIC_MAX_LEVEL == ::log::LevelFilter::Off { + return; + } + INIT_ONCE.call_once(|| { let mut builder = Builder::from_env("RUST_LOG"); builder.format(|buf, record| { diff --git a/neqo-transport/src/connection/dump.rs b/neqo-transport/src/connection/dump.rs index 8811e4f05f..8a4f34dbb8 100644 --- a/neqo-transport/src/connection/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -27,7 +27,7 @@ pub fn dump_packet( pn: PacketNumber, payload: &[u8], ) { - if !log::log_enabled!(log::Level::Debug) { + if log::STATIC_MAX_LEVEL == log::LevelFilter::Off || !log::log_enabled!(log::Level::Debug) { return; } From 3325cdbece032822e8fd2c5229da06041aa4f736 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 13 Mar 2024 11:20:11 +0200 Subject: [PATCH 240/321] Only comment after successful CI runs --- .github/workflows/bench-comment.yml | 4 +++- .github/workflows/qns-comment.yml | 7 +++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml index ecda3eb539..f89d223059 100644 --- a/.github/workflows/bench-comment.yml +++ b/.github/workflows/bench-comment.yml @@ -17,7 +17,9 @@ jobs: permissions: pull-requests: write runs-on: ubuntu-latest - if: github.event.workflow_run.event == 'pull_request' + if: | + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' steps: - uses: actions/checkout@v4 - uses: ./.github/actions/pr-comment diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index f1b29184aa..71cbcc805b 100644 --- 
a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -17,10 +17,9 @@ jobs: permissions: pull-requests: write runs-on: ubuntu-latest - # if: > - # github.event.workflow_run.event == 'pull_request' && - # github.event.workflow_run.conclusion == 'failure' - if: github.event.workflow_run.event == 'pull_request' + if: | + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'failure' steps: - uses: actions/checkout@v4 - uses: ./.github/actions/pr-comment From 2ee8c7acc31a9a33a8426213dd14af9f8f24ecbb Mon Sep 17 00:00:00 2001 From: Kershaw Date: Wed, 13 Mar 2024 11:26:57 +0100 Subject: [PATCH 241/321] Return an error if the path is not permanent (#1698) * Return an error if the path is not permanent * address comments * address comments and make sure the test case really triggers the crash * clippy * clippy again --- neqo-transport/src/connection/mod.rs | 8 ++++ .../src/connection/tests/migration.rs | 37 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 671361b559..c81a3727c6 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1937,6 +1937,14 @@ impl Connection { }; let path = close.path().borrow(); + // In some error cases, we will not be able to make a new, permanent path. + // For example, if we run out of connection IDs and the error results from + // a packet on a new path, we avoid sending (and the privacy risk) rather + // than reuse a connection ID. 
+ if path.is_temporary() { + assert!(!cfg!(test), "attempting to close with a temporary path"); + return Err(Error::InternalError); + } let (_, mut builder) = Self::build_packet_header( &path, cspace, diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 09a25faa28..405ae161a4 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -6,6 +6,7 @@ use std::{ cell::RefCell, + mem, net::{IpAddr, Ipv6Addr, SocketAddr}, rc::Rc, time::{Duration, Instant}, @@ -950,3 +951,39 @@ fn retire_prior_to_migration_success() { assert_ne!(get_cid(&dgram), original_cid); assert_ne!(get_cid(&dgram), probe_cid); } + +struct GarbageWriter {} + +impl crate::connection::test_internal::FrameWriter for GarbageWriter { + fn write_frames(&mut self, builder: &mut PacketBuilder) { + // Not a valid frame type. + builder.encode_varint(u32::MAX); + } +} + +/// Test the case that we run out of connection ID and receive an invalid frame +/// from a new path. +#[test] +#[should_panic(expected = "attempting to close with a temporary path")] +fn error_on_new_path_with_no_connection_id() { + let mut client = default_client(); + let mut server = default_server(); + connect_force_idle(&mut client, &mut server); + + let cid_gen: Rc> = + Rc::new(RefCell::new(CountingConnectionIdGenerator::default())); + server.test_frame_writer = Some(Box::new(RetireAll { cid_gen })); + let retire_all = send_something(&mut server, now()); + + client.process_input(&retire_all, now()); + + server.test_frame_writer = Some(Box::new(GarbageWriter {})); + let garbage = send_something(&mut server, now()); + + let dgram = change_path(&garbage, DEFAULT_ADDR_V4); + client.process_input(&dgram, now()); + + // See issue #1697. We had a crash when the client had a temporary path and + // process_output is called. 
+ mem::drop(client.process_output(now())); +} From ce5cbe4dfc2e38b238abb022c39eee4215058221 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Wed, 13 Mar 2024 11:40:54 +0100 Subject: [PATCH 242/321] neqo version 0.7.2 (#1740) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index dec317e396..35916da5b1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ resolver = "2" homepage = "https://github.com/mozilla/neqo/" repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] -version = "0.7.1" +version = "0.7.2" # Keep in sync with `.rustfmt.toml` `edition`. edition = "2021" license = "MIT OR Apache-2.0" From 203987a9fd02d740ee9c4141150c5d73d018a148 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 14 Mar 2024 01:45:38 +0100 Subject: [PATCH 243/321] refactor(client): de-duplicate process & run and split into h3 & h09 (#1727) * refactor(bin): move client.rs to client/main.rs As a preparation to introducing sub-modules to `client`. Done in a separate commit for git to recognize the move as a move, not as a remove and add. * refactor(client): de-duplicate process & run and split into h3 & h09 The Neqo Client binary supports both http3 and http09 (prev. "old"). Before this commit both the http3 and the http09 implementation had their own `run` and `process` `fn`, orchestrating the interaction between handler, client and I/O. While similar, they had subtle differences e.g. when to terminate. This commit splits the http3 and http09 specific logic into two separate modules, but extracts duplicate logic (e.g. `run` and `process`) into the shared root module. 
--------- Co-authored-by: Lars Eggert --- neqo-bin/Cargo.toml | 2 +- neqo-bin/src/bin/client.rs | 1277 ----------------------------- neqo-bin/src/bin/client/http09.rs | 274 +++++++ neqo-bin/src/bin/client/http3.rs | 456 ++++++++++ neqo-bin/src/bin/client/main.rs | 541 ++++++++++++ 5 files changed, 1272 insertions(+), 1278 deletions(-) delete mode 100644 neqo-bin/src/bin/client.rs create mode 100644 neqo-bin/src/bin/client/http09.rs create mode 100644 neqo-bin/src/bin/client/http3.rs create mode 100644 neqo-bin/src/bin/client/main.rs diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index ae8801eb3a..2beafa7e42 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true [[bin]] name = "neqo-client" -path = "src/bin/client.rs" +path = "src/bin/client/main.rs" bench = false [[bin]] diff --git a/neqo-bin/src/bin/client.rs b/neqo-bin/src/bin/client.rs deleted file mode 100644 index 2f9be1f3d7..0000000000 --- a/neqo-bin/src/bin/client.rs +++ /dev/null @@ -1,1277 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::{ - cell::RefCell, - collections::{HashMap, VecDeque}, - fmt::{self, Display}, - fs::{create_dir_all, File, OpenOptions}, - io::{self, Write}, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}, - path::PathBuf, - pin::Pin, - process::exit, - rc::Rc, - time::Instant, -}; - -use clap::Parser; -use futures::{ - future::{select, Either}, - FutureExt, TryFutureExt, -}; -use neqo_common::{ - self as common, event::Provider, hex, qdebug, qinfo, qlog::NeqoQlog, udp, Datagram, Role, -}; -use neqo_crypto::{ - constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, - init, AuthenticationStatus, Cipher, ResumptionToken, -}; -use neqo_http3::{ - Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, Priority, -}; -use neqo_transport::{ - Connection, ConnectionId, EmptyConnectionIdGenerator, Error as TransportError, StreamId, - Version, -}; -use qlog::{events::EventImportance, streamer::QlogStreamer}; -use tokio::time::Sleep; -use url::{Origin, Url}; - -#[derive(Debug)] -pub enum ClientError { - ArgumentError(&'static str), - Http3Error(neqo_http3::Error), - IoError(io::Error), - QlogError, - TransportError(neqo_transport::Error), -} - -impl From for ClientError { - fn from(err: io::Error) -> Self { - Self::IoError(err) - } -} - -impl From for ClientError { - fn from(err: neqo_http3::Error) -> Self { - Self::Http3Error(err) - } -} - -impl From for ClientError { - fn from(_err: qlog::Error) -> Self { - Self::QlogError - } -} - -impl From for ClientError { - fn from(err: neqo_transport::Error) -> Self { - Self::TransportError(err) - } -} - -impl Display for ClientError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Error: {self:?}")?; - Ok(()) - } -} - -impl std::error::Error for ClientError {} - -type Res = Result; - -/// Track whether a key update is needed. 
-#[derive(Debug, PartialEq, Eq)] -struct KeyUpdateState(bool); - -impl KeyUpdateState { - pub fn maybe_update(&mut self, update_fn: F) -> Res<()> - where - F: FnOnce() -> Result<(), E>, - E: Into, - { - if self.0 { - if let Err(e) = update_fn() { - let e = e.into(); - match e { - ClientError::TransportError(TransportError::KeyUpdateBlocked) - | ClientError::Http3Error(Error::TransportError( - TransportError::KeyUpdateBlocked, - )) => (), - _ => return Err(e), - } - } else { - println!("Keys updated"); - self.0 = false; - } - } - Ok(()) - } - - fn needed(&self) -> bool { - self.0 - } -} - -#[derive(Debug, Parser)] -#[command(author, version, about, long_about = None)] -#[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. -pub struct Args { - #[command(flatten)] - shared: neqo_bin::SharedArgs, - - urls: Vec, - - #[arg(short = 'm', default_value = "GET")] - method: String, - - #[arg(short = 'H', long, number_of_values = 2)] - header: Vec, - - #[arg(name = "max-push", short = 'p', long, default_value = "10")] - max_concurrent_push_streams: u64, - - #[arg(name = "download-in-series", long)] - /// Download resources in series using separate connections. - download_in_series: bool, - - #[arg(name = "concurrency", long, default_value = "100")] - /// The maximum number of requests to have outstanding at one time. - concurrency: usize, - - #[arg(name = "output-read-data", long)] - /// Output received data to stdout - output_read_data: bool, - - #[arg(name = "output-dir", long)] - /// Save contents of fetched URLs to a directory - output_dir: Option, - - #[arg(short = 'r', long)] - /// Client attempts to resume by making multiple connections to servers. - /// Requires that 2 or more URLs are listed for each server. - /// Use this for 0-RTT: the stack always attempts 0-RTT on resumption. - resume: bool, - - #[arg(name = "key-update", long)] - /// Attempt to initiate a key update immediately after confirming the connection. 
- key_update: bool, - - #[arg(name = "ech", long, value_parser = |s: &str| hex::decode(s))] - /// Enable encrypted client hello (ECH). - /// This takes an encoded ECH configuration in hexadecimal format. - ech: Option>, - - #[arg(name = "ipv4-only", short = '4', long)] - /// Connect only over IPv4 - ipv4_only: bool, - - #[arg(name = "ipv6-only", short = '6', long)] - /// Connect only over IPv6 - ipv6_only: bool, - - /// The test that this client will run. Currently, we only support "upload". - #[arg(name = "test", long)] - test: Option, - - /// The request size that will be used for upload test. - #[arg(name = "upload-size", long, default_value = "100")] - upload_size: usize, -} - -impl Args { - fn get_ciphers(&self) -> Vec { - self.shared - .ciphers - .iter() - .filter_map(|c| match c.as_str() { - "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), - "TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384), - "TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256), - _ => None, - }) - .collect::>() - } - - fn update_for_tests(&mut self) { - let Some(testcase) = self.shared.qns_test.as_ref() else { - return; - }; - - // Only use v1 for most QNS tests. - self.shared.quic_parameters.quic_version = vec![Version::Version1]; - match testcase.as_str() { - // TODO: Add "ecn" when that is ready. 
- "http3" => {} - "handshake" | "transfer" | "retry" => { - self.shared.use_old_http = true; - } - "zerortt" | "resumption" => { - if self.urls.len() < 2 { - eprintln!("Warning: resumption tests won't work without >1 URL"); - exit(127); - } - self.shared.use_old_http = true; - self.resume = true; - } - "multiconnect" => { - self.shared.use_old_http = true; - self.download_in_series = true; - } - "chacha20" => { - self.shared.use_old_http = true; - self.shared.ciphers.clear(); - self.shared - .ciphers - .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); - } - "keyupdate" => { - self.shared.use_old_http = true; - self.key_update = true; - } - "v2" => { - self.shared.use_old_http = true; - // Use default version set for this test (which allows compatible vneg.) - self.shared.quic_parameters.quic_version.clear(); - } - _ => exit(127), - } - } -} - -fn get_output_file( - url: &Url, - output_dir: &Option, - all_paths: &mut Vec, -) -> Option { - if let Some(ref dir) = output_dir { - let mut out_path = dir.clone(); - - let url_path = if url.path() == "/" { - // If no path is given... call it "root"? - "root" - } else { - // Omit leading slash - &url.path()[1..] - }; - out_path.push(url_path); - - if all_paths.contains(&out_path) { - eprintln!("duplicate path {}", out_path.display()); - return None; - } - - eprintln!("Saving {url} to {out_path:?}"); - - if let Some(parent) = out_path.parent() { - create_dir_all(parent).ok()?; - } - - let f = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&out_path) - .ok()?; - - all_paths.push(out_path); - Some(f) - } else { - None - } -} - -enum Ready { - Socket, - Timeout, -} - -// Wait for the socket to be readable or the timeout to fire. 
-async fn ready( - socket: &udp::Socket, - mut timeout: Option<&mut Pin>>, -) -> Result { - let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket); - let timeout_ready = timeout - .as_mut() - .map_or(Either::Right(futures::future::pending()), Either::Left) - .map(|()| Ok(Ready::Timeout)); - select(socket_ready, timeout_ready).await.factor_first().0 -} - -trait StreamHandler { - fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
); - fn process_data_readable( - &mut self, - stream_id: StreamId, - fin: bool, - data: Vec, - sz: usize, - output_read_data: bool, - ) -> Res; - fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId); -} - -enum StreamHandlerType { - Download, - Upload, -} - -impl StreamHandlerType { - fn make_handler( - handler_type: &Self, - url: &Url, - args: &Args, - all_paths: &mut Vec, - client: &mut Http3Client, - client_stream_id: StreamId, - ) -> Box { - match handler_type { - Self::Download => { - let out_file = get_output_file(url, &args.output_dir, all_paths); - client.stream_close_send(client_stream_id).unwrap(); - Box::new(DownloadStreamHandler { out_file }) - } - Self::Upload => Box::new(UploadStreamHandler { - data: vec![42; args.upload_size], - offset: 0, - chunk_size: 32768, - start: Instant::now(), - }), - } - } -} - -struct DownloadStreamHandler { - out_file: Option, -} - -impl StreamHandler for DownloadStreamHandler { - fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { - if self.out_file.is_none() { - println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); - } - } - - fn process_data_readable( - &mut self, - stream_id: StreamId, - fin: bool, - data: Vec, - sz: usize, - output_read_data: bool, - ) -> Res { - if let Some(out_file) = &mut self.out_file { - if sz > 0 { - out_file.write_all(&data[..sz])?; - } - return Ok(true); - } else if !output_read_data { - println!("READ[{stream_id}]: {sz} bytes"); - } else if let Ok(txt) = String::from_utf8(data.clone()) { - println!("READ[{stream_id}]: {txt}"); - } else { - println!("READ[{}]: 0x{}", stream_id, hex(&data)); - } - - if fin && self.out_file.is_none() { - println!(""); - } - - Ok(true) - } - - fn process_data_writable(&mut self, _client: &mut Http3Client, _stream_id: StreamId) {} -} - -struct UploadStreamHandler { - data: Vec, - offset: usize, - chunk_size: usize, - start: Instant, -} - -impl StreamHandler for UploadStreamHandler { - fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { - println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); - } - - fn process_data_readable( - &mut self, - stream_id: StreamId, - _fin: bool, - data: Vec, - _sz: usize, - _output_read_data: bool, - ) -> Res { - if let Ok(txt) = String::from_utf8(data.clone()) { - let trimmed_txt = txt.trim_end_matches(char::from(0)); - let parsed: usize = trimmed_txt.parse().unwrap(); - if parsed == self.data.len() { - let upload_time = Instant::now().duration_since(self.start); - println!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); - } - } else { - panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data)); - } - Ok(true) - } - - fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId) { - while self.offset < self.data.len() { - let end = self.offset + self.chunk_size.min(self.data.len() - self.offset); - let chunk = &self.data[self.offset..end]; - match client.send_data(stream_id, chunk) { - Ok(amount) => { - if amount == 0 { - break; - } - self.offset += amount; - if self.offset == self.data.len() { - client.stream_close_send(stream_id).unwrap(); - } - } - Err(_) => break, - }; - } - } -} - -struct URLHandler<'a> { - url_queue: VecDeque, - stream_handlers: HashMap>, - all_paths: Vec, - handler_type: StreamHandlerType, - args: &'a Args, -} - -impl<'a> URLHandler<'a> { - fn stream_handler(&mut self, stream_id: StreamId) -> Option<&mut Box> { - self.stream_handlers.get_mut(&stream_id) - } - - fn process_urls(&mut self, client: &mut Http3Client) { - loop { - if self.url_queue.is_empty() { - break; - } - if self.stream_handlers.len() >= self.args.concurrency { - break; - } - if !self.next_url(client) { - break; - } - } - } - - fn next_url(&mut self, client: &mut Http3Client) -> bool { - let url = self - .url_queue - .pop_front() - .expect("download_next called with empty queue"); - match client.fetch( - Instant::now(), - &self.args.method, - &url, - &to_headers(&self.args.header), - Priority::default(), - ) { - Ok(client_stream_id) 
=> { - println!("Successfully created stream id {client_stream_id} for {url}"); - - let handler: Box = StreamHandlerType::make_handler( - &self.handler_type, - &url, - self.args, - &mut self.all_paths, - client, - client_stream_id, - ); - self.stream_handlers.insert(client_stream_id, handler); - true - } - Err( - Error::TransportError(TransportError::StreamLimitError) - | Error::StreamLimitError - | Error::Unavailable, - ) => { - self.url_queue.push_front(url); - false - } - Err(e) => { - panic!("Can't create stream {e}"); - } - } - } - - fn done(&mut self) -> bool { - self.stream_handlers.is_empty() && self.url_queue.is_empty() - } - - fn on_stream_fin(&mut self, client: &mut Http3Client, stream_id: StreamId) -> bool { - self.stream_handlers.remove(&stream_id); - self.process_urls(client); - if self.done() { - client.close(Instant::now(), 0, "kthxbye!"); - return false; - } - true - } -} - -struct Handler<'a> { - #[allow( - unknown_lints, - clippy::struct_field_names, - clippy::redundant_field_names - )] - url_handler: URLHandler<'a>, - key_update: KeyUpdateState, - token: Option, - output_read_data: bool, -} - -impl<'a> Handler<'a> { - pub fn new( - url_handler: URLHandler<'a>, - key_update: KeyUpdateState, - output_read_data: bool, - ) -> Self { - Self { - url_handler, - key_update, - token: None, - output_read_data, - } - } - - fn maybe_key_update(&mut self, c: &mut Http3Client) -> Res<()> { - self.key_update.maybe_update(|| c.initiate_key_update())?; - self.url_handler.process_urls(c); - Ok(()) - } - - fn handle(&mut self, client: &mut Http3Client) -> Res { - while let Some(event) = client.next_event() { - match event { - Http3ClientEvent::AuthenticationNeeded => { - client.authenticated(AuthenticationStatus::Ok, Instant::now()); - } - Http3ClientEvent::HeaderReady { - stream_id, - headers, - fin, - .. 
- } => { - if let Some(handler) = self.url_handler.stream_handler(stream_id) { - handler.process_header_ready(stream_id, fin, headers); - } else { - println!("Data on unexpected stream: {stream_id}"); - return Ok(false); - } - if fin { - return Ok(self.url_handler.on_stream_fin(client, stream_id)); - } - } - Http3ClientEvent::DataReadable { stream_id } => { - let mut stream_done = false; - match self.url_handler.stream_handler(stream_id) { - None => { - println!("Data on unexpected stream: {stream_id}"); - return Ok(false); - } - Some(handler) => loop { - let mut data = vec![0; 4096]; - let (sz, fin) = client - .read_data(Instant::now(), stream_id, &mut data) - .expect("Read should succeed"); - - handler.process_data_readable( - stream_id, - fin, - data, - sz, - self.output_read_data, - )?; - - if fin { - stream_done = true; - break; - } - - if sz == 0 { - break; - } - }, - } - - if stream_done { - return Ok(self.url_handler.on_stream_fin(client, stream_id)); - } - } - Http3ClientEvent::DataWritable { stream_id } => { - match self.url_handler.stream_handler(stream_id) { - None => { - println!("Data on unexpected stream: {stream_id}"); - return Ok(false); - } - Some(handler) => { - handler.process_data_writable(client, stream_id); - return Ok(true); - } - } - } - Http3ClientEvent::StateChange(Http3State::Connected) - | Http3ClientEvent::RequestsCreatable => { - self.url_handler.process_urls(client); - } - Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), - _ => { - println!("Unhandled event {event:?}"); - } - } - } - - Ok(true) - } -} - -fn to_headers(values: &[impl AsRef]) -> Vec
{ - values - .iter() - .scan(None, |state, value| { - if let Some(name) = state.take() { - *state = None; - Some(Header::new(name, value.as_ref())) - } else { - *state = Some(value.as_ref().to_string()); - None - } - }) - .collect() -} - -struct ClientRunner<'a> { - local_addr: SocketAddr, - socket: &'a mut udp::Socket, - client: Http3Client, - handler: Handler<'a>, - timeout: Option>>, - args: &'a Args, -} - -impl<'a> ClientRunner<'a> { - fn new( - args: &'a mut Args, - socket: &'a mut udp::Socket, - local_addr: SocketAddr, - remote_addr: SocketAddr, - hostname: &str, - url_queue: VecDeque, - resumption_token: Option, - ) -> ClientRunner<'a> { - if let Some(testcase) = &args.test { - if testcase.as_str() != "upload" { - eprintln!("Unsupported test case: {testcase}"); - exit(127) - } - } - - let client = create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) - .expect("failed to create client"); - if args.test.is_some() { - args.method = String::from("POST"); - } - let key_update = KeyUpdateState(args.key_update); - let url_handler = URLHandler { - url_queue, - stream_handlers: HashMap::new(), - all_paths: Vec::new(), - handler_type: if args.test.is_some() { - StreamHandlerType::Upload - } else { - StreamHandlerType::Download - }, - args, - }; - let handler = Handler::new(url_handler, key_update, args.output_read_data); - - Self { - local_addr, - socket, - client, - handler, - timeout: None, - args, - } - } - - async fn run(mut self) -> Res> { - loop { - if !self.handler.handle(&mut self.client)? { - break; - } - - self.process(None).await?; - - match ready(self.socket, self.timeout.as_mut()).await? { - Ready::Socket => loop { - let dgrams = self.socket.recv(&self.local_addr)?; - if dgrams.is_empty() { - break; - } - for dgram in &dgrams { - self.process(Some(dgram)).await?; - } - self.handler.maybe_key_update(&mut self.client)?; - }, - Ready::Timeout => { - self.timeout = None; - } - } - - if let Http3State::Closed(..) 
= self.client.state() { - break; - } - } - - let token = if self.args.test.is_none() && self.args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. - self.handler - .token - .take() - .or_else(|| self.client.take_resumption_token(Instant::now())) - } else { - None - }; - Ok(token) - } - - async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { - loop { - match self.client.process(dgram.take(), Instant::now()) { - Output::Datagram(dgram) => { - self.socket.writable().await?; - self.socket.send(dgram)?; - } - Output::Callback(new_timeout) => { - qinfo!("Setting timeout of {:?}", new_timeout); - self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); - break; - } - Output::None => { - qdebug!("Output::None"); - break; - } - } - } - - Ok(()) - } -} - -fn create_http3_client( - args: &mut Args, - local_addr: SocketAddr, - remote_addr: SocketAddr, - hostname: &str, - resumption_token: Option, -) -> Res { - let mut transport = Connection::new_client( - hostname, - &[&args.shared.alpn], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - local_addr, - remote_addr, - args.shared.quic_parameters.get(args.shared.alpn.as_str()), - Instant::now(), - )?; - let ciphers = args.get_ciphers(); - if !ciphers.is_empty() { - transport.set_ciphers(&ciphers)?; - } - let mut client = Http3Client::new_with_conn( - transport, - Http3Parameters::default() - .max_table_size_encoder(args.shared.max_table_size_encoder) - .max_table_size_decoder(args.shared.max_table_size_decoder) - .max_blocked_streams(args.shared.max_blocked_streams) - .max_concurrent_push_streams(args.max_concurrent_push_streams), - ); - - let qlog = qlog_new(args, hostname, client.connection_id())?; - client.set_qlog(qlog); - if let Some(ech) = &args.ech { - client.enable_ech(ech).expect("enable ECH"); - } - if let Some(token) = 
resumption_token { - client - .enable_resumption(Instant::now(), token) - .expect("enable resumption"); - } - - Ok(client) -} - -fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { - if let Some(qlog_dir) = &args.shared.qlog_dir { - let mut qlog_path = qlog_dir.clone(); - let filename = format!("{hostname}-{cid}.sqlog"); - qlog_path.push(filename); - - let f = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&qlog_path)?; - - let streamer = QlogStreamer::new( - qlog::QLOG_VERSION.to_string(), - Some("Example qlog".to_string()), - Some("Example qlog description".to_string()), - None, - std::time::Instant::now(), - common::qlog::new_trace(Role::Client), - EventImportance::Base, - Box::new(f), - ); - - Ok(NeqoQlog::enabled(streamer, qlog_path)?) - } else { - Ok(NeqoQlog::disabled()) - } -} - -#[tokio::main] -async fn main() -> Res<()> { - init(); - - let mut args = Args::parse(); - args.update_for_tests(); - - let urls_by_origin = args - .urls - .clone() - .into_iter() - .fold(HashMap::>::new(), |mut urls, url| { - urls.entry(url.origin()).or_default().push_back(url); - urls - }) - .into_iter() - .filter_map(|(origin, urls)| match origin { - Origin::Tuple(_scheme, h, p) => Some(((h, p), urls)), - Origin::Opaque(x) => { - eprintln!("Opaque origin {x:?}"); - None - } - }); - - for ((host, port), mut urls) in urls_by_origin { - if args.resume && urls.len() < 2 { - eprintln!("Resumption to {host} cannot work without at least 2 URLs."); - exit(127); - } - - let remote_addr = format!("{host}:{port}").to_socket_addrs()?.find(|addr| { - !matches!( - (addr, args.ipv4_only, args.ipv6_only), - (SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false) - ) - }); - let Some(remote_addr) = remote_addr else { - eprintln!("No compatible address found for: {host}"); - exit(1); - }; - - let local_addr = match remote_addr { - SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0), - SocketAddr::V6(..) 
=> SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), - }; - - let mut socket = udp::Socket::bind(local_addr)?; - let real_local = socket.local_addr().unwrap(); - println!( - "{} Client connecting: {:?} -> {:?}", - if args.shared.use_old_http { "H9" } else { "H3" }, - real_local, - remote_addr, - ); - - let hostname = format!("{host}"); - let mut token: Option = None; - let mut first = true; - while !urls.is_empty() { - let to_request = if (args.resume && first) || args.download_in_series { - urls.pop_front().into_iter().collect() - } else { - std::mem::take(&mut urls) - }; - - first = false; - - token = if args.shared.use_old_http { - old::ClientRunner::new( - &args, - &mut socket, - real_local, - remote_addr, - &hostname, - to_request, - token, - )? - .run() - .await? - } else { - ClientRunner::new( - &mut args, - &mut socket, - real_local, - remote_addr, - &hostname, - to_request, - token, - ) - .run() - .await? - }; - } - } - - Ok(()) -} - -mod old { - use std::{ - cell::RefCell, - collections::{HashMap, VecDeque}, - fs::File, - io::{self, Write}, - net::SocketAddr, - path::PathBuf, - pin::Pin, - rc::Rc, - time::Instant, - }; - - use neqo_common::{event::Provider, qdebug, qinfo, udp, Datagram}; - use neqo_crypto::{AuthenticationStatus, ResumptionToken}; - use neqo_transport::{ - Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, - StreamType, - }; - use tokio::time::Sleep; - use url::Url; - - use super::{get_output_file, qlog_new, ready, Args, KeyUpdateState, Ready, Res}; - - struct HandlerOld<'b> { - streams: HashMap>, - url_queue: VecDeque, - all_paths: Vec, - args: &'b Args, - token: Option, - key_update: KeyUpdateState, - } - - impl<'b> HandlerOld<'b> { - fn download_urls(&mut self, client: &mut Connection) { - loop { - if self.url_queue.is_empty() { - break; - } - if self.streams.len() >= self.args.concurrency { - break; - } - if !self.download_next(client) { - break; - } - } - } - - fn download_next(&mut self, 
client: &mut Connection) -> bool { - if self.key_update.needed() { - println!("Deferring requests until after first key update"); - return false; - } - let url = self - .url_queue - .pop_front() - .expect("download_next called with empty queue"); - match client.stream_create(StreamType::BiDi) { - Ok(client_stream_id) => { - println!("Created stream {client_stream_id} for {url}"); - let req = format!("GET {}\r\n", url.path()); - _ = client - .stream_send(client_stream_id, req.as_bytes()) - .unwrap(); - client.stream_close_send(client_stream_id).unwrap(); - let out_file = - get_output_file(&url, &self.args.output_dir, &mut self.all_paths); - self.streams.insert(client_stream_id, out_file); - true - } - Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => { - println!("Cannot create stream {e:?}"); - self.url_queue.push_front(url); - false - } - Err(e) => { - panic!("Error creating stream {e:?}"); - } - } - } - - /// Read and maybe print received data from a stream. - // Returns bool: was fin received? 
- fn read_from_stream( - client: &mut Connection, - stream_id: StreamId, - output_read_data: bool, - maybe_out_file: &mut Option, - ) -> Res { - let mut data = vec![0; 4096]; - loop { - let (sz, fin) = client.stream_recv(stream_id, &mut data)?; - if sz == 0 { - return Ok(fin); - } - - if let Some(out_file) = maybe_out_file { - out_file.write_all(&data[..sz])?; - } else if !output_read_data { - println!("READ[{stream_id}]: {sz} bytes"); - } else { - println!( - "READ[{}]: {}", - stream_id, - String::from_utf8(data.clone()).unwrap() - ); - } - if fin { - return Ok(true); - } - } - } - - fn maybe_key_update(&mut self, c: &mut Connection) -> Res<()> { - self.key_update.maybe_update(|| c.initiate_key_update())?; - self.download_urls(c); - Ok(()) - } - - fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> { - let mut maybe_maybe_out_file = self.streams.get_mut(&stream_id); - match &mut maybe_maybe_out_file { - None => { - println!("Data on unexpected stream: {stream_id}"); - return Ok(()); - } - Some(maybe_out_file) => { - let fin_recvd = Self::read_from_stream( - client, - stream_id, - self.args.output_read_data, - maybe_out_file, - )?; - - if fin_recvd { - if maybe_out_file.is_none() { - println!(""); - } - self.streams.remove(&stream_id); - self.download_urls(client); - } - } - } - Ok(()) - } - - /// Handle events on the connection. - /// - /// Returns `Ok(true)` when done, i.e. url queue is empty and streams are closed. 
- fn handle(&mut self, client: &mut Connection) -> Res { - while let Some(event) = client.next_event() { - match event { - ConnectionEvent::AuthenticationNeeded => { - client.authenticated(AuthenticationStatus::Ok, Instant::now()); - } - ConnectionEvent::RecvStreamReadable { stream_id } => { - self.read(client, stream_id)?; - } - ConnectionEvent::SendStreamWritable { stream_id } => { - println!("stream {stream_id} writable"); - } - ConnectionEvent::SendStreamComplete { stream_id } => { - println!("stream {stream_id} complete"); - } - ConnectionEvent::SendStreamCreatable { stream_type } => { - println!("stream {stream_type:?} creatable"); - if stream_type == StreamType::BiDi { - self.download_urls(client); - } - } - ConnectionEvent::StateChange( - State::WaitInitial | State::Handshaking | State::Connected, - ) => { - println!("{event:?}"); - self.download_urls(client); - } - ConnectionEvent::StateChange(State::Confirmed) => { - self.maybe_key_update(client)?; - } - ConnectionEvent::ResumptionToken(token) => { - self.token = Some(token); - } - _ => { - println!("Unhandled event {event:?}"); - } - } - } - - if self.streams.is_empty() && self.url_queue.is_empty() { - // Handler is done. 
- return Ok(true); - } - - Ok(false) - } - } - - pub struct ClientRunner<'a> { - local_addr: SocketAddr, - socket: &'a mut udp::Socket, - client: Connection, - handler: HandlerOld<'a>, - timeout: Option>>, - args: &'a Args, - } - - impl<'a> ClientRunner<'a> { - pub fn new( - args: &'a Args, - socket: &'a mut udp::Socket, - local_addr: SocketAddr, - remote_addr: SocketAddr, - origin: &str, - url_queue: VecDeque, - token: Option, - ) -> Res> { - let alpn = match args.shared.alpn.as_str() { - "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.shared.alpn.as_str(), - _ => "hq-interop", - }; - - let mut client = Connection::new_client( - origin, - &[alpn], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - local_addr, - remote_addr, - args.shared.quic_parameters.get(alpn), - Instant::now(), - )?; - - if let Some(tok) = token { - client.enable_resumption(Instant::now(), tok)?; - } - - let ciphers = args.get_ciphers(); - if !ciphers.is_empty() { - client.set_ciphers(&ciphers)?; - } - - client.set_qlog(qlog_new(args, origin, client.odcid().unwrap())?); - - let key_update = KeyUpdateState(args.key_update); - let handler = HandlerOld { - streams: HashMap::new(), - url_queue, - all_paths: Vec::new(), - args, - token: None, - key_update, - }; - - Ok(Self { - local_addr, - socket, - client, - handler, - timeout: None, - args, - }) - } - - pub async fn run(mut self) -> Res> { - loop { - let handler_done = self.handler.handle(&mut self.client)?; - - match (handler_done, self.args.resume, self.handler.token.is_some()) { - // Handler isn't done. Continue. - (false, _, _) => {}, - // Handler done. Resumption token needed but not present. Continue. - (true, true, false) => { - qdebug!("Handler done. Waiting for resumption token."); - } - // Handler is done, no resumption token needed. Close. - (true, false, _) | - // Handler is done, resumption token needed and present. Close. 
- (true, true, true) => { - self.client.close(Instant::now(), 0, "kthxbye!"); - } - } - - self.process(None).await?; - - if let State::Closed(..) = self.client.state() { - return Ok(self.handler.token.take()); - } - - match ready(self.socket, self.timeout.as_mut()).await? { - Ready::Socket => loop { - let dgrams = self.socket.recv(&self.local_addr)?; - if dgrams.is_empty() { - break; - } - for dgram in &dgrams { - self.process(Some(dgram)).await?; - } - self.handler.maybe_key_update(&mut self.client)?; - }, - Ready::Timeout => { - self.timeout = None; - } - } - } - } - - async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { - loop { - match self.client.process(dgram.take(), Instant::now()) { - Output::Datagram(dgram) => { - self.socket.writable().await?; - self.socket.send(dgram)?; - } - Output::Callback(new_timeout) => { - qinfo!("Setting timeout of {:?}", new_timeout); - self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); - break; - } - Output::None => { - qdebug!("Output::None"); - break; - } - } - } - - Ok(()) - } - } -} diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/bin/client/http09.rs new file mode 100644 index 0000000000..a7dc2c21c7 --- /dev/null +++ b/neqo-bin/src/bin/client/http09.rs @@ -0,0 +1,274 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An [HTTP 0.9](https://www.w3.org/Protocols/HTTP/AsImplemented.html) client implementation. 
+ +use std::{ + cell::RefCell, + collections::{HashMap, VecDeque}, + fs::File, + io::Write, + net::SocketAddr, + path::PathBuf, + rc::Rc, + time::Instant, +}; + +use neqo_common::{event::Provider, Datagram}; +use neqo_crypto::{AuthenticationStatus, ResumptionToken}; +use neqo_transport::{ + Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, + StreamType, +}; +use url::Url; + +use super::{get_output_file, Args, KeyUpdateState, Res}; +use crate::qlog_new; + +pub struct Handler<'a> { + streams: HashMap>, + url_queue: VecDeque, + all_paths: Vec, + args: &'a Args, + token: Option, + key_update: KeyUpdateState, +} + +impl<'a> super::Handler for Handler<'a> { + type Client = Connection; + + fn handle(&mut self, client: &mut Self::Client) -> Res { + while let Some(event) = client.next_event() { + match event { + ConnectionEvent::AuthenticationNeeded => { + client.authenticated(AuthenticationStatus::Ok, Instant::now()); + } + ConnectionEvent::RecvStreamReadable { stream_id } => { + self.read(client, stream_id)?; + } + ConnectionEvent::SendStreamWritable { stream_id } => { + println!("stream {stream_id} writable"); + } + ConnectionEvent::SendStreamComplete { stream_id } => { + println!("stream {stream_id} complete"); + } + ConnectionEvent::SendStreamCreatable { stream_type } => { + println!("stream {stream_type:?} creatable"); + if stream_type == StreamType::BiDi { + self.download_urls(client); + } + } + ConnectionEvent::StateChange( + State::WaitInitial | State::Handshaking | State::Connected, + ) => { + println!("{event:?}"); + self.download_urls(client); + } + ConnectionEvent::StateChange(State::Confirmed) => { + self.maybe_key_update(client)?; + } + ConnectionEvent::ResumptionToken(token) => { + self.token = Some(token); + } + _ => { + println!("Unhandled event {event:?}"); + } + } + } + + if self.streams.is_empty() && self.url_queue.is_empty() { + // Handler is done. 
+ return Ok(true); + } + + Ok(false) + } + + fn maybe_key_update(&mut self, c: &mut Self::Client) -> Res<()> { + self.key_update.maybe_update(|| c.initiate_key_update())?; + self.download_urls(c); + Ok(()) + } + + fn take_token(&mut self) -> Option { + self.token.take() + } + + fn has_token(&self) -> bool { + self.token.is_some() + } +} + +pub(crate) fn create_client( + args: &Args, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + resumption_token: Option, +) -> Res { + let alpn = match args.shared.alpn.as_str() { + "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.shared.alpn.as_str(), + _ => "hq-interop", + }; + + let mut client = Connection::new_client( + hostname, + &[alpn], + Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), + local_addr, + remote_addr, + args.shared.quic_parameters.get(alpn), + Instant::now(), + )?; + + if let Some(tok) = resumption_token { + client.enable_resumption(Instant::now(), tok)?; + } + + let ciphers = args.get_ciphers(); + if !ciphers.is_empty() { + client.set_ciphers(&ciphers)?; + } + + client.set_qlog(qlog_new(args, hostname, client.odcid().unwrap())?); + + Ok(client) +} + +impl super::Client for Connection { + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { + self.process(dgram, now) + } + + fn close(&mut self, now: Instant, app_error: neqo_transport::AppError, msg: S) + where + S: AsRef + std::fmt::Display, + { + self.close(now, app_error, msg); + } + + fn is_closed(&self) -> bool { + matches!(self.state(), State::Closed(..)) + } +} + +impl<'b> Handler<'b> { + pub fn new(url_queue: VecDeque, args: &'b Args, key_update: KeyUpdateState) -> Self { + Self { + streams: HashMap::new(), + url_queue, + all_paths: Vec::new(), + args, + token: None, + key_update, + } + } + + fn download_urls(&mut self, client: &mut Connection) { + loop { + if self.url_queue.is_empty() { + break; + } + if self.streams.len() >= self.args.concurrency { + break; + } + if !self.download_next(client) { + 
break; + } + } + } + + fn download_next(&mut self, client: &mut Connection) -> bool { + if self.key_update.needed() { + println!("Deferring requests until after first key update"); + return false; + } + let url = self + .url_queue + .pop_front() + .expect("download_next called with empty queue"); + match client.stream_create(StreamType::BiDi) { + Ok(client_stream_id) => { + println!("Created stream {client_stream_id} for {url}"); + let req = format!("GET {}\r\n", url.path()); + _ = client + .stream_send(client_stream_id, req.as_bytes()) + .unwrap(); + client.stream_close_send(client_stream_id).unwrap(); + let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths); + self.streams.insert(client_stream_id, out_file); + true + } + Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => { + println!("Cannot create stream {e:?}"); + self.url_queue.push_front(url); + false + } + Err(e) => { + panic!("Error creating stream {e:?}"); + } + } + } + + /// Read and maybe print received data from a stream. + // Returns bool: was fin received? 
+ fn read_from_stream( + client: &mut Connection, + stream_id: StreamId, + output_read_data: bool, + maybe_out_file: &mut Option, + ) -> Res { + let mut data = vec![0; 4096]; + loop { + let (sz, fin) = client.stream_recv(stream_id, &mut data)?; + if sz == 0 { + return Ok(fin); + } + + if let Some(out_file) = maybe_out_file { + out_file.write_all(&data[..sz])?; + } else if !output_read_data { + println!("READ[{stream_id}]: {sz} bytes"); + } else { + println!( + "READ[{}]: {}", + stream_id, + String::from_utf8(data.clone()).unwrap() + ); + } + if fin { + return Ok(true); + } + } + } + + fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> { + let mut maybe_maybe_out_file = self.streams.get_mut(&stream_id); + match &mut maybe_maybe_out_file { + None => { + println!("Data on unexpected stream: {stream_id}"); + return Ok(()); + } + Some(maybe_out_file) => { + let fin_recvd = Self::read_from_stream( + client, + stream_id, + self.args.output_read_data, + maybe_out_file, + )?; + + if fin_recvd { + if maybe_out_file.is_none() { + println!(""); + } + self.streams.remove(&stream_id); + self.download_urls(client); + } + } + } + Ok(()) + } +} diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/bin/client/http3.rs new file mode 100644 index 0000000000..754de9cb16 --- /dev/null +++ b/neqo-bin/src/bin/client/http3.rs @@ -0,0 +1,456 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An HTTP 3 client implementation. 
+ +use std::{ + cell::RefCell, + collections::{HashMap, VecDeque}, + fmt::Display, + fs::File, + io::Write, + net::SocketAddr, + path::PathBuf, + rc::Rc, + time::Instant, +}; + +use neqo_common::{event::Provider, hex, Datagram, Header}; +use neqo_crypto::{AuthenticationStatus, ResumptionToken}; +use neqo_http3::{Error, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; +use neqo_transport::{ + AppError, Connection, EmptyConnectionIdGenerator, Error as TransportError, Output, StreamId, +}; +use url::Url; + +use crate::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; + +pub(crate) struct Handler<'a> { + #[allow( + unknown_lints, + clippy::struct_field_names, + clippy::redundant_field_names + )] + url_handler: UrlHandler<'a>, + key_update: KeyUpdateState, + token: Option, + output_read_data: bool, +} + +impl<'a> Handler<'a> { + pub(crate) fn new( + url_queue: VecDeque, + args: &'a Args, + key_update: KeyUpdateState, + ) -> Self { + let url_handler = UrlHandler { + url_queue, + stream_handlers: HashMap::new(), + all_paths: Vec::new(), + handler_type: if args.test.is_some() { + StreamHandlerType::Upload + } else { + StreamHandlerType::Download + }, + args, + }; + + Self { + url_handler, + key_update, + token: None, + output_read_data: args.output_read_data, + } + } +} + +pub(crate) fn create_client( + args: &Args, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + resumption_token: Option, +) -> Res { + let mut transport = Connection::new_client( + hostname, + &[&args.shared.alpn], + Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), + local_addr, + remote_addr, + args.shared.quic_parameters.get(args.shared.alpn.as_str()), + Instant::now(), + )?; + let ciphers = args.get_ciphers(); + if !ciphers.is_empty() { + transport.set_ciphers(&ciphers)?; + } + let mut client = Http3Client::new_with_conn( + transport, + Http3Parameters::default() + .max_table_size_encoder(args.shared.max_table_size_encoder) + 
.max_table_size_decoder(args.shared.max_table_size_decoder) + .max_blocked_streams(args.shared.max_blocked_streams) + .max_concurrent_push_streams(args.max_concurrent_push_streams), + ); + + let qlog = qlog_new(args, hostname, client.connection_id())?; + client.set_qlog(qlog); + if let Some(ech) = &args.ech { + client.enable_ech(ech).expect("enable ECH"); + } + if let Some(token) = resumption_token { + client + .enable_resumption(Instant::now(), token) + .expect("enable resumption"); + } + + Ok(client) +} + +impl super::Client for Http3Client { + fn is_closed(&self) -> bool { + matches!(self.state(), Http3State::Closed(..)) + } + + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { + self.process(dgram, now) + } + + fn close(&mut self, now: Instant, app_error: AppError, msg: S) + where + S: AsRef + Display, + { + self.close(now, app_error, msg); + } +} + +impl<'a> super::Handler for Handler<'a> { + type Client = Http3Client; + + fn handle(&mut self, client: &mut Http3Client) -> Res { + while let Some(event) = client.next_event() { + match event { + Http3ClientEvent::AuthenticationNeeded => { + client.authenticated(AuthenticationStatus::Ok, Instant::now()); + } + Http3ClientEvent::HeaderReady { + stream_id, + headers, + fin, + .. 
+ } => { + if let Some(handler) = self.url_handler.stream_handler(stream_id) { + handler.process_header_ready(stream_id, fin, headers); + } else { + println!("Data on unexpected stream: {stream_id}"); + } + if fin { + self.url_handler.on_stream_fin(client, stream_id); + } + } + Http3ClientEvent::DataReadable { stream_id } => { + let mut stream_done = false; + match self.url_handler.stream_handler(stream_id) { + None => { + println!("Data on unexpected stream: {stream_id}"); + } + Some(handler) => loop { + let mut data = vec![0; 4096]; + let (sz, fin) = client + .read_data(Instant::now(), stream_id, &mut data) + .expect("Read should succeed"); + + handler.process_data_readable( + stream_id, + fin, + data, + sz, + self.output_read_data, + )?; + + if fin { + stream_done = true; + break; + } + + if sz == 0 { + break; + } + }, + } + + if stream_done { + self.url_handler.on_stream_fin(client, stream_id); + } + } + Http3ClientEvent::DataWritable { stream_id } => { + match self.url_handler.stream_handler(stream_id) { + None => { + println!("Data on unexpected stream: {stream_id}"); + } + Some(handler) => { + handler.process_data_writable(client, stream_id); + } + } + } + Http3ClientEvent::StateChange(Http3State::Connected) + | Http3ClientEvent::RequestsCreatable => { + self.url_handler.process_urls(client); + } + Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), + _ => { + println!("Unhandled event {event:?}"); + } + } + } + + Ok(self.url_handler.done()) + } + + fn maybe_key_update(&mut self, c: &mut Http3Client) -> Res<()> { + self.key_update.maybe_update(|| c.initiate_key_update())?; + self.url_handler.process_urls(c); + Ok(()) + } + + fn take_token(&mut self) -> Option { + self.token.take() + } + + fn has_token(&self) -> bool { + self.token.is_some() + } +} + +trait StreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
); + fn process_data_readable( + &mut self, + stream_id: StreamId, + fin: bool, + data: Vec, + sz: usize, + output_read_data: bool, + ) -> Res; + fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId); +} + +enum StreamHandlerType { + Download, + Upload, +} + +impl StreamHandlerType { + fn make_handler( + handler_type: &Self, + url: &Url, + args: &Args, + all_paths: &mut Vec, + client: &mut Http3Client, + client_stream_id: StreamId, + ) -> Box { + match handler_type { + Self::Download => { + let out_file = get_output_file(url, &args.output_dir, all_paths); + client.stream_close_send(client_stream_id).unwrap(); + Box::new(DownloadStreamHandler { out_file }) + } + Self::Upload => Box::new(UploadStreamHandler { + data: vec![42; args.upload_size], + offset: 0, + chunk_size: 32768, + start: Instant::now(), + }), + } + } +} + +struct DownloadStreamHandler { + out_file: Option, +} + +impl StreamHandler for DownloadStreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { + if self.out_file.is_none() { + println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + } + } + + fn process_data_readable( + &mut self, + stream_id: StreamId, + fin: bool, + data: Vec, + sz: usize, + output_read_data: bool, + ) -> Res { + if let Some(out_file) = &mut self.out_file { + if sz > 0 { + out_file.write_all(&data[..sz])?; + } + return Ok(true); + } else if !output_read_data { + println!("READ[{stream_id}]: {sz} bytes"); + } else if let Ok(txt) = String::from_utf8(data.clone()) { + println!("READ[{stream_id}]: {txt}"); + } else { + println!("READ[{}]: 0x{}", stream_id, hex(&data)); + } + + if fin && self.out_file.is_none() { + println!(""); + } + + Ok(true) + } + + fn process_data_writable(&mut self, _client: &mut Http3Client, _stream_id: StreamId) {} +} + +struct UploadStreamHandler { + data: Vec, + offset: usize, + chunk_size: usize, + start: Instant, +} + +impl StreamHandler for UploadStreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { + println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + } + + fn process_data_readable( + &mut self, + stream_id: StreamId, + _fin: bool, + data: Vec, + _sz: usize, + _output_read_data: bool, + ) -> Res { + if let Ok(txt) = String::from_utf8(data.clone()) { + let trimmed_txt = txt.trim_end_matches(char::from(0)); + let parsed: usize = trimmed_txt.parse().unwrap(); + if parsed == self.data.len() { + let upload_time = Instant::now().duration_since(self.start); + println!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); + } + } else { + panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data)); + } + Ok(true) + } + + fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId) { + while self.offset < self.data.len() { + let end = self.offset + self.chunk_size.min(self.data.len() - self.offset); + let chunk = &self.data[self.offset..end]; + match client.send_data(stream_id, chunk) { + Ok(amount) => { + if amount == 0 { + break; + } + self.offset += amount; + if self.offset == self.data.len() { + client.stream_close_send(stream_id).unwrap(); + } + } + Err(_) => break, + }; + } + } +} + +struct UrlHandler<'a> { + url_queue: VecDeque, + stream_handlers: HashMap>, + all_paths: Vec, + handler_type: StreamHandlerType, + args: &'a Args, +} + +impl<'a> UrlHandler<'a> { + fn stream_handler(&mut self, stream_id: StreamId) -> Option<&mut Box> { + self.stream_handlers.get_mut(&stream_id) + } + + fn process_urls(&mut self, client: &mut Http3Client) { + loop { + if self.url_queue.is_empty() { + break; + } + if self.stream_handlers.len() >= self.args.concurrency { + break; + } + if !self.next_url(client) { + break; + } + } + } + + fn next_url(&mut self, client: &mut Http3Client) -> bool { + let url = self + .url_queue + .pop_front() + .expect("download_next called with empty queue"); + match client.fetch( + Instant::now(), + &self.args.method, + &url, + &to_headers(&self.args.header), + Priority::default(), + ) { + Ok(client_stream_id) 
=> { + println!("Successfully created stream id {client_stream_id} for {url}"); + + let handler: Box = StreamHandlerType::make_handler( + &self.handler_type, + &url, + self.args, + &mut self.all_paths, + client, + client_stream_id, + ); + self.stream_handlers.insert(client_stream_id, handler); + true + } + Err( + Error::TransportError(TransportError::StreamLimitError) + | Error::StreamLimitError + | Error::Unavailable, + ) => { + self.url_queue.push_front(url); + false + } + Err(e) => { + panic!("Can't create stream {e}"); + } + } + } + + fn done(&mut self) -> bool { + self.stream_handlers.is_empty() && self.url_queue.is_empty() + } + + fn on_stream_fin(&mut self, client: &mut Http3Client, stream_id: StreamId) { + self.stream_handlers.remove(&stream_id); + self.process_urls(client); + } +} + +fn to_headers(values: &[impl AsRef]) -> Vec
{ + values + .iter() + .scan(None, |state, value| { + if let Some(name) = state.take() { + *state = None; + Some(Header::new(name, value.as_ref())) + } else { + *state = Some(value.as_ref().to_string()); + None + } + }) + .collect() +} diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/bin/client/main.rs new file mode 100644 index 0000000000..4710f8b222 --- /dev/null +++ b/neqo-bin/src/bin/client/main.rs @@ -0,0 +1,541 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{ + collections::{HashMap, VecDeque}, + fmt::{self, Display}, + fs::{create_dir_all, File, OpenOptions}, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}, + path::PathBuf, + pin::Pin, + process::exit, + time::Instant, +}; + +use clap::Parser; +use futures::{ + future::{select, Either}, + FutureExt, TryFutureExt, +}; +use neqo_common::{self as common, qdebug, qinfo, qlog::NeqoQlog, udp, Datagram, Role}; +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, + init, Cipher, ResumptionToken, +}; +use neqo_http3::{Error, Output}; +use neqo_transport::{AppError, ConnectionId, Error as TransportError, Version}; +use qlog::{events::EventImportance, streamer::QlogStreamer}; +use tokio::time::Sleep; +use url::{Origin, Url}; + +mod http09; +mod http3; + +#[derive(Debug)] +pub enum ClientError { + ArgumentError(&'static str), + Http3Error(neqo_http3::Error), + IoError(io::Error), + QlogError, + TransportError(neqo_transport::Error), +} + +impl From for ClientError { + fn from(err: io::Error) -> Self { + Self::IoError(err) + } +} + +impl From for ClientError { + fn from(err: neqo_http3::Error) -> Self { + Self::Http3Error(err) + } +} + +impl From for ClientError { + fn from(_err: qlog::Error) -> Self { + Self::QlogError + } +} + +impl From for ClientError { + fn from(err: 
neqo_transport::Error) -> Self { + Self::TransportError(err) + } +} + +impl Display for ClientError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Error: {self:?}")?; + Ok(()) + } +} + +impl std::error::Error for ClientError {} + +type Res = Result; + +/// Track whether a key update is needed. +#[derive(Debug, PartialEq, Eq)] +struct KeyUpdateState(bool); + +impl KeyUpdateState { + pub fn maybe_update(&mut self, update_fn: F) -> Res<()> + where + F: FnOnce() -> Result<(), E>, + E: Into, + { + if self.0 { + if let Err(e) = update_fn() { + let e = e.into(); + match e { + ClientError::TransportError(TransportError::KeyUpdateBlocked) + | ClientError::Http3Error(Error::TransportError( + TransportError::KeyUpdateBlocked, + )) => (), + _ => return Err(e), + } + } else { + println!("Keys updated"); + self.0 = false; + } + } + Ok(()) + } + + fn needed(&self) -> bool { + self.0 + } +} + +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] +#[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. +pub struct Args { + #[command(flatten)] + shared: neqo_bin::SharedArgs, + + urls: Vec, + + #[arg(short = 'm', default_value = "GET")] + method: String, + + #[arg(short = 'H', long, number_of_values = 2)] + header: Vec, + + #[arg(name = "max-push", short = 'p', long, default_value = "10")] + max_concurrent_push_streams: u64, + + #[arg(name = "download-in-series", long)] + /// Download resources in series using separate connections. + download_in_series: bool, + + #[arg(name = "concurrency", long, default_value = "100")] + /// The maximum number of requests to have outstanding at one time. 
+ concurrency: usize, + + #[arg(name = "output-read-data", long)] + /// Output received data to stdout + output_read_data: bool, + + #[arg(name = "output-dir", long)] + /// Save contents of fetched URLs to a directory + output_dir: Option, + + #[arg(short = 'r', long)] + /// Client attempts to resume by making multiple connections to servers. + /// Requires that 2 or more URLs are listed for each server. + /// Use this for 0-RTT: the stack always attempts 0-RTT on resumption. + resume: bool, + + #[arg(name = "key-update", long)] + /// Attempt to initiate a key update immediately after confirming the connection. + key_update: bool, + + #[arg(name = "ech", long, value_parser = |s: &str| hex::decode(s))] + /// Enable encrypted client hello (ECH). + /// This takes an encoded ECH configuration in hexadecimal format. + ech: Option>, + + #[arg(name = "ipv4-only", short = '4', long)] + /// Connect only over IPv4 + ipv4_only: bool, + + #[arg(name = "ipv6-only", short = '6', long)] + /// Connect only over IPv6 + ipv6_only: bool, + + /// The test that this client will run. Currently, we only support "upload". + #[arg(name = "test", long)] + test: Option, + + /// The request size that will be used for upload test. + #[arg(name = "upload-size", long, default_value = "100")] + upload_size: usize, +} + +impl Args { + fn get_ciphers(&self) -> Vec { + self.shared + .ciphers + .iter() + .filter_map(|c| match c.as_str() { + "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), + "TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384), + "TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256), + _ => None, + }) + .collect::>() + } + + fn update_for_tests(&mut self) { + let Some(testcase) = self.shared.qns_test.as_ref() else { + return; + }; + + // Only use v1 for most QNS tests. + self.shared.quic_parameters.quic_version = vec![Version::Version1]; + match testcase.as_str() { + // TODO: Add "ecn" when that is ready. 
+ "http3" => { + if let Some(testcase) = &self.test { + if testcase.as_str() != "upload" { + eprintln!("Unsupported test case: {testcase}"); + exit(127) + } + + self.method = String::from("POST"); + } + } + "handshake" | "transfer" | "retry" => { + self.shared.use_old_http = true; + } + "zerortt" | "resumption" => { + if self.urls.len() < 2 { + eprintln!("Warning: resumption tests won't work without >1 URL"); + exit(127); + } + self.shared.use_old_http = true; + self.resume = true; + } + "multiconnect" => { + self.shared.use_old_http = true; + self.download_in_series = true; + } + "chacha20" => { + self.shared.use_old_http = true; + self.shared.ciphers.clear(); + self.shared + .ciphers + .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); + } + "keyupdate" => { + self.shared.use_old_http = true; + self.key_update = true; + } + "v2" => { + self.shared.use_old_http = true; + // Use default version set for this test (which allows compatible vneg.) + self.shared.quic_parameters.quic_version.clear(); + } + _ => exit(127), + } + } +} + +fn get_output_file( + url: &Url, + output_dir: &Option, + all_paths: &mut Vec, +) -> Option { + if let Some(ref dir) = output_dir { + let mut out_path = dir.clone(); + + let url_path = if url.path() == "/" { + // If no path is given... call it "root"? + "root" + } else { + // Omit leading slash + &url.path()[1..] + }; + out_path.push(url_path); + + if all_paths.contains(&out_path) { + eprintln!("duplicate path {}", out_path.display()); + return None; + } + + eprintln!("Saving {url} to {out_path:?}"); + + if let Some(parent) = out_path.parent() { + create_dir_all(parent).ok()?; + } + + let f = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&out_path) + .ok()?; + + all_paths.push(out_path); + Some(f) + } else { + None + } +} + +enum Ready { + Socket, + Timeout, +} + +// Wait for the socket to be readable or the timeout to fire. 
+async fn ready( + socket: &udp::Socket, + mut timeout: Option<&mut Pin>>, +) -> Result { + let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket); + let timeout_ready = timeout + .as_mut() + .map_or(Either::Right(futures::future::pending()), Either::Left) + .map(|()| Ok(Ready::Timeout)); + select(socket_ready, timeout_ready).await.factor_first().0 +} + +/// Handles a given task on the provided [`Client`]. +trait Handler { + type Client: Client; + + fn handle(&mut self, client: &mut Self::Client) -> Res; + fn maybe_key_update(&mut self, c: &mut Self::Client) -> Res<()>; + fn take_token(&mut self) -> Option; + fn has_token(&self) -> bool; +} + +/// Network client, e.g. [`neqo_transport::Connection`] or [`neqo_http3::Http3Client`]. +trait Client { + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output; + fn close(&mut self, now: Instant, app_error: AppError, msg: S) + where + S: AsRef + Display; + fn is_closed(&self) -> bool; +} + +struct Runner<'a, H: Handler> { + local_addr: SocketAddr, + socket: &'a mut udp::Socket, + client: H::Client, + handler: H, + timeout: Option>>, + args: &'a Args, +} + +impl<'a, H: Handler> Runner<'a, H> { + async fn run(mut self) -> Res> { + loop { + let handler_done = self.handler.handle(&mut self.client)?; + + match (handler_done, self.args.resume, self.handler.has_token()) { + // Handler isn't done. Continue. + (false, _, _) => {}, + // Handler done. Resumption token needed but not present. Continue. + (true, true, false) => { + qdebug!("Handler done. Waiting for resumption token."); + } + // Handler is done, no resumption token needed. Close. + (true, false, _) | + // Handler is done, resumption token needed and present. Close. + (true, true, true) => { + self.client.close(Instant::now(), 0, "kthxbye!"); + } + } + + self.process(None).await?; + + if self.client.is_closed() { + return Ok(self.handler.take_token()); + } + + match ready(self.socket, self.timeout.as_mut()).await? 
{ + Ready::Socket => loop { + let dgrams = self.socket.recv(&self.local_addr)?; + if dgrams.is_empty() { + break; + } + for dgram in &dgrams { + self.process(Some(dgram)).await?; + } + self.handler.maybe_key_update(&mut self.client)?; + }, + Ready::Timeout => { + self.timeout = None; + } + } + } + } + + async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { + loop { + match self.client.process(dgram.take(), Instant::now()) { + Output::Datagram(dgram) => { + self.socket.writable().await?; + self.socket.send(dgram)?; + } + Output::Callback(new_timeout) => { + qinfo!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; + } + Output::None => { + qdebug!("Output::None"); + break; + } + } + } + + Ok(()) + } +} + +fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { + if let Some(qlog_dir) = &args.shared.qlog_dir { + let mut qlog_path = qlog_dir.clone(); + let filename = format!("{hostname}-{cid}.sqlog"); + qlog_path.push(filename); + + let f = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&qlog_path)?; + + let streamer = QlogStreamer::new( + qlog::QLOG_VERSION.to_string(), + Some("Example qlog".to_string()), + Some("Example qlog description".to_string()), + None, + std::time::Instant::now(), + common::qlog::new_trace(Role::Client), + EventImportance::Base, + Box::new(f), + ); + + Ok(NeqoQlog::enabled(streamer, qlog_path)?) 
+ } else { + Ok(NeqoQlog::disabled()) + } +} + +#[tokio::main] +async fn main() -> Res<()> { + init(); + + let mut args = Args::parse(); + args.update_for_tests(); + + let urls_by_origin = args + .urls + .clone() + .into_iter() + .fold(HashMap::>::new(), |mut urls, url| { + urls.entry(url.origin()).or_default().push_back(url); + urls + }) + .into_iter() + .filter_map(|(origin, urls)| match origin { + Origin::Tuple(_scheme, h, p) => Some(((h, p), urls)), + Origin::Opaque(x) => { + eprintln!("Opaque origin {x:?}"); + None + } + }); + + for ((host, port), mut urls) in urls_by_origin { + if args.resume && urls.len() < 2 { + eprintln!("Resumption to {host} cannot work without at least 2 URLs."); + exit(127); + } + + let remote_addr = format!("{host}:{port}").to_socket_addrs()?.find(|addr| { + !matches!( + (addr, args.ipv4_only, args.ipv6_only), + (SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false) + ) + }); + let Some(remote_addr) = remote_addr else { + eprintln!("No compatible address found for: {host}"); + exit(1); + }; + + let local_addr = match remote_addr { + SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0), + SocketAddr::V6(..) 
=> SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), + }; + + let mut socket = udp::Socket::bind(local_addr)?; + let real_local = socket.local_addr().unwrap(); + println!( + "{} Client connecting: {:?} -> {:?}", + if args.shared.use_old_http { "H9" } else { "H3" }, + real_local, + remote_addr, + ); + + let hostname = format!("{host}"); + let mut token: Option = None; + let mut first = true; + while !urls.is_empty() { + let to_request = if (args.resume && first) || args.download_in_series { + urls.pop_front().into_iter().collect() + } else { + std::mem::take(&mut urls) + }; + + first = false; + + let key_update = KeyUpdateState(args.key_update); + + token = if args.shared.use_old_http { + let client = + http09::create_client(&args, real_local, remote_addr, &hostname, token) + .expect("failed to create client"); + + let handler = http09::Handler::new(to_request, &args, key_update); + + Runner { + args: &args, + client, + handler, + local_addr: real_local, + socket: &mut socket, + timeout: None, + } + .run() + .await? + } else { + let client = http3::create_client(&args, real_local, remote_addr, &hostname, token) + .expect("failed to create client"); + + let handler = http3::Handler::new(to_request, &args, key_update); + + Runner { + args: &args, + client, + handler, + local_addr: real_local, + socket: &mut socket, + timeout: None, + } + .run() + .await? + }; + } + } + + Ok(()) +} From 0f473b58504cf85ccaafe5bb414a37983d957a56 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 14 Mar 2024 10:37:10 +0200 Subject: [PATCH 244/321] ci: Run `cargo mutants` on changed code (#1742) * ci: Run `cargo mutants` on changed code See https://mutants.rs/, esp. 
https://mutants.rs/pr-diff.html * Fixes * Fix trigger --- .github/actions/rust/action.yml | 2 +- .github/workflows/mutants.yml | 36 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/mutants.yml diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index dce4923678..bfb09d332d 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -30,7 +30,7 @@ runs: - name: Install Rust tools shell: bash - run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack + run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack cargo-mutants # sccache slows CI down, so we leave it disabled. # Leaving the steps below commented out, so we can re-evaluate enabling it later. diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml new file mode 100644 index 0000000000..4e757b267f --- /dev/null +++ b/.github/workflows/mutants.yml @@ -0,0 +1,36 @@ +name: Find mutants +on: + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +jobs: + incremental-mutants: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Relative diff + run: git diff origin/${{ github.base_ref }}.. 
> pr.diff + + - name: Install Rust + uses: ./.github/actions/rust + with: + version: stable + + - name: Mutants + run: cargo mutants --test-tool=nextest --no-shuffle -j 2 -vV --in-diff pr.diff + + - name: Archive mutants.out + uses: actions/upload-artifact@v4 + if: always() + with: + name: mutants-incremental.out + path: mutants.out From db654874a89e62106c2533ae5dcc70186dcd044b Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 14 Mar 2024 20:07:27 +1000 Subject: [PATCH 245/321] Fix mutants --- .github/workflows/mutants.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index 4e757b267f..1f31d99132 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -3,7 +3,6 @@ on: pull_request: branches: ["main"] paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] - merge_group: concurrency: group: ${{ github.workflow }}-${{ github.ref_name }} @@ -20,6 +19,16 @@ jobs: - name: Relative diff run: git diff origin/${{ github.base_ref }}.. > pr.diff + - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive + run: | + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + + - name: Fetch and build NSS and NSPR + uses: ./.github/actions/nss + - name: Install Rust uses: ./.github/actions/rust with: From 92db586c0fbc87b14ea08c4931da615d5233bdad Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 14 Mar 2024 10:59:26 +0100 Subject: [PATCH 246/321] feat(.github/interop): use markdown table (#1743) 1. Provide `--markdown` flag to QUIC Interop Runner to get markdown formatted table. 2. Ignore all log output, but table. 
--- .github/actions/quic-interop-runner/action.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index 4c2f695ab4..ef4865bde6 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -68,7 +68,7 @@ runs: cd quic-interop-runner jq --arg key "${{ inputs.name }}" --argjson newEntry '{"image": "${{ inputs.image }}", "url": "${{ inputs.url }}", "role": "${{ inputs.role }}"}' '.[$key] = $newEntry' implementations.json > temp.$$ && mv temp.$$ implementations.json cat implementations.json - ARGS="--log-dir logs --must-include ${{ inputs.name }}" + ARGS="--log-dir logs --markdown --must-include ${{ inputs.name }}" if [ -n "${{ inputs.client }}" ]; then ARGS="$ARGS --client ${{ inputs.client }}" fi @@ -92,9 +92,8 @@ runs: run: | echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment echo '' >> comment - echo '```' >> comment - cat quic-interop-runner/summary >> comment - echo '```' >> comment + # Ignore all, but table, which starts with "|:--". 
+ cat quic-interop-runner/summary | awk '/^\|:--/{flag=1} flag' >> comment echo '' >> comment shell: bash From 56c380d3624479256383da217a92b89effc5e34d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 14 Mar 2024 21:26:02 +1000 Subject: [PATCH 247/321] Fix benchmark caching, and store (and show) baseline SHA --- .github/workflows/bench.yml | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 7c7f812341..3a4a7875cb 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -45,8 +45,8 @@ jobs: uses: actions/cache/restore@v4 with: path: ./target/criterion - key: criterion-${{ runner.os }}-${{ hashFiles('./target/criterion/**.json') }} - restore-keys: criterion-${{ runner.os }}- + key: criterion-${{ runner.name }}-${{ hashFiles('./target/criterion/**.json') }} + restore-keys: criterion-${{ runner.name }}- # Disable turboboost, hyperthreading and use performance governor. - name: Prepare machine @@ -120,7 +120,11 @@ jobs: uses: actions/cache/save@v4 with: path: ./target/criterion - key: ${{ steps.criterion-cache.outputs.cache-primary-key }} + key: criterion-${{ runner.name }}-${{ hashFiles('./target/criterion/**.json') }} + + - name: Remember main-branch push URL + if: github.ref == 'refs/heads/main' + run: echo "${{ github.sha }}" > target/criterion/baseline-sha.txt - name: Export perf data id: export @@ -138,7 +142,12 @@ jobs: - name: Format results as Markdown id: results run: | - echo '### Benchmark results' > results.md + { + echo "### Benchmark results" + echo + echo "Performance differences relative to $(cat target/criterion/baseline-sha.txt)." 
+ echo + } > results.md grep -Ev 'ignored|running \d+ tests|%\)' results.txt |\ sed -E -e 's/(Performance has regressed.)/:broken_heart: **\1**/gi' \ -e 's/(Performance has improved.)/:green_heart: **\1**/gi' \ @@ -146,7 +155,6 @@ jobs: -e 's/^([a-z0-9].*)$/* **\1**/gi' \ -e 's/(change:[^%]*% )([^%]*%)(.*)/\1**\2**\3/gi' \ >> results.md - echo '' >> results.md - name: Export PR comment data uses: ./.github/actions/pr-comment-data-export From 2750423b08db37a2541ad0aee72b3cb2360bf66c Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 14 Mar 2024 21:55:36 +1000 Subject: [PATCH 248/321] Reorder steps --- .github/workflows/bench.yml | 42 ++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 3a4a7875cb..fbe250d098 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -115,6 +115,27 @@ jobs: perf report -i server/perf.data --no-children --stdio > server.perf.txt & wait + - name: Format results as Markdown + id: results + run: | + { + echo "### Benchmark results" + echo + echo "Performance differences relative to $(cat target/criterion/baseline-sha.txt)." 
+ echo + } > results.md + grep -Ev 'ignored|running \d+ tests|%\)' results.txt |\ + sed -E -e 's/(Performance has regressed.)/:broken_heart: **\1**/gi' \ + -e 's/(Performance has improved.)/:green_heart: **\1**/gi' \ + -e 's/^ +/ /gi' \ + -e 's/^([a-z0-9].*)$/* **\1**/gi' \ + -e 's/(change:[^%]*% )([^%]*%)(.*)/\1**\2**\3/gi' \ + >> results.md + + - name: Remember main-branch push URL + if: github.ref == 'refs/heads/main' + run: echo "${{ github.sha }}" > target/criterion/baseline-sha.txt + - name: Cache main-branch results if: github.ref == 'refs/heads/main' uses: actions/cache/save@v4 @@ -122,10 +143,6 @@ jobs: path: ./target/criterion key: criterion-${{ runner.name }}-${{ hashFiles('./target/criterion/**.json') }} - - name: Remember main-branch push URL - if: github.ref == 'refs/heads/main' - run: echo "${{ github.sha }}" > target/criterion/baseline-sha.txt - - name: Export perf data id: export uses: actions/upload-artifact@v4 @@ -139,23 +156,6 @@ jobs: target/criterion* compression-level: 9 - - name: Format results as Markdown - id: results - run: | - { - echo "### Benchmark results" - echo - echo "Performance differences relative to $(cat target/criterion/baseline-sha.txt)." 
- echo - } > results.md - grep -Ev 'ignored|running \d+ tests|%\)' results.txt |\ - sed -E -e 's/(Performance has regressed.)/:broken_heart: **\1**/gi' \ - -e 's/(Performance has improved.)/:green_heart: **\1**/gi' \ - -e 's/^ +/ /gi' \ - -e 's/^([a-z0-9].*)$/* **\1**/gi' \ - -e 's/(change:[^%]*% )([^%]*%)(.*)/\1**\2**\3/gi' \ - >> results.md - - name: Export PR comment data uses: ./.github/actions/pr-comment-data-export with: From 179e075ee41228f27e2fedab290f3ad8ce9a5600 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 15 Mar 2024 06:05:15 +1000 Subject: [PATCH 249/321] Fix caching --- .github/workflows/bench.yml | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index fbe250d098..72b835f843 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -45,7 +45,7 @@ jobs: uses: actions/cache/restore@v4 with: path: ./target/criterion - key: criterion-${{ runner.name }}-${{ hashFiles('./target/criterion/**.json') }} + key: criterion-${{ runner.name }}-${{ github.sha }} restore-keys: criterion-${{ runner.name }}- # Disable turboboost, hyperthreading and use performance governor. @@ -121,9 +121,14 @@ jobs: { echo "### Benchmark results" echo - echo "Performance differences relative to $(cat target/criterion/baseline-sha.txt)." - echo } > results.md + SHA=$(cat target/criterion/baseline-sha.txt) + if [ -n "$SHA" ]; then + { + echo "Performance differences relative to $SHA." 
+ echo + } >> results.md + fi grep -Ev 'ignored|running \d+ tests|%\)' results.txt |\ sed -E -e 's/(Performance has regressed.)/:broken_heart: **\1**/gi' \ -e 's/(Performance has improved.)/:green_heart: **\1**/gi' \ @@ -136,12 +141,18 @@ jobs: if: github.ref == 'refs/heads/main' run: echo "${{ github.sha }}" > target/criterion/baseline-sha.txt + - name: Store history + if: github.ref == 'refs/heads/main' + run: | + mkdir -p target/criterion-history + cp -r target/criterion "target/criterion-history/$(date +%s)-${{ github.sha }}" + - name: Cache main-branch results if: github.ref == 'refs/heads/main' uses: actions/cache/save@v4 with: path: ./target/criterion - key: criterion-${{ runner.name }}-${{ hashFiles('./target/criterion/**.json') }} + key: criterion-${{ runner.name }}-${{ github.sha }} - name: Export perf data id: export From a93859af704cce5d8bcf84a26145a7435bb9e022 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 15 Mar 2024 07:09:24 +1000 Subject: [PATCH 250/321] Don't fail CI if mutants fails, and show results --- .github/workflows/mutants.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index 1f31d99132..bd9f844edc 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -35,11 +35,21 @@ jobs: version: stable - name: Mutants - run: cargo mutants --test-tool=nextest --no-shuffle -j 2 -vV --in-diff pr.diff + run: | + # Don't fail the build if mutants fail, for now. + set -o pipefail + cargo mutants --test-tool=nextest --no-shuffle -j 2 -vV --in-diff pr.diff | tee results.txt || true + { + echo "### Mutants" + echo "See https://mutants.rs/using-results.html for more information." 
+ echo "```" + cat results.txt + echo "```" + } > "$GITHUB_STEP_SUMMARY" - name: Archive mutants.out uses: actions/upload-artifact@v4 if: always() with: - name: mutants-incremental.out + name: mutants.out path: mutants.out From f40832151639ddbaa6319bf433ad0d173637ffa7 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 15 Mar 2024 07:12:10 +1000 Subject: [PATCH 251/321] Fix backticks --- .github/workflows/mutants.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index bd9f844edc..7903f3f5e4 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -42,9 +42,9 @@ jobs: { echo "### Mutants" echo "See https://mutants.rs/using-results.html for more information." - echo "```" + echo '```' cat results.txt - echo "```" + echo '```' } > "$GITHUB_STEP_SUMMARY" - name: Archive mutants.out From cffa720e5fdb043019a3343f1faca772c3f7a275 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 15 Mar 2024 05:19:55 +0200 Subject: [PATCH 252/321] ci: Do full mutants run twice a week (#1745) And when dispatched manually. --- .github/workflows/mutants.yml | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index 7903f3f5e4..a32068ce17 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -1,8 +1,12 @@ name: Find mutants on: + schedule: + - cron: '42 3 * * 2,5' # Runs at 03:42 UTC (m and h chosen arbitrarily) twice a week. + workflow_dispatch: pull_request: branches: ["main"] paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: concurrency: group: ${{ github.workflow }}-${{ github.ref_name }} @@ -16,9 +20,6 @@ jobs: with: fetch-depth: 0 - - name: Relative diff - run: git diff origin/${{ github.base_ref }}.. 
> pr.diff - - name: Install dependencies env: DEBIAN_FRONTEND: noninteractive @@ -34,13 +35,26 @@ jobs: with: version: stable - - name: Mutants + - name: Find incremental mutants + if: github.event_name == 'pull_request' || github.event_name == 'merge_group' run: | - # Don't fail the build if mutants fail, for now. + git diff origin/${{ github.base_ref }}.. > pr.diff set -o pipefail cargo mutants --test-tool=nextest --no-shuffle -j 2 -vV --in-diff pr.diff | tee results.txt || true + echo 'TITLE="Incremental Mutants"' >> "$GITHUB_ENV" + + - name: Find mutants + if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' + run: | + set -o pipefail + cargo mutants --test-tool=nextest -vV --in-place | tee results.txt || true + echo 'TITLE="All Mutants"' >> "$GITHUB_ENV" + + - name: Post step summary + if: always() + run: | { - echo "### Mutants" + echo "### $TITLE" echo "See https://mutants.rs/using-results.html for more information." echo '```' cat results.txt From f2733e4e89dc20294054d4840d51ed9cd9c8889e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 15 Mar 2024 05:23:12 +0200 Subject: [PATCH 253/321] chore: Add some things, remove some things (#1746) --- .gitignore | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index 41867fa4e8..542ed5205d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,7 @@ -/target/ -**/*.rs.bk -Cargo.lock -/db +.DS_Store +*.qlog *~ /.vscode/ -.idea -*.qlog -*.swp -/qns/.last-update-* +/lcov.info +/target/ +Cargo.lock From 177385e02466115755a815edb334bbe5fee144a4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 15 Mar 2024 13:21:32 +0100 Subject: [PATCH 254/321] refactor(udp): move udp mod from neqo-common to neqo-bin (#1736) The `udp` module is only used in the `neqo_bin` crate, more specifically by the Neqo client and server implementation. This commit moves the `udp` module to `neqo-bin`. 
Co-authored-by: Lars Eggert --- neqo-bin/Cargo.toml | 3 ++- neqo-bin/src/bin/client/main.rs | 3 ++- neqo-bin/src/bin/server/main.rs | 3 ++- neqo-bin/src/lib.rs | 2 ++ {neqo-common => neqo-bin}/src/udp.rs | 8 ++++---- neqo-common/Cargo.toml | 3 --- neqo-common/src/datagram.rs | 12 ++++++------ neqo-common/src/lib.rs | 2 -- 8 files changed, 18 insertions(+), 18 deletions(-) rename {neqo-common => neqo-bin}/src/udp.rs (98%) diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index 2beafa7e42..d36d2ecdca 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -28,12 +28,13 @@ clap = { version = "4.4", default-features = false, features = ["std", "color", futures = { version = "0.3", default-features = false, features = ["alloc"] } hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false } -neqo-common = { path = "./../neqo-common", features = ["udp"] } +neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } qlog = { version = "0.12", default-features = false } +quinn-udp = { git = "https://github.com/quinn-rs/quinn/", rev = "a947962131aba8a6521253d03cc948b20098a2d6" } regex = { version = "1.9", default-features = false, features = ["unicode-perl"] } tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = { version = "2.5", default-features = false } diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/bin/client/main.rs index 4710f8b222..d472dfb2bc 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/bin/client/main.rs @@ -21,7 +21,8 @@ use futures::{ future::{select, Either}, FutureExt, TryFutureExt, }; -use neqo_common::{self as common, qdebug, qinfo, qlog::NeqoQlog, udp, Datagram, Role}; +use neqo_bin::udp; +use neqo_common::{self as common, 
qdebug, qinfo, qlog::NeqoQlog, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, Cipher, ResumptionToken, diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/bin/server/main.rs index da8de3831c..f694cf98c1 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/bin/server/main.rs @@ -24,7 +24,8 @@ use futures::{ future::{select, select_all, Either}, FutureExt, }; -use neqo_common::{hex, qinfo, qwarn, udp, Datagram, Header}; +use neqo_bin::udp; +use neqo_common::{hex, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, diff --git a/neqo-bin/src/lib.rs b/neqo-bin/src/lib.rs index 4fe47d5cbf..8a7ff69b69 100644 --- a/neqo-bin/src/lib.rs +++ b/neqo-bin/src/lib.rs @@ -17,6 +17,8 @@ use neqo_transport::{ Version, }; +pub mod udp; + #[derive(Debug, Parser)] pub struct SharedArgs { #[arg(short = 'a', long, default_value = "h3")] diff --git a/neqo-common/src/udp.rs b/neqo-bin/src/udp.rs similarity index 98% rename from neqo-common/src/udp.rs rename to neqo-bin/src/udp.rs index c27b0632ff..632a1293d7 100644 --- a/neqo-common/src/udp.rs +++ b/neqo-bin/src/udp.rs @@ -13,11 +13,10 @@ use std::{ slice, }; +use neqo_common::{Datagram, IpTos}; use quinn_udp::{EcnCodepoint, RecvMeta, Transmit, UdpSocketState}; use tokio::io::Interest; -use crate::{Datagram, IpTos}; - /// Socket receive buffer size. /// /// Allows reading multiple datagrams in a single [`Socket::recv`] call. 
@@ -61,7 +60,7 @@ impl Socket { let transmit = Transmit { destination: d.destination(), ecn: EcnCodepoint::from_bits(Into::::into(d.tos())), - contents: d.into_data().into(), + contents: Vec::from(d).into(), segment_size: None, src_ip: None, }; @@ -129,8 +128,9 @@ impl Socket { #[cfg(test)] mod tests { + use neqo_common::{IpTosDscp, IpTosEcn}; + use super::*; - use crate::{IpTosDscp, IpTosEcn}; #[tokio::test] async fn datagram_tos() -> Result<(), io::Error> { diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index dae8362bfd..89eaa53890 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -18,16 +18,13 @@ enum-map = { version = "2.7", default-features = false } env_logger = { version = "0.10", default-features = false } log = { version = "0.4", default-features = false } qlog = { version = "0.12", default-features = false } -quinn-udp = { git = "https://github.com/quinn-rs/quinn/", rev = "a947962131aba8a6521253d03cc948b20098a2d6", optional = true } time = { version = "0.3", default-features = false, features = ["formatting"] } -tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"], optional = true } [dev-dependencies] test-fixture = { path = "../test-fixture" } [features] ci = [] -udp = ["dep:quinn-udp", "dep:tokio"] [target."cfg(windows)".dependencies.winapi] version = "0.3" diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index 04ba1a45a1..9cebb64ea5 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -53,12 +53,6 @@ impl Datagram { pub fn ttl(&self) -> Option { self.ttl } - - #[cfg(feature = "udp")] - #[must_use] - pub(crate) fn into_data(self) -> Vec { - self.d - } } impl Deref for Datagram { @@ -83,6 +77,12 @@ impl std::fmt::Debug for Datagram { } } +impl From for Vec { + fn from(datagram: Datagram) -> Self { + datagram.d + } +} + #[cfg(test)] use test_fixture::datagram; diff --git a/neqo-common/src/lib.rs 
b/neqo-common/src/lib.rs index fe88097983..e988c6071d 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -16,8 +16,6 @@ pub mod log; pub mod qlog; pub mod timer; pub mod tos; -#[cfg(feature = "udp")] -pub mod udp; use std::fmt::Write; From d2c7cbb49319052e48ea4f28f76ac1a06df3746c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 15 Mar 2024 22:19:40 +0100 Subject: [PATCH 255/321] refactor(udp): remove useless Socket::send return value (#1749) `Socket::send` returns the number of `Datagram`s sent. Given that it only accepts a single `Datagram` to be send, this return value is at best useless and in the worst case confusing. --- neqo-bin/src/udp.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neqo-bin/src/udp.rs b/neqo-bin/src/udp.rs index 632a1293d7..f4ede0b5c2 100644 --- a/neqo-bin/src/udp.rs +++ b/neqo-bin/src/udp.rs @@ -56,7 +56,7 @@ impl Socket { } /// Send the UDP datagram on the specified socket. - pub fn send(&self, d: Datagram) -> io::Result { + pub fn send(&self, d: Datagram) -> io::Result<()> { let transmit = Transmit { destination: d.destination(), ecn: EcnCodepoint::from_bits(Into::::into(d.tos())), @@ -72,7 +72,7 @@ impl Socket { assert_eq!(n, 1, "only passed one slice"); - Ok(n) + Ok(()) } /// Receive a UDP datagram on the specified socket. 
From aed423eafc5b8cb8382c694e73a64fb8722611ac Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sat, 16 Mar 2024 07:36:30 +1000 Subject: [PATCH 256/321] ci: Don't trigger on `merge_group` --- .github/workflows/mutants.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index a32068ce17..90001bd8b7 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -6,7 +6,6 @@ on: pull_request: branches: ["main"] paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] - merge_group: concurrency: group: ${{ github.workflow }}-${{ github.ref_name }} @@ -36,7 +35,7 @@ jobs: version: stable - name: Find incremental mutants - if: github.event_name == 'pull_request' || github.event_name == 'merge_group' + if: github.event_name == 'pull_request' run: | git diff origin/${{ github.base_ref }}.. > pr.diff set -o pipefail From fe2241eaf1215a1537c4b66d6eea12f94121eef6 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 15 Mar 2024 22:23:37 +0100 Subject: [PATCH 257/321] refactor(transport/tests): remove unused IdEntry (#1748) --- neqo-transport/src/connection/tests/stream.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index f469866d50..66d3bf32f3 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -116,12 +116,6 @@ fn transfer() { assert!(fin3); } -#[derive(PartialEq, Eq, PartialOrd, Ord)] -struct IdEntry { - sendorder: StreamOrder, - stream_id: StreamId, -} - // tests stream sendorder priorization fn sendorder_test(order_of_sendorder: &[Option]) { let mut client = default_client(); From a68dd1c2a1aead9117b808dc22329ca928d86cd8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sat, 16 Mar 2024 05:57:12 +0200 Subject: [PATCH 258/321] test: Make `criterion` calculate transfer benchmark throughputs (#1738) * test: Make 
`criterion` calculate transfer benchmark throughputs Like: ``` transfer/Run multiple transfers with varying seeds time: [213.61 ms 224.95 ms 235.29 ms] thrpt: [17.000 MiB/s 17.782 MiB/s 18.726 MiB/s] change: time: [-17.204% -6.9690% +1.8324%] (p = 0.21 > 0.05) thrpt: [-1.7994% +7.4910% +20.778%] No change in performance detected. Found 5 outliers among 100 measurements (5.00%) 5 (5.00%) low mild transfer/Run multiple transfers with the same seed time: [175.41 ms 188.40 ms 201.32 ms] thrpt: [19.869 MiB/s 21.231 MiB/s 22.803 MiB/s] change: time: [-5.9393% +5.5417% +18.399%] (p = 0.35 > 0.05) thrpt: [-15.540% -5.2507% +6.3144%] No change in performance detected. Found 14 outliers among 100 measurements (14.00%) 8 (8.00%) low mild 5 (5.00%) high mild 1 (1.00%) high severe ``` * This doesn't actually run `cargo bench`, it runs `cargo flamegraph` --- .github/workflows/bench.yml | 7 ------- neqo-transport/benches/transfer.rs | 7 +++++-- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 72b835f843..81ef297a9e 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -62,16 +62,9 @@ jobs: # Work around https://github.com/flamegraph-rs/flamegraph/issues/248 by passing explicit perf arguments. - name: Profile cargo bench transfer run: | - # This re-runs part of the previous step, and would hence overwrite part of the criterion results. - # Avoid that by shuffling the directories around so this run uses its own results directory. - mv target/criterion target/criterion-bench - mv target/criterion-transfer-profile target/criterion || true taskset -c 0 nice -n -20 \ cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ --bench --exact "Run multiple transfers with varying seeds" --noplot - # And now restore the directories. 
- mv target/criterion target/criterion-transfer-profile - mv target/criterion-bench target/criterion - name: Profile client/server transfer run: | diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs index 444f738f9c..b13075a4ff 100644 --- a/neqo-transport/benches/transfer.rs +++ b/neqo-transport/benches/transfer.rs @@ -6,7 +6,7 @@ use std::time::Duration; -use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion}; +use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion, Throughput}; use test_fixture::{ boxed, sim::{ @@ -21,7 +21,9 @@ const JITTER: Duration = Duration::from_millis(10); const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option>) { - c.bench_function(label, |b| { + let mut group = c.benchmark_group("transfer"); + group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap())); + group.bench_function(label, |b| { b.iter_batched( || { let nodes = boxed![ @@ -44,6 +46,7 @@ fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option Date: Sat, 16 Mar 2024 14:17:48 +1000 Subject: [PATCH 259/321] Strip ANSI escapes from Markdown output --- .github/workflows/mutants.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index 90001bd8b7..cd28cfe604 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -12,7 +12,7 @@ concurrency: cancel-in-progress: true jobs: - incremental-mutants: + mutants: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -40,14 +40,14 @@ jobs: git diff origin/${{ github.base_ref }}.. 
> pr.diff set -o pipefail cargo mutants --test-tool=nextest --no-shuffle -j 2 -vV --in-diff pr.diff | tee results.txt || true - echo 'TITLE="Incremental Mutants"' >> "$GITHUB_ENV" + echo 'TITLE=Incremental Mutants' >> "$GITHUB_ENV" - name: Find mutants if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' run: | set -o pipefail cargo mutants --test-tool=nextest -vV --in-place | tee results.txt || true - echo 'TITLE="All Mutants"' >> "$GITHUB_ENV" + echo 'TITLE=All Mutants' >> "$GITHUB_ENV" - name: Post step summary if: always() @@ -56,7 +56,7 @@ jobs: echo "### $TITLE" echo "See https://mutants.rs/using-results.html for more information." echo '```' - cat results.txt + cat results.txt | sed 's/[^[:print:]]//g' echo '```' } > "$GITHUB_STEP_SUMMARY" From 83e673335875d426faed91fc6e095ef521565f1d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sat, 16 Mar 2024 14:19:01 +1000 Subject: [PATCH 260/321] Fix `actionlint` issue --- .github/workflows/mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index cd28cfe604..e11eb96f21 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -56,7 +56,7 @@ jobs: echo "### $TITLE" echo "See https://mutants.rs/using-results.html for more information." 
echo '```' - cat results.txt | sed 's/[^[:print:]]//g' + sed 's/[^[:print:]]//g' results.txt echo '```' } > "$GITHUB_STEP_SUMMARY" From 9ff375129b9a84b9589a5dbecc87117e0c92337b Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sat, 16 Mar 2024 06:44:49 +0200 Subject: [PATCH 261/321] build: Enable "fat" LTO for release builds (#1751) Because that's what Firefox does: https://searchfox.org/mozilla-central/source/config/makefiles/rust.mk#95 --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 35916da5b1..e5bec00796 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,9 @@ rust-version = "1.74.0" [workspace.lints.clippy] pedantic = { level = "warn", priority = -1 } +[profile.release] +lto = "fat" + [profile.bench] # Inherits from the "release" profile, so just provide overrides here: # https://doc.rust-lang.org/cargo/reference/profiles.html#release From ec4ec8e3019a55f6cf1bb11709a76b40205c5b0e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sat, 16 Mar 2024 21:35:06 +1000 Subject: [PATCH 262/321] Properly strip ANSI sequences from Markdown --- .github/workflows/mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index e11eb96f21..4db2e4b925 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -56,7 +56,7 @@ jobs: echo "### $TITLE" echo "See https://mutants.rs/using-results.html for more information." 
echo '```' - sed 's/[^[:print:]]//g' results.txt + sed 's/\x1b\[[0-9;]*[mGKHF]//g' results.txt echo '```' } > "$GITHUB_STEP_SUMMARY" From ef5caeb46b510f89cd6f8668a3bf31a2d597cbd2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sun, 17 Mar 2024 02:56:20 +0200 Subject: [PATCH 263/321] ci: Ignore coverage in `neqo-bin` (#1754) Should have been part of #1724 --- .codecov.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 12facb68da..3ecf204940 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,8 +1,6 @@ -# neqo has no test coverage for its example client, server and interop test +# neqo has no test coverage for its example client and server ignore: - - "neqo-client" - - "neqo-interop" - - "neqo-server" + - "neqo-bin" # Do not notify until at least three results have been uploaded from the CI pipeline. # (This corresponds to the three main platforms we support: Linux, macOS, and Windows.) From 29fdbc0f6990760e71dd13bd33c0ee81c4c81040 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 17 Mar 2024 21:47:38 +0100 Subject: [PATCH 264/321] feat(bin/client): use BufWriter when writing to file (#1756) * feat: Use `std::io::BufWriter` with a 64KB buffer for dumping data Let's see if this makes things faster. 
* Flush `BufWriter` on FIN * Update neqo-bin/src/bin/client/http3.rs Co-authored-by: Max Inden Signed-off-by: Lars Eggert * Fix * Also for h09 * Move BufWriter construction into get_output_file --------- Signed-off-by: Lars Eggert Co-authored-by: Lars Eggert --- neqo-bin/src/bin/client/http09.rs | 13 +++++++------ neqo-bin/src/bin/client/http3.rs | 12 ++++++++---- neqo-bin/src/bin/client/main.rs | 8 +++++--- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/bin/client/http09.rs index a7dc2c21c7..6d9a26fec2 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/bin/client/http09.rs @@ -10,7 +10,7 @@ use std::{ cell::RefCell, collections::{HashMap, VecDeque}, fs::File, - io::Write, + io::{BufWriter, Write}, net::SocketAddr, path::PathBuf, rc::Rc, @@ -29,7 +29,7 @@ use super::{get_output_file, Args, KeyUpdateState, Res}; use crate::qlog_new; pub struct Handler<'a> { - streams: HashMap>, + streams: HashMap>>, url_queue: VecDeque, all_paths: Vec, args: &'a Args, @@ -219,7 +219,7 @@ impl<'b> Handler<'b> { client: &mut Connection, stream_id: StreamId, output_read_data: bool, - maybe_out_file: &mut Option, + maybe_out_file: &mut Option>, ) -> Res { let mut data = vec![0; 4096]; loop { @@ -246,8 +246,7 @@ impl<'b> Handler<'b> { } fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> { - let mut maybe_maybe_out_file = self.streams.get_mut(&stream_id); - match &mut maybe_maybe_out_file { + match self.streams.get_mut(&stream_id) { None => { println!("Data on unexpected stream: {stream_id}"); return Ok(()); @@ -261,7 +260,9 @@ impl<'b> Handler<'b> { )?; if fin_recvd { - if maybe_out_file.is_none() { + if let Some(mut out_file) = maybe_out_file.take() { + out_file.flush()?; + } else { println!(""); } self.streams.remove(&stream_id); diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/bin/client/http3.rs index 754de9cb16..07cc0e4cde 100644 --- 
a/neqo-bin/src/bin/client/http3.rs +++ b/neqo-bin/src/bin/client/http3.rs @@ -11,7 +11,7 @@ use std::{ collections::{HashMap, VecDeque}, fmt::Display, fs::File, - io::Write, + io::{BufWriter, Write}, net::SocketAddr, path::PathBuf, rc::Rc, @@ -269,7 +269,7 @@ impl StreamHandlerType { } struct DownloadStreamHandler { - out_file: Option, + out_file: Option>, } impl StreamHandler for DownloadStreamHandler { @@ -300,8 +300,12 @@ impl StreamHandler for DownloadStreamHandler { println!("READ[{}]: 0x{}", stream_id, hex(&data)); } - if fin && self.out_file.is_none() { - println!(""); + if fin { + if let Some(mut out_file) = self.out_file.take() { + out_file.flush()?; + } else { + println!(""); + } } Ok(true) diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/bin/client/main.rs index d472dfb2bc..7b1a5928a6 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/bin/client/main.rs @@ -8,7 +8,7 @@ use std::{ collections::{HashMap, VecDeque}, fmt::{self, Display}, fs::{create_dir_all, File, OpenOptions}, - io, + io::{self, BufWriter}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}, path::PathBuf, pin::Pin, @@ -36,6 +36,8 @@ use url::{Origin, Url}; mod http09; mod http3; +const BUFWRITER_BUFFER_SIZE: usize = 64 * 1024; + #[derive(Debug)] pub enum ClientError { ArgumentError(&'static str), @@ -252,7 +254,7 @@ fn get_output_file( url: &Url, output_dir: &Option, all_paths: &mut Vec, -) -> Option { +) -> Option> { if let Some(ref dir) = output_dir { let mut out_path = dir.clone(); @@ -284,7 +286,7 @@ fn get_output_file( .ok()?; all_paths.push(out_path); - Some(f) + Some(BufWriter::with_capacity(BUFWRITER_BUFFER_SIZE, f)) } else { None } From 32ef2c3cb2f0f13c6b46222d98c3316a69d7b411 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 17 Mar 2024 21:48:18 +0100 Subject: [PATCH 265/321] fix(bin): enable pacing on client & server by default (#1753) * fix(bin): enable pacing on server by default Before https://github.com/mozilla/neqo/pull/1724 
`neqo-server` would always run with `ConnectionParameters::pacing` `true`. `neqo-client` would provide a command line flag `--pacing` which defaulted to `false`. https://github.com/mozilla/neqo/pull/1724 consolidated the `QuicParameters` `struct` from `neqo-client` and `neqo-server`. Since both `neqo-client` and `neqo-server` default to `pacing` `false`. This commit restores the pre-#1724 behavior, i.e. `neqo-server` defaulting to `pacing=true` and `neqo-client` defaulting to `pacing=false`. (Note that you will have to provide a value to `--pacing` from now on. I.e. `--pacing=true` or `--pacing=false` instead of just `--pacing`.) * fix(bin): enable pacing by default and add optional --no-pacing Before https://github.com/mozilla/neqo/pull/1724 `neqo-server` would always run with `ConnectionParameters::pacing` `true`. `neqo-client` would provide a command line flag `--pacing` which defaulted to `false`. https://github.com/mozilla/neqo/pull/1724 consolidated the `QuicParameters` `struct` from `neqo-client` and `neqo-server`. Since both `neqo-client` and `neqo-server` default to `pacing` `false`. This commit replaces the `--pacing` flag with `--no-pacing`. Pacing is enabled by default on both `neqo-server` and `neqo-client`. * Update test/upload_test.sh * Update comment --- neqo-bin/src/lib.rs | 8 ++++---- test/upload_test.sh | 7 ++++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/neqo-bin/src/lib.rs b/neqo-bin/src/lib.rs index 8a7ff69b69..b7bc158245 100644 --- a/neqo-bin/src/lib.rs +++ b/neqo-bin/src/lib.rs @@ -89,9 +89,9 @@ pub struct QuicParameters { /// The congestion controller to use. pub congestion_control: CongestionControlAlgorithm, - #[arg(long = "pacing")] - /// Whether pacing is enabled. - pub pacing: bool, + #[arg(long = "no-pacing")] + /// Whether to disable pacing. + pub no_pacing: bool, #[arg(name = "preferred-address-v4", long)] /// An IPv4 address for the server preferred address. 
@@ -162,7 +162,7 @@ impl QuicParameters { .max_streams(StreamType::UniDi, self.max_streams_uni) .idle_timeout(Duration::from_secs(self.idle_timeout)) .cc_algorithm(self.congestion_control) - .pacing(self.pacing); + .pacing(!self.no_pacing); if let Some(&first) = self.quic_version.first() { let all = if self.quic_version[1..].contains(&first) { diff --git a/test/upload_test.sh b/test/upload_test.sh index 40c3aaaeb9..685a6a926c 100755 --- a/test/upload_test.sh +++ b/test/upload_test.sh @@ -9,9 +9,10 @@ cc=cubic client="cargo run --release --bin neqo-client -- http://$server_address:$server_port/ --test upload --upload-size $upload_size --cc $cc" server="cargo run --release --bin neqo-server -- --db ../test-fixture/db $server_address:$server_port" server_pid=0 -pacing=true -if [ "$pacing" = true ]; then - client="$client --pacing" +no_pacing=false +if [ "$no_pacing" = true ]; then + client="$client --no-pacing" + server="$server --no-pacing" fi # Define two indexed arrays to store network conditions From 7028479a4bc0e652f97295c5c57bfafd42c7dd92 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 19 Mar 2024 16:17:50 +1000 Subject: [PATCH 266/321] Revert "Merge branch 'sent-packets-vec' of github.com:martinthomson/neqo" This reverts commit 8e8972c13bd3d511b5d2c7e1de8478fcc54d1478, reversing changes made to 32ef2c3cb2f0f13c6b46222d98c3316a69d7b411. 
--- neqo-transport/src/cc/classic_cc.rs | 60 +-- neqo-transport/src/cc/mod.rs | 2 +- neqo-transport/src/cc/tests/cubic.rs | 2 +- neqo-transport/src/cc/tests/new_reno.rs | 6 +- neqo-transport/src/connection/mod.rs | 14 +- neqo-transport/src/path.rs | 8 +- neqo-transport/src/qlog.rs | 11 +- .../src/{recovery/mod.rs => recovery.rs} | 207 ++++++--- neqo-transport/src/recovery/sent.rs | 413 ------------------ neqo-transport/src/recovery/token.rs | 63 --- neqo-transport/src/sender.rs | 4 +- neqo-transport/src/tracking.rs | 108 +++++ 12 files changed, 302 insertions(+), 596 deletions(-) rename neqo-transport/src/{recovery/mod.rs => recovery.rs} (91%) delete mode 100644 neqo-transport/src/recovery/sent.rs delete mode 100644 neqo-transport/src/recovery/token.rs diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 0e26d39cbd..89be6c4b0f 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -17,9 +17,9 @@ use crate::{ cc::MAX_DATAGRAM_SIZE, packet::PacketNumber, qlog::{self, QlogMetric}, - recovery::SentPacket, rtt::RttEstimate, sender::PACING_BURST_SIZE, + tracking::SentPacket, }; #[rustfmt::skip] // to keep `::` and thus prevent conflict with `crate::qlog` use ::qlog::events::{quic::CongestionStateUpdated, EventData}; @@ -167,8 +167,8 @@ impl CongestionControl for ClassicCongestionControl { qinfo!( "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}", self, - pkt.pn(), - pkt.len(), + pkt.pn, + pkt.size, i32::from(!pkt.cc_outstanding()), i32::from(pkt.lost()), rtt_est, @@ -176,11 +176,11 @@ impl CongestionControl for ClassicCongestionControl { if !pkt.cc_outstanding() { continue; } - if pkt.pn() < self.first_app_limited { + if pkt.pn < self.first_app_limited { is_app_limited = false; } - assert!(self.bytes_in_flight >= pkt.len()); - self.bytes_in_flight -= pkt.len(); + assert!(self.bytes_in_flight >= pkt.size); + self.bytes_in_flight -= pkt.size; if 
!self.after_recovery_start(pkt) { // Do not increase congestion window for packets sent before @@ -193,7 +193,7 @@ impl CongestionControl for ClassicCongestionControl { qlog::metrics_updated(&mut self.qlog, &[QlogMetric::InRecovery(false)]); } - new_acked += pkt.len(); + new_acked += pkt.size; } if is_app_limited { @@ -268,11 +268,11 @@ impl CongestionControl for ClassicCongestionControl { qinfo!( "packet_lost this={:p}, pn={}, ps={}", self, - pkt.pn(), - pkt.len() + pkt.pn, + pkt.size ); - assert!(self.bytes_in_flight >= pkt.len()); - self.bytes_in_flight -= pkt.len(); + assert!(self.bytes_in_flight >= pkt.size); + self.bytes_in_flight -= pkt.size; } qlog::metrics_updated( &mut self.qlog, @@ -298,13 +298,13 @@ impl CongestionControl for ClassicCongestionControl { fn discard(&mut self, pkt: &SentPacket) { if pkt.cc_outstanding() { - assert!(self.bytes_in_flight >= pkt.len()); - self.bytes_in_flight -= pkt.len(); + assert!(self.bytes_in_flight >= pkt.size); + self.bytes_in_flight -= pkt.size; qlog::metrics_updated( &mut self.qlog, &[QlogMetric::BytesInFlight(self.bytes_in_flight)], ); - qtrace!([self], "Ignore pkt with size {}", pkt.len()); + qtrace!([self], "Ignore pkt with size {}", pkt.size); } } @@ -319,7 +319,7 @@ impl CongestionControl for ClassicCongestionControl { fn on_packet_sent(&mut self, pkt: &SentPacket) { // Record the recovery time and exit any transient state. if self.state.transient() { - self.recovery_start = Some(pkt.pn()); + self.recovery_start = Some(pkt.pn); self.state.update(); } @@ -331,15 +331,15 @@ impl CongestionControl for ClassicCongestionControl { // window. Assume that all in-flight packets up to this one are NOT app-limited. // However, subsequent packets might be app-limited. Set `first_app_limited` to the // next packet number. 
- self.first_app_limited = pkt.pn() + 1; + self.first_app_limited = pkt.pn + 1; } - self.bytes_in_flight += pkt.len(); + self.bytes_in_flight += pkt.size; qinfo!( "packet_sent this={:p}, pn={}, ps={}", self, - pkt.pn(), - pkt.len() + pkt.pn, + pkt.size ); qlog::metrics_updated( &mut self.qlog, @@ -438,20 +438,20 @@ impl ClassicCongestionControl { let cutoff = max(first_rtt_sample_time, prev_largest_acked_sent); for p in lost_packets .iter() - .skip_while(|p| Some(p.time_sent()) < cutoff) + .skip_while(|p| Some(p.time_sent) < cutoff) { - if p.pn() != last_pn + 1 { + if p.pn != last_pn + 1 { // Not a contiguous range of lost packets, start over. start = None; } - last_pn = p.pn(); + last_pn = p.pn; if !p.cc_in_flight() { // Not interesting, keep looking. continue; } if let Some(t) = start { let elapsed = p - .time_sent() + .time_sent .checked_duration_since(t) .expect("time is monotonic"); if elapsed > pc_period { @@ -466,7 +466,7 @@ impl ClassicCongestionControl { return true; } } else { - start = Some(p.time_sent()); + start = Some(p.time_sent); } } false @@ -480,7 +480,7 @@ impl ClassicCongestionControl { // state and update the variable `self.recovery_start`. Before the // first recovery, all packets were sent after the recovery event, // allowing to reduce the cwnd on congestion events. - !self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn() >= pn) + !self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn >= pn) } /// Handle a congestion event. 
@@ -551,8 +551,8 @@ mod tests { CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, }, packet::{PacketNumber, PacketType}, - recovery::SentPacket, rtt::RttEstimate, + tracking::SentPacket, }; const PTO: Duration = Duration::from_millis(100); @@ -912,12 +912,12 @@ mod tests { fn persistent_congestion_ack_eliciting() { let mut lost = make_lost(&[1, PERSISTENT_CONG_THRESH + 2]); lost[0] = SentPacket::new( - lost[0].packet_type(), - lost[0].pn(), - lost[0].time_sent(), + lost[0].pt, + lost[0].pn, + lost[0].time_sent, false, Vec::new(), - lost[0].len(), + lost[0].size, ); assert!(!persistent_congestion_by_pto( ClassicCongestionControl::new(NewReno::default()), diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index 965b3e5558..486d15e67e 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -14,7 +14,7 @@ use std::{ use neqo_common::qlog::NeqoQlog; -use crate::{path::PATH_MTU_V6, recovery::SentPacket, rtt::RttEstimate, Error}; +use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; mod classic_cc; mod cubic; diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index 1d933790e1..2e0200fd6d 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -24,8 +24,8 @@ use crate::{ CongestionControl, MAX_DATAGRAM_SIZE, MAX_DATAGRAM_SIZE_F64, }, packet::PacketType, - recovery::SentPacket, rtt::RttEstimate, + tracking::SentPacket, }; const RTT: Duration = Duration::from_millis(100); diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index 863c15c260..4cc20de5a7 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -16,8 +16,8 @@ use crate::{ MAX_DATAGRAM_SIZE, }, packet::PacketType, - recovery::SentPacket, rtt::RttEstimate, + tracking::SentPacket, }; const PTO: Duration = Duration::from_millis(100); @@ -125,14 
+125,14 @@ fn issue_876() { // and ack it. cwnd increases slightly cc.on_packets_acked(&sent_packets[6..], &RTT_ESTIMATE, time_now); - assert_eq!(cc.acked_bytes(), sent_packets[6].len()); + assert_eq!(cc.acked_bytes(), sent_packets[6].size); cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 5 * MAX_DATAGRAM_SIZE - 2); // Packet from before is lost. Should not hurt cwnd. cc.on_packets_lost(Some(time_now), None, PTO, &sent_packets[1..2]); assert!(!cc.recovery_packet()); - assert_eq!(cc.acked_bytes(), sent_packets[6].len()); + assert_eq!(cc.acked_bytes(), sent_packets[6].size); cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 4 * MAX_DATAGRAM_SIZE); } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 03f05aad06..c81a3727c6 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -44,7 +44,7 @@ use crate::{ path::{Path, PathRef, Paths}, qlog, quic_datagrams::{DatagramTracking, QuicDatagrams}, - recovery::{LossRecovery, RecoveryToken, SendProfile, SentPacket}, + recovery::{LossRecovery, RecoveryToken, SendProfile}, recv_stream::RecvStreamStats, rtt::GRANULARITY, send_stream::SendStream, @@ -55,7 +55,7 @@ use crate::{ self, TransportParameter, TransportParameterId, TransportParameters, TransportParametersHandler, }, - tracking::{AckTracker, PacketNumberSpace}, + tracking::{AckTracker, PacketNumberSpace, SentPacket}, version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; @@ -2336,7 +2336,7 @@ impl Connection { packets.len(), mtu ); - initial.add_padding(mtu - packets.len()); + initial.size += mtu - packets.len(); packets.resize(mtu, 0); } self.loss_recovery.on_packet_sent(path, initial); @@ -2855,7 +2855,7 @@ impl Connection { /// to retransmit the frame as needed. 
fn handle_lost_packets(&mut self, lost_packets: &[SentPacket]) { for lost in lost_packets { - for token in lost.tokens() { + for token in &lost.tokens { qdebug!([self], "Lost: {:?}", token); match token { RecoveryToken::Ack(_) => {} @@ -2891,12 +2891,12 @@ impl Connection { fn handle_ack( &mut self, space: PacketNumberSpace, - largest_acknowledged: PacketNumber, + largest_acknowledged: u64, ack_ranges: R, ack_delay: u64, now: Instant, ) where - R: IntoIterator> + Debug, + R: IntoIterator> + Debug, R::IntoIter: ExactSizeIterator, { qinfo!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); @@ -2910,7 +2910,7 @@ impl Connection { now, ); for acked in acked_packets { - for token in acked.tokens() { + for token in &acked.tokens { match token { RecoveryToken::Stream(stream_token) => self.streams.acked(stream_token), RecoveryToken::Ack(at) => self.acks.acked(at), diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 59bb871b3d..4e8d9958ab 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -24,11 +24,11 @@ use crate::{ cid::{ConnectionId, ConnectionIdRef, ConnectionIdStore, RemoteConnectionIdEntry}, frame::{FRAME_TYPE_PATH_CHALLENGE, FRAME_TYPE_PATH_RESPONSE, FRAME_TYPE_RETIRE_CONNECTION_ID}, packet::PacketBuilder, - recovery::{RecoveryToken, SentPacket}, + recovery::RecoveryToken, rtt::RttEstimate, sender::PacketSender, stats::FrameStats, - tracking::PacketNumberSpace, + tracking::{PacketNumberSpace, SentPacket}, Stats, }; @@ -943,12 +943,12 @@ impl Path { qinfo!( [self], "discarding a packet without an RTT estimate; guessing RTT={:?}", - now - sent.time_sent() + now - sent.time_sent ); stats.rtt_init_guess = true; self.rtt.update( &mut self.qlog, - now - sent.time_sent(), + now - sent.time_sent, Duration::new(0, 0), false, now, diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 7732974136..2572966104 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -27,9 +27,9 @@ use 
crate::{ frame::{CloseError, Frame}, packet::{DecryptedPacket, PacketNumber, PacketType, PublicPacket}, path::PathRef, - recovery::SentPacket, stream_id::StreamType as NeqoStreamType, tparams::{self, TransportParametersHandler}, + tracking::SentPacket, version::{Version, VersionConfig, WireVersion}, }; @@ -259,13 +259,8 @@ pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { qlog.add_event_with_stream(|stream| { for pkt in pkts { - let header = PacketHeader::with_type( - to_qlog_pkt_type(pkt.packet_type()), - Some(pkt.pn()), - None, - None, - None, - ); + let header = + PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), Some(pkt.pn), None, None, None); let ev_data = EventData::PacketLost(PacketLost { header: Some(header), diff --git a/neqo-transport/src/recovery/mod.rs b/neqo-transport/src/recovery.rs similarity index 91% rename from neqo-transport/src/recovery/mod.rs rename to neqo-transport/src/recovery.rs index 2aec479b5f..dbea3aaf57 100644 --- a/neqo-transport/src/recovery/mod.rs +++ b/neqo-transport/src/recovery.rs @@ -6,29 +6,30 @@ // Tracking of sent packets and detecting their loss. 
-mod sent; -mod token; - use std::{ cmp::{max, min}, - convert::TryFrom, + collections::BTreeMap, + mem, ops::RangeInclusive, time::{Duration, Instant}, }; use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn}; -pub use sent::SentPacket; -use sent::SentPackets; use smallvec::{smallvec, SmallVec}; -pub use token::{RecoveryToken, StreamRecoveryToken}; use crate::{ + ackrate::AckRate, + cid::ConnectionIdEntry, + crypto::CryptoRecoveryToken, packet::PacketNumber, path::{Path, PathRef}, qlog::{self, QlogMetric}, + quic_datagrams::DatagramTracking, rtt::RttEstimate, + send_stream::SendStreamRecoveryToken, stats::{Stats, StatsCell}, - tracking::{PacketNumberSpace, PacketNumberSpaceSet}, + stream_id::{StreamId, StreamType}, + tracking::{AckToken, PacketNumberSpace, PacketNumberSpaceSet, SentPacket}, }; pub(crate) const PACKET_THRESHOLD: u64 = 3; @@ -47,6 +48,54 @@ pub(crate) const MIN_OUTSTANDING_UNACK: usize = 16; /// The scale we use for the fast PTO feature. pub const FAST_PTO_SCALE: u8 = 100; +#[derive(Debug, Clone)] +#[allow(clippy::module_name_repetitions)] +pub enum StreamRecoveryToken { + Stream(SendStreamRecoveryToken), + ResetStream { + stream_id: StreamId, + }, + StopSending { + stream_id: StreamId, + }, + + MaxData(u64), + DataBlocked(u64), + + MaxStreamData { + stream_id: StreamId, + max_data: u64, + }, + StreamDataBlocked { + stream_id: StreamId, + limit: u64, + }, + + MaxStreams { + stream_type: StreamType, + max_streams: u64, + }, + StreamsBlocked { + stream_type: StreamType, + limit: u64, + }, +} + +#[derive(Debug, Clone)] +#[allow(clippy::module_name_repetitions)] +pub enum RecoveryToken { + Stream(StreamRecoveryToken), + Ack(AckToken), + Crypto(CryptoRecoveryToken), + HandshakeDone, + KeepAlive, // Special PING. + NewToken(usize), + NewConnectionId(ConnectionIdEntry<[u8; 16]>), + RetireConnectionId(u64), + AckFrequency(AckRate), + Datagram(DatagramTracking), +} + /// `SendProfile` tells a sender how to send packets. 
#[derive(Debug)] pub struct SendProfile { @@ -131,8 +180,7 @@ pub(crate) struct LossRecoverySpace { /// This might be less than the number of ACK-eliciting packets, /// because PTO packets don't count. in_flight_outstanding: usize, - /// The packets that we have sent and are tracking. - sent_packets: SentPackets, + sent_packets: BTreeMap, /// The time that the first out-of-order packet was sent. /// This is `None` if there were no out-of-order packets detected. /// When set to `Some(T)`, time-based loss detection should be enabled. @@ -147,7 +195,7 @@ impl LossRecoverySpace { largest_acked_sent_time: None, last_ack_eliciting: None, in_flight_outstanding: 0, - sent_packets: SentPackets::default(), + sent_packets: BTreeMap::default(), first_ooo_time: None, } } @@ -172,9 +220,9 @@ impl LossRecoverySpace { pub fn pto_packets(&mut self, count: usize) -> impl Iterator { self.sent_packets .iter_mut() - .filter_map(|sent| { + .filter_map(|(pn, sent)| { if sent.pto() { - qtrace!("PTO: marking packet {} lost ", sent.pn()); + qtrace!("PTO: marking packet {} lost ", pn); Some(&*sent) } else { None @@ -207,16 +255,16 @@ impl LossRecoverySpace { pub fn on_packet_sent(&mut self, sent_packet: SentPacket) { if sent_packet.ack_eliciting() { - self.last_ack_eliciting = Some(sent_packet.time_sent()); + self.last_ack_eliciting = Some(sent_packet.time_sent); self.in_flight_outstanding += 1; } else if self.space != PacketNumberSpace::ApplicationData && self.last_ack_eliciting.is_none() { // For Initial and Handshake spaces, make sure that we have a PTO baseline // always. See `LossRecoverySpace::pto_base_time()` for details. 
- self.last_ack_eliciting = Some(sent_packet.time_sent()); + self.last_ack_eliciting = Some(sent_packet.time_sent); } - self.sent_packets.track(sent_packet); + self.sent_packets.insert(sent_packet.pn, sent_packet); } /// If we are only sending ACK frames, send a PING frame after 2 PTOs so that @@ -246,31 +294,46 @@ impl LossRecoverySpace { } } - /// Remove all newly acknowledged packets. + /// Remove all acknowledged packets. /// Returns all the acknowledged packets, with the largest packet number first. /// ...and a boolean indicating if any of those packets were ack-eliciting. /// This operates more efficiently because it assumes that the input is sorted /// in the order that an ACK frame is (from the top). fn remove_acked(&mut self, acked_ranges: R, stats: &mut Stats) -> (Vec, bool) where - R: IntoIterator>, + R: IntoIterator>, R::IntoIter: ExactSizeIterator, { - let mut eliciting = false; + let acked_ranges = acked_ranges.into_iter(); + let mut keep = Vec::with_capacity(acked_ranges.len()); + let mut acked = Vec::new(); + let mut eliciting = false; for range in acked_ranges { - self.sent_packets.take_range(range, &mut acked); - } - for p in &acked { - self.remove_packet(p); - eliciting |= p.ack_eliciting(); - if p.lost() { - stats.late_ack += 1; - } - if p.pto_fired() { - stats.pto_ack += 1; + let first_keep = *range.end() + 1; + if let Some((&first, _)) = self.sent_packets.range(range).next() { + let mut tail = self.sent_packets.split_off(&first); + if let Some((&next, _)) = tail.range(first_keep..).next() { + keep.push(tail.split_off(&next)); + } + for (_, p) in tail.into_iter().rev() { + self.remove_packet(&p); + eliciting |= p.ack_eliciting(); + if p.lost() { + stats.late_ack += 1; + } + if p.pto_fired() { + stats.pto_ack += 1; + } + acked.push(p); + } } } + + for mut k in keep.into_iter().rev() { + self.sent_packets.append(&mut k); + } + (acked, eliciting) } @@ -279,12 +342,12 @@ impl LossRecoverySpace { /// and when keys are dropped. 
fn remove_ignored(&mut self) -> impl Iterator { self.in_flight_outstanding = 0; - std::mem::take(&mut self.sent_packets).drain_all() + mem::take(&mut self.sent_packets).into_values() } /// Remove the primary path marking on any packets this is tracking. fn migrate(&mut self) { - for pkt in self.sent_packets.iter_mut() { + for pkt in self.sent_packets.values_mut() { pkt.clear_primary_path(); } } @@ -295,8 +358,23 @@ impl LossRecoverySpace { /// might remove all in-flight packets and stop sending probes. #[allow(clippy::option_if_let_else)] // Hard enough to read as-is. fn remove_old_lost(&mut self, now: Instant, cd: Duration) { - for p in self.sent_packets.remove_expired(now, cd) { - self.remove_packet(&p); + let mut it = self.sent_packets.iter(); + // If the first item is not expired, do nothing. + if it.next().map_or(false, |(_, p)| p.expired(now, cd)) { + // Find the index of the first unexpired packet. + let to_remove = if let Some(first_keep) = + it.find_map(|(i, p)| if p.expired(now, cd) { None } else { Some(*i) }) + { + // Some packets haven't expired, so keep those. + let keep = self.sent_packets.split_off(&first_keep); + mem::replace(&mut self.sent_packets, keep) + } else { + // All packets are expired. + mem::take(&mut self.sent_packets) + }; + for (_, p) in to_remove { + self.remove_packet(&p); + } } } @@ -323,39 +401,44 @@ impl LossRecoverySpace { let largest_acked = self.largest_acked; - for packet in self + // Lost for retrans/CC purposes + let mut lost_pns = SmallVec::<[_; 8]>::new(); + + for (pn, packet) in self .sent_packets .iter_mut() // BTreeMap iterates in order of ascending PN - .take_while(|p| p.pn() < largest_acked.unwrap_or(PacketNumber::MAX)) + .take_while(|(&k, _)| k < largest_acked.unwrap_or(PacketNumber::MAX)) { // Packets sent before now - loss_delay are deemed lost. 
- if packet.time_sent() + loss_delay <= now { + if packet.time_sent + loss_delay <= now { qtrace!( "lost={}, time sent {:?} is before lost_delay {:?}", - packet.pn(), - packet.time_sent(), + pn, + packet.time_sent, loss_delay ); - } else if largest_acked >= Some(packet.pn() + PACKET_THRESHOLD) { + } else if largest_acked >= Some(*pn + PACKET_THRESHOLD) { qtrace!( "lost={}, is >= {} from largest acked {:?}", - packet.pn(), + pn, PACKET_THRESHOLD, largest_acked ); } else { if largest_acked.is_some() { - self.first_ooo_time = Some(packet.time_sent()); + self.first_ooo_time = Some(packet.time_sent); } // No more packets can be declared lost after this one. break; }; if packet.declare_lost(now) { - lost_packets.push(packet.clone()); + lost_pns.push(*pn); } } + + lost_packets.extend(lost_pns.iter().map(|pn| self.sent_packets[pn].clone())); } } @@ -545,8 +628,8 @@ impl LossRecovery { } pub fn on_packet_sent(&mut self, path: &PathRef, mut sent_packet: SentPacket) { - let pn_space = PacketNumberSpace::from(sent_packet.packet_type()); - qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn()); + let pn_space = PacketNumberSpace::from(sent_packet.pt); + qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn); if let Some(space) = self.spaces.get_mut(pn_space) { path.borrow_mut().packet_sent(&mut sent_packet); space.on_packet_sent(sent_packet); @@ -555,7 +638,7 @@ impl LossRecovery { [self], "ignoring {}-{} from dropped space", pn_space, - sent_packet.pn() + sent_packet.pn ); } } @@ -586,13 +669,13 @@ impl LossRecovery { &mut self, primary_path: &PathRef, pn_space: PacketNumberSpace, - largest_acked: PacketNumber, + largest_acked: u64, acked_ranges: R, ack_delay: Duration, now: Instant, ) -> (Vec, Vec) where - R: IntoIterator>, + R: IntoIterator>, R::IntoIter: ExactSizeIterator, { qdebug!( @@ -622,11 +705,11 @@ impl LossRecovery { // If the largest acknowledged is newly acked and any newly acked // packet was ack-eliciting, update the RTT. 
(-recovery 5.1) let largest_acked_pkt = acked_packets.first().expect("must be there"); - space.largest_acked_sent_time = Some(largest_acked_pkt.time_sent()); + space.largest_acked_sent_time = Some(largest_acked_pkt.time_sent); if any_ack_eliciting && largest_acked_pkt.on_primary_path() { self.rtt_sample( primary_path.borrow_mut().rtt_mut(), - largest_acked_pkt.time_sent(), + largest_acked_pkt.time_sent, now, ack_delay, ); @@ -934,7 +1017,6 @@ impl ::std::fmt::Display for LossRecovery { mod tests { use std::{ cell::RefCell, - convert::TryInto, ops::{Deref, DerefMut, RangeInclusive}, rc::Rc, time::{Duration, Instant}, @@ -949,7 +1031,7 @@ mod tests { use crate::{ cc::CongestionControlAlgorithm, cid::{ConnectionId, ConnectionIdEntry}, - packet::{PacketNumber, PacketType}, + packet::PacketType, path::{Path, PathRef}, rtt::RttEstimate, stats::{Stats, StatsCell}, @@ -976,8 +1058,8 @@ mod tests { pub fn on_ack_received( &mut self, pn_space: PacketNumberSpace, - largest_acked: PacketNumber, - acked_ranges: Vec>, + largest_acked: u64, + acked_ranges: Vec>, ack_delay: Duration, now: Instant, ) -> (Vec, Vec) { @@ -1146,8 +1228,8 @@ mod tests { ); } - fn add_sent(lrs: &mut LossRecoverySpace, max_pn: PacketNumber) { - for pn in 0..=max_pn { + fn add_sent(lrs: &mut LossRecoverySpace, packet_numbers: &[u64]) { + for &pn in packet_numbers { lrs.on_packet_sent(SentPacket::new( PacketType::Short, pn, @@ -1159,18 +1241,15 @@ mod tests { } } - fn match_acked(acked: &[SentPacket], expected: &[PacketNumber]) { - assert_eq!( - acked.iter().map(SentPacket::pn).collect::>(), - expected - ); + fn match_acked(acked: &[SentPacket], expected: &[u64]) { + assert!(acked.iter().map(|p| &p.pn).eq(expected)); } #[test] fn remove_acked() { let mut lrs = LossRecoverySpace::new(PacketNumberSpace::ApplicationData); let mut stats = Stats::default(); - add_sent(&mut lrs, 10); + add_sent(&mut lrs, &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); let (acked, _) = lrs.remove_acked(vec![], &mut stats); 
assert!(acked.is_empty()); let (acked, _) = lrs.remove_acked(vec![7..=8, 2..=4], &mut stats); @@ -1178,7 +1257,7 @@ mod tests { let (acked, _) = lrs.remove_acked(vec![8..=11], &mut stats); match_acked(&acked, &[10, 9]); let (acked, _) = lrs.remove_acked(vec![0..=2], &mut stats); - match_acked(&acked, &[1, 0]); + match_acked(&acked, &[1]); let (acked, _) = lrs.remove_acked(vec![5..=6], &mut stats); match_acked(&acked, &[6, 5]); } @@ -1413,7 +1492,7 @@ mod tests { PacketType::Short, ] { let sent_pkt = SentPacket::new(*sp, 1, pn_time(3), true, Vec::new(), ON_SENT_SIZE); - let pn_space = PacketNumberSpace::from(sent_pkt.packet_type()); + let pn_space = PacketNumberSpace::from(sent_pkt.pt); lr.on_packet_sent(sent_pkt); lr.on_ack_received(pn_space, 1, vec![1..=1], Duration::from_secs(0), pn_time(3)); let mut lost = Vec::new(); @@ -1514,7 +1593,7 @@ mod tests { lr.on_packet_sent(SentPacket::new( PacketType::Initial, - 0, + 1, now(), true, Vec::new(), diff --git a/neqo-transport/src/recovery/sent.rs b/neqo-transport/src/recovery/sent.rs deleted file mode 100644 index 33d77b0ff7..0000000000 --- a/neqo-transport/src/recovery/sent.rs +++ /dev/null @@ -1,413 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// A collection for sent packets. - -use std::{ - cmp::min, - collections::VecDeque, - convert::TryFrom, - ops::RangeInclusive, - time::{Duration, Instant}, -}; - -use crate::{ - packet::{PacketNumber, PacketType}, - recovery::RecoveryToken, -}; - -#[derive(Debug, Clone)] -pub struct SentPacket { - pt: PacketType, - pn: PacketNumber, - ack_eliciting: bool, - time_sent: Instant, - primary_path: bool, - tokens: Vec, - - time_declared_lost: Option, - /// After a PTO, this is true when the packet has been released. 
- pto: bool, - - len: usize, -} - -impl SentPacket { - pub fn new( - pt: PacketType, - pn: PacketNumber, - time_sent: Instant, - ack_eliciting: bool, - tokens: Vec, - len: usize, - ) -> Self { - Self { - pt, - pn, - time_sent, - ack_eliciting, - primary_path: true, - tokens, - time_declared_lost: None, - pto: false, - len, - } - } - - /// The type of this packet. - pub fn packet_type(&self) -> PacketType { - self.pt - } - - /// The number of the packet. - pub fn pn(&self) -> PacketNumber { - self.pn - } - - /// The time that this packet was sent. - pub fn time_sent(&self) -> Instant { - self.time_sent - } - - /// Returns `true` if the packet will elicit an ACK. - pub fn ack_eliciting(&self) -> bool { - self.ack_eliciting - } - - /// Returns `true` if the packet was sent on the primary path. - pub fn on_primary_path(&self) -> bool { - self.primary_path - } - - /// The length of the packet that was sent. - pub fn len(&self) -> usize { - self.len - } - - /// Access the recovery tokens that this holds. - pub fn tokens(&self) -> &[RecoveryToken] { - &self.tokens - } - - /// Clears the flag that had this packet on the primary path. - /// Used when migrating to clear out state. - pub fn clear_primary_path(&mut self) { - self.primary_path = false; - } - - /// For Initial packets, it is possible that the packet builder needs to amend the length. - pub fn add_padding(&mut self, padding: usize) { - debug_assert_eq!(self.pt, PacketType::Initial); - self.len += padding; - } - - /// Whether the packet has been declared lost. - pub fn lost(&self) -> bool { - self.time_declared_lost.is_some() - } - - /// Whether accounting for the loss or acknowledgement in the - /// congestion controller is pending. - /// Returns `true` if the packet counts as being "in flight", - /// and has not previously been declared lost. - /// Note that this should count packets that contain only ACK and PADDING, - /// but we don't send PADDING, so we don't track that. 
- pub fn cc_outstanding(&self) -> bool { - self.ack_eliciting() && self.on_primary_path() && !self.lost() - } - - /// Whether the packet should be tracked as in-flight. - pub fn cc_in_flight(&self) -> bool { - self.ack_eliciting() && self.on_primary_path() - } - - /// Declare the packet as lost. Returns `true` if this is the first time. - pub fn declare_lost(&mut self, now: Instant) -> bool { - if self.lost() { - false - } else { - self.time_declared_lost = Some(now); - true - } - } - - /// Ask whether this tracked packet has been declared lost for long enough - /// that it can be expired and no longer tracked. - pub fn expired(&self, now: Instant, expiration_period: Duration) -> bool { - self.time_declared_lost - .map_or(false, |loss_time| (loss_time + expiration_period) <= now) - } - - /// Whether the packet contents were cleared out after a PTO. - pub fn pto_fired(&self) -> bool { - self.pto - } - - /// On PTO, we need to get the recovery tokens so that we can ensure that - /// the frames we sent can be sent again in the PTO packet(s). Do that just once. - pub fn pto(&mut self) -> bool { - if self.pto || self.lost() { - false - } else { - self.pto = true; - true - } - } -} - -/// A collection for packets that we have sent that haven't been acknowledged. -#[derive(Debug, Default)] -pub struct SentPackets { - /// The collection. - packets: VecDeque>, - /// The packet number of the first item in the collection. - offset: PacketNumber, - /// The number of `Some` values in the packet. This is cached to keep things squeaky-fast. 
- len: usize, -} - -impl SentPackets { - pub fn len(&self) -> usize { - self.len - } - - pub fn track(&mut self, packet: SentPacket) { - if self.offset + PacketNumber::try_from(self.packets.len()).unwrap() != packet.pn { - assert_eq!( - self.len, 0, - "packet number skipping only supported for the first packet in a space" - ); - self.offset = packet.pn; - } - self.len += 1; - self.packets.push_back(Some(packet)); - } - - pub fn iter_mut(&mut self) -> impl Iterator { - self.packets.iter_mut().flatten() - } - - /// Take values from a specified range of packet numbers. - /// Note that this will not remove values unless the iterator is consumed. - /// The values returned will be reversed, so that the most recent packet appears first. - /// This is because ACK frames arrive with ranges starting from the largest acknowledged - /// and we want to match that. - pub fn take_range(&mut self, r: RangeInclusive, store: &mut Vec) { - let start = usize::try_from((*r.start()).saturating_sub(self.offset)).unwrap(); - let end = min( - usize::try_from((*r.end() + 1).saturating_sub(self.offset)).unwrap(), - self.packets.len(), - ); - - let before = store.len(); - if self.packets.range(..start).all(Option::is_none) { - // If there are extra empty slots, split those off too. - let extra = self - .packets - .range(end..) - .take_while(|&p| p.is_none()) - .count(); - self.offset += u64::try_from(end + extra).unwrap(); - let mut other = self.packets.split_off(end + extra); - std::mem::swap(&mut self.packets, &mut other); - store.extend( - other - .into_iter() - .rev() - .skip(extra) - .take(end - start) - .flatten(), - ); - } else { - store.extend( - self.packets - .range_mut(start..end) - .rev() - .filter_map(Option::take), - ); - } - self.len -= store.len() - before; - } - - /// Empty out the packets, but keep the offset. 
- pub fn drain_all(&mut self) -> impl Iterator { - self.len = 0; - self.offset += u64::try_from(self.packets.len()).unwrap(); - std::mem::take(&mut self.packets).into_iter().flatten() - } - - /// See `LossRecoverySpace::remove_old_lost` for details on `now` and `cd`. - pub fn remove_expired( - &mut self, - now: Instant, - cd: Duration, - ) -> impl Iterator { - let mut count = 0; - // Find the first unexpired packet and only keep from that one onwards. - for (i, p) in self.packets.iter().enumerate() { - if p.as_ref().map_or(false, |p| !p.expired(now, cd)) { - let mut other = self.packets.split_off(i); - self.len -= count; - self.offset += u64::try_from(i).unwrap(); - std::mem::swap(&mut self.packets, &mut other); - return other.into_iter().flatten(); - } - // Count `Some` values that we are removing. - count += usize::from(p.is_some()); - } - - self.len = 0; - self.offset += u64::try_from(self.packets.len()).unwrap(); - std::mem::take(&mut self.packets).into_iter().flatten() - } -} - -#[cfg(test)] -mod tests { - use std::{ - cell::OnceCell, - convert::TryFrom, - time::{Duration, Instant}, - }; - - use super::{SentPacket, SentPackets}; - use crate::packet::{PacketNumber, PacketType}; - - const PACKET_GAP: Duration = Duration::from_secs(1); - fn start_time() -> Instant { - thread_local!(static STARTING_TIME: OnceCell = OnceCell::new()); - STARTING_TIME.with(|t| *t.get_or_init(Instant::now)) - } - - fn pkt(n: u32) -> SentPacket { - SentPacket::new( - PacketType::Short, - PacketNumber::from(n), - start_time() + (PACKET_GAP * n), - true, - Vec::new(), - 100, - ) - } - - fn pkts() -> SentPackets { - let mut pkts = SentPackets::default(); - pkts.track(pkt(0)); - pkts.track(pkt(1)); - pkts.track(pkt(2)); - assert_eq!(pkts.len(), 3); - pkts - } - - trait HasPacketNumber { - fn pn(&self) -> PacketNumber; - } - impl HasPacketNumber for SentPacket { - fn pn(&self) -> PacketNumber { - self.pn - } - } - impl HasPacketNumber for &'_ SentPacket { - fn pn(&self) -> PacketNumber { - 
self.pn - } - } - impl HasPacketNumber for &'_ mut SentPacket { - fn pn(&self) -> PacketNumber { - self.pn - } - } - - fn remove_one(pkts: &mut SentPackets, idx: PacketNumber) { - assert_eq!(pkts.len(), 3); - let mut store = Vec::new(); - pkts.take_range(idx..=idx, &mut store); - let mut it = store.into_iter(); - assert_eq!(idx, it.next().unwrap().pn()); - assert!(it.next().is_none()); - std::mem::drop(it); - assert_eq!(pkts.len(), 2); - } - - fn assert_zero_and_two<'a, 'b: 'a>( - mut it: impl Iterator + 'a, - ) { - assert_eq!(it.next().unwrap().pn(), 0); - assert_eq!(it.next().unwrap().pn(), 2); - assert!(it.next().is_none()); - } - - #[test] - fn iterate_skipped() { - let mut pkts = pkts(); - for (i, p) in pkts.packets.iter().enumerate() { - assert_eq!(i, usize::try_from(p.as_ref().unwrap().pn).unwrap()); - } - remove_one(&mut pkts, 1); - - // Validate the merged result multiple ways. - assert_zero_and_two(pkts.iter_mut()); - - { - // Reverse the expectations here as this iterator reverses its output. - let mut store = Vec::new(); - pkts.take_range(0..=2, &mut store); - let mut it = store.into_iter(); - assert_eq!(it.next().unwrap().pn(), 2); - assert_eq!(it.next().unwrap().pn(), 0); - assert!(it.next().is_none()); - }; - - // The None values are still there in this case, so offset is 0. - assert_eq!(pkts.offset, 3); - assert_eq!(pkts.packets.len(), 0); - assert_eq!(pkts.len(), 0); - } - - #[test] - fn drain() { - let mut pkts = pkts(); - remove_one(&mut pkts, 1); - - assert_zero_and_two(pkts.drain_all()); - assert_eq!(pkts.offset, 3); - assert_eq!(pkts.len(), 0); - } - - #[test] - fn remove_expired() { - let mut pkts = pkts(); - remove_one(&mut pkts, 0); - - for p in pkts.iter_mut() { - p.declare_lost(p.time_sent); // just to keep things simple. - } - - // Expire up to pkt(1). 
- let mut it = pkts.remove_expired(start_time() + PACKET_GAP, Duration::new(0, 0)); - assert_eq!(it.next().unwrap().pn(), 1); - assert!(it.next().is_none()); - std::mem::drop(it); - - assert_eq!(pkts.offset, 2); - assert_eq!(pkts.len(), 1); - } - - #[test] - #[should_panic(expected = "packet number skipping only supported for the first packet")] - fn skipped_not_ok() { - let mut pkts = pkts(); - pkts.track(pkt(4)); - } - - #[test] - fn first_skipped_ok() { - let mut pkts = SentPackets::default(); - pkts.track(pkt(4)); // This is fine. - assert_eq!(pkts.offset, 4); - assert_eq!(pkts.len(), 1); - } -} diff --git a/neqo-transport/src/recovery/token.rs b/neqo-transport/src/recovery/token.rs deleted file mode 100644 index 93f84268cd..0000000000 --- a/neqo-transport/src/recovery/token.rs +++ /dev/null @@ -1,63 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use crate::{ - ackrate::AckRate, - cid::ConnectionIdEntry, - crypto::CryptoRecoveryToken, - quic_datagrams::DatagramTracking, - send_stream::SendStreamRecoveryToken, - stream_id::{StreamId, StreamType}, - tracking::AckToken, -}; - -#[derive(Debug, Clone)] -#[allow(clippy::module_name_repetitions)] -pub enum StreamRecoveryToken { - Stream(SendStreamRecoveryToken), - ResetStream { - stream_id: StreamId, - }, - StopSending { - stream_id: StreamId, - }, - - MaxData(u64), - DataBlocked(u64), - - MaxStreamData { - stream_id: StreamId, - max_data: u64, - }, - StreamDataBlocked { - stream_id: StreamId, - limit: u64, - }, - - MaxStreams { - stream_type: StreamType, - max_streams: u64, - }, - StreamsBlocked { - stream_type: StreamType, - limit: u64, - }, -} - -#[derive(Debug, Clone)] -#[allow(clippy::module_name_repetitions)] -pub enum RecoveryToken { - Stream(StreamRecoveryToken), - Ack(AckToken), - Crypto(CryptoRecoveryToken), - HandshakeDone, - KeepAlive, // Special PING. 
- NewToken(usize), - NewConnectionId(ConnectionIdEntry<[u8; 16]>), - RetireConnectionId(u64), - AckFrequency(AckRate), - Datagram(DatagramTracking), -} diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 0d3da70faa..3a54851533 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -18,8 +18,8 @@ use neqo_common::qlog::NeqoQlog; use crate::{ cc::{ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno}, pace::Pacer, - recovery::SentPacket, rtt::RttEstimate, + tracking::SentPacket, }; /// The number of packets we allow to burst from the pacer. @@ -109,7 +109,7 @@ impl PacketSender { pub fn on_packet_sent(&mut self, pkt: &SentPacket, rtt: Duration) { self.pacer - .spend(pkt.time_sent(), rtt, self.cc.cwnd(), pkt.len()); + .spend(pkt.time_sent, rtt, self.cc.cwnd(), pkt.size); self.cc.on_packet_sent(pkt); } diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 44a0bef4c8..bdd0f250c7 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -130,6 +130,114 @@ impl std::fmt::Debug for PacketNumberSpaceSet { } } +#[derive(Debug, Clone)] +pub struct SentPacket { + pub pt: PacketType, + pub pn: PacketNumber, + ack_eliciting: bool, + pub time_sent: Instant, + primary_path: bool, + pub tokens: Vec, + + time_declared_lost: Option, + /// After a PTO, this is true when the packet has been released. + pto: bool, + + pub size: usize, +} + +impl SentPacket { + pub fn new( + pt: PacketType, + pn: PacketNumber, + time_sent: Instant, + ack_eliciting: bool, + tokens: Vec, + size: usize, + ) -> Self { + Self { + pt, + pn, + time_sent, + ack_eliciting, + primary_path: true, + tokens, + time_declared_lost: None, + pto: false, + size, + } + } + + /// Returns `true` if the packet will elicit an ACK. + pub fn ack_eliciting(&self) -> bool { + self.ack_eliciting + } + + /// Returns `true` if the packet was sent on the primary path. 
+ pub fn on_primary_path(&self) -> bool { + self.primary_path + } + + /// Clears the flag that had this packet on the primary path. + /// Used when migrating to clear out state. + pub fn clear_primary_path(&mut self) { + self.primary_path = false; + } + + /// Whether the packet has been declared lost. + pub fn lost(&self) -> bool { + self.time_declared_lost.is_some() + } + + /// Whether accounting for the loss or acknowledgement in the + /// congestion controller is pending. + /// Returns `true` if the packet counts as being "in flight", + /// and has not previously been declared lost. + /// Note that this should count packets that contain only ACK and PADDING, + /// but we don't send PADDING, so we don't track that. + pub fn cc_outstanding(&self) -> bool { + self.ack_eliciting() && self.on_primary_path() && !self.lost() + } + + /// Whether the packet should be tracked as in-flight. + pub fn cc_in_flight(&self) -> bool { + self.ack_eliciting() && self.on_primary_path() + } + + /// Declare the packet as lost. Returns `true` if this is the first time. + pub fn declare_lost(&mut self, now: Instant) -> bool { + if self.lost() { + false + } else { + self.time_declared_lost = Some(now); + true + } + } + + /// Ask whether this tracked packet has been declared lost for long enough + /// that it can be expired and no longer tracked. + pub fn expired(&self, now: Instant, expiration_period: Duration) -> bool { + self.time_declared_lost + .map_or(false, |loss_time| (loss_time + expiration_period) <= now) + } + + /// Whether the packet contents were cleared out after a PTO. + pub fn pto_fired(&self) -> bool { + self.pto + } + + /// On PTO, we need to get the recovery tokens so that we can ensure that + /// the frames we sent can be sent again in the PTO packet(s). Do that just once. 
+ pub fn pto(&mut self) -> bool { + if self.pto || self.lost() { + false + } else { + self.pto = true; + true + } + } +} + impl std::fmt::Display for PacketNumberSpace { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.write_str(match self { From e2f9369296488c6d60c778e19aa77aca6df4fbf1 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 21 Mar 2024 22:53:23 +0100 Subject: [PATCH 267/321] ci(check.yml): add sleep i.e. client wait for server (#1765) Windows CI oftentimes fails with: ``` 0s 0ms INFO H3 Client connecting: [::]:65093 -> [::1]:4433 Error: IoError(Os { code: 10054, kind: ConnectionReset, message: "An existing connection was forcibly closed by the remote host." }) 0s 0ms INFO Server waiting for connection on: [::1]:4433 0s 0ms INFO Server waiting for connection on: 127.0.0.1:4433 ``` https://github.com/mozilla/neqo/actions/runs/8374577016/job/22930136500?pr=1692 This suggests that the client connects to the server before the server is ready to accept connections. This commit adds a sleep, thus giving the server time to start up. Tracked in https://github.com/mozilla/neqo/issues/1759. Sleep was previously introduced in https://github.com/mozilla/neqo/pull/1713 but later removed in https://github.com/mozilla/neqo/pull/1717 --- .github/workflows/check.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 10085ffda6..a89e4859d3 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -122,6 +122,8 @@ jobs: cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server "target/$BUILD_DIR/neqo-server" "$HOST:4433" & PID=$! + # Give the server time to start. + sleep 1 "target/$BUILD_DIR/neqo-client" --output-dir . 
"https://$HOST:4433/$SIZE" kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 From 56f53bb6b3fde59cf4e7d0daa54c8c206d11d3d6 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 21 Mar 2024 23:53:51 +0200 Subject: [PATCH 268/321] feat: Extend `Frame::Padding` with length (#1762) * feat: Extend `Frame::Padding` with a `length` field This is preparation of qlog supporting the logging of runs of padding frames via `payload_length`, instead of each one individually. * Use `dv` more * Fix test * Address code review * Add TODO --- neqo-transport/src/connection/dump.rs | 3 +- neqo-transport/src/connection/mod.rs | 25 +- .../src/connection/tests/handshake.rs | 2 +- neqo-transport/src/frame.rs | 60 +-- neqo-transport/src/packet/mod.rs | 4 +- neqo-transport/src/qlog.rs | 347 +++++++++--------- 6 files changed, 219 insertions(+), 222 deletions(-) diff --git a/neqo-transport/src/connection/dump.rs b/neqo-transport/src/connection/dump.rs index 8a4f34dbb8..34ac58f55e 100644 --- a/neqo-transport/src/connection/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -38,7 +38,8 @@ pub fn dump_packet( s.push_str(" [broken]..."); break; }; - if let Some(x) = f.dump() { + let x = f.dump(); + if !x.is_empty() { write!(&mut s, "\n {} {}", dir, &x).unwrap(); } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index c81a3727c6..8d1c106358 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -461,7 +461,7 @@ impl Connection { } /// # Errors - /// When the operation fails. + /// When the operation fails. 
pub fn client_enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { self.crypto.client_enable_ech(ech_config_list) } @@ -1560,24 +1560,8 @@ impl Connection { let mut ack_eliciting = false; let mut probing = true; let mut d = Decoder::from(&packet[..]); - let mut consecutive_padding = 0; while d.remaining() > 0 { - let mut f = Frame::decode(&mut d)?; - - // Skip padding - while f == Frame::Padding && d.remaining() > 0 { - consecutive_padding += 1; - f = Frame::decode(&mut d)?; - } - if consecutive_padding > 0 { - qdebug!( - [self], - "PADDING frame repeated {} times", - consecutive_padding - ); - consecutive_padding = 0; - } - + let f = Frame::decode(&mut d)?; ack_eliciting |= f.ack_eliciting(); probing &= f.path_probing(); let t = f.get_type(); @@ -2694,9 +2678,8 @@ impl Connection { .input_frame(&frame, &mut self.stats.borrow_mut().frame_rx); } match frame { - Frame::Padding => { - // Note: This counts contiguous padding as a single frame. - self.stats.borrow_mut().frame_rx.padding += 1; + Frame::Padding(length) => { + self.stats.borrow_mut().frame_rx.padding += usize::from(length); } Frame::Ping => { // If we get a PING and there are outstanding CRYPTO frames, diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index af0352ce90..cfb6d99166 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -458,7 +458,7 @@ fn coalesce_05rtt() { assert_eq!(client.stats().dropped_rx, 0); // No Initial padding. assert_eq!(client.stats().packets_rx, 4); assert_eq!(client.stats().saved_datagrams, 1); - assert_eq!(client.stats().frame_rx.padding, 1); // Padding uses frames. + assert!(client.stats().frame_rx.padding > 0); // Padding uses frames. // Allow the handshake to complete. 
now += RTT / 2; diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index b3bb024a2c..5a86a07108 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -20,7 +20,7 @@ use crate::{ #[allow(clippy::module_name_repetitions)] pub type FrameType = u64; -const FRAME_TYPE_PADDING: FrameType = 0x0; +pub const FRAME_TYPE_PADDING: FrameType = 0x0; pub const FRAME_TYPE_PING: FrameType = 0x1; pub const FRAME_TYPE_ACK: FrameType = 0x2; const FRAME_TYPE_ACK_ECN: FrameType = 0x3; @@ -103,7 +103,7 @@ pub struct AckRange { #[derive(PartialEq, Eq, Debug, Clone)] pub enum Frame<'a> { - Padding, + Padding(u16), Ping, Ack { largest_acknowledged: u64, @@ -215,7 +215,7 @@ impl<'a> Frame<'a> { pub fn get_type(&self) -> FrameType { match self { - Self::Padding => FRAME_TYPE_PADDING, + Self::Padding { .. } => FRAME_TYPE_PADDING, Self::Ping => FRAME_TYPE_PING, Self::Ack { .. } => FRAME_TYPE_ACK, // We don't do ACK ECN. Self::ResetStream { .. } => FRAME_TYPE_RESET_STREAM, @@ -288,7 +288,7 @@ impl<'a> Frame<'a> { pub fn ack_eliciting(&self) -> bool { !matches!( self, - Self::Ack { .. } | Self::Padding | Self::ConnectionClose { .. } + Self::Ack { .. } | Self::Padding { .. } | Self::ConnectionClose { .. } ) } @@ -297,7 +297,7 @@ impl<'a> Frame<'a> { pub fn path_probing(&self) -> bool { matches!( self, - Self::Padding + Self::Padding { .. } | Self::NewConnectionId { .. } | Self::PathChallenge { .. } | Self::PathResponse { .. 
} @@ -347,36 +347,34 @@ impl<'a> Frame<'a> { Ok(acked_ranges) } - pub fn dump(&self) -> Option { + pub fn dump(&self) -> String { match self { - Self::Crypto { offset, data } => Some(format!( - "Crypto {{ offset: {}, len: {} }}", - offset, - data.len() - )), + Self::Crypto { offset, data } => { + format!("Crypto {{ offset: {}, len: {} }}", offset, data.len()) + } Self::Stream { stream_id, offset, fill, data, fin, - } => Some(format!( + } => format!( "Stream {{ stream_id: {}, offset: {}, len: {}{}, fin: {} }}", stream_id.as_u64(), offset, if *fill { ">>" } else { "" }, data.len(), fin, - )), - Self::Padding => None, - Self::Datagram { data, .. } => Some(format!("Datagram {{ len: {} }}", data.len())), - _ => Some(format!("{self:?}")), + ), + Self::Padding(length) => format!("Padding {{ len: {length} }}"), + Self::Datagram { data, .. } => format!("Datagram {{ len: {} }}", data.len()), + _ => format!("{self:?}"), } } pub fn is_allowed(&self, pt: PacketType) -> bool { match self { - Self::Padding | Self::Ping => true, + Self::Padding { .. } | Self::Ping => true, Self::Crypto { .. } | Self::Ack { .. 
} | Self::ConnectionClose { @@ -409,13 +407,23 @@ impl<'a> Frame<'a> { } // TODO(ekr@rtfm.com): check for minimal encoding - let t = d(dec.decode_varint())?; + let t = dv(dec)?; match t { - FRAME_TYPE_PADDING => Ok(Self::Padding), + FRAME_TYPE_PADDING => { + let mut length: u16 = 1; + while let Some(b) = dec.peek_byte() { + if u64::from(b) != FRAME_TYPE_PADDING { + break; + } + length += 1; + dec.skip(1); + } + Ok(Self::Padding(length)) + } FRAME_TYPE_PING => Ok(Self::Ping), FRAME_TYPE_RESET_STREAM => Ok(Self::ResetStream { stream_id: StreamId::from(dv(dec)?), - application_error_code: d(dec.decode_varint())?, + application_error_code: dv(dec)?, final_size: match dec.decode_varint() { Some(v) => v, _ => return Err(Error::NoMoreData), @@ -457,7 +465,7 @@ impl<'a> Frame<'a> { } FRAME_TYPE_STOP_SENDING => Ok(Self::StopSending { stream_id: StreamId::from(dv(dec)?), - application_error_code: d(dec.decode_varint())?, + application_error_code: dv(dec)?, }), FRAME_TYPE_CRYPTO => { let offset = dv(dec)?; @@ -563,7 +571,7 @@ impl<'a> Frame<'a> { Ok(Self::PathResponse { data: datav }) } FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT | FRAME_TYPE_CONNECTION_CLOSE_APPLICATION => { - let error_code = CloseError::from_type_bit(t, d(dec.decode_varint())?); + let error_code = CloseError::from_type_bit(t, dv(dec)?); let frame_type = if t == FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT { dv(dec)? 
} else { @@ -631,8 +639,10 @@ mod tests { #[test] fn padding() { - let f = Frame::Padding; + let f = Frame::Padding(1); just_dec(&f, "00"); + let f = Frame::Padding(2); + just_dec(&f, "0000"); } #[test] @@ -888,8 +898,8 @@ mod tests { #[test] fn test_compare() { - let f1 = Frame::Padding; - let f2 = Frame::Padding; + let f1 = Frame::Padding(1); + let f2 = Frame::Padding(1); let f3 = Frame::Crypto { offset: 0, data: &[1, 2, 3], diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 8458f69779..d11b3423a4 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -18,6 +18,7 @@ use neqo_crypto::random; use crate::{ cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, crypto::{CryptoDxState, CryptoSpace, CryptoStates}, + frame::FRAME_TYPE_PADDING, version::{Version, WireVersion}, Error, Res, }; @@ -257,7 +258,8 @@ impl PacketBuilder { /// Returns true if padding was added. pub fn pad(&mut self) -> bool { if self.padding && !self.is_long() { - self.encoder.pad_to(self.limit, 0); + self.encoder + .pad_to(self.limit, FRAME_TYPE_PADDING.try_into().unwrap()); true } else { false diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 2572966104..a8ad986d2a 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -195,7 +195,7 @@ pub fn packet_sent( ) { qlog.add_event_with_stream(|stream| { let mut d = Decoder::from(body); - let header = PacketHeader::with_type(to_qlog_pkt_type(pt), Some(pn), None, None, None); + let header = PacketHeader::with_type(pt.into(), Some(pn), None, None, None); let raw = RawInfo { length: Some(plen as u64), payload_length: None, @@ -205,7 +205,7 @@ pub fn packet_sent( let mut frames = SmallVec::new(); while d.remaining() > 0 { if let Ok(f) = Frame::decode(&mut d) { - frames.push(frame_to_qlogframe(&f)); + frames.push(QuicFrame::from(&f)); } else { qinfo!("qlog: invalid frame"); break; @@ -231,13 +231,8 @@ pub fn 
packet_sent( pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { qlog.add_event_data(|| { - let header = PacketHeader::with_type( - to_qlog_pkt_type(public_packet.packet_type()), - None, - None, - None, - None, - ); + let header = + PacketHeader::with_type(public_packet.packet_type().into(), None, None, None, None); let raw = RawInfo { length: Some(public_packet.len() as u64), payload_length: None, @@ -259,8 +254,7 @@ pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { qlog.add_event_with_stream(|stream| { for pkt in pkts { - let header = - PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), Some(pkt.pn), None, None, None); + let header = PacketHeader::with_type(pkt.pt.into(), Some(pkt.pn), None, None, None); let ev_data = EventData::PacketLost(PacketLost { header: Some(header), @@ -283,7 +277,7 @@ pub fn packet_received( let mut d = Decoder::from(&payload[..]); let header = PacketHeader::with_type( - to_qlog_pkt_type(public_packet.packet_type()), + public_packet.packet_type().into(), Some(payload.pn()), None, None, @@ -299,7 +293,7 @@ pub fn packet_received( while d.remaining() > 0 { if let Ok(f) = Frame::decode(&mut d) { - frames.push(frame_to_qlogframe(&f)); + frames.push(QuicFrame::from(&f)); } else { qinfo!("qlog: invalid frame"); break; @@ -393,173 +387,180 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match. #[allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] // No choice here. 
-fn frame_to_qlogframe(frame: &Frame) -> QuicFrame { - match frame { - Frame::Padding => QuicFrame::Padding, - Frame::Ping => QuicFrame::Ping, - Frame::Ack { - largest_acknowledged, - ack_delay, - first_ack_range, - ack_ranges, - } => { - let ranges = - Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges).ok(); - - let acked_ranges = ranges.map(|all| { - AckedRanges::Double( - all.into_iter() - .map(RangeInclusive::into_inner) - .collect::>(), - ) - }); - - QuicFrame::Ack { - ack_delay: Some(*ack_delay as f32 / 1000.0), - acked_ranges, - ect1: None, - ect0: None, - ce: None, +impl From<&Frame<'_>> for QuicFrame { + fn from(frame: &Frame) -> Self { + match frame { + // TODO: Add payload length to `QuicFrame::Padding` once + // https://github.com/cloudflare/quiche/pull/1745 is available via the qlog crate. + Frame::Padding { .. } => QuicFrame::Padding, + Frame::Ping => QuicFrame::Ping, + Frame::Ack { + largest_acknowledged, + ack_delay, + first_ack_range, + ack_ranges, + } => { + let ranges = + Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges) + .ok(); + + let acked_ranges = ranges.map(|all| { + AckedRanges::Double( + all.into_iter() + .map(RangeInclusive::into_inner) + .collect::>(), + ) + }); + + QuicFrame::Ack { + ack_delay: Some(*ack_delay as f32 / 1000.0), + acked_ranges, + ect1: None, + ect0: None, + ce: None, + } } - } - Frame::ResetStream { - stream_id, - application_error_code, - final_size, - } => QuicFrame::ResetStream { - stream_id: stream_id.as_u64(), - error_code: *application_error_code, - final_size: *final_size, - }, - Frame::StopSending { - stream_id, - application_error_code, - } => QuicFrame::StopSending { - stream_id: stream_id.as_u64(), - error_code: *application_error_code, - }, - Frame::Crypto { offset, data } => QuicFrame::Crypto { - offset: *offset, - length: data.len() as u64, - }, - Frame::NewToken { token } => QuicFrame::NewToken { - token: qlog::Token { - ty: 
Some(qlog::TokenType::Retry), - details: None, - raw: Some(RawInfo { - data: Some(hex(token)), - length: Some(token.len() as u64), - payload_length: None, - }), + Frame::ResetStream { + stream_id, + application_error_code, + final_size, + } => QuicFrame::ResetStream { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, + final_size: *final_size, + }, + Frame::StopSending { + stream_id, + application_error_code, + } => QuicFrame::StopSending { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, + }, + Frame::Crypto { offset, data } => QuicFrame::Crypto { + offset: *offset, + length: data.len() as u64, + }, + Frame::NewToken { token } => QuicFrame::NewToken { + token: qlog::Token { + ty: Some(qlog::TokenType::Retry), + details: None, + raw: Some(RawInfo { + data: Some(hex(token)), + length: Some(token.len() as u64), + payload_length: None, + }), + }, }, - }, - Frame::Stream { - fin, - stream_id, - offset, - data, - .. - } => QuicFrame::Stream { - stream_id: stream_id.as_u64(), - offset: *offset, - length: data.len() as u64, - fin: Some(*fin), - raw: None, - }, - Frame::MaxData { maximum_data } => QuicFrame::MaxData { - maximum: *maximum_data, - }, - Frame::MaxStreamData { - stream_id, - maximum_stream_data, - } => QuicFrame::MaxStreamData { - stream_id: stream_id.as_u64(), - maximum: *maximum_stream_data, - }, - Frame::MaxStreams { - stream_type, - maximum_streams, - } => QuicFrame::MaxStreams { - stream_type: match stream_type { - NeqoStreamType::BiDi => StreamType::Bidirectional, - NeqoStreamType::UniDi => StreamType::Unidirectional, + Frame::Stream { + fin, + stream_id, + offset, + data, + .. 
+ } => QuicFrame::Stream { + stream_id: stream_id.as_u64(), + offset: *offset, + length: data.len() as u64, + fin: Some(*fin), + raw: None, }, - maximum: *maximum_streams, - }, - Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit }, - Frame::StreamDataBlocked { - stream_id, - stream_data_limit, - } => QuicFrame::StreamDataBlocked { - stream_id: stream_id.as_u64(), - limit: *stream_data_limit, - }, - Frame::StreamsBlocked { - stream_type, - stream_limit, - } => QuicFrame::StreamsBlocked { - stream_type: match stream_type { - NeqoStreamType::BiDi => StreamType::Bidirectional, - NeqoStreamType::UniDi => StreamType::Unidirectional, + Frame::MaxData { maximum_data } => QuicFrame::MaxData { + maximum: *maximum_data, }, - limit: *stream_limit, - }, - Frame::NewConnectionId { - sequence_number, - retire_prior, - connection_id, - stateless_reset_token, - } => QuicFrame::NewConnectionId { - sequence_number: *sequence_number as u32, - retire_prior_to: *retire_prior as u32, - connection_id_length: Some(connection_id.len() as u8), - connection_id: hex(connection_id), - stateless_reset_token: Some(hex(stateless_reset_token)), - }, - Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId { - sequence_number: *sequence_number as u32, - }, - Frame::PathChallenge { data } => QuicFrame::PathChallenge { - data: Some(hex(data)), - }, - Frame::PathResponse { data } => QuicFrame::PathResponse { - data: Some(hex(data)), - }, - Frame::ConnectionClose { - error_code, - frame_type, - reason_phrase, - } => QuicFrame::ConnectionClose { - error_space: match error_code { - CloseError::Transport(_) => Some(ErrorSpace::TransportError), - CloseError::Application(_) => Some(ErrorSpace::ApplicationError), + Frame::MaxStreamData { + stream_id, + maximum_stream_data, + } => QuicFrame::MaxStreamData { + stream_id: stream_id.as_u64(), + maximum: *maximum_stream_data, }, - error_code: Some(error_code.code()), - error_code_value: Some(0), - reason: 
Some(String::from_utf8_lossy(reason_phrase).to_string()), - trigger_frame_type: Some(*frame_type), - }, - Frame::HandshakeDone => QuicFrame::HandshakeDone, - Frame::AckFrequency { .. } => QuicFrame::Unknown { - frame_type_value: None, - raw_frame_type: frame.get_type(), - raw: None, - }, - Frame::Datagram { data, .. } => QuicFrame::Datagram { - length: data.len() as u64, - raw: None, - }, + Frame::MaxStreams { + stream_type, + maximum_streams, + } => QuicFrame::MaxStreams { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, + }, + maximum: *maximum_streams, + }, + Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit }, + Frame::StreamDataBlocked { + stream_id, + stream_data_limit, + } => QuicFrame::StreamDataBlocked { + stream_id: stream_id.as_u64(), + limit: *stream_data_limit, + }, + Frame::StreamsBlocked { + stream_type, + stream_limit, + } => QuicFrame::StreamsBlocked { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, + }, + limit: *stream_limit, + }, + Frame::NewConnectionId { + sequence_number, + retire_prior, + connection_id, + stateless_reset_token, + } => QuicFrame::NewConnectionId { + sequence_number: *sequence_number as u32, + retire_prior_to: *retire_prior as u32, + connection_id_length: Some(connection_id.len() as u8), + connection_id: hex(connection_id), + stateless_reset_token: Some(hex(stateless_reset_token)), + }, + Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId { + sequence_number: *sequence_number as u32, + }, + Frame::PathChallenge { data } => QuicFrame::PathChallenge { + data: Some(hex(data)), + }, + Frame::PathResponse { data } => QuicFrame::PathResponse { + data: Some(hex(data)), + }, + Frame::ConnectionClose { + error_code, + frame_type, + reason_phrase, + } => QuicFrame::ConnectionClose { + 
error_space: match error_code { + CloseError::Transport(_) => Some(ErrorSpace::TransportError), + CloseError::Application(_) => Some(ErrorSpace::ApplicationError), + }, + error_code: Some(error_code.code()), + error_code_value: Some(0), + reason: Some(String::from_utf8_lossy(reason_phrase).to_string()), + trigger_frame_type: Some(*frame_type), + }, + Frame::HandshakeDone => QuicFrame::HandshakeDone, + Frame::AckFrequency { .. } => QuicFrame::Unknown { + frame_type_value: None, + raw_frame_type: frame.get_type(), + raw: None, + }, + Frame::Datagram { data, .. } => QuicFrame::Datagram { + length: data.len() as u64, + raw: None, + }, + } } } -fn to_qlog_pkt_type(ptype: PacketType) -> qlog::events::quic::PacketType { - match ptype { - PacketType::Initial => qlog::events::quic::PacketType::Initial, - PacketType::Handshake => qlog::events::quic::PacketType::Handshake, - PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt, - PacketType::Short => qlog::events::quic::PacketType::OneRtt, - PacketType::Retry => qlog::events::quic::PacketType::Retry, - PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation, - PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown, +impl From for qlog::events::quic::PacketType { + fn from(value: PacketType) -> Self { + match value { + PacketType::Initial => qlog::events::quic::PacketType::Initial, + PacketType::Handshake => qlog::events::quic::PacketType::Handshake, + PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt, + PacketType::Short => qlog::events::quic::PacketType::OneRtt, + PacketType::Retry => qlog::events::quic::PacketType::Retry, + PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation, + PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown, + } } } From a80db4759ed63c7a314e624a35a5055b5de916f4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 21 Mar 2024 23:47:22 +0100 Subject: [PATCH 269/321] feat(bin/client): add 
--stats flag printing connection statistics (#1766) This commit introduces the `--stats` flag to `neqo-client`. It will print the `neqo_transport::Stats` of the `Connection` after close. ``` $ cargo run --bin neqo-client -- --stats http://127.0.0.1:12345/10000000 stats for Client ... rx: 7728 drop 1 dup 0 saved 1 tx: 819 lost 12 lateack 0 ptoack 4 resumed: false frames rx: crypto 3 done 1 token 1 close 0 ack 40 (max 805) ping 0 padding 0 stream 7704 reset 0 stop 0 max: stream 0 data 0 stream_data 0 blocked: stream 0 data 0 stream_data 19 datagram 0 ncid 7 rcid 0 pchallenge 0 presponse 0 ack_frequency 4 frames tx: crypto 2 done 0 token 0 close 0 ack 783 (max 7769) ping 5 padding 0 stream 5 reset 0 stop 0 max: stream 0 data 0 stream_data 30 blocked: stream 0 data 0 stream_data 0 datagram 0 ncid 0 rcid 0 pchallenge 0 presponse 0 ack_frequency 2 ``` Co-authored-by: Lars Eggert --- neqo-bin/src/bin/client/http09.rs | 4 ++++ neqo-bin/src/bin/client/http3.rs | 4 ++++ neqo-bin/src/bin/client/main.rs | 8 ++++++++ neqo-transport/src/stats.rs | 2 +- 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/bin/client/http09.rs index 6d9a26fec2..9135eb5bb8 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/bin/client/http09.rs @@ -153,6 +153,10 @@ impl super::Client for Connection { fn is_closed(&self) -> bool { matches!(self.state(), State::Closed(..)) } + + fn stats(&self) -> neqo_transport::Stats { + self.stats() + } } impl<'b> Handler<'b> { diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/bin/client/http3.rs index 07cc0e4cde..21637fb3d6 100644 --- a/neqo-bin/src/bin/client/http3.rs +++ b/neqo-bin/src/bin/client/http3.rs @@ -125,6 +125,10 @@ impl super::Client for Http3Client { { self.close(now, app_error, msg); } + + fn stats(&self) -> neqo_transport::Stats { + self.transport_stats() + } } impl<'a> super::Handler for Handler<'a> { diff --git a/neqo-bin/src/bin/client/main.rs 
b/neqo-bin/src/bin/client/main.rs index 7b1a5928a6..3332d79438 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/bin/client/main.rs @@ -179,6 +179,10 @@ pub struct Args { /// The request size that will be used for upload test. #[arg(name = "upload-size", long, default_value = "100")] upload_size: usize, + + /// Print connection stats after close. + #[arg(name = "stats", long)] + stats: bool, } impl Args { @@ -327,6 +331,7 @@ trait Client { where S: AsRef + Display; fn is_closed(&self) -> bool; + fn stats(&self) -> neqo_transport::Stats; } struct Runner<'a, H: Handler> { @@ -361,6 +366,9 @@ impl<'a, H: Handler> Runner<'a, H> { self.process(None).await?; if self.client.is_closed() { + if self.args.stats { + qinfo!("{:?}", self.client.stats()); + } return Ok(self.handler.take_token()); } diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index 9eff503dcf..cdc378d71b 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -206,7 +206,7 @@ impl Debug for Stats { " tx: {} lost {} lateack {} ptoack {}", self.packets_tx, self.lost, self.late_ack, self.pto_ack )?; - writeln!(f, " resumed: {} ", self.resumed)?; + writeln!(f, " resumed: {}", self.resumed)?; writeln!(f, " frames rx:")?; self.frame_rx.fmt(f)?; writeln!(f, " frames tx:")?; From e4bc0c1ed4b3075d1a5d3ebff488d4ed74d4d260 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 21 Mar 2024 23:47:58 +0100 Subject: [PATCH 270/321] feat(client,server): rework logging (#1692) * feat(client,server): rework logging - In `neqo-client` and `neqo-server` use `neqo_common::log` instead of `println!` and `eprintln!`. - Add `-q`, `-v`, `-vv`, `-vvv`, `-vvvv` log level flags via `clap_verbosity_flag`. - Set default log level to INFO. Demote many `qinfo!` to `qdebug!`. * fix(upload_test.sh): set RUST_LOG debug for neqo_transport::cc Needed in order for mozlog_neqo_cwnd.py to analyze cc. 
* Additional level reductions * Trigger CI --------- Co-authored-by: Lars Eggert --- neqo-bin/Cargo.toml | 1 + neqo-bin/src/bin/client/http09.rs | 26 ++++++++++++------------ neqo-bin/src/bin/client/http3.rs | 26 ++++++++++++------------ neqo-bin/src/bin/client/main.rs | 30 ++++++++++++++++------------ neqo-bin/src/bin/server/main.rs | 22 +++++++++++--------- neqo-bin/src/bin/server/old_https.rs | 12 +++++------ neqo-common/src/log.rs | 21 ++++++++++--------- neqo-crypto/src/agent.rs | 4 ++-- neqo-http3/src/connection.rs | 16 +++++++-------- neqo-http3/src/connection_client.rs | 4 ++-- neqo-http3/src/connection_server.rs | 4 ++-- neqo-http3/src/recv_message.rs | 2 +- neqo-http3/src/send_message.rs | 6 +++--- neqo-http3/src/server_events.rs | 8 ++++---- neqo-transport/src/cc/classic_cc.rs | 16 +++++++-------- neqo-transport/src/connection/mod.rs | 18 ++++++++--------- neqo-transport/src/crypto.rs | 8 ++++---- neqo-transport/src/lib.rs | 4 ++-- neqo-transport/src/path.rs | 2 +- neqo-transport/src/stats.rs | 4 ++-- test/upload_test.sh | 2 ++ 21 files changed, 125 insertions(+), 111 deletions(-) diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index d36d2ecdca..04210e00db 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -25,6 +25,7 @@ workspace = true [dependencies] # neqo-bin is not used in Firefox, so we can be liberal with dependency versions clap = { version = "4.4", default-features = false, features = ["std", "color", "help", "usage", "error-context", "suggestions", "derive"] } +clap-verbosity-flag = { version = "2.2", default-features = false } futures = { version = "0.3", default-features = false, features = ["alloc"] } hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false } diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/bin/client/http09.rs index 9135eb5bb8..372a112853 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/bin/client/http09.rs @@ 
-17,7 +17,7 @@ use std::{ time::Instant, }; -use neqo_common::{event::Provider, Datagram}; +use neqo_common::{event::Provider, qdebug, qinfo, qwarn, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, @@ -50,13 +50,13 @@ impl<'a> super::Handler for Handler<'a> { self.read(client, stream_id)?; } ConnectionEvent::SendStreamWritable { stream_id } => { - println!("stream {stream_id} writable"); + qdebug!("stream {stream_id} writable"); } ConnectionEvent::SendStreamComplete { stream_id } => { - println!("stream {stream_id} complete"); + qdebug!("stream {stream_id} complete"); } ConnectionEvent::SendStreamCreatable { stream_type } => { - println!("stream {stream_type:?} creatable"); + qdebug!("stream {stream_type:?} creatable"); if stream_type == StreamType::BiDi { self.download_urls(client); } @@ -64,7 +64,7 @@ impl<'a> super::Handler for Handler<'a> { ConnectionEvent::StateChange( State::WaitInitial | State::Handshaking | State::Connected, ) => { - println!("{event:?}"); + qdebug!("{event:?}"); self.download_urls(client); } ConnectionEvent::StateChange(State::Confirmed) => { @@ -74,7 +74,7 @@ impl<'a> super::Handler for Handler<'a> { self.token = Some(token); } _ => { - println!("Unhandled event {event:?}"); + qwarn!("Unhandled event {event:?}"); } } } @@ -187,7 +187,7 @@ impl<'b> Handler<'b> { fn download_next(&mut self, client: &mut Connection) -> bool { if self.key_update.needed() { - println!("Deferring requests until after first key update"); + qdebug!("Deferring requests until after first key update"); return false; } let url = self @@ -196,7 +196,7 @@ impl<'b> Handler<'b> { .expect("download_next called with empty queue"); match client.stream_create(StreamType::BiDi) { Ok(client_stream_id) => { - println!("Created stream {client_stream_id} for {url}"); + qinfo!("Created stream {client_stream_id} for {url}"); let req = format!("GET 
{}\r\n", url.path()); _ = client .stream_send(client_stream_id, req.as_bytes()) @@ -207,7 +207,7 @@ impl<'b> Handler<'b> { true } Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => { - println!("Cannot create stream {e:?}"); + qwarn!("Cannot create stream {e:?}"); self.url_queue.push_front(url); false } @@ -235,9 +235,9 @@ impl<'b> Handler<'b> { if let Some(out_file) = maybe_out_file { out_file.write_all(&data[..sz])?; } else if !output_read_data { - println!("READ[{stream_id}]: {sz} bytes"); + qdebug!("READ[{stream_id}]: {sz} bytes"); } else { - println!( + qdebug!( "READ[{}]: {}", stream_id, String::from_utf8(data.clone()).unwrap() @@ -252,7 +252,7 @@ impl<'b> Handler<'b> { fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> { match self.streams.get_mut(&stream_id) { None => { - println!("Data on unexpected stream: {stream_id}"); + qwarn!("Data on unexpected stream: {stream_id}"); return Ok(()); } Some(maybe_out_file) => { @@ -267,7 +267,7 @@ impl<'b> Handler<'b> { if let Some(mut out_file) = maybe_out_file.take() { out_file.flush()?; } else { - println!(""); + qinfo!(""); } self.streams.remove(&stream_id); self.download_urls(client); diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/bin/client/http3.rs index 21637fb3d6..e9f5e406a5 100644 --- a/neqo-bin/src/bin/client/http3.rs +++ b/neqo-bin/src/bin/client/http3.rs @@ -18,7 +18,7 @@ use std::{ time::Instant, }; -use neqo_common::{event::Provider, hex, Datagram, Header}; +use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_http3::{Error, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; use neqo_transport::{ @@ -149,7 +149,7 @@ impl<'a> super::Handler for Handler<'a> { if let Some(handler) = self.url_handler.stream_handler(stream_id) { handler.process_header_ready(stream_id, fin, headers); } else { - println!("Data on unexpected stream: {stream_id}"); + 
qwarn!("Data on unexpected stream: {stream_id}"); } if fin { self.url_handler.on_stream_fin(client, stream_id); @@ -159,7 +159,7 @@ impl<'a> super::Handler for Handler<'a> { let mut stream_done = false; match self.url_handler.stream_handler(stream_id) { None => { - println!("Data on unexpected stream: {stream_id}"); + qwarn!("Data on unexpected stream: {stream_id}"); } Some(handler) => loop { let mut data = vec![0; 4096]; @@ -193,7 +193,7 @@ impl<'a> super::Handler for Handler<'a> { Http3ClientEvent::DataWritable { stream_id } => { match self.url_handler.stream_handler(stream_id) { None => { - println!("Data on unexpected stream: {stream_id}"); + qwarn!("Data on unexpected stream: {stream_id}"); } Some(handler) => { handler.process_data_writable(client, stream_id); @@ -206,7 +206,7 @@ impl<'a> super::Handler for Handler<'a> { } Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), _ => { - println!("Unhandled event {event:?}"); + qwarn!("Unhandled event {event:?}"); } } } @@ -279,7 +279,7 @@ struct DownloadStreamHandler { impl StreamHandler for DownloadStreamHandler { fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { if self.out_file.is_none() { - println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); } } @@ -297,18 +297,18 @@ impl StreamHandler for DownloadStreamHandler { } return Ok(true); } else if !output_read_data { - println!("READ[{stream_id}]: {sz} bytes"); + qdebug!("READ[{stream_id}]: {sz} bytes"); } else if let Ok(txt) = String::from_utf8(data.clone()) { - println!("READ[{stream_id}]: {txt}"); + qdebug!("READ[{stream_id}]: {txt}"); } else { - println!("READ[{}]: 0x{}", stream_id, hex(&data)); + qdebug!("READ[{}]: 0x{}", stream_id, hex(&data)); } if fin { if let Some(mut out_file) = self.out_file.take() { out_file.flush()?; } else { - println!(""); + qdebug!(""); } } @@ -327,7 +327,7 @@ struct UploadStreamHandler { impl StreamHandler for UploadStreamHandler { fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { - println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); } fn process_data_readable( @@ -343,7 +343,7 @@ impl StreamHandler for UploadStreamHandler { let parsed: usize = trimmed_txt.parse().unwrap(); if parsed == self.data.len() { let upload_time = Instant::now().duration_since(self.start); - println!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); + qinfo!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); } } else { panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data)); @@ -411,7 +411,7 @@ impl<'a> UrlHandler<'a> { Priority::default(), ) { Ok(client_stream_id) => { - println!("Successfully created stream id {client_stream_id} for {url}"); + qdebug!("Successfully created stream id {client_stream_id} for {url}"); let handler: Box = StreamHandlerType::make_handler( &self.handler_type, diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/bin/client/main.rs index 3332d79438..63aa12db13 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/bin/client/main.rs @@ -22,7 +22,7 @@ use futures::{ FutureExt, TryFutureExt, }; use neqo_bin::udp; -use neqo_common::{self as common, qdebug, qinfo, qlog::NeqoQlog, Datagram, Role}; +use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, Cipher, ResumptionToken, @@ -103,7 +103,7 @@ impl KeyUpdateState { _ => return Err(e), } } else { - println!("Keys updated"); + qerror!("Keys updated"); self.0 = false; } } @@ -119,6 +119,9 @@ impl KeyUpdateState { #[command(author, version, about, long_about = None)] #[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. 
pub struct Args { + #[command(flatten)] + verbose: clap_verbosity_flag::Verbosity, + #[command(flatten)] shared: neqo_bin::SharedArgs, @@ -211,7 +214,7 @@ impl Args { "http3" => { if let Some(testcase) = &self.test { if testcase.as_str() != "upload" { - eprintln!("Unsupported test case: {testcase}"); + qerror!("Unsupported test case: {testcase}"); exit(127) } @@ -223,7 +226,7 @@ impl Args { } "zerortt" | "resumption" => { if self.urls.len() < 2 { - eprintln!("Warning: resumption tests won't work without >1 URL"); + qerror!("Warning: resumption tests won't work without >1 URL"); exit(127); } self.shared.use_old_http = true; @@ -272,11 +275,11 @@ fn get_output_file( out_path.push(url_path); if all_paths.contains(&out_path) { - eprintln!("duplicate path {}", out_path.display()); + qerror!("duplicate path {}", out_path.display()); return None; } - eprintln!("Saving {url} to {out_path:?}"); + qinfo!("Saving {url} to {out_path:?}"); if let Some(parent) = out_path.parent() { create_dir_all(parent).ok()?; @@ -398,7 +401,7 @@ impl<'a, H: Handler> Runner<'a, H> { self.socket.send(dgram)?; } Output::Callback(new_timeout) => { - qinfo!("Setting timeout of {:?}", new_timeout); + qdebug!("Setting timeout of {:?}", new_timeout); self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); break; } @@ -444,11 +447,12 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { #[tokio::main] async fn main() -> Res<()> { - init(); - let mut args = Args::parse(); + neqo_common::log::init(Some(args.verbose.log_level_filter())); args.update_for_tests(); + init(); + let urls_by_origin = args .urls .clone() @@ -461,14 +465,14 @@ async fn main() -> Res<()> { .filter_map(|(origin, urls)| match origin { Origin::Tuple(_scheme, h, p) => Some(((h, p), urls)), Origin::Opaque(x) => { - eprintln!("Opaque origin {x:?}"); + qwarn!("Opaque origin {x:?}"); None } }); for ((host, port), mut urls) in urls_by_origin { if args.resume && urls.len() < 2 { - eprintln!("Resumption to {host} 
cannot work without at least 2 URLs."); + qerror!("Resumption to {host} cannot work without at least 2 URLs."); exit(127); } @@ -479,7 +483,7 @@ async fn main() -> Res<()> { ) }); let Some(remote_addr) = remote_addr else { - eprintln!("No compatible address found for: {host}"); + qerror!("No compatible address found for: {host}"); exit(1); }; @@ -490,7 +494,7 @@ async fn main() -> Res<()> { let mut socket = udp::Socket::bind(local_addr)?; let real_local = socket.local_addr().unwrap(); - println!( + qinfo!( "{} Client connecting: {:?} -> {:?}", if args.shared.use_old_http { "H9" } else { "H3" }, real_local, diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/bin/server/main.rs index f694cf98c1..753794d6f6 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/bin/server/main.rs @@ -25,7 +25,7 @@ use futures::{ FutureExt, }; use neqo_bin::udp; -use neqo_common::{hex, qinfo, qwarn, Datagram, Header}; +use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, @@ -89,6 +89,9 @@ impl std::error::Error for ServerError {} #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] struct Args { + #[command(flatten)] + verbose: clap_verbosity_flag::Verbosity, + #[command(flatten)] shared: neqo_bin::SharedArgs, @@ -166,17 +169,17 @@ fn qns_read_response(filename: &str) -> Option> { OpenOptions::new() .read(true) .open(&file_path) - .map_err(|_e| eprintln!("Could not open {}", file_path.display())) + .map_err(|_e| qerror!("Could not open {}", file_path.display())) .ok() .and_then(|mut f| { let mut data = Vec::new(); match f.read_to_end(&mut data) { Ok(sz) => { - println!("{} bytes read from {}", sz, file_path.display()); + qinfo!("{} bytes read from {}", sz, file_path.display()); Some(data) } Err(e) => { - eprintln!("Error reading data: {e:?}"); + qerror!("Error 
reading data: {e:?}"); None } } @@ -312,7 +315,7 @@ impl HttpServer for SimpleServer { headers, fin, } => { - println!("Headers (request={stream} fin={fin}): {headers:?}"); + qdebug!("Headers (request={stream} fin={fin}): {headers:?}"); let post = if let Some(method) = headers.iter().find(|&h| h.name() == ":method") { @@ -428,7 +431,7 @@ impl ServersRunner { pub fn new(args: Args) -> Result { let hosts = args.listen_addresses(); if hosts.is_empty() { - eprintln!("No valid hosts defined"); + qerror!("No valid hosts defined"); return Err(io::Error::new(io::ErrorKind::InvalidInput, "No hosts")); } let sockets = hosts @@ -436,7 +439,7 @@ impl ServersRunner { .map(|host| { let socket = udp::Socket::bind(host)?; let local_addr = socket.local_addr()?; - println!("Server waiting for connection on: {local_addr:?}"); + qinfo!("Server waiting for connection on: {local_addr:?}"); Ok((host, socket)) }) @@ -479,7 +482,7 @@ impl ServersRunner { } if args.ech { let cfg = svr.enable_ech(); - println!("ECHConfigList: {}", hex(cfg)); + qinfo!("ECHConfigList: {}", hex(cfg)); } svr } @@ -507,7 +510,7 @@ impl ServersRunner { socket.send(dgram)?; } Output::Callback(new_timeout) => { - qinfo!("Setting timeout of {:?}", new_timeout); + qdebug!("Setting timeout of {:?}", new_timeout); self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); break; } @@ -573,6 +576,7 @@ async fn main() -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; let mut args = Args::parse(); + neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); init_db(args.db.clone()); diff --git a/neqo-bin/src/bin/server/old_https.rs b/neqo-bin/src/bin/server/old_https.rs index f36c99c484..ec32032a05 100644 --- a/neqo-bin/src/bin/server/old_https.rs +++ b/neqo-bin/src/bin/server/old_https.rs @@ -8,7 +8,7 @@ use std::{ cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, }; -use neqo_common::{event::Provider, 
hex, qdebug, Datagram}; +use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram}; use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay, Cipher}; use neqo_http3::Error; use neqo_transport::{ @@ -149,7 +149,7 @@ impl Http09Server { } Some(path) => { let path = path.as_str(); - eprintln!("Path = '{path}'"); + qdebug!("Path = '{path}'"); if args.shared.qns_test.is_some() { qns_read_response(path) } else { @@ -164,7 +164,7 @@ impl Http09Server { fn stream_writable(&mut self, stream_id: StreamId, conn: &mut ActiveConnectionRef) { match self.write_state.get_mut(&stream_id) { None => { - eprintln!("Unknown stream {stream_id}, ignoring event"); + qwarn!("Unknown stream {stream_id}, ignoring event"); } Some(stream_state) => { stream_state.writable = true; @@ -177,7 +177,7 @@ impl Http09Server { *offset += sent; self.server.add_to_waiting(conn); if *offset == data.len() { - eprintln!("Sent {sent} on {stream_id}, closing"); + qinfo!("Sent {sent} on {stream_id}, closing"); conn.borrow_mut().stream_close_send(stream_id).unwrap(); self.write_state.remove(&stream_id); } else { @@ -202,7 +202,7 @@ impl HttpServer for Http09Server { None => break, Some(e) => e, }; - eprintln!("Event {event:?}"); + qdebug!("Event {event:?}"); match event { ConnectionEvent::NewStream { stream_id } => { self.write_state @@ -222,7 +222,7 @@ impl HttpServer for Http09Server { } ConnectionEvent::StateChange(_) | ConnectionEvent::SendStreamComplete { .. 
} => (), - e => eprintln!("unhandled event {e:?}"), + e => qwarn!("unhandled event {e:?}"), } } } diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index c5b89be8a6..04028a26bd 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -50,7 +50,7 @@ fn since_start() -> Duration { START_TIME.get_or_init(Instant::now).elapsed() } -pub fn init() { +pub fn init(level_filter: Option) { static INIT_ONCE: Once = Once::new(); if ::log::STATIC_MAX_LEVEL == ::log::LevelFilter::Off { @@ -59,6 +59,9 @@ pub fn init() { INIT_ONCE.call_once(|| { let mut builder = Builder::from_env("RUST_LOG"); + if let Some(filter) = level_filter { + builder.filter_level(filter); + } builder.format(|buf, record| { let elapsed = since_start(); writeln!( @@ -71,9 +74,9 @@ pub fn init() { ) }); if let Err(e) = builder.try_init() { - do_log!(::log::Level::Info, "Logging initialization error {:?}", e); + do_log!(::log::Level::Warn, "Logging initialization error {:?}", e); } else { - do_log!(::log::Level::Info, "Logging initialized"); + do_log!(::log::Level::Debug, "Logging initialized"); } }); } @@ -81,32 +84,32 @@ pub fn init() { #[macro_export] macro_rules! log_invoke { ($lvl:expr, $ctx:expr, $($arg:tt)*) => ( { - ::neqo_common::log::init(); + ::neqo_common::log::init(None); ::neqo_common::do_log!($lvl, "[{}] {}", $ctx, format!($($arg)*)); } ) } #[macro_export] macro_rules! qerror { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Error, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } ); } #[macro_export] macro_rules! 
qwarn { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Warn, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } ); } #[macro_export] macro_rules! qinfo { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Info, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } ); } #[macro_export] macro_rules! qdebug { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Debug, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } ); } #[macro_export] macro_rules! 
qtrace { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Trace, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } ); } diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 82a6dacd48..90085cb759 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -670,7 +670,7 @@ impl SecretAgent { let info = self.capture_error(SecretAgentInfo::new(self.fd))?; HandshakeState::Complete(info) }; - qinfo!([self], "state -> {:?}", self.state); + qdebug!([self], "state -> {:?}", self.state); Ok(()) } @@ -898,7 +898,7 @@ impl Client { let len = usize::try_from(len).unwrap(); let mut v = Vec::with_capacity(len); v.extend_from_slice(null_safe_slice(token, len)); - qinfo!( + qdebug!( [format!("{fd:p}")], "Got resumption token {}", hex_snip_middle(&v) diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index 287ea2c2af..cfa78df787 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -354,7 +354,7 @@ impl Http3Connection { /// This function creates and initializes, i.e. send stream type, the control and qpack /// streams. 
fn initialize_http3_connection(&mut self, conn: &mut Connection) -> Res<()> { - qinfo!([self], "Initialize the http3 connection."); + qdebug!([self], "Initialize the http3 connection."); self.control_stream_local.create(conn)?; self.send_settings(); @@ -704,7 +704,7 @@ impl Http3Connection { ); } NewStreamType::Decoder => { - qinfo!([self], "A new remote qpack encoder stream {}", stream_id); + qdebug!([self], "A new remote qpack encoder stream {}", stream_id); self.check_stream_exists(Http3StreamType::Decoder)?; self.recv_streams.insert( stream_id, @@ -715,7 +715,7 @@ impl Http3Connection { ); } NewStreamType::Encoder => { - qinfo!([self], "A new remote qpack decoder stream {}", stream_id); + qdebug!([self], "A new remote qpack decoder stream {}", stream_id); self.check_stream_exists(Http3StreamType::Encoder)?; self.recv_streams.insert( stream_id, @@ -766,7 +766,7 @@ impl Http3Connection { /// This is called when an application closes the connection. pub fn close(&mut self, error: AppError) { - qinfo!([self], "Close connection error {:?}.", error); + qdebug!([self], "Close connection error {:?}.", error); self.state = Http3State::Closing(ConnectionError::Application(error)); if (!self.send_streams.is_empty() || !self.recv_streams.is_empty()) && (error == 0) { qwarn!("close(0) called when streams still active"); @@ -952,7 +952,7 @@ impl Http3Connection { stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self .recv_streams .get_mut(&stream_id) @@ -1091,7 +1091,7 @@ impl Http3Connection { /// This is called when an application wants to close the sending side of a stream. 
pub fn stream_close_send(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> { - qinfo!([self], "Close the sending side for stream {}.", stream_id); + qdebug!([self], "Close the sending side for stream {}.", stream_id); debug_assert!(self.state.active()); let send_stream = self .send_streams @@ -1402,7 +1402,7 @@ impl Http3Connection { /// `PriorityUpdateRequestPush` which handling is specific to the client and server, we must /// give them to the specific client/server handler. fn handle_control_frame(&mut self, f: HFrame) -> Res> { - qinfo!([self], "Handle a control frame {:?}", f); + qdebug!([self], "Handle a control frame {:?}", f); if !matches!(f, HFrame::Settings { .. }) && !matches!( self.settings_state, @@ -1433,7 +1433,7 @@ impl Http3Connection { } fn handle_settings(&mut self, new_settings: HSettings) -> Res<()> { - qinfo!([self], "Handle SETTINGS frame."); + qdebug!([self], "Handle SETTINGS frame."); match &self.settings_state { Http3RemoteSettingsState::NotReceived => { self.set_qpack_settings(&new_settings)?; diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 52572a760d..836816b337 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -590,7 +590,7 @@ impl Http3Client { /// /// An error will be return if stream does not exist. 
pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> { - qinfo!([self], "Close sending side stream={}.", stream_id); + qdebug!([self], "Close sending side stream={}.", stream_id); self.base_handler .stream_close_send(&mut self.conn, stream_id) } @@ -652,7 +652,7 @@ impl Http3Client { stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self.base_handler.read_data(&mut self.conn, stream_id, buf); if let Err(e) = &res { if e.connection_error() { diff --git a/neqo-http3/src/connection_server.rs b/neqo-http3/src/connection_server.rs index 097209a226..dcf759f177 100644 --- a/neqo-http3/src/connection_server.rs +++ b/neqo-http3/src/connection_server.rs @@ -98,7 +98,7 @@ impl Http3ServerHandler { /// /// An error will be returned if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId, conn: &mut Connection) -> Res<()> { - qinfo!([self], "Close sending side stream={}.", stream_id); + qdebug!([self], "Close sending side stream={}.", stream_id); self.base_handler.stream_close_send(conn, stream_id)?; self.base_handler.stream_has_pending_data(stream_id); self.needs_processing = true; @@ -408,7 +408,7 @@ impl Http3ServerHandler { stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self.base_handler.read_data(conn, stream_id, buf); if let Err(e) = &res { if e.connection_error() { diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index be58b7e47c..55970849ef 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -271,7 +271,7 @@ impl RecvMessage { } (None, false) => break Ok(()), (Some(frame), fin) => { - qinfo!( + qdebug!( [self], "A new frame has been received: {:?}; state={:?} fin={}", frame, diff --git 
a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index c50e3e056a..15965c44f6 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -6,7 +6,7 @@ use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; -use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; +use neqo_common::{qdebug, qtrace, Encoder, Header, MessageType}; use neqo_qpack::encoder::QPackEncoder; use neqo_transport::{Connection, StreamId}; @@ -119,7 +119,7 @@ impl SendMessage { encoder: Rc>, conn_events: Box, ) -> Self { - qinfo!("Create a request stream_id={}", stream_id); + qdebug!("Create a request stream_id={}", stream_id); Self { state: MessageState::WaitingForHeaders, message_type, @@ -193,7 +193,7 @@ impl SendStream for SendMessage { min(buf.len(), available - 9) }; - qinfo!( + qdebug!( [self], "send_request_body: available={} to_send={}.", available, diff --git a/neqo-http3/src/server_events.rs b/neqo-http3/src/server_events.rs index a85ece0bfb..214a48c757 100644 --- a/neqo-http3/src/server_events.rs +++ b/neqo-http3/src/server_events.rs @@ -13,7 +13,7 @@ use std::{ rc::Rc, }; -use neqo_common::{qdebug, qinfo, Encoder, Header}; +use neqo_common::{qdebug, Encoder, Header}; use neqo_transport::{ server::ActiveConnectionRef, AppError, Connection, DatagramTracking, StreamId, StreamType, }; @@ -189,7 +189,7 @@ impl Http3OrWebTransportStream { /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_data(&mut self, data: &[u8]) -> Res { - qinfo!([self], "Set new response."); + qdebug!([self], "Set new response."); self.stream_handler.send_data(data) } @@ -199,7 +199,7 @@ impl Http3OrWebTransportStream { /// /// It may return `InvalidStreamId` if a stream does not exist anymore. 
pub fn stream_close_send(&mut self) -> Res<()> { - qinfo!([self], "Set new response."); + qdebug!([self], "Set new response."); self.stream_handler.stream_close_send() } } @@ -270,7 +270,7 @@ impl WebTransportRequest { /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn response(&mut self, accept: &WebTransportSessionAcceptAction) -> Res<()> { - qinfo!([self], "Set a response for a WebTransport session."); + qdebug!([self], "Set a response for a WebTransport session."); self.stream_handler .handler .borrow_mut() diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 89be6c4b0f..a63d6e0b38 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -164,7 +164,7 @@ impl CongestionControl for ClassicCongestionControl { let mut is_app_limited = true; let mut new_acked = 0; for pkt in acked_pkts { - qinfo!( + qdebug!( "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}", self, pkt.pn, @@ -198,7 +198,7 @@ impl CongestionControl for ClassicCongestionControl { if is_app_limited { self.cc_algorithm.on_app_limited(); - qinfo!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); + qdebug!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); return; } @@ -208,7 +208,7 @@ impl CongestionControl for ClassicCongestionControl { let increase = min(self.ssthresh - self.congestion_window, self.acked_bytes); self.congestion_window += increase; self.acked_bytes -= increase; - qinfo!([self], "slow start += {}", increase); + qdebug!([self], "slow start += {}", increase); if self.congestion_window == self.ssthresh { // This doesn't look like it is necessary, but it can happen // after persistent congestion. 
@@ -249,7 +249,7 @@ impl CongestionControl for ClassicCongestionControl { QlogMetric::BytesInFlight(self.bytes_in_flight), ], ); - qinfo!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); + qdebug!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); } /// Update congestion controller state based on lost packets. @@ -265,7 +265,7 @@ impl CongestionControl for ClassicCongestionControl { } for pkt in lost_packets.iter().filter(|pkt| pkt.cc_in_flight()) { - qinfo!( + qdebug!( "packet_lost this={:p}, pn={}, ps={}", self, pkt.pn, @@ -286,7 +286,7 @@ impl CongestionControl for ClassicCongestionControl { pto, lost_packets, ); - qinfo!( + qdebug!( "on_packets_lost this={:p}, bytes_in_flight={}, cwnd={}, state={:?}", self, self.bytes_in_flight, @@ -335,7 +335,7 @@ impl CongestionControl for ClassicCongestionControl { } self.bytes_in_flight += pkt.size; - qinfo!( + qdebug!( "packet_sent this={:p}, pn={}, ps={}", self, pkt.pn, @@ -498,7 +498,7 @@ impl ClassicCongestionControl { self.congestion_window = max(cwnd, CWND_MIN); self.acked_bytes = acked_bytes; self.ssthresh = self.congestion_window; - qinfo!( + qdebug!( [self], "Cong event -> recovery; cwnd {}, ssthresh {}", self.congestion_window, diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 8d1c106358..75c3490cba 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -778,7 +778,7 @@ impl Connection { }); enc.encode(extra); let records = s.send_ticket(now, enc.as_ref())?; - qinfo!([self], "send session ticket {}", hex(&enc)); + qdebug!([self], "send session ticket {}", hex(&enc)); self.crypto.buffer_records(records)?; } else { unreachable!(); @@ -824,7 +824,7 @@ impl Connection { /// the 
connection to fail. However, if no packets have been /// exchanged, it's not OK. pub fn authenticated(&mut self, status: AuthenticationStatus, now: Instant) { - qinfo!([self], "Authenticated {:?}", status); + qdebug!([self], "Authenticated {:?}", status); self.crypto.tls.authenticated(status); let res = self.handshake(now, self.version, PacketNumberSpace::Handshake, None); self.absorb_error(now, res); @@ -1154,7 +1154,7 @@ impl Connection { fn discard_keys(&mut self, space: PacketNumberSpace, now: Instant) { if self.crypto.discard(space) { - qinfo!([self], "Drop packet number space {}", space); + qdebug!([self], "Drop packet number space {}", space); let primary = self.paths.primary(); self.loss_recovery.discard(&primary, space, now); self.acks.drop_space(space); @@ -2307,7 +2307,7 @@ impl Connection { } if encoder.is_empty() { - qinfo!("TX blocked, profile={:?} ", profile); + qdebug!("TX blocked, profile={:?} ", profile); Ok(SendOption::No(profile.paced())) } else { // Perform additional padding for Initial packets as necessary. 
@@ -2351,7 +2351,7 @@ impl Connection { } fn client_start(&mut self, now: Instant) -> Res<()> { - qinfo!([self], "client_start"); + qdebug!([self], "client_start"); debug_assert_eq!(self.role, Role::Client); qlog::client_connection_started(&mut self.qlog, &self.paths.primary()); qlog::client_version_information_initiated(&mut self.qlog, self.conn_params.get_versions()); @@ -2583,7 +2583,7 @@ impl Connection { fn confirm_version(&mut self, v: Version) { if self.version != v { - qinfo!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v); + qdebug!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v); } self.crypto.confirm_version(v); self.version = v; @@ -2882,7 +2882,7 @@ impl Connection { R: IntoIterator> + Debug, R::IntoIter: ExactSizeIterator, { - qinfo!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); + qdebug!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); let (acked_packets, lost_packets) = self.loss_recovery.on_ack_received( &self.paths.primary(), @@ -2936,7 +2936,7 @@ impl Connection { } fn set_connected(&mut self, now: Instant) -> Res<()> { - qinfo!([self], "TLS connection complete"); + qdebug!([self], "TLS connection complete"); if self.crypto.tls.info().map(SecretAgentInfo::alpn).is_none() { qwarn!([self], "No ALPN. 
Closing connection."); // 120 = no_application_protocol @@ -2979,7 +2979,7 @@ impl Connection { fn set_state(&mut self, state: State) { if state > self.state { - qinfo!([self], "State change from {:?} -> {:?}", self.state, state); + qdebug!([self], "State change from {:?} -> {:?}", self.state, state); self.state = state.clone(); if self.state.closed() { self.streams.clear_streams(); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 9840eaa1e1..acc02172d5 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -317,7 +317,7 @@ impl Crypto { } pub fn acked(&mut self, token: &CryptoRecoveryToken) { - qinfo!( + qdebug!( "Acked crypto frame space={} offset={} length={}", token.space, token.offset, @@ -367,7 +367,7 @@ impl Crypto { }); enc.encode_vvec(new_token.unwrap_or(&[])); enc.encode(t.as_ref()); - qinfo!("resumption token {}", hex_snip_middle(enc.as_ref())); + qdebug!("resumption token {}", hex_snip_middle(enc.as_ref())); Some(ResumptionToken::new(enc.into(), t.expiration_time())) } else { None @@ -433,7 +433,7 @@ impl CryptoDxState { cipher: Cipher, fuzzing: bool, ) -> Self { - qinfo!( + qdebug!( "Making {:?} {} CryptoDxState, v={:?} cipher={}", direction, epoch, @@ -980,7 +980,7 @@ impl CryptoStates { }; for v in versions { - qinfo!( + qdebug!( [self], "Creating initial cipher state v={:?}, role={:?} dcid={}", v, diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index be482c466f..8fabbeb9a3 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -6,7 +6,7 @@ #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. 
-use neqo_common::qinfo; +use neqo_common::qwarn; use neqo_crypto::Error as CryptoError; mod ackrate; @@ -165,7 +165,7 @@ impl Error { impl From for Error { fn from(err: CryptoError) -> Self { - qinfo!("Crypto operation failed {:?}", err); + qwarn!("Crypto operation failed {:?}", err); match err { CryptoError::EchRetry(config) => Self::EchRetry(config), _ => Self::CryptoError(err), diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 4e8d9958ab..50e458ff36 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -216,7 +216,7 @@ impl Paths { /// to a migration from a peer, in which case the old path needs to be probed. #[must_use] fn select_primary(&mut self, path: &PathRef) -> Option { - qinfo!([path.borrow()], "set as primary path"); + qdebug!([path.borrow()], "set as primary path"); let old_path = self.primary.replace(Rc::clone(path)).map(|old| { old.borrow_mut().set_primary(false); old diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index cdc378d71b..0a61097010 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -14,7 +14,7 @@ use std::{ time::Duration, }; -use neqo_common::qinfo; +use neqo_common::qwarn; use crate::packet::PacketNumber; @@ -168,7 +168,7 @@ impl Stats { pub fn pkt_dropped(&mut self, reason: impl AsRef) { self.dropped_rx += 1; - qinfo!( + qwarn!( [self.info], "Dropped received packet: {}; Total: {}", reason.as_ref(), diff --git a/test/upload_test.sh b/test/upload_test.sh index 685a6a926c..8edb55e75d 100755 --- a/test/upload_test.sh +++ b/test/upload_test.sh @@ -2,6 +2,8 @@ set -e +export RUST_LOG=neqo_transport::cc=debug + server_address=127.0.0.1 server_port=4433 upload_size=8388608 From 8775c593e6bfeb0dea92889e99797480fcfb6967 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 22 Mar 2024 14:15:02 +1000 Subject: [PATCH 271/321] No `--all-targets` during test --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/check.yml b/.github/workflows/check.yml index a89e4859d3..e17d563905 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -113,7 +113,7 @@ jobs: - name: Run tests and determine coverage run: | # shellcheck disable=SC2086 - cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --all-targets --features ci --no-fail-fast --lcov --output-path lcov.info + cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --features ci --no-fail-fast --lcov --output-path lcov.info cargo +${{ matrix.rust-toolchain }} bench --features bench --no-run - name: Run client/server transfer From bb3ab602481c0ec61597ef76242a3008108d524b Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sat, 23 Mar 2024 17:13:01 +1000 Subject: [PATCH 272/321] Don't fail if cached main-branch results don't exist --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 81ef297a9e..134f78f559 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -115,7 +115,7 @@ jobs: echo "### Benchmark results" echo } > results.md - SHA=$(cat target/criterion/baseline-sha.txt) + SHA=$(cat target/criterion/baseline-sha.txt || true) if [ -n "$SHA" ]; then { echo "Performance differences relative to $SHA." From 9814bcd907039349fdf08fad39209a38515414d4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 24 Mar 2024 18:20:27 +0100 Subject: [PATCH 273/321] fix(transport/bench): increase transfer bench noise threshold (#1771) The default Criterion noise threshold is 0.01, i.e. ``` rust /// Changes the noise threshold for benchmarks in this group. The noise threshold /// is used to filter out small changes in performance from one run to the next, even if they /// are statistically significant. Sometimes benchmarking the same code twice will result in /// small but statistically significant differences solely because of noise. 
This provides a way /// to filter out some of these false positives at the cost of making it harder to detect small /// changes to the true performance of the benchmark. /// /// The default is 0.01, meaning that changes smaller than 1% will be ignored. /// /// # Panics /// /// Panics if the threshold is set to a negative value pub fn noise_threshold(&mut self, threshold: f64) -> &mut Self { ``` https://bheisler.github.io/criterion.rs/criterion/struct.BenchmarkGroup.html#method.noise_threshold Multiple runs of the `neqo-transport` `transfer/*` benchmarks showed between 2% and 3% performance regression on unrelated changes. - https://github.com/mozilla/neqo/actions/runs/8402727182/job/23012699901 - https://github.com/mozilla/neqo/actions/runs/8408164062/job/23024217712 To improve the signal-to-noise ratio of the benchmarks, set the noise threshold to 3%. --- neqo-transport/benches/transfer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs index b13075a4ff..98bd29ff05 100644 --- a/neqo-transport/benches/transfer.rs +++ b/neqo-transport/benches/transfer.rs @@ -23,6 +23,7 @@ const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option>) { let mut group = c.benchmark_group("transfer"); group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap())); + group.noise_threshold(0.03); group.bench_function(label, |b| { b.iter_batched( || { From 6691c1a570a37a388dc2495b0e592c572b4129b6 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 25 Mar 2024 02:38:50 +0200 Subject: [PATCH 274/321] ci: Run a daily baseline benchmark of `main` for the cache (#1770) --- .github/workflows/bench.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 134f78f559..0e215f571f 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -2,6 +2,9 @@ name: Bench on: 
workflow_call: workflow_dispatch: + schedule: + # Run at 1 AM each day, so there is a `main`-branch baseline in the cache. + - cron: '0 1 * * *' env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_PROFILE_RELEASE_DEBUG: true From d8eeddaad505710bb60361a4da66bf2743e64e1f Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 25 Mar 2024 17:16:18 +1100 Subject: [PATCH 275/321] Maybe make the timer wheel faster (#1763) * Maybe make the timer wheel faster * Add some panics docs * Work around mozpkix issue * Add a simple benchmark for the timer wheel * Remove workaround * Make clippy happy --------- Co-authored-by: Lars Eggert --- neqo-common/Cargo.toml | 5 ++++ neqo-common/benches/timer.rs | 39 ++++++++++++++++++++++++++++++ neqo-common/src/timer.rs | 46 +++++++++++++++++++++++++++--------- 3 files changed, 79 insertions(+), 11 deletions(-) create mode 100644 neqo-common/benches/timer.rs diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 89eaa53890..069d67b834 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -21,6 +21,7 @@ qlog = { version = "0.12", default-features = false } time = { version = "0.3", default-features = false, features = ["formatting"] } [dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports"] } test-fixture = { path = "../test-fixture" } [features] @@ -33,3 +34,7 @@ features = ["timeapi"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false + +[[bench]] +name = "timer" +harness = false diff --git a/neqo-common/benches/timer.rs b/neqo-common/benches/timer.rs new file mode 100644 index 0000000000..5ac8019db4 --- /dev/null +++ b/neqo-common/benches/timer.rs @@ -0,0 +1,39 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use std::time::{Duration, Instant}; + +use criterion::{criterion_group, criterion_main, Criterion}; +use neqo_common::timer::Timer; +use test_fixture::now; + +fn benchmark_timer(c: &mut Criterion) { + c.bench_function("drain a timer quickly", |b| { + b.iter_batched_ref( + make_timer, + |(_now, timer)| { + while let Some(t) = timer.next_time() { + assert!(timer.take_next(t).is_some()); + } + }, + criterion::BatchSize::SmallInput, + ); + }); +} + +fn make_timer() -> (Instant, Timer<()>) { + const TIMES: &[u64] = &[1, 2, 3, 5, 8, 13, 21, 34]; + + let now = now(); + let mut timer = Timer::new(now, Duration::from_millis(777), 100); + for &t in TIMES { + timer.add(now + Duration::from_secs(t), ()); + } + (now, timer) +} + +criterion_group!(benches, benchmark_timer); +criterion_main!(benches); diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index a413252e08..3feddb2226 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -5,6 +5,7 @@ // except according to those terms. use std::{ + collections::VecDeque, mem, time::{Duration, Instant}, }; @@ -27,7 +28,7 @@ impl TimerItem { /// points). Time is relative, the wheel has an origin time and it is unable to represent times that /// are more than `granularity * capacity` past that time. pub struct Timer { - items: Vec>>, + items: Vec>>, now: Instant, granularity: Duration, cursor: usize, @@ -55,9 +56,14 @@ impl Timer { /// Return a reference to the time of the next entry. 
#[must_use] pub fn next_time(&self) -> Option { - for i in 0..self.items.len() { - let idx = self.bucket(i); - if let Some(t) = self.items[idx].first() { + let idx = self.bucket(0); + for i in idx..self.items.len() { + if let Some(t) = self.items[i].front() { + return Some(t.time); + } + } + for i in 0..idx { + if let Some(t) = self.items[i].front() { return Some(t.time); } } @@ -145,6 +151,9 @@ impl Timer { /// Given knowledge of the time an item was added, remove it. /// This requires use of a predicate that identifies matching items. + /// + /// # Panics + /// Impossible, I think. pub fn remove(&mut self, time: Instant, mut selector: F) -> Option where F: FnMut(&T) -> bool, @@ -167,7 +176,7 @@ impl Timer { break; } if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).item); + return Some(self.items[bucket].remove(i).unwrap().item); } } // ... then forwards. @@ -176,7 +185,7 @@ impl Timer { break; } if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).item); + return Some(self.items[bucket].remove(i).unwrap().item); } } None @@ -185,10 +194,25 @@ impl Timer { /// Take the next item, unless there are no items with /// a timeout in the past relative to `until`. 
pub fn take_next(&mut self, until: Instant) -> Option { - for i in 0..self.items.len() { - let idx = self.bucket(i); - if !self.items[idx].is_empty() && self.items[idx][0].time <= until { - return Some(self.items[idx].remove(0).item); + fn maybe_take(v: &mut VecDeque>, until: Instant) -> Option { + if !v.is_empty() && v[0].time <= until { + Some(v.pop_front().unwrap().item) + } else { + None + } + } + + let idx = self.bucket(0); + for i in idx..self.items.len() { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; + } + } + for i in 0..idx { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; } } None @@ -201,7 +225,7 @@ impl Timer { if until >= self.now + self.span() { // Drain everything, so a clean sweep. let mut empty_items = Vec::with_capacity(self.items.len()); - empty_items.resize_with(self.items.len(), Vec::default); + empty_items.resize_with(self.items.len(), VecDeque::default); let mut items = mem::replace(&mut self.items, empty_items); self.now = until; self.cursor = 0; From 50876af998b97b4a249be814b32f675704f9714a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 25 Mar 2024 09:50:45 +0200 Subject: [PATCH 276/321] ci: Run `clippy` for all features except `gecko` (#1768) * ci: Run `clippy` for all features except `gecko` * Make clippy happy * Update .github/workflows/check.yml Co-authored-by: Max Inden Signed-off-by: Lars Eggert --------- Signed-off-by: Lars Eggert Co-authored-by: Max Inden --- .github/workflows/check.yml | 2 +- neqo-crypto/src/aead_fuzzing.rs | 3 +++ neqo-crypto/src/lib.rs | 2 +- neqo-transport/benches/range_tracker.rs | 16 +++++++++------- neqo-transport/benches/rx_stream_orderer.rs | 4 ++-- neqo-transport/benches/transfer.rs | 8 ++++---- neqo-transport/src/connection/tests/fuzzing.rs | 2 +- neqo-transport/src/connection/tests/handshake.rs | 5 +++-- 8 files changed, 24 insertions(+), 18 deletions(-) diff --git a/.github/workflows/check.yml 
b/.github/workflows/check.yml index e17d563905..9dc8ff2b7f 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -148,7 +148,7 @@ jobs: # respective default features only. Can reveal warnings otherwise # hidden given that a plain cargo clippy combines all features of the # workspace. See e.g. https://github.com/mozilla/neqo/pull/1695. - cargo +${{ matrix.rust-toolchain }} hack clippy --all-targets -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} + cargo +${{ matrix.rust-toolchain }} hack clippy --all-targets --feature-powerset --exclude-features gecko -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} if: success() || failure() - name: Check rustdoc links diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_fuzzing.rs index 4e5a6de07f..1f3bfb14bd 100644 --- a/neqo-crypto/src/aead_fuzzing.rs +++ b/neqo-crypto/src/aead_fuzzing.rs @@ -20,6 +20,7 @@ pub struct FuzzingAead { } impl FuzzingAead { + #[allow(clippy::missing_errors_doc)] pub fn new( fuzzing: bool, version: Version, @@ -44,6 +45,7 @@ impl FuzzingAead { } } + #[allow(clippy::missing_errors_doc)] pub fn encrypt<'a>( &self, count: u64, @@ -61,6 +63,7 @@ impl FuzzingAead { Ok(&output[..l + 16]) } + #[allow(clippy::missing_errors_doc)] pub fn decrypt<'a>( &self, count: u64, diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 2ec1b4a3ea..45f61f6127 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -9,7 +9,7 @@ mod aead; #[cfg(feature = "fuzzing")] -mod aead_fuzzing; +pub mod aead_fuzzing; pub mod agent; mod agentio; mod auth; diff --git a/neqo-transport/benches/range_tracker.rs b/neqo-transport/benches/range_tracker.rs index c2f78f4874..ee611cf4ea 100644 --- a/neqo-transport/benches/range_tracker.rs +++ b/neqo-transport/benches/range_tracker.rs @@ -11,30 +11,32 @@ const CHUNK: u64 = 1000; const END: u64 = 100_000; fn build_coalesce(len: u64) -> RangeTracker { let mut used = RangeTracker::default(); - used.mark_acked(0, 
CHUNK as usize); - used.mark_sent(CHUNK, END as usize); + let chunk = usize::try_from(CHUNK).expect("should fit"); + used.mark_acked(0, chunk); + used.mark_sent(CHUNK, usize::try_from(END).expect("should fit")); // leave a gap or it will coalesce here for i in 2..=len { // These do not get immediately coalesced when marking since they're not at the end or start - used.mark_acked(i * CHUNK, CHUNK as usize); + used.mark_acked(i * CHUNK, chunk); } used } fn coalesce(c: &mut Criterion, count: u64) { + let chunk = usize::try_from(CHUNK).expect("should fit"); c.bench_function( &format!("coalesce_acked_from_zero {count}+1 entries"), |b| { b.iter_batched_ref( || build_coalesce(count), |used| { - used.mark_acked(CHUNK, CHUNK as usize); + used.mark_acked(CHUNK, chunk); let tail = (count + 1) * CHUNK; - used.mark_sent(tail, CHUNK as usize); - used.mark_acked(tail, CHUNK as usize); + used.mark_sent(tail, chunk); + used.mark_acked(tail, chunk); }, criterion::BatchSize::SmallInput, - ) + ); }, ); } diff --git a/neqo-transport/benches/rx_stream_orderer.rs b/neqo-transport/benches/rx_stream_orderer.rs index 0a1e763e97..d58e11ee86 100644 --- a/neqo-transport/benches/rx_stream_orderer.rs +++ b/neqo-transport/benches/rx_stream_orderer.rs @@ -11,14 +11,14 @@ fn rx_stream_orderer() { let mut rx = RxStreamOrderer::new(); let data: &[u8] = &[0; 1337]; - for i in 0..100000 { + for i in 0..100_000 { rx.inbound_frame(i * 1337, data); } } fn criterion_benchmark(c: &mut Criterion) { c.bench_function("RxStreamOrderer::inbound_frame()", |b| { - b.iter(rx_stream_orderer) + b.iter(rx_stream_orderer); }); } diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs index 98bd29ff05..32959f6cb5 100644 --- a/neqo-transport/benches/transfer.rs +++ b/neqo-transport/benches/transfer.rs @@ -20,7 +20,7 @@ const ZERO: Duration = Duration::from_millis(0); const JITTER: Duration = Duration::from_millis(10); const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte -fn benchmark_transfer(c: 
&mut Criterion, label: &str, seed: Option>) { +fn benchmark_transfer(c: &mut Criterion, label: &str, seed: &Option>) { let mut group = c.benchmark_group("transfer"); group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap())); group.noise_threshold(0.03); @@ -45,7 +45,7 @@ fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option Date: Mon, 25 Mar 2024 13:33:08 +0200 Subject: [PATCH 277/321] ci: Compare performance to msquic (#1750) * ci: Compare performance to msquic In progress * Make it run * Fixes * Fix path * Use msquic `secnetperf` * Fix actionlint * Sigh * Retry * Better reporting * Debug * Retry * Sigh^2 * Fix * Fixes * Fixes * Update bench.yml * Fix * Squashed commit of the following: commit 9ca5ecc434bcd2609845b3f27d72960bd5c7cb0a Merge: f50f4148 f4083215 Author: Lars Eggert Date: Fri Mar 15 00:12:29 2024 +0200 Merge branch 'main' into ci-bench-cc Signed-off-by: Lars Eggert commit f50f4148bdb04c21e1e0165d72a8181cbbb39274 Merge: 8e5290b2 bc262a53 Author: Lars Eggert Date: Wed Mar 13 07:59:01 2024 +0200 Merge branch 'main' into ci-bench-cc commit 8e5290b213ae4ea9459cb1b71a73a7118d4bdcc4 Merge: f0cd19ec 2ff9742e Author: Lars Eggert Date: Tue Mar 12 22:42:54 2024 +0200 Merge branch 'main' into ci-bench-cc commit f0cd19ecb6196ab861658b675146b57cf20a1f9a Merge: b2bb855b 17c4175b Author: Lars Eggert Date: Tue Mar 12 21:54:08 2024 +0200 Merge branch 'main' into ci-bench-cc commit b2bb855b25349d6c51d5877b18ce631b1c800250 Merge: d072504e 4ea2c566 Author: Lars Eggert Date: Tue Mar 12 17:25:13 2024 +0200 Merge branch 'ci-bench-cc' of github.com:larseggert/neqo into ci-bench-cc commit d072504e5e80e2bb541f948c6c7733c78e93cb4a Author: Lars Eggert Date: Tue Mar 12 17:24:52 2024 +0200 Reorder things so `results.ms` is included in the exported artifact commit 4ea2c5669cfc6bbb9d4b7eddb66ad2f5ab4d6aac Merge: c82ff3af 5c728907 Author: Lars Eggert Date: Tue Mar 12 17:18:37 2024 +0200 Merge branch 'main' into ci-bench-cc commit 
c82ff3af7dd92aa4b2218992f85faaeff0b7cecb Author: Lars Eggert Date: Tue Mar 12 16:41:59 2024 +0200 `killall` -> `pkill` commit d37e7068e65ed407e0dc38b15e67707dfc628fd5 Author: Lars Eggert Date: Tue Mar 12 16:37:50 2024 +0200 Go back to `killall` commit 11320d0095e4a8d637c40ed7eb57886582ce4a21 Author: Lars Eggert Date: Tue Mar 12 16:11:38 2024 +0200 No -INT commit 407bd4ff8cc34767063b234af1737dfc2ea06339 Author: Lars Eggert Date: Tue Mar 12 14:33:52 2024 +0200 kill -> killall Also reduce test transfer size. commit 9d3a8b792e8fed127503fd93ff0406c0b462768d Author: Lars Eggert Date: Tue Mar 12 13:57:51 2024 +0200 Use temp dir, and fix path error commit 84e22060c89c88ca699c185dc21d86d0902c3be9 Merge: 925cc120 b0d816a8 Author: Lars Eggert Date: Tue Mar 12 11:10:41 2024 +0200 Merge branch 'main' into ci-bench-cc commit 925cc120fb1c1330a9b12c9cf3ed6e5107251158 Merge: 3241f931 58890383 Author: Lars Eggert Date: Tue Mar 12 11:05:42 2024 +0200 Merge branch 'main' into ci-bench-cc commit 3241f9312951f205c827397a7fc0c59e5ef0b1ee Merge: 02620a7c d48fbed7 Author: Lars Eggert Date: Tue Mar 12 09:59:24 2024 +0200 Merge branch 'main' into ci-bench-cc Signed-off-by: Lars Eggert commit 02620a7c48bdfcad703a47b25f324554edb5de7c Author: Lars Eggert Date: Tue Mar 12 09:57:33 2024 +0200 Try to kill via `$!` commit b32ce9ef7b93c0960c071594b91cddc9fa674292 Merge: 9ea3a991 db1dbb29 Author: Lars Eggert Date: Tue Mar 12 09:15:18 2024 +0200 Merge branch 'ci-bench-cc' of github.com:larseggert/neqo into ci-bench-cc commit 9ea3a99119e9ad19c0e7157b8218d0a631d1de95 Author: Lars Eggert Date: Tue Mar 12 09:15:05 2024 +0200 Address comments from @martinthomson commit db1dbb29af26054d14a12c8d5b7f074bdc9c0c2b Merge: 681bbb7c 869afeaa Author: Lars Eggert Date: Mon Mar 11 19:33:53 2024 +0200 Merge branch 'main' into ci-bench-cc commit 681bbb7c6787094ff085aa6a65edff1dd0e2dd38 Merge: bd742af1 532dcc5f Author: Lars Eggert Date: Mon Mar 11 18:21:06 2024 +0200 Merge branch 'main' into ci-bench-cc Signed-off-by: 
Lars Eggert commit bd742af1b5b56422294a01a43ed50388d1bc31f5 Author: Lars Eggert Date: Mon Mar 11 17:00:34 2024 +0200 mkdir -p commit bc7b99fe0b156294590dda561318283693b88df6 Author: Lars Eggert Date: Mon Mar 11 16:29:14 2024 +0200 Fix commit e7bf50959d1c24b430414f38f242dd086b483a83 Merge: de64b3e1 cbd44418 Author: Lars Eggert Date: Mon Mar 11 16:27:56 2024 +0200 Merge branch 'main' into ci-bench-cc commit de64b3e1a0108a35af58cb8d6d433a576eaa13c4 Author: Lars Eggert Date: Mon Mar 11 16:00:19 2024 +0200 Wait for output before continuing commit 12386a32c2179727a6b4779d0ed96373571cfafd Author: Lars Eggert Date: Mon Mar 11 15:25:40 2024 +0200 ci: Benchmark NewReno and Cubic * Fixes * Fix * Fixes * Tweaks * Debug * actionlint * Fix sed * Pacing changed * Fixes * msquic server exits with non-zero * echo * Again * Again * A new hope * Finalize * Fixes * ls * Add comments * Report transfer size * Again * Quiet the server down * Remove debug output * Reformat table * Fix * Fix table * Mac sed != GNU sed * Avoid piping commands into themselves * Fix comment --------- Signed-off-by: Lars Eggert --- .github/actions/rust/action.yml | 2 +- .github/workflows/bench.yml | 158 +++++++++++++++++++-------- neqo-bin/src/bin/server/old_https.rs | 2 +- 3 files changed, 117 insertions(+), 45 deletions(-) diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index bfb09d332d..4b03b37b8d 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -30,7 +30,7 @@ runs: - name: Install Rust tools shell: bash - run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack cargo-mutants + run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack cargo-mutants hyperfine # sccache slows CI down, so we leave it disabled. # Leaving the steps below commented out, so we can re-evaluate enabling it later. 
diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 0e215f571f..80c51c236d 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -12,7 +12,7 @@ env: RUST_BACKTRACE: 1 TOOLCHAIN: nightly RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment, -C force-frame-pointers=yes - PERF_CMD: record -o perf.data -F997 --call-graph fp -g + PERF_OPT: record -F997 --call-graph fp -g jobs: bench: @@ -23,9 +23,17 @@ jobs: shell: bash steps: - - name: Checkout + - name: Checkout neqo uses: actions/checkout@v4 + - name: Checkout msquic + uses: actions/checkout@v4 + with: + repository: microsoft/msquic + ref: main + path: msquic + submodules: true + - name: Set PATH run: echo "/home/bench/.cargo/bin" >> "${GITHUB_PATH}" @@ -38,10 +46,17 @@ jobs: - name: Fetch and build NSS and NSPR uses: ./.github/actions/nss - - name: Build + - name: Build neqo run: | cargo "+$TOOLCHAIN" bench --features bench --no-run - cargo "+$TOOLCHAIN" build --release --bin neqo-client --bin neqo-server + cargo "+$TOOLCHAIN" build --release + + - name: Build msquic + run: | + mkdir -p msquic/build + cd msquic/build + cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DQUIC_BUILD_TOOLS=1 -DQUIC_BUILD_PERF=1 .. + cmake --build . - name: Download cached main-branch results id: criterion-cache @@ -61,56 +76,107 @@ jobs: taskset -c 0 nice -n -20 \ cargo "+$TOOLCHAIN" bench --features bench -- --noplot | tee results.txt - # Pin the transfer benchmark to core 0 and run it at elevated priority inside perf. - # Work around https://github.com/flamegraph-rs/flamegraph/issues/248 by passing explicit perf arguments. 
- - name: Profile cargo bench transfer - run: | - taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ - --bench --exact "Run multiple transfers with varying seeds" --noplot - - - name: Profile client/server transfer - run: | - { mkdir server; \ - cd server; \ - taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" \ - --bin neqo-server -- --db ../test-fixture/db "$HOST:4433" || true; } & - mkdir client; \ - cd client; \ - time taskset -c 1 nice -n -20 \ - cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" \ - --bin neqo-client -- --output-dir . "https://$HOST:4433/$SIZE" - killall -INT neqo-server - cd ${{ github.workspace }} - [ "$(wc -c < client/"$SIZE")" -eq "$SIZE" ] || exit 1 + # Compare various configurations of neqo against msquic, and gather perf data + # during the hyperfine runs. + - name: Compare neqo and msquic env: - HOST: localhost - SIZE: 1073741824 # 1 GB + HOST: 127.0.0.1 + PORT: 4433 + SIZE: 134217728 # 128 MB + run: | + TMP=$(mktemp -d) + # Make a cert and key for msquic. + openssl req -nodes -new -x509 -keyout "$TMP/key" -out "$TMP/cert" -subj "/CN=DOMAIN" 2>/dev/null + # Make a test file for msquic to serve. + truncate -s "$SIZE" "$TMP/$SIZE" + # Define the commands to run for each client and server. + declare -A client_cmd=( + ["neqo"]="target/release/neqo-client _cc _pacing --output-dir . -o -a hq-interop -Q 1 https://$HOST:$PORT/$SIZE" + ["msquic"]="msquic/build/bin/Release/quicinterop -test:D -custom:$HOST -port:$PORT -urls:https://$HOST:$PORT/$SIZE" + ) + declare -A server_cmd=( + ["neqo"]="target/release/neqo-server _cc _pacing -o -a hq-interop -Q 1 $HOST:$PORT 2> /dev/null" + ["msquic"]="msquic/build/bin/Release/quicinteropserver -root:$TMP -listen:$HOST -port:$PORT -file:$TMP/cert -key:$TMP/key -noexit > /dev/null || true" + ) + + # Replace various placeholders in the commands with the actual values. 
+ # Also generate an extension to append to the file name. + function transmogrify { + CMD=$1 + local cc=$2 + local pacing=$3 + if [ "$cc" != "" ]; then + CMD=${CMD//_cc/--cc $cc} + EXT="-$cc" + fi + if [ "$pacing" == "on" ]; then + CMD=${CMD//_pacing/} + EXT="$EXT-pacing" + else + CMD=${CMD//_pacing/--no-pacing} + EXT="$EXT-nopacing" + fi + } + + for server in msquic neqo; do + for client in msquic neqo; do + # msquic doesn't let us configure the congestion control or pacing. + if [ "$client" == "msquic" ] && [ "$server" == "msquic" ]; then + cc_opt=("") + pacing_opt=("") + else + cc_opt=("reno" "cubic") + pacing_opt=("on" "") + fi + for cc in "${cc_opt[@]}"; do + for pacing in "${pacing_opt[@]}"; do + # Make a tag string for this test, for the results. + TAG="$client,$server,$cc,$pacing" + echo "Running benchmarks for $TAG" | tee -a comparison.txt + transmogrify "${server_cmd[$server]}" "$cc" "$pacing" + # shellcheck disable=SC2086 + taskset -c 0 nice -n -20 \ + perf $PERF_OPT -o "$client-$server$EXT.server.perf" $CMD & + PID=$! + transmogrify "${client_cmd[$client]}" "$cc" "$pacing" + # shellcheck disable=SC2086 + taskset -c 1 nice -n -20 \ + perf $PERF_OPT -o "$client-$server$EXT.client.perf" \ + hyperfine -N --output null -w 1 -s "sleep 1" -n "$TAG" -u millisecond --export-markdown step.md "$CMD" | + tee -a comparison.txt + echo >> comparison.txt + kill $PID + cat step.md >> steps.md + # Sanity check the size of the last retrieved file. + [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 + done + done + done + done + # Merge the results tables generated by hyperfine into a single table. + echo "Transfer of $SIZE bytes over loopback." > comparison.md + awk '(!/^\| Command/ || !c++) && (!/^\|:/ || !d++)' < steps.md |\ + sed -E 's/`//g; s/^\|:/\|:---\|:---\|:---\|:/g; s/,/ \| /g; s/^\| Command/\| Client \| Server \| CC \| Pacing/g' >> comparison.md + rm -r "$TMP" # Re-enable turboboost, hyperthreading and use powersave governor. 
- name: Restore machine run: sudo /root/bin/unprep.sh if: success() || failure() || cancelled() - - name: Convert for profiler.firefox.com + - name: Post-process perf data run: | - perf script -i perf.data -F +pid > transfer.perf & - perf script -i client/perf.data -F +pid > client.perf & - perf script -i server/perf.data -F +pid > server.perf & + for f in *.perf; do + # Convert for profiler.firefox.com + perf script -i "$f" -F +pid > "$f.fx" & + # Generate perf reports + perf report -i "$f" --no-children --stdio > "$f.txt" & + # Generate flamegraphs + flamegraph --perfdata "$f" --palette rust -o "${f//.perf/.svg}" & + done wait - mv flamegraph.svg transfer.svg - mv client/flamegraph.svg client.svg - mv server/flamegraph.svg server.svg rm neqo.svg - - name: Generate perf reports - run: | - perf report -i perf.data --no-children --stdio > transfer.perf.txt & - perf report -i client/perf.data --no-children --stdio > client.perf.txt & - perf report -i server/perf.data --no-children --stdio > server.perf.txt & - wait - - name: Format results as Markdown id: results run: | @@ -132,6 +198,11 @@ jobs: -e 's/^([a-z0-9].*)$/* **\1**/gi' \ -e 's/(change:[^%]*% )([^%]*%)(.*)/\1**\2**\3/gi' \ >> results.md + { + echo "### Client/server transfer results" + cat comparison.md + } >> results.md + cat results.md > "$GITHUB_STEP_SUMMARY" - name: Remember main-branch push URL if: github.ref == 'refs/heads/main' @@ -158,6 +229,7 @@ jobs: path: | *.svg *.perf + *.perf.fx *.txt results.* target/criterion* diff --git a/neqo-bin/src/bin/server/old_https.rs b/neqo-bin/src/bin/server/old_https.rs index ec32032a05..505a16578f 100644 --- a/neqo-bin/src/bin/server/old_https.rs +++ b/neqo-bin/src/bin/server/old_https.rs @@ -202,7 +202,6 @@ impl HttpServer for Http09Server { None => break, Some(e) => e, }; - qdebug!("Event {event:?}"); match event { ConnectionEvent::NewStream { stream_id } => { self.write_state @@ -221,6 +220,7 @@ impl HttpServer for Http09Server { .unwrap(); } 
ConnectionEvent::StateChange(_) + | ConnectionEvent::SendStreamCreatable { .. } | ConnectionEvent::SendStreamComplete { .. } => (), e => qwarn!("unhandled event {e:?}"), } From 57dd4ec34e817f3edeb243ee6ea4934982f4147a Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 26 Mar 2024 16:57:32 +0100 Subject: [PATCH 278/321] refactor(github/interop): use grep instead of awk (#1747) * refactor(github/interop): use grep instead of awk * Simplify regex --- .github/actions/quic-interop-runner/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index ef4865bde6..30c7f0d8d6 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -92,8 +92,8 @@ runs: run: | echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment echo '' >> comment - # Ignore all, but table, which starts with "|:--". - cat quic-interop-runner/summary | awk '/^\|:--/{flag=1} flag' >> comment + # Ignore all, but table, which starts with "|". + grep -E '^\|' quic-interop-runner/summary >> comment echo '' >> comment shell: bash From 1af91f506120a3d68d6c938ddc42ea3a73218507 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 02:37:19 +0200 Subject: [PATCH 279/321] test: Let packets be modified with a closure during tests (#1773) * test: Let packets be modified with a closure during tests Broken out of #1678 to reduce the size of that PR. 
* Support dropping packets via `DatagramModifier` --- neqo-transport/src/connection/tests/mod.rs | 83 +++++++++++++++++++--- 1 file changed, 73 insertions(+), 10 deletions(-) diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index b6ce08f8d1..6f598fb23e 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -170,12 +170,17 @@ impl crate::connection::test_internal::FrameWriter for PingWriter { } } +trait DatagramModifier: FnMut(Datagram) -> Option {} + +impl DatagramModifier for T where T: FnMut(Datagram) -> Option {} + /// Drive the handshake between the client and server. -fn handshake( +fn handshake_with_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + mut modifier: impl DatagramModifier, ) -> Instant { let mut a = client; let mut b = server; @@ -212,7 +217,11 @@ fn handshake( did_ping[a.role()] = true; } assert!(had_input || output.is_some()); - input = output; + if let Some(d) = output { + input = modifier(d); + } else { + input = output; + } qtrace!("handshake: t += {:?}", rtt / 2); now += rtt / 2; mem::swap(&mut a, &mut b); @@ -223,6 +232,15 @@ fn handshake( now } +fn handshake( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + handshake_with_modifier(client, server, now, rtt, Some) +} + fn connect_fail( client: &mut Connection, server: &mut Connection, @@ -234,11 +252,12 @@ fn connect_fail( assert_error(server, &ConnectionError::Transport(server_error)); } -fn connect_with_rtt( +fn connect_with_rtt_and_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + modifier: impl DatagramModifier, ) -> Instant { fn check_rtt(stats: &Stats, rtt: Duration) { assert_eq!(stats.rtt, rtt); @@ -246,7 +265,7 @@ fn connect_with_rtt( let n = stats.frame_rx.ack + usize::from(stats.rtt_init_guess); assert_eq!(stats.rttvar, 
rttvar_after_n_updates(n, rtt)); } - let now = handshake(client, server, now, rtt); + let now = handshake_with_modifier(client, server, now, rtt, modifier); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); @@ -255,6 +274,15 @@ fn connect_with_rtt( now } +fn connect_with_rtt( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + connect_with_rtt_and_modifier(client, server, now, rtt, Some) +} + fn connect(client: &mut Connection, server: &mut Connection) { connect_with_rtt(client, server, now(), Duration::new(0, 0)); } @@ -301,8 +329,13 @@ fn assert_idle(client: &mut Connection, server: &mut Connection, rtt: Duration, } /// Connect with an RTT and then force both peers to be idle. -fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { - let now = connect_with_rtt(client, server, now(), rtt); +fn connect_rtt_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + rtt: Duration, + modifier: impl DatagramModifier, +) -> Instant { + let now = connect_with_rtt_and_modifier(client, server, now(), rtt, modifier); assert_idle(client, server, rtt, now); // Drain events from both as well. 
_ = client.events().count(); @@ -311,8 +344,20 @@ fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Durat now } +fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { + connect_rtt_idle_with_modifier(client, server, rtt, Some) +} + +fn connect_force_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + modifier: impl DatagramModifier, +) { + connect_rtt_idle_with_modifier(client, server, Duration::new(0, 0), modifier); +} + fn connect_force_idle(client: &mut Connection, server: &mut Connection) { - connect_rtt_idle(client, server, Duration::new(0, 0)); + connect_force_idle_with_modifier(client, server, Some); } fn fill_stream(c: &mut Connection, stream: StreamId) { @@ -524,12 +569,14 @@ fn assert_full_cwnd(packets: &[Datagram], cwnd: usize) { } /// Send something on a stream from `sender` to `receiver`, maybe allowing for pacing. +/// Takes a modifier function that can be used to modify the datagram before it is sent. /// Return the resulting datagram and the new time. 
#[must_use] -fn send_something_paced( +fn send_something_paced_with_modifier( sender: &mut Connection, mut now: Instant, allow_pacing: bool, + mut modifier: impl DatagramModifier, ) -> (Datagram, Instant) { let stream_id = sender.stream_create(StreamType::UniDi).unwrap(); assert!(sender.stream_send(stream_id, DEFAULT_STREAM_DATA).is_ok()); @@ -544,16 +591,32 @@ fn send_something_paced( .dgram() .expect("send_something: should have something to send") } - Output::Datagram(d) => d, + Output::Datagram(d) => modifier(d).unwrap(), Output::None => panic!("send_something: got Output::None"), }; (dgram, now) } +fn send_something_paced( + sender: &mut Connection, + now: Instant, + allow_pacing: bool, +) -> (Datagram, Instant) { + send_something_paced_with_modifier(sender, now, allow_pacing, Some) +} + +fn send_something_with_modifier( + sender: &mut Connection, + now: Instant, + modifier: impl DatagramModifier, +) -> Datagram { + send_something_paced_with_modifier(sender, now, false, modifier).0 +} + /// Send something on a stream from `sender` to `receiver`. /// Return the resulting datagram. fn send_something(sender: &mut Connection, now: Instant) -> Datagram { - send_something_paced(sender, now, false).0 + send_something_with_modifier(sender, now, Some) } /// Send something on a stream from `sender` to `receiver`. From 37be89473d72a1504894be36d387cda339c342af Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 09:25:00 +0200 Subject: [PATCH 280/321] feat: TOS improvements (#1774) * feat: TOS improvements Broken out of #1678 to reduce the size of that PR. 
* Remove function --- neqo-common/src/datagram.rs | 7 ++-- neqo-common/src/tos.rs | 48 ++++++++++++++++++++------- neqo-transport/src/connection/dump.rs | 13 ++++++-- neqo-transport/src/connection/mod.rs | 4 ++- 4 files changed, 55 insertions(+), 17 deletions(-) diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index 9cebb64ea5..cc2cb7d113 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -53,6 +53,10 @@ impl Datagram { pub fn ttl(&self) -> Option { self.ttl } + + pub fn set_tos(&mut self, tos: IpTos) { + self.tos = tos; + } } impl Deref for Datagram { @@ -90,8 +94,7 @@ use test_fixture::datagram; fn fmt_datagram() { let d = datagram([0; 1].to_vec()); assert_eq!( - format!("{d:?}"), + &format!("{d:?}"), "Datagram IpTos(Cs0, NotEct) TTL Some(128) [fe80::1]:443->[fe80::1]:443: [1]: 00" - .to_string() ); } diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs index 3610f72750..533c5447e2 100644 --- a/neqo-common/src/tos.rs +++ b/neqo-common/src/tos.rs @@ -36,7 +36,7 @@ impl From for u8 { impl From for IpTosEcn { fn from(v: u8) -> Self { - match v & 0b11 { + match v & 0b0000_0011 { 0b00 => IpTosEcn::NotEct, 0b01 => IpTosEcn::Ect1, 0b10 => IpTosEcn::Ect0, @@ -47,8 +47,8 @@ impl From for IpTosEcn { } impl From for IpTosEcn { - fn from(value: IpTos) -> Self { - IpTosEcn::from(value.0 & 0x3) + fn from(v: IpTos) -> Self { + IpTosEcn::from(u8::from(v)) } } @@ -166,14 +166,13 @@ impl From for IpTosDscp { } impl From for IpTosDscp { - fn from(value: IpTos) -> Self { - IpTosDscp::from(value.0 & 0xfc) + fn from(v: IpTos) -> Self { + IpTosDscp::from(u8::from(v)) } } /// The type-of-service field in an IP packet. 
-#[allow(clippy::module_name_repetitions)] -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, PartialEq, Eq, Default)] pub struct IpTos(u8); impl From for IpTos { @@ -215,15 +214,19 @@ impl From for IpTos { impl Debug for IpTos { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("IpTos") - .field(&IpTosDscp::from(self.0 & 0xfc)) - .field(&IpTosEcn::from(self.0 & 0x3)) + .field(&IpTosDscp::from(*self)) + .field(&IpTosEcn::from(*self)) .finish() } } -impl Default for IpTos { - fn default() -> Self { - (IpTosDscp::default(), IpTosEcn::default()).into() +impl IpTos { + pub fn set_ecn(&mut self, ecn: IpTosEcn) { + self.0 = u8::from(IpTosDscp::from(*self)) | u8::from(ecn); + } + + pub fn set_dscp(&mut self, dscp: IpTosDscp) { + self.0 = u8::from(IpTosEcn::from(*self)) | u8::from(dscp); } } @@ -322,4 +325,25 @@ mod tests { assert_eq!(tos, u8::from(iptos)); assert_eq!(IpTos::from(tos), iptos); } + + #[test] + fn iptos_to_iptosdscp() { + let tos = IpTos::from((IpTosDscp::Af41, IpTosEcn::NotEct)); + let dscp = IpTosDscp::from(tos); + assert_eq!(dscp, IpTosDscp::Af41); + } + + #[test] + fn tos_modify_ecn() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::NotEct).into(); + iptos.set_ecn(IpTosEcn::Ce); + assert_eq!(u8::from(iptos), 0b1000_1011); + } + + #[test] + fn tos_modify_dscp() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::Ect1).into(); + iptos.set_dscp(IpTosDscp::Le); + assert_eq!(u8::from(iptos), 0b0000_0101); + } } diff --git a/neqo-transport/src/connection/dump.rs b/neqo-transport/src/connection/dump.rs index 34ac58f55e..12d337c570 100644 --- a/neqo-transport/src/connection/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -9,7 +9,7 @@ use std::fmt::Write; -use neqo_common::{qdebug, Decoder}; +use neqo_common::{qdebug, Decoder, IpTos}; use crate::{ connection::Connection, @@ -26,6 +26,7 @@ pub fn dump_packet( pt: PacketType, pn: PacketNumber, payload: &[u8], + tos: IpTos, ) { if log::STATIC_MAX_LEVEL == 
log::LevelFilter::Off || !log::log_enabled!(log::Level::Debug) { return; @@ -43,5 +44,13 @@ pub fn dump_packet( write!(&mut s, "\n {} {}", dir, &x).unwrap(); } } - qdebug!([conn], "pn={} type={:?} {}{}", pn, pt, path.borrow(), s); + qdebug!( + [conn], + "pn={} type={:?} {} {:?}{}", + pn, + pt, + path.borrow(), + tos, + s + ); } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 75c3490cba..535d3f4084 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -19,7 +19,7 @@ use std::{ use neqo_common::{ event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, - qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, + qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, IpTos, Role, }; use neqo_crypto::{ agent::CertificateInfo, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, @@ -1492,6 +1492,7 @@ impl Connection { payload.packet_type(), payload.pn(), &payload[..], + d.tos(), ); qlog::packet_received(&mut self.qlog, &packet, &payload); @@ -2255,6 +2256,7 @@ impl Connection { pt, pn, &builder.as_ref()[payload_start..], + IpTos::default(), // TODO: set from path ); qlog::packet_sent( &mut self.qlog, From 6a51a35dd63aa9833f5a4bcf31900acb7bbf64d6 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 27 Mar 2024 08:39:30 +0100 Subject: [PATCH 281/321] perf(bin/server): increase msg size and don't allocate msg per resp (#1772) * perf(bin/server): increase msg size and don't allocate msg per resp Previously `neqo-server` would respond to a request by repeatedly sending a static 440 byte message (Major-General's Song). Instead of sending 440 bytes, increase the batch size to 4096 bytes. This also matches the `neqo-client` receive buffer size. 
https://github.com/mozilla/neqo/blob/76630a5ebb6c6b94de6a40cf3f439b9a846f6ab7/neqo-bin/src/bin/client/http3.rs#L165 Previously `ResponseData::repeat` would convert the provided `buf: &[u8]` to ` Vec`, i.e. re-allocate the buf. Instead keep a reference to the original buf, thus removing the allocation. * Remove unnecessary into() --- neqo-bin/src/bin/server/main.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/bin/server/main.rs index 753794d6f6..62eb19e78c 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/bin/server/main.rs @@ -5,6 +5,7 @@ // except according to those terms. use std::{ + borrow::Cow, cell::RefCell, cmp::min, collections::HashMap, @@ -196,7 +197,7 @@ trait HttpServer: Display { } struct ResponseData { - data: Vec, + data: Cow<'static, [u8]>, offset: usize, remaining: usize, } @@ -211,7 +212,7 @@ impl From> for ResponseData { fn from(data: Vec) -> Self { let remaining = data.len(); Self { - data, + data: Cow::Owned(data), offset: 0, remaining, } @@ -219,9 +220,9 @@ impl From> for ResponseData { } impl ResponseData { - fn repeat(buf: &[u8], total: usize) -> Self { + fn repeat(buf: &'static [u8], total: usize) -> Self { Self { - data: buf.to_owned(), + data: Cow::Borrowed(buf), offset: 0, remaining: total, } @@ -260,14 +261,7 @@ struct SimpleServer { } impl SimpleServer { - const MESSAGE: &'static [u8] = b"I am the very model of a modern Major-General,\n\ - I've information vegetable, animal, and mineral,\n\ - I know the kings of England, and I quote the fights historical\n\ - From Marathon to Waterloo, in order categorical;\n\ - I'm very well acquainted, too, with matters mathematical,\n\ - I understand equations, both the simple and quadratical,\n\ - About binomial theorem, I'm teeming with a lot o' news,\n\ - With many cheerful facts about the square of the hypotenuse.\n"; + const MESSAGE: &'static [u8] = &[0; 4096]; pub fn new( args: &Args, From 
47dfb3baa50ef89751cfc6818d6e2ee79616a66f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 09:52:23 +0200 Subject: [PATCH 282/321] build: Use system NSS when possible (#1739) * build: Use system-installed NSS instead of building our own Fixes #1711 * Update CI * Fix docs * Fix Dockerfile * Fix * build-essential * Try and search for nss * Try to get newest versions * More fixes * Restore Windows link.exe fix * Install pkg-config * Remove MSYS2 linker * Retain ability to build NSS from source * Update Linux instructions * Try and find MSYS2 library path * Retry * Again * Again * Again * Again * Again * Again * Again * Again * Again * Again * Revert many things, keep building NSS from source unless system version is OK * Fixes * Fixes * debug * Debug * Fixes * Compare versions with the `semver` crate * Use NSS version from code in CI * File has other name * Update .github/actions/nss/action.yml Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review comments. Not ready yet. Need to determine what to do in `nss_dir()`. See comments. 
* Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review * Updates to README * Remove `nss_dir()` --------- Signed-off-by: Lars Eggert Co-authored-by: Martin Thomson --- .github/actions/nss/action.yml | 33 +++++++++ .github/workflows/check.yml | 22 ++---- README.md | 114 ++++++++++++++++++----------- neqo-crypto/Cargo.toml | 1 + neqo-crypto/bindings/bindings.toml | 5 -- neqo-crypto/bindings/mozpkix.hpp | 1 - neqo-crypto/build.rs | 105 +++++++++++++++----------- neqo-crypto/min_version.txt | 1 + neqo-crypto/src/err.rs | 30 +++++++- neqo-crypto/src/lib.rs | 3 +- neqo-crypto/src/min_version.rs | 9 +++ 11 files changed, 214 insertions(+), 110 deletions(-) delete mode 100644 neqo-crypto/bindings/mozpkix.hpp create mode 100644 neqo-crypto/min_version.txt create mode 100644 neqo-crypto/src/min_version.rs diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 23232ebc13..ec6f13eaf8 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -16,16 +16,46 @@ inputs: runs: using: composite steps: + - name: Check system NSS version + shell: bash + run: | + if ! command -v pkg-config &> /dev/null; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + if ! pkg-config --exists nss; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_VERSION="$(pkg-config --modversion nss)" + if [ "$?" -ne 0 ]; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_MAJOR=$(echo "$NSS_VERSION" | cut -d. -f1) + NSS_MINOR=$(echo "$NSS_VERSION" | cut -d. -f2) + REQ_NSS_MAJOR=$(cat neqo-crypto/min_version.txt | cut -d. -f1) + REQ_NSS_MINOR=$(cat neqo-crypto/min_version.txt | cut -d. 
-f2) + if [ "$NSS_MAJOR" -lt "REQ_NSS_MAJOR" ] || [ "$NSS_MAJOR" -eq "REQ_NSS_MAJOR" -a "$NSS_MINOR" -lt "REQ_NSS_MINOR"]; then + echo "System NSS is too old: $NSS_VERSION" + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + echo "System NSS is suitable: $NSS_VERSION" + echo "BUILD_NSS=0" >> "$GITHUB_ENV" + # Ideally, we'd use this. But things are sufficiently flaky that we're better off # trying both hg and git. Leaving this here in case we want to re-try in the future. # # - name: Checkout NSPR + # if: env.BUILD_NSS == '1' # uses: actions/checkout@v4 # with: # repository: "nss-dev/nspr" # path: ${{ github.workspace }}/nspr # - name: Checkout NSS + # if: env.BUILD_NSS == '1' # uses: actions/checkout@v4 # with: # repository: "nss-dev/nss" @@ -33,18 +63,21 @@ runs: - name: Checkout NSPR shell: bash + if: env.BUILD_NSS == '1' run: | hg clone https://hg.mozilla.org/projects/nspr "${{ github.workspace }}/nspr" || \ git clone --depth=1 https://github.com/nss-dev/nspr "${{ github.workspace }}/nspr" - name: Checkout NSS shell: bash + if: env.BUILD_NSS == '1' run: | hg clone https://hg.mozilla.org/projects/nss "${{ github.workspace }}/nss" || \ git clone --depth=1 https://github.com/nss-dev/nss "${{ github.workspace }}/nss" - name: Build shell: bash + if: env.BUILD_NSS == '1' run: | if [ "${{ inputs.type }}" != "Debug" ]; then # We want to do an optimized build for accurate CPU profiling, but diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 9dc8ff2b7f..4e47961d8e 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -49,33 +49,21 @@ jobs: sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - # In addition to installing dependencies, first make sure System Integrity Protection (SIP) - # is disabled on this MacOS runner. 
This is needed to allow the NSS libraries to be loaded - # from the build directory and avoid various other test failures. This seems to always be - # the case on any macos-13 runner, but not consistently on macos-latest (which is currently - # macos-12, FWIW). - name: Install dependencies (MacOS) if: runner.os == 'MacOS' run: | - csrutil status | grep disabled - brew install ninja mercurial llvm + brew update + brew install llvm nss echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" - ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python - # python3 -m pip install gyp-next - # Above does not work, since pypi only has gyp 0.15.0, which is too old - # for the homebrew python3. Install from source instead. - python3 -m pip install git+https://github.com/nodejs/gyp-next - python3 -m pip install packaging - echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - - name: Use MSYS2 environment and install more dependencies (Windows) + - name: Install dependencies (Windows) if: runner.os == 'Windows' run: | # shellcheck disable=SC2028 { - echo "C:\\msys64\\usr\\bin" - echo "C:\\msys64\\mingw64\\bin" + echo C:/msys64/usr/bin + echo C:/msys64/mingw64/bin } >> "$GITHUB_PATH" /c/msys64/usr/bin/pacman -S --noconfirm nsinstall python3 -m pip install git+https://github.com/nodejs/gyp-next diff --git a/README.md b/README.md index 31d6ab9e94..beadf22ecf 100644 --- a/README.md +++ b/README.md @@ -1,82 +1,102 @@ -# Neqo, an Implementation of QUIC written in Rust +# Neqo, an Implementation of QUIC in Rust ![neqo logo](https://github.com/mozilla/neqo/raw/main/neqo.png "neqo logo") -To run test HTTP/3 programs (neqo-client and neqo-server): +To build Neqo: -* `cargo build` -* `./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db` -* `./target/debug/neqo-client http://127.0.0.1:12345/` - -If a "Failure to load dynamic library" error happens at runtime, do ```shell -export LD_LIBRARY_PATH="$(dirname "$(find . 
-name libssl3.so -print | head -1)")" +cargo build ``` -On a macOS, do +This will use a system-installed [NSS][NSS] library if it is new enough. (See "Build with Separate NSS/NSPR" below if NSS is not installed or it is deemed too old.) + +To run test HTTP/3 programs (`neqo-client` and `neqo-server`): + ```shell -export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +./target/debug/neqo-server '[::]:12345' +./target/debug/neqo-client 'https://[::]:12345/' ``` -## Faster Builds with Separate NSS/NSPR +## Build with separate NSS/NSPR -You can clone NSS (https://hg.mozilla.org/projects/nss) and NSPR -(https://hg.mozilla.org/projects/nspr) into the same directory and export an +You can clone [NSS][NSS] and [NSPR][NSPR] into the same directory and export an environment variable called `NSS_DIR` pointing to NSS. This causes the build to use the existing NSS checkout. However, in order to run anything that depends -on NSS, you need to set `$\[DY]LD\_LIBRARY\_PATH` to point to -`$NSS_DIR/../dist/Debug/lib`. +on NSS, you need to set an environment as follows: + +### Linux + +```shell +export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" +``` + +### macOS + +```shell +export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +``` -Note: If you did not compile NSS separately, you need to have mercurial (hg), installed. -NSS builds require gyp, and ninja (or ninja-build) to be present also. +Note: If you did not already compile NSS separately, you need to have +[Mercurial (hg)][HG], installed. NSS builds require [GYP][GYP] and +[Ninja][NINJA] to be installed. 
## Debugging Neqo -### QUIC Logging +### QUIC logging -Enable [QLOG](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/) with: +Enable generation of [QLOG][QLOG] logs with: -``` -$ mkdir "$logdir" -$ ./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db --qlog-dir "$logdir" -$ ./target/debug/neqo-client 'https://[::]:12345/' --qlog-dir "$logdir" +```shell +target/debug/neqo-server '[::]:12345' --qlog-dir . +target/debug/neqo-client 'https://[::]:12345/' --qlog-dir . ``` -You may use https://qvis.quictools.info/ by uploading the QLOG files and visualize the flows. +You can of course specify a different directory for the QLOG files. +You can upload QLOG files to [qvis][QVIS] to visualize the flows. -### Using SSLKEYLOGFILE to decrypt Wireshark logs +### Using `SSLKEYLOGFILE` to decrypt Wireshark logs -[Info here](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format) - -TODO: What is the minimum Wireshark version needed? -TODO: Above link may be incorrect, protocol now called TLS instead of SSL? +You can export TLS keys by setting the `SSLKEYLOGFILE` environment variable +to a filename to instruct NSS to dump keys in the +[standard format](https://datatracker.ietf.org/doc/draft-ietf-tls-keylogfile/) +to enable decryption by [Wireshark](https://wiki.wireshark.org/TLS) and other tools. ### Using RUST_LOG effectively As documented in the [env_logger documentation](https://docs.rs/env_logger/), the `RUST_LOG` environment variable can be used to selectively enable log messages -from Rust code. This works for Neqo's cmdline tools, as well as for when Neqo is +from Rust code. This works for Neqo's command line tools, as well as for when Neqo is incorporated into Gecko, although [Gecko needs to be built in debug mode](https://developer.mozilla.org/en-US/docs/Mozilla/Developer_guide/Build_Instructions/Configuring_Build_Options). Some examples: -1. 
`RUST_LOG=neqo_transport::dump ./mach run` lists sent and received QUIC - packets and their frames' contents only. -1. `RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run` sets a - 'debug' log level for transport, 'trace' level for http3, and 'info' log + +1. ```shell + RUST_LOG=neqo_transport::dump ./mach run + ``` + + lists sent and received QUIC packets and their frames' contents only. + +1. ```shell + RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run + ``` + + sets a `debug` log level for `transport`, `trace` level for `http3`, and `info` log level for all other Rust crates, both Neqo and others used by Gecko. -1. `RUST_LOG=neqo=trace,error ./mach run` sets `trace` level for all modules - starting with "neqo", and sets `error` as minimum log level for other - unrelated Rust log messages. +1. ```shell + RUST_LOG=neqo=trace,error ./mach run + ``` + + sets `trace` level for all modules starting with `neqo`, and sets `error` as minimum log level for other unrelated Rust log messages. -### Trying In-development Neqo code in Gecko +### Trying in-development Neqo code in Gecko In a checked-out copy of Gecko source, set `[patches.*]` values for the four Neqo crates to local versions in the root `Cargo.toml`. For example, if Neqo was checked out to `/home/alice/git/neqo`, add the following lines to the root `Cargo.toml`. -``` +```toml [patch."https://github.com/mozilla/neqo"] neqo-http3 = { path = "/home/alice/git/neqo/neqo-http3" } neqo-transport = { path = "/home/alice/git/neqo/neqo-transport" } @@ -87,11 +107,23 @@ neqo-crypto = { path = "/home/alice/git/neqo/neqo-crypto" } Then run the following: -``` +```shell ./mach vendor rust ``` -Compile Gecko as usual with `./mach build`. +Compile Gecko as usual with + +```shell +./mach build +``` Note: Using newer Neqo code with Gecko may also require changes (likely to `neqo_glue`) if something has changed. 
+ +[NSS]: https://hg.mozilla.org/projects/nss +[NSPR]: https://hg.mozilla.org/projects/nspr +[GYP]: https://github.com/nodejs/gyp-next +[HG]: https://www.mercurial-scm.org/ +[NINJA]: https://ninja-build.org/ +[QLOG]: https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/ +[QVIS]: https://qvis.quictools.info/ diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 588d084741..d2f70a5714 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -21,6 +21,7 @@ neqo-common = { path = "../neqo-common" } # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 bindgen = { version = "0.69", default-features = false, features = ["runtime"] } mozbuild = { version = "0.1", default-features = false, optional = true } +semver = { version = "1.0", default-features = false } serde = { version = "1.0", default-features = false } serde_derive = { version = "1.0", default-features = false } toml = { version = "0.5", default-features = false } diff --git a/neqo-crypto/bindings/bindings.toml b/neqo-crypto/bindings/bindings.toml index 3e5c1fdf7d..72c6d524d5 100644 --- a/neqo-crypto/bindings/bindings.toml +++ b/neqo-crypto/bindings/bindings.toml @@ -265,8 +265,3 @@ enums = [ [nspr_time] types = ["PRTime"] functions = ["PR_Now"] - -[mozpkix] -cplusplus = true -types = ["mozilla::pkix::ErrorCode"] -enums = ["mozilla::pkix::ErrorCode"] diff --git a/neqo-crypto/bindings/mozpkix.hpp b/neqo-crypto/bindings/mozpkix.hpp deleted file mode 100644 index d0a6cb5861..0000000000 --- a/neqo-crypto/bindings/mozpkix.hpp +++ /dev/null @@ -1 +0,0 @@ -#include "mozpkix/pkixnss.h" \ No newline at end of file diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index c4c2a73e75..2dd4543797 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -12,8 +12,13 @@ use std::{ }; use bindgen::Builder; +use semver::{Version, VersionReq}; use serde_derive::Deserialize; +#[path = "src/min_version.rs"] +mod min_version; +use 
min_version::MINIMUM_NSS_VERSION; + const BINDINGS_DIR: &str = "bindings"; const BINDINGS_CONFIG: &str = "bindings.toml"; @@ -90,46 +95,6 @@ fn setup_clang() { } } -fn nss_dir() -> PathBuf { - let dir = if let Ok(dir) = env::var("NSS_DIR") { - let path = PathBuf::from(dir.trim()); - assert!( - !path.is_relative(), - "The NSS_DIR environment variable is expected to be an absolute path." - ); - path - } else { - let out_dir = env::var("OUT_DIR").unwrap(); - let dir = Path::new(&out_dir).join("nss"); - if !dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nss", - dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nss"); - } - let nspr_dir = Path::new(&out_dir).join("nspr"); - if !nspr_dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nspr", - nspr_dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nspr"); - } - dir - }; - assert!(dir.is_dir(), "NSS_DIR {dir:?} doesn't exist"); - // Note that this returns a relative path because UNC - // paths on windows cause certain tools to explode. - dir -} - fn get_bash() -> PathBuf { // If BASH is set, use that. if let Ok(bash) = env::var("BASH") { @@ -295,11 +260,63 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool .expect("couldn't write bindings"); } -fn setup_standalone() -> Vec { +fn pkg_config() -> Vec { + let modversion = Command::new("pkg-config") + .args(["--modversion", "nss"]) + .output() + .expect("pkg-config reports NSS as absent") + .stdout; + let modversion = String::from_utf8(modversion).expect("non-UTF8 from pkg-config"); + let modversion = modversion.trim(); + // The NSS version number does not follow semver numbering, because it omits the patch version + // when that's 0. Deal with that. 
+ let modversion_for_cmp = if modversion.chars().filter(|c| *c == '.').count() == 1 { + modversion.to_owned() + ".0" + } else { + modversion.to_owned() + }; + let modversion_for_cmp = + Version::parse(&modversion_for_cmp).expect("NSS version not in semver format"); + let version_req = VersionReq::parse(&format!(">={}", MINIMUM_NSS_VERSION.trim())).unwrap(); + assert!( + version_req.matches(&modversion_for_cmp), + "neqo has NSS version requirement {version_req}, found {modversion}" + ); + + let cfg = Command::new("pkg-config") + .args(["--cflags", "--libs", "nss"]) + .output() + .expect("NSS flags not returned by pkg-config") + .stdout; + let cfg_str = String::from_utf8(cfg).expect("non-UTF8 from pkg-config"); + + let mut flags: Vec = Vec::new(); + for f in cfg_str.split(' ') { + if let Some(include) = f.strip_prefix("-I") { + flags.push(String::from(f)); + println!("cargo:include={include}"); + } else if let Some(path) = f.strip_prefix("-L") { + println!("cargo:rustc-link-search=native={path}"); + } else if let Some(lib) = f.strip_prefix("-l") { + println!("cargo:rustc-link-lib=dylib={lib}"); + } else { + println!("Warning: Unknown flag from pkg-config: {f}"); + } + } + + flags +} + +fn setup_standalone(nss: &str) -> Vec { setup_clang(); println!("cargo:rerun-if-env-changed=NSS_DIR"); - let nss = nss_dir(); + let nss = PathBuf::from(nss); + assert!( + !nss.is_relative(), + "The NSS_DIR environment variable is expected to be an absolute path." 
+ ); + build_nss(nss.clone()); // $NSS_DIR/../dist/ @@ -406,8 +423,10 @@ fn setup_for_gecko() -> Vec { fn main() { let flags = if cfg!(feature = "gecko") { setup_for_gecko() + } else if let Ok(nss_dir) = env::var("NSS_DIR") { + setup_standalone(nss_dir.trim()) } else { - setup_standalone() + pkg_config() }; let config_file = PathBuf::from(BINDINGS_DIR).join(BINDINGS_CONFIG); diff --git a/neqo-crypto/min_version.txt b/neqo-crypto/min_version.txt new file mode 100644 index 0000000000..422c9c7093 --- /dev/null +++ b/neqo-crypto/min_version.txt @@ -0,0 +1 @@ +3.98 diff --git a/neqo-crypto/src/err.rs b/neqo-crypto/src/err.rs index 187303d2a9..8d4f239a0b 100644 --- a/neqo-crypto/src/err.rs +++ b/neqo-crypto/src/err.rs @@ -16,13 +16,39 @@ mod codes { #![allow(non_snake_case)] include!(concat!(env!("OUT_DIR"), "/nss_secerr.rs")); include!(concat!(env!("OUT_DIR"), "/nss_sslerr.rs")); - include!(concat!(env!("OUT_DIR"), "/mozpkix.rs")); } -pub use codes::{mozilla_pkix_ErrorCode as mozpkix, SECErrorCodes as sec, SSLErrorCodes as ssl}; +pub use codes::{SECErrorCodes as sec, SSLErrorCodes as ssl}; pub mod nspr { include!(concat!(env!("OUT_DIR"), "/nspr_err.rs")); } +pub mod mozpkix { + // These are manually extracted from the many bindings generated + // by bindgen when provided with the simple header: + // #include "mozpkix/pkixnss.h" + + #[allow(non_camel_case_types)] + pub type mozilla_pkix_ErrorCode = ::std::os::raw::c_int; + pub const MOZILLA_PKIX_ERROR_KEY_PINNING_FAILURE: mozilla_pkix_ErrorCode = -16384; + pub const MOZILLA_PKIX_ERROR_CA_CERT_USED_AS_END_ENTITY: mozilla_pkix_ErrorCode = -16383; + pub const MOZILLA_PKIX_ERROR_INADEQUATE_KEY_SIZE: mozilla_pkix_ErrorCode = -16382; + pub const MOZILLA_PKIX_ERROR_V1_CERT_USED_AS_CA: mozilla_pkix_ErrorCode = -16381; + pub const MOZILLA_PKIX_ERROR_NO_RFC822NAME_MATCH: mozilla_pkix_ErrorCode = -16380; + pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_CERTIFICATE: mozilla_pkix_ErrorCode = -16379; + pub const 
MOZILLA_PKIX_ERROR_NOT_YET_VALID_ISSUER_CERTIFICATE: mozilla_pkix_ErrorCode = -16378; + pub const MOZILLA_PKIX_ERROR_SIGNATURE_ALGORITHM_MISMATCH: mozilla_pkix_ErrorCode = -16377; + pub const MOZILLA_PKIX_ERROR_OCSP_RESPONSE_FOR_CERT_MISSING: mozilla_pkix_ErrorCode = -16376; + pub const MOZILLA_PKIX_ERROR_VALIDITY_TOO_LONG: mozilla_pkix_ErrorCode = -16375; + pub const MOZILLA_PKIX_ERROR_REQUIRED_TLS_FEATURE_MISSING: mozilla_pkix_ErrorCode = -16374; + pub const MOZILLA_PKIX_ERROR_INVALID_INTEGER_ENCODING: mozilla_pkix_ErrorCode = -16373; + pub const MOZILLA_PKIX_ERROR_EMPTY_ISSUER_NAME: mozilla_pkix_ErrorCode = -16372; + pub const MOZILLA_PKIX_ERROR_ADDITIONAL_POLICY_CONSTRAINT_FAILED: mozilla_pkix_ErrorCode = + -16371; + pub const MOZILLA_PKIX_ERROR_SELF_SIGNED_CERT: mozilla_pkix_ErrorCode = -16370; + pub const MOZILLA_PKIX_ERROR_MITM_DETECTED: mozilla_pkix_ErrorCode = -16369; + pub const END_OF_LIST: mozilla_pkix_ErrorCode = -16368; +} + pub type Res = Result; #[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)] diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 45f61f6127..33fe623b17 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -59,7 +59,8 @@ pub use self::{ ssl::Opt, }; -const MINIMUM_NSS_VERSION: &str = "3.97"; +mod min_version; +use min_version::MINIMUM_NSS_VERSION; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] #[allow(clippy::upper_case_acronyms)] diff --git a/neqo-crypto/src/min_version.rs b/neqo-crypto/src/min_version.rs new file mode 100644 index 0000000000..4386371b1b --- /dev/null +++ b/neqo-crypto/src/min_version.rs @@ -0,0 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The minimum version of NSS that is required by this version of neqo. +/// Note that the string may contain whitespace at the beginning and/or end. 
+pub(crate) const MINIMUM_NSS_VERSION: &str = include_str!("../min_version.txt"); From f1560abfeea51a309bcff70da2325de6e07a8e89 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 10:39:57 +0200 Subject: [PATCH 283/321] chore: Rename feature `fuzzing` to `disable-encryption` (#1767) * chore: Rename feature `fuzzing` to `disable-encryption` Because `cargo fuzz` relies on being able to use `fuzzing` * WIP * More * Add `disable-encryption` feature to CI, to make sure it doesn't rot * shellcheck * Undo * Fix * Address code review and rename `fuzzing` -> `null` * Fix clippy * Address code review --------- Signed-off-by: Lars Eggert --- neqo-crypto/Cargo.toml | 2 +- neqo-crypto/src/aead.rs | 8 +- .../src/{aead_fuzzing.rs => aead_null.rs} | 64 ++++---------- neqo-crypto/src/lib.rs | 12 +-- neqo-crypto/src/selfencrypt.rs | 2 +- neqo-crypto/tests/aead.rs | 3 +- neqo-crypto/tests/selfencrypt.rs | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- neqo-transport/src/connection/mod.rs | 1 - neqo-transport/src/connection/params.rs | 13 --- .../src/connection/tests/handshake.rs | 6 +- neqo-transport/src/connection/tests/mod.rs | 2 +- .../connection/tests/{fuzzing.rs => null.rs} | 8 +- neqo-transport/src/crypto.rs | 88 +++---------------- neqo-transport/src/packet/mod.rs | 2 +- neqo-transport/src/packet/retry.rs | 1 - neqo-transport/tests/common/mod.rs | 9 +- neqo-transport/tests/conn_vectors.rs | 2 +- neqo-transport/tests/retry.rs | 2 +- 20 files changed, 53 insertions(+), 178 deletions(-) rename neqo-crypto/src/{aead_fuzzing.rs => aead_null.rs} (55%) rename neqo-transport/src/connection/tests/{fuzzing.rs => null.rs} (84%) diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index d2f70a5714..47337d99c0 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -31,7 +31,7 @@ test-fixture = { path = "../test-fixture" } [features] gecko = ["mozbuild"] -fuzzing = [] +disable-encryption = [] [lib] # See 
https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index bf7d7fe9d7..21027d55b2 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -63,13 +63,7 @@ impl RealAead { /// # Errors /// /// Returns `Error` when the supporting NSS functions fail. - pub fn new( - _fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { + pub fn new(version: Version, cipher: Cipher, secret: &SymKey, prefix: &str) -> Res { let s: *mut PK11SymKey = **secret; unsafe { Self::from_raw(version, cipher, s, prefix) } } diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_null.rs similarity index 55% rename from neqo-crypto/src/aead_fuzzing.rs rename to neqo-crypto/src/aead_null.rs index 1f3bfb14bd..2d5656de73 100644 --- a/neqo-crypto/src/aead_fuzzing.rs +++ b/neqo-crypto/src/aead_null.rs @@ -4,87 +4,63 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![cfg(feature = "disable-encryption")] + use std::fmt; use crate::{ constants::{Cipher, Version}, err::{sec::SEC_ERROR_BAD_DATA, Error, Res}, p11::SymKey, - RealAead, }; -pub const FIXED_TAG_FUZZING: &[u8] = &[0x0a; 16]; +pub const AEAD_NULL_TAG: &[u8] = &[0x0a; 16]; -pub struct FuzzingAead { - real: Option, -} +pub struct AeadNull {} -impl FuzzingAead { +impl AeadNull { #[allow(clippy::missing_errors_doc)] - pub fn new( - fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { - let real = if fuzzing { - None - } else { - Some(RealAead::new(false, version, cipher, secret, prefix)?) 
- }; - Ok(Self { real }) + pub fn new(_version: Version, _cipher: Cipher, _secret: &SymKey, _prefix: &str) -> Res { + Ok(Self {}) } #[must_use] pub fn expansion(&self) -> usize { - if let Some(aead) = &self.real { - aead.expansion() - } else { - FIXED_TAG_FUZZING.len() - } + AEAD_NULL_TAG.len() } #[allow(clippy::missing_errors_doc)] pub fn encrypt<'a>( &self, - count: u64, - aad: &[u8], + _count: u64, + _aad: &[u8], input: &[u8], output: &'a mut [u8], ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.encrypt(count, aad, input, output); - } - let l = input.len(); output[..l].copy_from_slice(input); - output[l..l + 16].copy_from_slice(FIXED_TAG_FUZZING); + output[l..l + 16].copy_from_slice(AEAD_NULL_TAG); Ok(&output[..l + 16]) } #[allow(clippy::missing_errors_doc)] pub fn decrypt<'a>( &self, - count: u64, - aad: &[u8], + _count: u64, + _aad: &[u8], input: &[u8], output: &'a mut [u8], ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.decrypt(count, aad, input, output); - } - - if input.len() < FIXED_TAG_FUZZING.len() { + if input.len() < AEAD_NULL_TAG.len() { return Err(Error::from(SEC_ERROR_BAD_DATA)); } - let len_encrypted = input.len() - FIXED_TAG_FUZZING.len(); + let len_encrypted = input.len() - AEAD_NULL_TAG.len(); // Check that: // 1) expansion is all zeros and // 2) if the encrypted data is also supplied that at least some values are no zero // (otherwise padding will be interpreted as a valid packet) - if &input[len_encrypted..] == FIXED_TAG_FUZZING + if &input[len_encrypted..] 
== AEAD_NULL_TAG && (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0)) { output[..len_encrypted].copy_from_slice(&input[..len_encrypted]); @@ -95,12 +71,8 @@ impl FuzzingAead { } } -impl fmt::Debug for FuzzingAead { +impl fmt::Debug for AeadNull { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if let Some(a) = &self.real { - a.fmt(f) - } else { - write!(f, "[FUZZING AEAD]") - } + write!(f, "[NULL AEAD]") } } diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 33fe623b17..b82b225d40 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -8,8 +8,8 @@ #![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code. mod aead; -#[cfg(feature = "fuzzing")] -pub mod aead_fuzzing; +#[cfg(feature = "disable-encryption")] +pub mod aead_null; pub mod agent; mod agentio; mod auth; @@ -33,12 +33,12 @@ mod time; use std::{ffi::CString, path::PathBuf, ptr::null, sync::OnceLock}; -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] pub use self::aead::RealAead as Aead; -#[cfg(feature = "fuzzing")] +#[cfg(feature = "disable-encryption")] pub use self::aead::RealAead; -#[cfg(feature = "fuzzing")] -pub use self::aead_fuzzing::FuzzingAead as Aead; +#[cfg(feature = "disable-encryption")] +pub use self::aead_null::AeadNull as Aead; pub use self::{ agent::{ Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken, diff --git a/neqo-crypto/src/selfencrypt.rs b/neqo-crypto/src/selfencrypt.rs index 1130c35250..d0a85830b0 100644 --- a/neqo-crypto/src/selfencrypt.rs +++ b/neqo-crypto/src/selfencrypt.rs @@ -47,7 +47,7 @@ impl SelfEncrypt { debug_assert_eq!(salt.len(), Self::SALT_LENGTH); let salt = hkdf::import_key(self.version, salt)?; let secret = hkdf::extract(self.version, self.cipher, Some(&salt), k)?; - Aead::new(false, self.version, self.cipher, &secret, "neqo self") + Aead::new(self.version, self.cipher, &secret, "neqo self") } /// Rotate 
keys. This causes any previous key that is being held to be replaced by the current diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index 5cf0034aec..f8416ed9a7 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -5,7 +5,7 @@ // except according to those terms. #![warn(clippy::pedantic)] -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use neqo_crypto::{ constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, @@ -40,7 +40,6 @@ fn make_aead(cipher: Cipher) -> Aead { ) .expect("make a secret"); Aead::new( - false, TLS_VERSION_1_3, cipher, &secret, diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 4c574a3ae9..b20aa27ee6 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 32e3ae7e35..27f43fd93f 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -28,7 +28,7 @@ url = { version = "2.5", default-features = false } test-fixture = { path = "../test-fixture" } [features] -fuzzing = ["neqo-transport/fuzzing", "neqo-crypto/fuzzing"] +disable-encryption = ["neqo-transport/disable-encryption", "neqo-crypto/disable-encryption"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 3da60bdabb..125da11508 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -27,7 +27,7 @@ test-fixture = { path = "../test-fixture" } [features] bench = [] -fuzzing = ["neqo-crypto/fuzzing"] +disable-encryption = 
["neqo-crypto/disable-encryption"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 535d3f4084..3bf6b91263 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -383,7 +383,6 @@ impl Connection { agent, protocols.iter().map(P::as_ref).map(String::from).collect(), Rc::clone(&tphandler), - conn_params.is_fuzzing(), )?; let stats = StatsCell::default(); diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index 72d1efa3ee..d8aa617024 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -77,7 +77,6 @@ pub struct ConnectionParameters { outgoing_datagram_queue: usize, incoming_datagram_queue: usize, fast_pto: u8, - fuzzing: bool, grease: bool, pacing: bool, } @@ -100,7 +99,6 @@ impl Default for ConnectionParameters { outgoing_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, incoming_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, fast_pto: FAST_PTO_SCALE, - fuzzing: false, grease: true, pacing: true, } @@ -324,17 +322,6 @@ impl ConnectionParameters { self } - #[must_use] - pub fn is_fuzzing(&self) -> bool { - self.fuzzing - } - - #[must_use] - pub fn fuzzing(mut self, enable: bool) -> Self { - self.fuzzing = enable; - self - } - #[must_use] pub fn is_greasing(&self) -> bool { self.grease diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 4b2a18642f..f2103523ec 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -16,7 +16,7 @@ use neqo_common::{event::Provider, qdebug, Datagram}; use neqo_crypto::{ constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, }; -#[cfg(not(feature = 
"fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] use test_fixture::datagram; use test_fixture::{ assertions, assertions::assert_coalesced_0rtt, fixture_init, now, split_datagram, DEFAULT_ADDR, @@ -606,7 +606,7 @@ fn reorder_1rtt() { } } -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn corrupted_initial() { let mut client = default_client(); @@ -809,7 +809,7 @@ fn anti_amplification() { assert_eq!(*server.state(), State::Confirmed); } -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn garbage_initial() { let mut client = default_client(); diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index 6f598fb23e..c8c87a0df0 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -37,11 +37,11 @@ mod ackrate; mod cc; mod close; mod datagram; -mod fuzzing; mod handshake; mod idle; mod keys; mod migration; +mod null; mod priority; mod recovery; mod resumption; diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/null.rs similarity index 84% rename from neqo-transport/src/connection/tests/fuzzing.rs rename to neqo-transport/src/connection/tests/null.rs index b12100f8ad..e4d60445c6 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/null.rs @@ -4,9 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg(feature = "fuzzing")] +#![cfg(feature = "disable-encryption")] -use neqo_crypto::aead_fuzzing::FIXED_TAG_FUZZING; +use neqo_crypto::aead_null::AEAD_NULL_TAG; use test_fixture::now; use super::{connect_force_idle, default_client, default_server}; @@ -24,7 +24,7 @@ fn no_encryption() { client.stream_send(stream_id, DATA_CLIENT).unwrap(); let client_pkt = client.process_output(now()).dgram().unwrap(); - assert!(client_pkt[..client_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_CLIENT)); + assert!(client_pkt[..client_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_CLIENT)); server.process_input(&client_pkt, now()); let mut buf = vec![0; 100]; @@ -33,7 +33,7 @@ fn no_encryption() { assert_eq!(&buf[..len], DATA_CLIENT); server.stream_send(stream_id, DATA_SERVER).unwrap(); let server_pkt = server.process_output(now()).dgram().unwrap(); - assert!(server_pkt[..server_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_SERVER)); + assert!(server_pkt[..server_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_SERVER)); client.process_input(&server_pkt, now()); let (len, _) = client.stream_recv(stream_id, &mut buf).unwrap(); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index acc02172d5..54bfe622cf 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -69,7 +69,6 @@ impl Crypto { mut agent: Agent, protocols: Vec, tphandler: TpHandler, - fuzzing: bool, ) -> Res { agent.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?; agent.set_ciphers(&[ @@ -102,7 +101,6 @@ impl Crypto { tls: agent, streams: CryptoStreams::default(), states: CryptoStates { - fuzzing, ..CryptoStates::default() }, }) @@ -420,7 +418,6 @@ pub struct CryptoDxState { /// The total number of operations that are remaining before the keys /// become exhausted and can't be used any more. 
invocations: PacketNumber, - fuzzing: bool, } impl CryptoDxState { @@ -431,7 +428,6 @@ impl CryptoDxState { epoch: Epoch, secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Self { qdebug!( "Making {:?} {} CryptoDxState, v={:?} cipher={}", @@ -445,19 +441,11 @@ impl CryptoDxState { version, direction, epoch: usize::from(epoch), - aead: Aead::new( - fuzzing, - TLS_VERSION_1_3, - cipher, - secret, - version.label_prefix(), - ) - .unwrap(), + aead: Aead::new(TLS_VERSION_1_3, cipher, secret, version.label_prefix()).unwrap(), hpkey: HpKey::extract(TLS_VERSION_1_3, cipher, secret, &hplabel).unwrap(), used_pn: 0..0, min_pn: 0, invocations: Self::limit(direction, cipher), - fuzzing, } } @@ -466,7 +454,6 @@ impl CryptoDxState { direction: CryptoDxDirection, label: &str, dcid: &[u8], - fuzzing: bool, ) -> Self { qtrace!("new_initial {:?} {}", version, ConnectionIdRef::from(dcid)); let salt = version.initial_salt(); @@ -482,14 +469,7 @@ impl CryptoDxState { let secret = hkdf::expand_label(TLS_VERSION_1_3, cipher, &initial_secret, &[], label).unwrap(); - Self::new( - version, - direction, - TLS_EPOCH_INITIAL, - &secret, - cipher, - fuzzing, - ) + Self::new(version, direction, TLS_EPOCH_INITIAL, &secret, cipher) } /// Determine the confidentiality and integrity limits for the cipher. 
@@ -549,7 +529,6 @@ impl CryptoDxState { direction: self.direction, epoch: self.epoch + 1, aead: Aead::new( - self.fuzzing, TLS_VERSION_1_3, cipher, next_secret, @@ -560,7 +539,6 @@ impl CryptoDxState { used_pn: pn..pn, min_pn: pn, invocations, - fuzzing: self.fuzzing, } } @@ -696,7 +674,7 @@ impl CryptoDxState { Ok(res.to_vec()) } - #[cfg(all(test, not(feature = "fuzzing")))] + #[cfg(all(test, not(feature = "disable-encryption")))] pub(crate) fn test_default() -> Self { // This matches the value in packet.rs const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; @@ -705,7 +683,6 @@ impl CryptoDxState { CryptoDxDirection::Write, "server in", CLIENT_CID, - false, ) } @@ -759,7 +736,6 @@ pub(crate) struct CryptoDxAppData { cipher: Cipher, // Not the secret used to create `self.dx`, but the one needed for the next iteration. next_secret: SymKey, - fuzzing: bool, } impl CryptoDxAppData { @@ -768,20 +744,11 @@ impl CryptoDxAppData { dir: CryptoDxDirection, secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Res { Ok(Self { - dx: CryptoDxState::new( - version, - dir, - TLS_EPOCH_APPLICATION_DATA, - secret, - cipher, - fuzzing, - ), + dx: CryptoDxState::new(version, dir, TLS_EPOCH_APPLICATION_DATA, secret, cipher), cipher, next_secret: Self::update_secret(cipher, secret)?, - fuzzing, }) } @@ -800,7 +767,6 @@ impl CryptoDxAppData { dx: self.dx.next(&self.next_secret, self.cipher), cipher: self.cipher, next_secret, - fuzzing: self.fuzzing, }) } @@ -834,7 +800,6 @@ pub struct CryptoStates { // If this is set, then we have noticed a genuine update. // Once this time passes, we should switch in new keys. 
read_update_time: Option, - fuzzing: bool, } impl CryptoStates { @@ -989,20 +954,8 @@ impl CryptoStates { ); let mut initial = CryptoState { - tx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Write, - write, - dcid, - self.fuzzing, - ), - rx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Read, - read, - dcid, - self.fuzzing, - ), + tx: CryptoDxState::new_initial(*v, CryptoDxDirection::Write, write, dcid), + rx: CryptoDxState::new_initial(*v, CryptoDxDirection::Read, read, dcid), }; if let Some(prev) = self.initials.get(v) { qinfo!( @@ -1056,7 +1009,6 @@ impl CryptoStates { TLS_EPOCH_ZERO_RTT, secret, cipher, - self.fuzzing, )); } @@ -1097,7 +1049,6 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, write_secret, cipher, - self.fuzzing, ), rx: CryptoDxState::new( version, @@ -1105,7 +1056,6 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, read_secret, cipher, - self.fuzzing, ), }); } @@ -1113,13 +1063,7 @@ impl CryptoStates { pub fn set_application_write_key(&mut self, version: Version, secret: &SymKey) -> Res<()> { debug_assert!(self.app_write.is_none()); debug_assert_ne!(self.cipher, 0); - let mut app = CryptoDxAppData::new( - version, - CryptoDxDirection::Write, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Write, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Write { app.dx.continuation(z)?; @@ -1138,13 +1082,7 @@ impl CryptoStates { ) -> Res<()> { debug_assert!(self.app_write.is_some(), "should have write keys installed"); debug_assert!(self.app_read.is_none()); - let mut app = CryptoDxAppData::new( - version, - CryptoDxDirection::Read, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Read, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Read { app.dx.continuation(z)?; @@ -1286,7 +1224,7 @@ impl CryptoStates { } /// Make some state for 
removing protection in tests. - #[cfg(not(feature = "fuzzing"))] + #[cfg(not(feature = "disable-encryption"))] #[cfg(test)] pub(crate) fn test_default() -> Self { let read = |epoch| { @@ -1299,7 +1237,6 @@ impl CryptoStates { dx: read(epoch), cipher: TLS_AES_128_GCM_SHA256, next_secret: hkdf::import_key(TLS_VERSION_1_3, &[0xaa; 32]).unwrap(), - fuzzing: false, }; let mut initials = HashMap::new(); initials.insert( @@ -1319,11 +1256,10 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } - #[cfg(all(not(feature = "fuzzing"), test))] + #[cfg(all(not(feature = "disable-encryption"), test))] pub(crate) fn test_chacha() -> Self { const SECRET: &[u8] = &[ 0x9a, 0xc3, 0x12, 0xa7, 0xf8, 0x77, 0x46, 0x8e, 0xbe, 0x69, 0x42, 0x27, 0x48, 0xad, @@ -1337,7 +1273,6 @@ impl CryptoStates { direction: CryptoDxDirection::Read, epoch, aead: Aead::new( - false, TLS_VERSION_1_3, TLS_CHACHA20_POLY1305_SHA256, &secret, @@ -1354,11 +1289,9 @@ impl CryptoStates { used_pn: 0..645_971_972, min_pn: 0, invocations: 10, - fuzzing: false, }, cipher: TLS_CHACHA20_POLY1305_SHA256, next_secret: secret.clone(), - fuzzing: false, }; Self { initials: HashMap::new(), @@ -1369,7 +1302,6 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } } diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index d11b3423a4..552e50a1f9 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -868,7 +868,7 @@ impl Deref for DecryptedPacket { } } -#[cfg(all(test, not(feature = "fuzzing")))] +#[cfg(all(test, not(feature = "disable-encryption")))] mod tests { use neqo_common::Encoder; use test_fixture::{fixture_init, now}; diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index 72036d3b49..71193b9100 100644 --- a/neqo-transport/src/packet/retry.rs +++ 
b/neqo-transport/src/packet/retry.rs @@ -18,7 +18,6 @@ fn make_aead(version: Version) -> Aead { let secret = hkdf::import_key(TLS_VERSION_1_3, version.retry_secret()).unwrap(); Aead::new( - false, TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index faff216eb9..e36e66f753 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -146,14 +146,7 @@ pub fn initial_aead_and_hp(dcid: &[u8], role: Role) -> (Aead, HpKey) { ) .unwrap(); ( - Aead::new( - false, - TLS_VERSION_1_3, - TLS_AES_128_GCM_SHA256, - &secret, - "quic ", - ) - .unwrap(), + Aead::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic ").unwrap(), HpKey::extract(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic hp").unwrap(), ) } diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index f478883075..86fe9d36fc 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -6,7 +6,7 @@ // Tests with the test vectors from the spec. -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use std::{cell::RefCell, rc::Rc}; diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index e583fcae0f..36eff71e7b 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] mod common; From 20ef1be046769efaeb151b21b6cde22af67ee712 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 12:10:59 +0200 Subject: [PATCH 284/321] feat: Turn on TLS greasing (#1760) * feat: Turn on TLS greasing Fixes #1391 Needs #1739 * Make clippy happy --- neqo-crypto/src/agent.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 90085cb759..3d5a8b9f35 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -16,7 +16,7 @@ use std::{ time::Instant, }; -use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; +use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qtrace, qwarn}; pub use crate::{ agentio::{as_c_void, Record, RecordList}, @@ -406,10 +406,7 @@ impl SecretAgent { self.set_option(ssl::Opt::Locking, false)?; self.set_option(ssl::Opt::Tickets, false)?; self.set_option(ssl::Opt::OcspStapling, true)?; - if let Err(e) = self.set_option(ssl::Opt::Grease, grease) { - // Until NSS supports greasing, it's OK to fail here. - qinfo!([self], "Failed to enable greasing {:?}", e); - } + self.set_option(ssl::Opt::Grease, grease)?; Ok(()) } From efc4813affbb8077e2f7f7fb2799be0452fc52ed Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 12:39:04 +0200 Subject: [PATCH 285/321] fix: Address more clippy warnings (#1777) * fix: Address more clippy warnings PR #1764 exports `frame` and `packet` if feature `fuzzing` is enabled. This apparently turns on a bunch of clippy checks that are not on by default? This PR fixes them. Made this separate from #1764 to reduce that PR's size. 
* Fix clippy * Remove comment --- neqo-transport/src/frame.rs | 26 ++++++++++++-- neqo-transport/src/packet/mod.rs | 61 +++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 8 deletions(-) diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 5a86a07108..d84eb61ce8 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -95,6 +95,12 @@ impl From for CloseError { } } +impl From for Error { + fn from(_err: std::array::TryFromSliceError) -> Self { + Self::FrameEncodingError + } +} + #[derive(PartialEq, Eq, Debug, Default, Clone)] pub struct AckRange { pub(crate) gap: u64, @@ -213,6 +219,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn get_type(&self) -> FrameType { match self { Self::Padding { .. } => FRAME_TYPE_PADDING, @@ -254,6 +261,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn is_stream(&self) -> bool { matches!( self, @@ -269,6 +277,7 @@ impl<'a> Frame<'a> { ) } + #[must_use] pub fn stream_type(fin: bool, nonzero_offset: bool, fill: bool) -> u64 { let mut t = FRAME_TYPE_STREAM; if fin { @@ -285,6 +294,7 @@ impl<'a> Frame<'a> { /// If the frame causes a recipient to generate an ACK within its /// advertised maximum acknowledgement delay. + #[must_use] pub fn ack_eliciting(&self) -> bool { !matches!( self, @@ -294,6 +304,7 @@ impl<'a> Frame<'a> { /// If the frame can be sent in a path probe /// without initiating migration to that path. + #[must_use] pub fn path_probing(&self) -> bool { matches!( self, @@ -307,6 +318,10 @@ impl<'a> Frame<'a> { /// Converts `AckRanges` as encoded in a ACK frame (see -transport /// 19.3.1) into ranges of acked packets (end, start), inclusive of /// start and end values. + /// + /// # Errors + /// + /// Returns an error if the ranges are invalid. 
pub fn decode_ack_frame( largest_acked: u64, first_ack_range: u64, @@ -347,6 +362,7 @@ impl<'a> Frame<'a> { Ok(acked_ranges) } + #[must_use] pub fn dump(&self) -> String { match self { Self::Crypto { offset, data } => { @@ -372,6 +388,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn is_allowed(&self, pt: PacketType) -> bool { match self { Self::Padding { .. } | Self::Ping => true, @@ -386,6 +403,9 @@ impl<'a> Frame<'a> { } } + /// # Errors + /// + /// Returns an error if the frame cannot be decoded. #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match statement. pub fn decode(dec: &mut Decoder<'a>) -> Res { /// Maximum ACK Range Count in ACK Frame @@ -470,7 +490,7 @@ impl<'a> Frame<'a> { FRAME_TYPE_CRYPTO => { let offset = dv(dec)?; let data = d(dec.decode_vvec())?; - if offset + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if offset + u64::try_from(data.len())? > ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Crypto { offset, data }) @@ -497,7 +517,7 @@ impl<'a> Frame<'a> { qtrace!("STREAM frame, with length"); d(dec.decode_vvec())? }; - if o + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if o + u64::try_from(data.len())? > ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Stream { @@ -546,7 +566,7 @@ impl<'a> Frame<'a> { return Err(Error::DecodingFrame); } let srt = d(dec.decode(16))?; - let stateless_reset_token = <&[_; 16]>::try_from(srt).unwrap(); + let stateless_reset_token = <&[_; 16]>::try_from(srt)?; Ok(Self::NewConnectionId { sequence_number, diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 552e50a1f9..0843d050ab 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -256,6 +256,10 @@ impl PacketBuilder { /// Maybe pad with "PADDING" frames. /// Only does so if padding was needed and this is a short packet. /// Returns true if padding was added. + /// + /// # Panics + /// + /// Cannot happen. 
pub fn pad(&mut self) -> bool { if self.padding && !self.is_long() { self.encoder @@ -290,6 +294,10 @@ impl PacketBuilder { /// The length is filled in after calling `build`. /// Does nothing if there isn't 4 bytes available other than render this builder /// unusable; if `remaining()` returns 0 at any point, call `abort()`. + /// + /// # Panics + /// + /// This will panic if the packet number length is too large. pub fn pn(&mut self, pn: PacketNumber, pn_len: usize) { if self.remaining() < 4 { self.limit = 0; @@ -354,6 +362,10 @@ impl PacketBuilder { } /// Build the packet and return the encoder. + /// + /// # Errors + /// + /// This will return an error if the packet is too large. pub fn build(mut self, crypto: &mut CryptoDxState) -> Res { if self.len() > self.limit { qwarn!("Packet contents are more than the limit"); @@ -378,7 +390,9 @@ impl PacketBuilder { // Calculate the mask. let offset = SAMPLE_OFFSET - self.offsets.pn.len(); - assert!(offset + SAMPLE_SIZE <= ciphertext.len()); + if offset + SAMPLE_SIZE > ciphertext.len() { + return Err(Error::InternalError); + } let sample = &ciphertext[offset..offset + SAMPLE_SIZE]; let mask = crypto.compute_mask(sample)?; @@ -412,6 +426,10 @@ impl PacketBuilder { /// As this is a simple packet, this is just an associated function. /// As Retry is odd (it has to be constructed with leading bytes), /// this returns a [`Vec`] rather than building on an encoder. + /// + /// # Errors + /// + /// This will return an error if AEAD encrypt fails. #[allow(clippy::similar_names)] // scid and dcid are fine here. pub fn retry( version: Version, @@ -445,6 +463,7 @@ impl PacketBuilder { /// Make a Version Negotiation packet. #[allow(clippy::similar_names)] // scid and dcid are fine here. + #[must_use] pub fn version_negotiation( dcid: &[u8], scid: &[u8], @@ -556,6 +575,10 @@ impl<'a> PublicPacket<'a> { /// Decode the common parts of a packet. This provides minimal parsing and validation. 
/// Returns a tuple of a `PublicPacket` and a slice with any remainder from the datagram. + /// + /// # Errors + /// + /// This will return an error if the packet could not be decoded. #[allow(clippy::similar_names)] // For dcid and scid, which are fine. pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> { let mut decoder = Decoder::new(data); @@ -587,7 +610,7 @@ impl<'a> PublicPacket<'a> { } // Generic long header. - let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?).unwrap(); + let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?)?; let dcid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); let scid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); @@ -647,11 +670,14 @@ impl<'a> PublicPacket<'a> { } /// Validate the given packet as though it were a retry. + #[must_use] pub fn is_valid_retry(&self, odcid: &ConnectionId) -> bool { if self.packet_type != PacketType::Retry { return false; } - let version = self.version().unwrap(); + let Some(version) = self.version() else { + return false; + }; let expansion = retry::expansion(version); if self.data.len() <= expansion { return false; @@ -667,6 +693,7 @@ impl<'a> PublicPacket<'a> { .unwrap_or(false) } + #[must_use] pub fn is_valid_initial(&self) -> bool { // Packet has to be an initial, with a DCID of 8 bytes, or a token. // Note: the Server class validates the token and checks the length. @@ -674,32 +701,42 @@ impl<'a> PublicPacket<'a> { && (self.dcid().len() >= 8 || !self.token.is_empty()) } + #[must_use] pub fn packet_type(&self) -> PacketType { self.packet_type } + #[must_use] pub fn dcid(&self) -> ConnectionIdRef<'a> { self.dcid } + /// # Panics + /// + /// This will panic if called for a short header packet. 
+ #[must_use] pub fn scid(&self) -> ConnectionIdRef<'a> { self.scid .expect("should only be called for long header packets") } + #[must_use] pub fn token(&self) -> &'a [u8] { self.token } + #[must_use] pub fn version(&self) -> Option { self.version.and_then(|v| Version::try_from(v).ok()) } + #[must_use] pub fn wire_version(&self) -> WireVersion { debug_assert!(self.version.is_some()); self.version.unwrap_or(0) } + #[must_use] pub fn len(&self) -> usize { self.data.len() } @@ -778,6 +815,9 @@ impl<'a> PublicPacket<'a> { )) } + /// # Errors + /// + /// This will return an error if the packet cannot be decrypted. pub fn decrypt(&self, crypto: &mut CryptoStates, release_at: Instant) -> Res { let cspace: CryptoSpace = self.packet_type.into(); // When we don't have a version, the crypto code doesn't need a version @@ -792,7 +832,9 @@ impl<'a> PublicPacket<'a> { // too small (which is public information). let (key_phase, pn, header, body) = self.decrypt_header(rx)?; qtrace!([rx], "decoded header: {:?}", header); - let rx = crypto.rx(version, cspace, key_phase).unwrap(); + let Some(rx) = crypto.rx(version, cspace, key_phase) else { + return Err(Error::DecryptError); + }; let version = rx.version(); // Version fixup; see above. let d = rx.decrypt(pn, &header, body)?; // If this is the first packet ever successfully decrypted @@ -815,8 +857,14 @@ impl<'a> PublicPacket<'a> { } } + /// # Errors + /// + /// This will return an error if the packet is not a version negotiation packet + /// or if the versions cannot be decoded. 
pub fn supported_versions(&self) -> Res> { - assert_eq!(self.packet_type, PacketType::VersionNegotiation); + if self.packet_type != PacketType::VersionNegotiation { + return Err(Error::InvalidPacket); + } let mut decoder = Decoder::new(&self.data[self.header_len..]); let mut res = Vec::new(); while decoder.remaining() > 0 { @@ -847,14 +895,17 @@ pub struct DecryptedPacket { } impl DecryptedPacket { + #[must_use] pub fn version(&self) -> Version { self.version } + #[must_use] pub fn packet_type(&self) -> PacketType { self.pt } + #[must_use] pub fn pn(&self) -> PacketNumber { self.pn } From 36fae6282b2214e4fea425ee4a952c08acf1c445 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 27 Mar 2024 16:50:21 +0100 Subject: [PATCH 286/321] perf(bin): add criterion benchmarks (#1758) * perf(bin): add criterion benchmarks Wraps the `neqo-client` and `neqo-server` code, starts the server and runs various benchmarks through the client. Benchmarks: - single-request-1gb - single-request-1mb - requests-per-second - handshakes-per-second * Rename benchmark instances * Turn off logging * Remove 100mb sample size restriction * Use v6 * Remove 1gb It just takes too long on the bench machine * Use /dev/null * rework imports * Fix import * Test without CPU pinning * Revert "Test without CPU pinning" This reverts commit a0ef46a3d321aa00aae0d3835ef44533dded46bf. 
* Pin all but neqo-bin to CPU 0 * Quote package * Add rational for neqo-bin handling * Rework tuples * Pin first * Just duplicate the two calls * Add --workspace flag * Remove taskset from neqo-bin --------- Co-authored-by: Martin Thomson --- .github/workflows/bench.yml | 10 +- neqo-bin/Cargo.toml | 16 +++- neqo-bin/benches/main.rs | 92 +++++++++++++++++++ neqo-bin/src/bin/client.rs | 14 +++ neqo-bin/src/bin/server.rs | 14 +++ neqo-bin/src/{bin => }/client/http09.rs | 3 +- neqo-bin/src/{bin => }/client/http3.rs | 2 +- .../src/{bin/client/main.rs => client/mod.rs} | 65 +++++++++---- neqo-bin/src/lib.rs | 40 +++++++- .../src/{bin/server/main.rs => server/mod.rs} | 46 ++++++---- neqo-bin/src/{bin => }/server/old_https.rs | 0 neqo-bin/src/udp.rs | 2 + 12 files changed, 262 insertions(+), 42 deletions(-) create mode 100644 neqo-bin/benches/main.rs create mode 100644 neqo-bin/src/bin/client.rs create mode 100644 neqo-bin/src/bin/server.rs rename neqo-bin/src/{bin => }/client/http09.rs (99%) rename neqo-bin/src/{bin => }/client/http3.rs (99%) rename neqo-bin/src/{bin/client/main.rs => client/mod.rs} (91%) rename neqo-bin/src/{bin/server/main.rs => server/mod.rs} (94%) rename neqo-bin/src/{bin => }/server/old_https.rs (100%) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 80c51c236d..5df8bcfd91 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -70,11 +70,15 @@ jobs: - name: Prepare machine run: sudo /root/bin/prep.sh - # Pin the benchmark run to core 0 and run all benchmarks at elevated priority. - name: Run cargo bench run: | - taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" bench --features bench -- --noplot | tee results.txt + # Pin all but neqo-bin benchmarks to CPU 0. neqo-bin benchmarks run + # both a client and a server, thus benefiting from multiple CPU cores. + # + # Run all benchmarks at elevated priority. 
+ taskset -c 0 nice -n -20 cargo "+$TOOLCHAIN" bench --workspace --exclude neqo-bin --features bench -- --noplot | tee results.txt + nice -n -20 cargo "+$TOOLCHAIN" bench --package neqo-bin --features bench -- --noplot | tee -a results.txt + # Compare various configurations of neqo against msquic, and gather perf data # during the hyperfine runs. diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index 04210e00db..a165a4ac32 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -11,12 +11,12 @@ license.workspace = true [[bin]] name = "neqo-client" -path = "src/bin/client/main.rs" +path = "src/bin/client.rs" bench = false [[bin]] name = "neqo-server" -path = "src/bin/server/main.rs" +path = "src/bin/server.rs" bench = false [lints] @@ -40,6 +40,18 @@ regex = { version = "1.9", default-features = false, features = ["unicode-perl"] tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = { version = "2.5", default-features = false } +[dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports", "async_tokio"] } +tokio = { version = "1", default-features = false, features = ["sync"] } + +[features] +bench = [] + [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false + +[[bench]] +name = "main" +harness = false +required-features = ["bench"] diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs new file mode 100644 index 0000000000..fe3aba2714 --- /dev/null +++ b/neqo-bin/benches/main.rs @@ -0,0 +1,92 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::{path::PathBuf, str::FromStr}; + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use neqo_bin::{client, server}; +use tokio::runtime::Runtime; + +struct Benchmark { + name: String, + requests: Vec, + /// Download resources in series using separate connections. + download_in_series: bool, + sample_size: Option, +} + +fn transfer(c: &mut Criterion) { + neqo_common::log::init(Some(log::LevelFilter::Off)); + neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()); + + let done_sender = spawn_server(); + + for Benchmark { + name, + requests, + download_in_series, + sample_size, + } in [ + Benchmark { + name: "1-conn/1-100mb-resp (aka. Download)".to_string(), + requests: vec![100 * 1024 * 1024], + download_in_series: false, + sample_size: Some(10), + }, + Benchmark { + name: "1-conn/10_000-1b-seq-resp (aka. RPS)".to_string(), + requests: vec![1; 10_000], + download_in_series: false, + sample_size: None, + }, + Benchmark { + name: "100-seq-conn/1-1b-resp (aka. 
HPS)".to_string(), + requests: vec![1; 100], + download_in_series: true, + sample_size: None, + }, + ] { + let mut group = c.benchmark_group(name); + group.throughput(if requests[0] > 1 { + assert_eq!(requests.len(), 1); + Throughput::Bytes(requests[0]) + } else { + Throughput::Elements(requests.len() as u64) + }); + if let Some(size) = sample_size { + group.sample_size(size); + } + group.bench_function("client", |b| { + b.to_async(Runtime::new().unwrap()).iter_batched( + || client::client(client::Args::new(&requests, download_in_series)), + |client| async move { + client.await.unwrap(); + }, + BatchSize::PerIteration, + ); + }); + group.finish(); + } + + done_sender.send(()).unwrap(); +} + +fn spawn_server() -> tokio::sync::oneshot::Sender<()> { + let (done_sender, mut done_receiver) = tokio::sync::oneshot::channel(); + std::thread::spawn(move || { + Runtime::new().unwrap().block_on(async { + let mut server = Box::pin(server::server(server::Args::default())); + tokio::select! { + _ = &mut done_receiver => {} + _ = &mut server => {} + } + }); + }); + done_sender +} + +criterion_group!(benches, transfer); +criterion_main!(benches); diff --git a/neqo-bin/src/bin/client.rs b/neqo-bin/src/bin/client.rs new file mode 100644 index 0000000000..25c0e8753f --- /dev/null +++ b/neqo-bin/src/bin/client.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), neqo_bin::client::Error> { + let args = neqo_bin::client::Args::parse(); + + neqo_bin::client::client(args).await +} diff --git a/neqo-bin/src/bin/server.rs b/neqo-bin/src/bin/server.rs new file mode 100644 index 0000000000..8d166c7487 --- /dev/null +++ b/neqo-bin/src/bin/server.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), std::io::Error> { + let args = neqo_bin::server::Args::parse(); + + neqo_bin::server::server(args).await +} diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/client/http09.rs similarity index 99% rename from neqo-bin/src/bin/client/http09.rs rename to neqo-bin/src/client/http09.rs index 372a112853..9bdb6dca85 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/client/http09.rs @@ -25,8 +25,7 @@ use neqo_transport::{ }; use url::Url; -use super::{get_output_file, Args, KeyUpdateState, Res}; -use crate::qlog_new; +use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; pub struct Handler<'a> { streams: HashMap>>, diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/client/http3.rs similarity index 99% rename from neqo-bin/src/bin/client/http3.rs rename to neqo-bin/src/client/http3.rs index e9f5e406a5..c88a8448f6 100644 --- a/neqo-bin/src/bin/client/http3.rs +++ b/neqo-bin/src/client/http3.rs @@ -26,7 +26,7 @@ use neqo_transport::{ }; use url::Url; -use crate::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; +use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; pub(crate) struct Handler<'a> { #[allow( diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/client/mod.rs similarity index 91% rename from neqo-bin/src/bin/client/main.rs rename to neqo-bin/src/client/mod.rs index 63aa12db13..e0169e3f24 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/client/mod.rs @@ -21,25 +21,26 @@ use futures::{ future::{select, Either}, FutureExt, TryFutureExt, }; -use neqo_bin::udp; use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, Cipher, ResumptionToken, }; -use 
neqo_http3::{Error, Output}; +use neqo_http3::Output; use neqo_transport::{AppError, ConnectionId, Error as TransportError, Version}; use qlog::{events::EventImportance, streamer::QlogStreamer}; use tokio::time::Sleep; use url::{Origin, Url}; +use crate::{udp, SharedArgs}; + mod http09; mod http3; const BUFWRITER_BUFFER_SIZE: usize = 64 * 1024; #[derive(Debug)] -pub enum ClientError { +pub enum Error { ArgumentError(&'static str), Http3Error(neqo_http3::Error), IoError(io::Error), @@ -47,40 +48,40 @@ pub enum ClientError { TransportError(neqo_transport::Error), } -impl From for ClientError { +impl From for Error { fn from(err: io::Error) -> Self { Self::IoError(err) } } -impl From for ClientError { +impl From for Error { fn from(err: neqo_http3::Error) -> Self { Self::Http3Error(err) } } -impl From for ClientError { +impl From for Error { fn from(_err: qlog::Error) -> Self { Self::QlogError } } -impl From for ClientError { +impl From for Error { fn from(err: neqo_transport::Error) -> Self { Self::TransportError(err) } } -impl Display for ClientError { +impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {self:?}")?; Ok(()) } } -impl std::error::Error for ClientError {} +impl std::error::Error for Error {} -type Res = Result; +type Res = Result; /// Track whether a key update is needed. 
#[derive(Debug, PartialEq, Eq)] @@ -90,14 +91,14 @@ impl KeyUpdateState { pub fn maybe_update(&mut self, update_fn: F) -> Res<()> where F: FnOnce() -> Result<(), E>, - E: Into, + E: Into, { if self.0 { if let Err(e) = update_fn() { let e = e.into(); match e { - ClientError::TransportError(TransportError::KeyUpdateBlocked) - | ClientError::Http3Error(Error::TransportError( + Error::TransportError(TransportError::KeyUpdateBlocked) + | Error::Http3Error(neqo_http3::Error::TransportError( TransportError::KeyUpdateBlocked, )) => (), _ => return Err(e), @@ -123,7 +124,7 @@ pub struct Args { verbose: clap_verbosity_flag::Verbosity, #[command(flatten)] - shared: neqo_bin::SharedArgs, + shared: SharedArgs, urls: Vec, @@ -189,6 +190,36 @@ pub struct Args { } impl Args { + #[must_use] + #[cfg(feature = "bench")] + #[allow(clippy::missing_panics_doc)] + pub fn new(requests: &[u64], download_in_series: bool) -> Self { + use std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + urls: requests + .iter() + .map(|r| Url::from_str(&format!("http://[::1]:12345/{r}")).unwrap()) + .collect(), + method: "GET".into(), + header: vec![], + max_concurrent_push_streams: 10, + download_in_series, + concurrency: 100, + output_read_data: false, + output_dir: Some("/dev/null".into()), + resume: false, + key_update: false, + ech: None, + ipv4_only: false, + ipv6_only: false, + test: None, + upload_size: 100, + stats: false, + } + } + fn get_ciphers(&self) -> Vec { self.shared .ciphers @@ -445,10 +476,10 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { } } -#[tokio::main] -async fn main() -> Res<()> { - let mut args = Args::parse(); +pub async fn client(mut args: Args) -> Res<()> { neqo_common::log::init(Some(args.verbose.log_level_filter())); + init(); + args.update_for_tests(); init(); diff --git a/neqo-bin/src/lib.rs b/neqo-bin/src/lib.rs index b7bc158245..380c56ddce 100644 --- a/neqo-bin/src/lib.rs 
+++ b/neqo-bin/src/lib.rs @@ -4,6 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![allow(clippy::missing_panics_doc)] +#![allow(clippy::missing_errors_doc)] + use std::{ fmt::{self, Display}, net::{SocketAddr, ToSocketAddrs}, @@ -17,7 +20,9 @@ use neqo_transport::{ Version, }; -pub mod udp; +pub mod client; +pub mod server; +mod udp; #[derive(Debug, Parser)] pub struct SharedArgs { @@ -57,6 +62,23 @@ pub struct SharedArgs { pub quic_parameters: QuicParameters, } +#[cfg(feature = "bench")] +impl Default for SharedArgs { + fn default() -> Self { + Self { + alpn: "h3".into(), + qlog_dir: None, + max_table_size_encoder: 16384, + max_table_size_decoder: 16384, + max_blocked_streams: 10, + ciphers: vec![], + qns_test: None, + use_old_http: false, + quic_parameters: QuicParameters::default(), + } + } +} + #[derive(Debug, Parser)] pub struct QuicParameters { #[arg( @@ -102,6 +124,22 @@ pub struct QuicParameters { pub preferred_address_v6: Option, } +#[cfg(feature = "bench")] +impl Default for QuicParameters { + fn default() -> Self { + Self { + quic_version: vec![], + max_streams_bidi: 16, + max_streams_uni: 16, + idle_timeout: 30, + congestion_control: CongestionControlAlgorithm::NewReno, + no_pacing: false, + preferred_address_v4: None, + preferred_address_v6: None, + } + } +} + impl QuicParameters { fn get_sock_addr(opt: &Option, v: &str, f: F) -> Option where diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/server/mod.rs similarity index 94% rename from neqo-bin/src/bin/server/main.rs rename to neqo-bin/src/server/mod.rs index 62eb19e78c..f89d6620de 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/server/mod.rs @@ -25,28 +25,28 @@ use futures::{ future::{select, select_all, Either}, FutureExt, }; -use neqo_bin::udp; use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, 
TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, }; use neqo_http3::{ - Error, Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, + Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, }; use neqo_transport::{ server::ValidateAddress, ConnectionIdGenerator, Output, RandomConnectionIdGenerator, Version, }; +use old_https::Http09Server; use tokio::time::Sleep; -use crate::old_https::Http09Server; +use crate::{udp, SharedArgs}; const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10); mod old_https; #[derive(Debug)] -pub enum ServerError { +pub enum Error { ArgumentError(&'static str), Http3Error(neqo_http3::Error), IoError(io::Error), @@ -54,47 +54,47 @@ pub enum ServerError { TransportError(neqo_transport::Error), } -impl From for ServerError { +impl From for Error { fn from(err: io::Error) -> Self { Self::IoError(err) } } -impl From for ServerError { +impl From for Error { fn from(err: neqo_http3::Error) -> Self { Self::Http3Error(err) } } -impl From for ServerError { +impl From for Error { fn from(_err: qlog::Error) -> Self { Self::QlogError } } -impl From for ServerError { +impl From for Error { fn from(err: neqo_transport::Error) -> Self { Self::TransportError(err) } } -impl Display for ServerError { +impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {self:?}")?; Ok(()) } } -impl std::error::Error for ServerError {} +impl std::error::Error for Error {} #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] -struct Args { +pub struct Args { #[command(flatten)] verbose: clap_verbosity_flag::Verbosity, #[command(flatten)] - shared: neqo_bin::SharedArgs, + shared: SharedArgs, /// List of IP:port to listen on #[arg(default_value = "[::]:4433")] @@ -119,6 +119,22 @@ struct Args { ech: bool, } +#[cfg(feature = "bench")] +impl Default for Args { + fn default() -> Self { + use 
std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + hosts: vec!["[::]:12345".to_string()], + db: PathBuf::from_str("../test-fixture/db").unwrap(), + key: "key".to_string(), + retry: false, + ech: false, + } + } +} + impl Args { fn get_ciphers(&self) -> Vec { self.shared @@ -339,7 +355,7 @@ impl HttpServer for SimpleServer { } } else { stream - .cancel_fetch(Error::HttpRequestIncomplete.code()) + .cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code()) .unwrap(); continue; }; @@ -565,11 +581,9 @@ enum Ready { Timeout, } -#[tokio::main] -async fn main() -> Result<(), io::Error> { +pub async fn server(mut args: Args) -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; - let mut args = Args::parse(); neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); diff --git a/neqo-bin/src/bin/server/old_https.rs b/neqo-bin/src/server/old_https.rs similarity index 100% rename from neqo-bin/src/bin/server/old_https.rs rename to neqo-bin/src/server/old_https.rs diff --git a/neqo-bin/src/udp.rs b/neqo-bin/src/udp.rs index f4ede0b5c2..7ccfa1f36f 100644 --- a/neqo-bin/src/udp.rs +++ b/neqo-bin/src/udp.rs @@ -23,6 +23,8 @@ use tokio::io::Interest; const RECV_BUF_SIZE: usize = u16::MAX as usize; pub struct Socket { + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] socket: tokio::net::UdpSocket, state: UdpSocketState, recv_buf: Vec, From f7492045743b5017e7d24051c15c21afe321c2a4 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:02:19 +0200 Subject: [PATCH 287/321] Enable the ossf/scorecard action (#1776) Let's see what this will report on our code... 
Signed-off-by: Lars Eggert --- .github/workflows/scorecard.yml | 72 +++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 .github/workflows/scorecard.yml diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 0000000000..338c9239c3 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,72 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '26 8 * * 6' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. 
Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: false + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + with: + sarif_file: results.sarif From 3e12eae25da4ef3ac45363e512e89b538216d005 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:15:54 +0200 Subject: [PATCH 288/321] ci: Fix shell script bugs in `nss/action.yml` (#1778) --- .github/actions/nss/action.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index ec6f13eaf8..051b54143b 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -34,9 +34,9 @@ runs: fi NSS_MAJOR=$(echo "$NSS_VERSION" | cut -d. -f1) NSS_MINOR=$(echo "$NSS_VERSION" | cut -d. -f2) - REQ_NSS_MAJOR=$(cat neqo-crypto/min_version.txt | cut -d. 
-f1) - REQ_NSS_MINOR=$(cat neqo-crypto/min_version.txt | cut -d. -f2) - if [ "$NSS_MAJOR" -lt "REQ_NSS_MAJOR" ] || [ "$NSS_MAJOR" -eq "REQ_NSS_MAJOR" -a "$NSS_MINOR" -lt "REQ_NSS_MINOR"]; then + REQ_NSS_MAJOR=$(cut -d. -f1 < neqo-crypto/min_version.txt) + REQ_NSS_MINOR=$(cut -d. -f2 < neqo-crypto/min_version.txt) + if [[ "$NSS_MAJOR" -lt "$REQ_NSS_MAJOR" || "$NSS_MAJOR" -eq "$REQ_NSS_MAJOR" && "$NSS_MINOR" -lt "$REQ_NSS_MINOR" ]]; then echo "System NSS is too old: $NSS_VERSION" echo "BUILD_NSS=1" >> "$GITHUB_ENV" exit 0 From 3151adc53e71273eed1319114380119c70e169a2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:44:37 +0200 Subject: [PATCH 289/321] fix: Don't panic in `neqo_crypto::init()` (#1775) * fix: Don't panic in `neqo_crypto::init()` Return `Res<()>` instead. Fixes #1675 * Update neqo-bin/src/bin/client/main.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/src/lib.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/tests/init.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review * Try and improve coverage * Fix the `nss_nodb` case * Fail tests if `init()` fails * Address code review * Update neqo-bin/src/bin/client/main.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * InternalError -> CryptoError * Fix merge * clippy --------- Signed-off-by: Lars Eggert Co-authored-by: Martin Thomson --- neqo-bin/benches/main.rs | 2 +- neqo-bin/src/bin/server.rs | 2 +- neqo-bin/src/client/mod.rs | 11 +++++-- neqo-bin/src/server/mod.rs | 15 +++++++-- neqo-crypto/src/lib.rs | 53 ++++++++++++++++---------------- neqo-crypto/tests/init.rs | 51 +++++++++++++++++++++++------- neqo-crypto/tests/selfencrypt.rs | 4 +-- test-fixture/src/lib.rs | 6 +++- 8 files changed, 96 insertions(+), 48 deletions(-) diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs index fe3aba2714..6bb8b3161d 100644 --- a/neqo-bin/benches/main.rs +++ 
b/neqo-bin/benches/main.rs @@ -20,7 +20,7 @@ struct Benchmark { fn transfer(c: &mut Criterion) { neqo_common::log::init(Some(log::LevelFilter::Off)); - neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()); + neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()).unwrap(); let done_sender = spawn_server(); diff --git a/neqo-bin/src/bin/server.rs b/neqo-bin/src/bin/server.rs index 8d166c7487..e9b30261e4 100644 --- a/neqo-bin/src/bin/server.rs +++ b/neqo-bin/src/bin/server.rs @@ -7,7 +7,7 @@ use clap::Parser; #[tokio::main] -async fn main() -> Result<(), std::io::Error> { +async fn main() -> Result<(), neqo_bin::server::Error> { let args = neqo_bin::server::Args::parse(); neqo_bin::server::server(args).await diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index e0169e3f24..81721802e1 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -46,6 +46,13 @@ pub enum Error { IoError(io::Error), QlogError, TransportError(neqo_transport::Error), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } } impl From for Error { @@ -478,11 +485,11 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { pub async fn client(mut args: Args) -> Res<()> { neqo_common::log::init(Some(args.verbose.log_level_filter())); - init(); + init()?; args.update_for_tests(); - init(); + init()?; let urls_by_origin = args .urls diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs index f89d6620de..38eb766f5f 100644 --- a/neqo-bin/src/server/mod.rs +++ b/neqo-bin/src/server/mod.rs @@ -52,6 +52,13 @@ pub enum Error { IoError(io::Error), QlogError, TransportError(neqo_transport::Error), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } } impl From for Error { @@ -87,6 +94,8 @@ impl Display for Error { impl std::error::Error for 
Error {} +type Res = Result; + #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] pub struct Args { @@ -551,7 +560,7 @@ impl ServersRunner { select(sockets_ready, timeout_ready).await.factor_first().0 } - async fn run(&mut self) -> Result<(), io::Error> { + async fn run(&mut self) -> Res<()> { loop { match self.ready().await? { Ready::Socket(inx) => loop { @@ -581,13 +590,13 @@ enum Ready { Timeout, } -pub async fn server(mut args: Args) -> Result<(), io::Error> { +pub async fn server(mut args: Args) -> Res<()> { const HQ_INTEROP: &str = "hq-interop"; neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); - init_db(args.db.clone()); + init_db(args.db.clone())?; if let Some(testcase) = args.shared.qns_test.as_ref() { if args.shared.quic_parameters.quic_version.is_empty() { diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index b82b225d40..2db985e8ee 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -90,7 +90,7 @@ impl Drop for NssLoaded { } } -static INITIALIZED: OnceLock = OnceLock::new(); +static INITIALIZED: OnceLock> = OnceLock::new(); fn already_initialized() -> bool { unsafe { nss::NSS_IsInitialized() != 0 } @@ -108,24 +108,24 @@ fn version_check() { /// Initialize NSS. This only executes the initialization routines once, so if there is any chance /// that /// -/// # Panics +/// # Errors /// /// When NSS initialization fails. -pub fn init() { +pub fn init() -> Res<()> { // Set time zero. 
time::init(); - _ = INITIALIZED.get_or_init(|| { + let res = INITIALIZED.get_or_init(|| { version_check(); if already_initialized() { - return NssLoaded::External; + return Ok(NssLoaded::External); } - secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) }).expect("NSS_NoDB_Init failed"); - secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) - .expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?; + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; - NssLoaded::NoDb + Ok(NssLoaded::NoDb) }); + res.as_ref().map(|_| ()).map_err(Clone::clone) } /// This enables SSLTRACE by calling a simple, harmless function to trigger its @@ -133,31 +133,32 @@ pub fn init() { /// global options are accessed. Reading an option is the least impact approach. /// This allows us to use SSLTRACE in all of our unit tests and programs. #[cfg(debug_assertions)] -fn enable_ssl_trace() { +fn enable_ssl_trace() -> Res<()> { let opt = ssl::Opt::Locking.as_int(); let mut v: ::std::os::raw::c_int = 0; secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) }) - .expect("SSL_OptionGetDefault failed"); } /// Initialize with a database. /// -/// # Panics +/// # Errors /// /// If NSS cannot be initialized. 
-pub fn init_db>(dir: P) { +pub fn init_db>(dir: P) -> Res<()> { time::init(); - _ = INITIALIZED.get_or_init(|| { + let res = INITIALIZED.get_or_init(|| { version_check(); if already_initialized() { - return NssLoaded::External; + return Ok(NssLoaded::External); } let path = dir.into(); - assert!(path.is_dir()); - let pathstr = path.to_str().expect("path converts to string").to_string(); - let dircstr = CString::new(pathstr).unwrap(); - let empty = CString::new("").unwrap(); + if !path.is_dir() { + return Err(Error::InternalError); + } + let pathstr = path.to_str().ok_or(Error::InternalError)?; + let dircstr = CString::new(pathstr)?; + let empty = CString::new("")?; secstatus_to_res(unsafe { nss::NSS_Initialize( dircstr.as_ptr(), @@ -166,21 +167,19 @@ pub fn init_db>(dir: P) { nss::SECMOD_DB.as_ptr().cast(), nss::NSS_INIT_READONLY, ) - }) - .expect("NSS_Initialize failed"); + })?; - secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) - .expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; secstatus_to_res(unsafe { ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr()) - }) - .expect("SSL_ConfigServerSessionIDCache failed"); + })?; #[cfg(debug_assertions)] - enable_ssl_trace(); + enable_ssl_trace()?; - NssLoaded::Db + Ok(NssLoaded::Db) }); + res.as_ref().map(|_| ()).map_err(Clone::clone) } /// # Panics diff --git a/neqo-crypto/tests/init.rs b/neqo-crypto/tests/init.rs index 13218cc340..ee7d808e29 100644 --- a/neqo-crypto/tests/init.rs +++ b/neqo-crypto/tests/init.rs @@ -15,13 +15,7 @@ use neqo_crypto::{assert_initialized, init_db}; // Pull in the NSS internals so that we can ask NSS if it thinks that // it is properly initialized. 
-#[allow( - dead_code, - non_upper_case_globals, - clippy::redundant_static_lifetimes, - clippy::unseparated_literal_suffix, - clippy::upper_case_acronyms -)] +#[allow(dead_code, non_upper_case_globals)] mod nss { include!(concat!(env!("OUT_DIR"), "/nss_init.rs")); } @@ -29,19 +23,54 @@ mod nss { #[cfg(nss_nodb)] #[test] fn init_nodb() { - init(); + neqo_crypto::init().unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); } } +#[cfg(nss_nodb)] +#[test] +fn init_twice_nodb() { + unsafe { + nss::NSS_NoDB_Init(std::ptr::null()); + assert_ne!(nss::NSS_IsInitialized(), 0); + } + // Now do it again + init_nodb(); +} + #[cfg(not(nss_nodb))] #[test] fn init_withdb() { - init_db(::test_fixture::NSS_DB_PATH); + init_db(::test_fixture::NSS_DB_PATH).unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); + } +} + +#[cfg(not(nss_nodb))] +#[test] +fn init_twice_withdb() { + use std::{ffi::CString, path::PathBuf}; + + let empty = CString::new("").unwrap(); + let path: PathBuf = ::test_fixture::NSS_DB_PATH.into(); + assert!(path.is_dir()); + let pathstr = path.to_str().unwrap(); + let dircstr = CString::new(pathstr).unwrap(); + unsafe { + nss::NSS_Initialize( + dircstr.as_ptr(), + empty.as_ptr(), + empty.as_ptr(), + nss::SECMOD_DB.as_ptr().cast(), + nss::NSS_INIT_READONLY, + ); + assert_ne!(nss::NSS_IsInitialized(), 0); } + // Now do it again + init_withdb(); } diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index b20aa27ee6..9fc2162fe2 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -15,7 +15,7 @@ use neqo_crypto::{ #[test] fn se_create() { - init(); + init().unwrap(); SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).expect("constructor works"); } @@ -23,7 +23,7 @@ const PLAINTEXT: &[u8] = b"PLAINTEXT"; const AAD: &[u8] = b"AAD"; fn sealed() -> (SelfEncrypt, 
Vec) { - init(); + init().unwrap(); let se = SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).unwrap(); let sealed = se.seal(AAD, PLAINTEXT).expect("sealing works"); (se, sealed) diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index a6043cd974..e34fb522ff 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -41,8 +41,12 @@ pub const NSS_DB_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/db"); /// Initialize the test fixture. Only call this if you aren't also calling a /// fixture function that depends on setup. Other functions in the fixture /// that depend on this setup call the function for you. +/// +/// # Panics +/// +/// When the NSS initialization fails. pub fn fixture_init() { - init_db(NSS_DB_PATH); + init_db(NSS_DB_PATH).unwrap(); } // This needs to be > 2ms to avoid it being rounded to zero. From 92ed07f1b7a537c1638a3a6fce8b0baa66e64ac4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 09:53:07 +0300 Subject: [PATCH 290/321] build(deps): bump github/codeql-action from 2.2.4 to 3.24.9 (#1782) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.2.4 to 3.24.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/17573ee1cc1b9d061760f3a006fc4aac4f944fd5...1b1aada464948af03b950897e5eb522f92603cc2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 338c9239c3..dfbda87b58 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 with: sarif_file: results.sarif From 94f8e687ebb0eff9656f556b38edb1a4b43fcb9d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 09:53:27 +0300 Subject: [PATCH 291/321] build(deps): bump ossf/scorecard-action from 2.1.2 to 2.3.1 (#1781) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.1.2 to 2.3.1. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/e38b1902ae4f44df626f11ba0734b14fb91f8f86...0864cf19026789058feabb7e87baa5f140aac736) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index dfbda87b58..01e5fe16a8 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -37,7 +37,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif From 27a7250dd321e60bd2f68564cf7c2fd58fac0e27 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 2 Apr 2024 10:43:29 +0300 Subject: [PATCH 292/321] fix: Exit with `ProtocolViolation` if a packet has no frames (#1779) * fix: Exit with `ProtocolViolation` if a packet has no frames Fixes #1476 * Fix comment * Cleanup * Simplify * Address code review * Clippy --- neqo-transport/src/connection/mod.rs | 4 ++ neqo-transport/src/packet/mod.rs | 6 +-- neqo-transport/tests/connection.rs | 70 ++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 5 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 3bf6b91263..06d6cab9e1 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1552,6 +1552,10 @@ impl Connection { packet: &DecryptedPacket, now: Instant, ) -> Res { + (!packet.is_empty()) + .then_some(()) + .ok_or(Error::ProtocolViolation)?; + // TODO(ekr@rtfm.com): Have the server blow away the initial // crypto state if this fails? Otherwise, we will get a panic // on the assert for doesn't exist. 
diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 0843d050ab..d435ac0dd8 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -764,14 +764,10 @@ impl<'a> PublicPacket<'a> { assert_ne!(self.packet_type, PacketType::Retry); assert_ne!(self.packet_type, PacketType::VersionNegotiation); - qtrace!( - "unmask hdr={}", - hex(&self.data[..self.header_len + SAMPLE_OFFSET]) - ); - let sample_offset = self.header_len + SAMPLE_OFFSET; let mask = if let Some(sample) = self.data.get(sample_offset..(sample_offset + SAMPLE_SIZE)) { + qtrace!("unmask hdr={}", hex(&self.data[..sample_offset])); crypto.compute_mask(sample) } else { Err(Error::NoMoreData) diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 0b91fcf306..b8877b946d 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -127,6 +127,76 @@ fn reorder_server_initial() { assert_eq!(*client.state(), State::Confirmed); } +fn set_payload(server_packet: &Option, client_dcid: &[u8], payload: &[u8]) -> Datagram { + let (server_initial, _server_hs) = split_datagram(server_packet.as_ref().unwrap()); + let (protected_header, _, _, orig_payload) = + decode_initial_header(&server_initial, Role::Server); + + // Now decrypt the packet. + let (aead, hp) = initial_aead_and_hp(client_dcid, Role::Server); + let (mut header, pn) = remove_header_protection(&hp, protected_header, orig_payload); + assert_eq!(pn, 0); + // Re-encode the packet number as four bytes, so we have enough material for the header + // protection sample if payload is empty. + let pn_pos = header.len() - 2; + header[pn_pos] = u8::try_from(4 + aead.expansion()).unwrap(); + header.resize(header.len() + 3, 0); + header[0] |= 0b0000_0011; // Set the packet number length to 4. + + // And build a packet containing the given payload. 
+ let mut packet = header.clone(); + packet.resize(header.len() + payload.len() + aead.expansion(), 0); + aead.encrypt(pn, &header, payload, &mut packet[header.len()..]) + .unwrap(); + apply_header_protection(&hp, &mut packet, protected_header.len()..header.len()); + Datagram::new( + server_initial.source(), + server_initial.destination(), + server_initial.tos(), + server_initial.ttl(), + packet, + ) +} + +/// Test that the stack treats a packet without any frames as a protocol violation. +#[test] +fn packet_without_frames() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[]); + client.process_input(&modified, now()); + assert_eq!( + client.state(), + &State::Closed(ConnectionError::Transport(Error::ProtocolViolation)) + ); +} + +/// Test that the stack permits a packet containing only padding. +#[test] +fn packet_with_only_padding() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[0]); + client.process_input(&modified, now()); + assert_eq!(client.state(), &State::WaitInitial); +} + /// Overflow the crypto buffer. #[allow(clippy::similar_names)] // For ..._scid and ..._dcid, which are fine. 
#[test] From 68606871ca9f1a8435f2472bcaf49e83a06a823b Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Apr 2024 14:23:59 +0200 Subject: [PATCH 293/321] refactor: print current `StateSignaling` variant in `debug_assert` (#1783) * refactor: print current `StateSignaling` variant in debug_assert CI paniced in `StateSignaling::handshake_done`. Though failure hasn't been reproducible locally. To ease debugging future CI failures, print the current state on panic. ``` thread 'idle_timeout_crazy_rtt' panicked at neqo-transport\src\connection\state.rs:212:13: StateSignaling must be in Idle state. stack backtrace: 0: std::panicking::begin_panic_handler at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\std\src\panicking.rs:646 1: core::panicking::panic_fmt at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\core\src\panicking.rs:72 2: enum2$::handshake_done at .\src\connection\state.rs:212 3: neqo_transport::connection::Connection::handle_lost_packets at .\src\connection\mod.rs:2847 4: neqo_transport::connection::Connection::process_timer at .\src\connection\mod.rs:966 5: neqo_transport::connection::Connection::process_output at .\src\connection\mod.rs:1085 6: neqo_transport::connection::Connection::process at .\src\connection\mod.rs:1108 7: test_fixture::sim::connection::impl$1::process at D:\a\neqo\neqo\test-fixture\src\sim\connection.rs:146 8: test_fixture::sim::Simulator::process_loop at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:193 9: test_fixture::sim::ReadySimulator::run at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:284 10: test_fixture::sim::Simulator::run at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:265 11: network::idle_timeout_crazy_rtt at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:69 12: network::idle_timeout_crazy_rtt::closure$0 at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:58 13: core::ops::function::FnOnce::call_once > at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373\library\core\src\ops\function.rs:250 14: 
core::ops::function::FnOnce::call_once at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\core\src\ops\function.rs:250 note: Some details are omitted, run with `RUST_BACKTRACE=full` for a verbose backtrace. ``` https://github.com/mozilla/neqo/actions/runs/8496770595/job/23274538553 * clippy --- neqo-transport/src/connection/state.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index 9789151d3f..cc2f6e30d2 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -209,7 +209,10 @@ pub enum StateSignaling { impl StateSignaling { pub fn handshake_done(&mut self) { if !matches!(self, Self::Idle) { - debug_assert!(false, "StateSignaling must be in Idle state."); + debug_assert!( + false, + "StateSignaling must be in Idle state but is in {self:?} state.", + ); return; } *self = Self::HandshakeDone; From 07514299a70a8aea8d28a25544e85cf96620d976 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Apr 2024 18:25:10 +0200 Subject: [PATCH 294/321] refactor(server): simplify :method POST check (#1787) --- neqo-bin/src/server/mod.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs index 38eb766f5f..e3067ecdf0 100644 --- a/neqo-bin/src/server/mod.rs +++ b/neqo-bin/src/server/mod.rs @@ -336,13 +336,10 @@ impl HttpServer for SimpleServer { } => { qdebug!("Headers (request={stream} fin={fin}): {headers:?}"); - let post = if let Some(method) = headers.iter().find(|&h| h.name() == ":method") + if headers + .iter() + .any(|h| h.name() == ":method" && h.value() == "POST") { - method.value() == "POST" - } else { - false - }; - if post { self.posts.insert(stream, 0); continue; } From 301ebe40da3794cb4e6410cd0cca87fe3e2c4f1c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 3 Apr 2024 18:53:13 +0200 Subject: [PATCH 295/321] ci(interop/action): always 
upload comment data (#1790) In https://github.com/mozilla/neqo/pull/1785 the QUIC Network Simulator workflow failed. https://github.com/mozilla/neqo/actions/runs/8523273988/job/23345192160?pr=1785 This triggers the QUIC Network Simulator Comment workflow. https://github.com/mozilla/neqo/actions/runs/8524906268 Though currently, the former does not upload the _comment_ artifact on failure, which is to be consumed by the latter. This commit makes the former always upload the comment data, such that the latter can consume it on failure. --- .github/actions/quic-interop-runner/action.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index 30c7f0d8d6..4d88191646 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -89,6 +89,7 @@ runs: path: quic-interop-runner/logs - name: Format GitHub comment + if: always() run: | echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment echo '' >> comment @@ -98,6 +99,7 @@ runs: shell: bash - name: Export PR comment data + if: always() uses: ./.github/actions/pr-comment-data-export with: name: qns From 61fcd282c420da03a566cf3324b4dc86b04415e4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 4 Apr 2024 07:11:27 +0200 Subject: [PATCH 296/321] perf(transport): remove Server::timers (#1784) * perf(transport): remove Server::timers The current `neqo_transport::server::Server::timers` has a large performance overhead, especially when serving small amount of connections. See https://github.com/mozilla/neqo/pull/1780 for details. This commit optimizes for the small-number-of-connections case, keeping a single callback timestamp only, iterating each connection when there is no other work to be done. 
* Cleanups * Rename to wake_at * Introduce ServerConnectionState::{set_wake_at,needs_waking,woken} --- neqo-common/Cargo.toml | 4 - neqo-common/benches/timer.rs | 39 ---- neqo-common/src/lib.rs | 1 - neqo-common/src/timer.rs | 420 ----------------------------------- neqo-transport/src/server.rs | 87 ++++---- 5 files changed, 42 insertions(+), 509 deletions(-) delete mode 100644 neqo-common/benches/timer.rs delete mode 100644 neqo-common/src/timer.rs diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 069d67b834..0cb4bcbf4f 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -34,7 +34,3 @@ features = ["timeapi"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false - -[[bench]] -name = "timer" -harness = false diff --git a/neqo-common/benches/timer.rs b/neqo-common/benches/timer.rs deleted file mode 100644 index 5ac8019db4..0000000000 --- a/neqo-common/benches/timer.rs +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::time::{Duration, Instant}; - -use criterion::{criterion_group, criterion_main, Criterion}; -use neqo_common::timer::Timer; -use test_fixture::now; - -fn benchmark_timer(c: &mut Criterion) { - c.bench_function("drain a timer quickly", |b| { - b.iter_batched_ref( - make_timer, - |(_now, timer)| { - while let Some(t) = timer.next_time() { - assert!(timer.take_next(t).is_some()); - } - }, - criterion::BatchSize::SmallInput, - ); - }); -} - -fn make_timer() -> (Instant, Timer<()>) { - const TIMES: &[u64] = &[1, 2, 3, 5, 8, 13, 21, 34]; - - let now = now(); - let mut timer = Timer::new(now, Duration::from_millis(777), 100); - for &t in TIMES { - timer.add(now + Duration::from_secs(t), ()); - } - (now, timer) -} - -criterion_group!(benches, benchmark_timer); -criterion_main!(benches); diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index e988c6071d..f3e8e63023 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -14,7 +14,6 @@ pub mod hrtime; mod incrdecoder; pub mod log; pub mod qlog; -pub mod timer; pub mod tos; use std::fmt::Write; diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs deleted file mode 100644 index 3feddb2226..0000000000 --- a/neqo-common/src/timer.rs +++ /dev/null @@ -1,420 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::{ - collections::VecDeque, - mem, - time::{Duration, Instant}, -}; - -/// Internal structure for a timer item. -struct TimerItem { - time: Instant, - item: T, -} - -impl TimerItem { - fn time(ti: &Self) -> Instant { - ti.time - } -} - -/// A timer queue. -/// This uses a classic timer wheel arrangement, with some characteristics that might be considered -/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut -/// points). 
Time is relative, the wheel has an origin time and it is unable to represent times that -/// are more than `granularity * capacity` past that time. -pub struct Timer { - items: Vec>>, - now: Instant, - granularity: Duration, - cursor: usize, -} - -impl Timer { - /// Construct a new wheel at the given granularity, starting at the given time. - /// - /// # Panics - /// - /// When `capacity` is too large to fit in `u32` or `granularity` is zero. - pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self { - assert!(u32::try_from(capacity).is_ok()); - assert!(granularity.as_nanos() > 0); - let mut items = Vec::with_capacity(capacity); - items.resize_with(capacity, Default::default); - Self { - items, - now, - granularity, - cursor: 0, - } - } - - /// Return a reference to the time of the next entry. - #[must_use] - pub fn next_time(&self) -> Option { - let idx = self.bucket(0); - for i in idx..self.items.len() { - if let Some(t) = self.items[i].front() { - return Some(t.time); - } - } - for i in 0..idx { - if let Some(t) = self.items[i].front() { - return Some(t.time); - } - } - None - } - - /// Get the full span of time that this can cover. - /// Two timers cannot be more than this far apart. - /// In practice, this value is less by one amount of the timer granularity. - #[inline] - #[allow(clippy::cast_possible_truncation)] // guarded by assertion - #[must_use] - pub fn span(&self) -> Duration { - self.granularity * (self.items.len() as u32) - } - - /// For the given `time`, get the number of whole buckets in the future that is. - #[inline] - #[allow(clippy::cast_possible_truncation)] // guarded by assertion - fn delta(&self, time: Instant) -> usize { - // This really should use Duration::div_duration_f??(), but it can't yet. 
- ((time - self.now).as_nanos() / self.granularity.as_nanos()) as usize - } - - #[inline] - fn time_bucket(&self, time: Instant) -> usize { - self.bucket(self.delta(time)) - } - - #[inline] - fn bucket(&self, delta: usize) -> usize { - debug_assert!(delta < self.items.len()); - (self.cursor + delta) % self.items.len() - } - - /// Slide forward in time by `n * self.granularity`. - #[allow(clippy::cast_possible_truncation, clippy::reversed_empty_ranges)] - // cast_possible_truncation is ok because we have an assertion guard. - // reversed_empty_ranges is to avoid different types on the if/else. - fn tick(&mut self, n: usize) { - let new = self.bucket(n); - let iter = if new < self.cursor { - (self.cursor..self.items.len()).chain(0..new) - } else { - (self.cursor..new).chain(0..0) - }; - for i in iter { - assert!(self.items[i].is_empty()); - } - self.now += self.granularity * (n as u32); - self.cursor = new; - } - - /// Asserts if the time given is in the past or too far in the future. - /// - /// # Panics - /// - /// When `time` is in the past relative to previous calls. - pub fn add(&mut self, time: Instant, item: T) { - assert!(time >= self.now); - // Skip forward quickly if there is too large a gap. - let short_span = self.span() - self.granularity; - if time >= (self.now + self.span() + short_span) { - // Assert that there aren't any items. - for i in &self.items { - debug_assert!(i.is_empty()); - } - self.now = time.checked_sub(short_span).unwrap(); - self.cursor = 0; - } - - // Adjust time forward the minimum amount necessary. - let mut d = self.delta(time); - if d >= self.items.len() { - self.tick(1 + d - self.items.len()); - d = self.items.len() - 1; - } - - let bucket = self.bucket(d); - let ins = match self.items[bucket].binary_search_by_key(&time, TimerItem::time) { - Ok(j) | Err(j) => j, - }; - self.items[bucket].insert(ins, TimerItem { time, item }); - } - - /// Given knowledge of the time an item was added, remove it. 
- /// This requires use of a predicate that identifies matching items. - /// - /// # Panics - /// Impossible, I think. - pub fn remove(&mut self, time: Instant, mut selector: F) -> Option - where - F: FnMut(&T) -> bool, - { - if time < self.now { - return None; - } - if time > self.now + self.span() { - return None; - } - let bucket = self.time_bucket(time); - let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time) - else { - return None; - }; - // start_index is just one of potentially many items with the same time. - // Search backwards for a match, ... - for i in (0..=start_index).rev() { - if self.items[bucket][i].time != time { - break; - } - if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).unwrap().item); - } - } - // ... then forwards. - for i in (start_index + 1)..self.items[bucket].len() { - if self.items[bucket][i].time != time { - break; - } - if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).unwrap().item); - } - } - None - } - - /// Take the next item, unless there are no items with - /// a timeout in the past relative to `until`. - pub fn take_next(&mut self, until: Instant) -> Option { - fn maybe_take(v: &mut VecDeque>, until: Instant) -> Option { - if !v.is_empty() && v[0].time <= until { - Some(v.pop_front().unwrap().item) - } else { - None - } - } - - let idx = self.bucket(0); - for i in idx..self.items.len() { - let res = maybe_take(&mut self.items[i], until); - if res.is_some() { - return res; - } - } - for i in 0..idx { - let res = maybe_take(&mut self.items[i], until); - if res.is_some() { - return res; - } - } - None - } - - /// Create an iterator that takes all items until the given time. - /// Note: Items might be removed even if the iterator is not fully exhausted. 
- pub fn take_until(&mut self, until: Instant) -> impl Iterator { - let get_item = move |x: TimerItem| x.item; - if until >= self.now + self.span() { - // Drain everything, so a clean sweep. - let mut empty_items = Vec::with_capacity(self.items.len()); - empty_items.resize_with(self.items.len(), VecDeque::default); - let mut items = mem::replace(&mut self.items, empty_items); - self.now = until; - self.cursor = 0; - - let tail = items.split_off(self.cursor); - return tail.into_iter().chain(items).flatten().map(get_item); - } - - // Only returning a partial span, so do it bucket at a time. - let delta = self.delta(until); - let mut buckets = Vec::with_capacity(delta + 1); - - // First, the whole buckets. - for i in 0..delta { - let idx = self.bucket(i); - buckets.push(mem::take(&mut self.items[idx])); - } - self.tick(delta); - - // Now we need to split the last bucket, because there might be - // some items with `item.time > until`. - let bucket = &mut self.items[self.cursor]; - let last_idx = match bucket.binary_search_by_key(&until, TimerItem::time) { - Ok(mut m) => { - // If there are multiple values, the search will hit any of them. - // Make sure to get them all. - while m < bucket.len() && bucket[m].time == until { - m += 1; - } - m - } - Err(ins) => ins, - }; - let tail = bucket.split_off(last_idx); - buckets.push(mem::replace(bucket, tail)); - // This tomfoolery with the empty vector ensures that - // the returned type here matches the one above precisely - // without having to invoke the `either` crate. 
- buckets.into_iter().chain(vec![]).flatten().map(get_item) - } -} - -#[cfg(test)] -mod test { - use std::sync::OnceLock; - - use super::{Duration, Instant, Timer}; - - fn now() -> Instant { - static NOW: OnceLock = OnceLock::new(); - *NOW.get_or_init(Instant::now) - } - - const GRANULARITY: Duration = Duration::from_millis(10); - const CAPACITY: usize = 10; - #[test] - fn create() { - let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY); - assert_eq!(t.span(), Duration::from_millis(100)); - assert_eq!(None, t.next_time()); - } - - #[test] - fn immediate_entry() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - t.add(now(), 12); - assert_eq!(now(), t.next_time().expect("should have an entry")); - let values: Vec<_> = t.take_until(now()).collect(); - assert_eq!(vec![12], values); - } - - #[test] - fn same_time() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let v1 = 12; - let v2 = 13; - t.add(now(), v1); - t.add(now(), v2); - assert_eq!(now(), t.next_time().expect("should have an entry")); - let values: Vec<_> = t.take_until(now()).collect(); - assert!(values.contains(&v1)); - assert!(values.contains(&v2)); - } - - #[test] - fn add() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let near_future = now() + Duration::from_millis(17); - let v = 9; - t.add(near_future, v); - assert_eq!(near_future, t.next_time().expect("should return a value")); - assert_eq!( - t.take_until(near_future.checked_sub(Duration::from_millis(1)).unwrap()) - .count(), - 0 - ); - assert!(t - .take_until(near_future + Duration::from_millis(1)) - .any(|x| x == v)); - } - - #[test] - fn add_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let future = now() + Duration::from_millis(117); - let v = 9; - t.add(future, v); - assert_eq!(future, t.next_time().expect("should return a value")); - assert!(t.take_until(future).any(|x| x == v)); - } - - #[test] - fn add_far_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let 
far_future = now() + Duration::from_millis(892); - let v = 9; - t.add(far_future, v); - assert_eq!(far_future, t.next_time().expect("should return a value")); - assert!(t.take_until(far_future).any(|x| x == v)); - } - - const TIMES: &[Duration] = &[ - Duration::from_millis(40), - Duration::from_millis(91), - Duration::from_millis(6), - Duration::from_millis(3), - Duration::from_millis(22), - Duration::from_millis(40), - ]; - - fn with_times() -> Timer { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - for (i, time) in TIMES.iter().enumerate() { - t.add(now() + *time, i); - } - assert_eq!( - now() + *TIMES.iter().min().unwrap(), - t.next_time().expect("should have a time") - ); - t - } - - #[test] - #[allow(clippy::needless_collect)] // false positive - fn multiple_values() { - let mut t = with_times(); - let values: Vec<_> = t.take_until(now() + *TIMES.iter().max().unwrap()).collect(); - for i in 0..TIMES.len() { - assert!(values.contains(&i)); - } - } - - #[test] - #[allow(clippy::needless_collect)] // false positive - fn take_far_future() { - let mut t = with_times(); - let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect(); - for i in 0..TIMES.len() { - assert!(values.contains(&i)); - } - } - - #[test] - fn remove_each() { - let mut t = with_times(); - for (i, time) in TIMES.iter().enumerate() { - assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i)); - } - assert_eq!(None, t.next_time()); - } - - #[test] - fn remove_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let future = now() + Duration::from_millis(117); - let v = 9; - t.add(future, v); - - assert_eq!(Some(v), t.remove(future, |candidate| *candidate == v)); - } - - #[test] - fn remove_too_far_future() { - let mut t = Timer::new(now(), GRANULARITY, CAPACITY); - let future = now() + Duration::from_millis(117); - let too_far_future = now() + t.span() + Duration::from_millis(117); - let v = 9; - t.add(future, v); - - assert_eq!(None, 
t.remove(too_far_future, |candidate| *candidate == v)); - } -} diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 96a6244ef1..7d3d144a09 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -15,12 +15,12 @@ use std::{ ops::{Deref, DerefMut}, path::PathBuf, rc::{Rc, Weak}, - time::{Duration, Instant}, + time::Instant, }; use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, - timer::Timer, Datagram, Decoder, Role, + Datagram, Decoder, Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, @@ -46,13 +46,6 @@ pub enum InitialResult { /// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; -/// The size of timer buckets. This is higher than the actual timer granularity -/// as this depends on there being some distribution of events. -const TIMER_GRANULARITY: Duration = Duration::from_millis(4); -/// The number of buckets in the timer. As mentioned in the definition of `Timer`, -/// the granularity and capacity need to multiply to be larger than the largest -/// delay that might be used. That's the idle timeout (currently 30s). 
-const TIMER_CAPACITY: usize = 16384; type StateRef = Rc>; type ConnectionTableRef = Rc>>; @@ -61,7 +54,21 @@ type ConnectionTableRef = Rc>>; pub struct ServerConnectionState { c: Connection, active_attempt: Option, - last_timer: Instant, + wake_at: Option, +} + +impl ServerConnectionState { + fn set_wake_at(&mut self, at: Instant) { + self.wake_at = Some(at); + } + + fn needs_waking(&self, now: Instant) -> bool { + self.wake_at.map_or(false, |t| t <= now) + } + + fn woken(&mut self) { + self.wake_at = None; + } } impl Deref for ServerConnectionState { @@ -174,8 +181,8 @@ pub struct Server { active: HashSet, /// The set of connections that need immediate processing. waiting: VecDeque, - /// Outstanding timers for connections. - timers: Timer, + /// The latest [`Output::Callback`] returned from [`Server::process`]. + wake_at: Option, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc>, /// Directory to create qlog traces in @@ -219,10 +226,10 @@ impl Server { connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), - timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, + wake_at: None, }) } @@ -260,11 +267,6 @@ impl Server { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } - fn remove_timer(&mut self, c: &StateRef) { - let last = c.borrow().last_timer; - self.timers.remove(last, |t| Rc::ptr_eq(t, c)); - } - fn process_connection( &mut self, c: &StateRef, @@ -280,16 +282,12 @@ impl Server { } Output::Callback(delay) => { let next = now + delay; - if next != c.borrow().last_timer { - qtrace!([self], "Change timer to {:?}", next); - self.remove_timer(c); - c.borrow_mut().last_timer = next; - self.timers.add(next, Rc::clone(c)); + c.borrow_mut().set_wake_at(next); + if self.wake_at.map_or(true, |c| c > next) { + self.wake_at = Some(next); } } - Output::None => { - self.remove_timer(c); - } + 
Output::None => {} } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); @@ -507,7 +505,7 @@ impl Server { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, - last_timer: now, + wake_at: None, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(&c); @@ -646,24 +644,28 @@ impl Server { return Some(d); } } - qtrace!([self], "No packet to send still, run timers"); - while let Some(c) = self.timers.take_next(now) { - if let Some(d) = self.process_connection(&c, None, now) { - return Some(d); + + qtrace!([self], "No packet to send still, check wake up times"); + loop { + let connection = self + .connections + .borrow() + .values() + .find(|c| c.borrow().needs_waking(now)) + .cloned()?; + let datagram = self.process_connection(&connection, None, now); + connection.borrow_mut().woken(); + if datagram.is_some() { + return datagram; } } - None } - fn next_time(&mut self, now: Instant) -> Option { - if self.waiting.is_empty() { - self.timers.next_time().map(|x| x - now) - } else { - Some(Duration::new(0, 0)) + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { + if self.wake_at.map_or(false, |c| c <= now) { + self.wake_at = None; } - } - pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { dgram .and_then(|d| self.process_input(d, now)) .or_else(|| self.process_next_output(now)) @@ -671,12 +673,7 @@ impl Server { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) }) - .or_else(|| { - self.next_time(now).map(|delay| { - qtrace!([self], "Wait: {:?}", delay); - Output::Callback(delay) - }) - }) + .or_else(|| self.wake_at.take().map(|c| Output::Callback(c - now))) .unwrap_or_else(|| { qtrace!([self], "Go dormant"); Output::None From a33fe606766b1f907c60075e7b88992112adc353 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 4 Apr 2024 15:44:04 +0300 Subject: [PATCH 297/321] Set threshold to 
0.05% --- .codecov.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.codecov.yml b/.codecov.yml index 3ecf204940..7ff673d877 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -9,3 +9,9 @@ codecov: after_n_builds: 3 comment: after_n_builds: 3 + +coverage: + status: + project: + default: + threshold: 0.05% From 1dc8ea33e27b65a0294ff3502204285358fc4a77 Mon Sep 17 00:00:00 2001 From: Manuel Bucher Date: Thu, 4 Apr 2024 18:34:53 +0200 Subject: [PATCH 298/321] neqo v0.7.3 (#1791) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e5bec00796..48f3b7d74b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ resolver = "2" homepage = "https://github.com/mozilla/neqo/" repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] -version = "0.7.2" +version = "0.7.3" # Keep in sync with `.rustfmt.toml` `edition`. edition = "2021" license = "MIT OR Apache-2.0" From 5dfe106669ccb695187511305c21b8e8a8775e91 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 5 Apr 2024 06:52:38 +0200 Subject: [PATCH 299/321] refactor: have process_input delegate to process_multiple_input (#1792) The `Connection::process_input` and `Connection::process_multiple_input` functions are identical, except that the latter handles multiple `Datagram`s. To avoid any changes to one without updating the other, have `process_input` simply delegate to `process_multiple_input`. Commit also does the equivalent change to `neqo_http3::Http3Client`. 
--- neqo-http3/src/connection_client.rs | 6 ++---- neqo-transport/src/connection/mod.rs | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 836816b337..be20126353 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -7,7 +7,7 @@ use std::{ cell::RefCell, fmt::{Debug, Display}, - mem, + iter, mem, net::SocketAddr, rc::Rc, time::Instant, @@ -874,9 +874,7 @@ impl Http3Client { /// /// [1]: ../neqo_transport/enum.ConnectionEvent.html pub fn process_input(&mut self, dgram: &Datagram, now: Instant) { - qtrace!([self], "Process input."); - self.conn.process_input(dgram, now); - self.process_http3(now); + self.process_multiple_input(iter::once(dgram), now); } pub fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 06d6cab9e1..9cddcdac28 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -10,7 +10,7 @@ use std::{ cell::RefCell, cmp::{max, min}, fmt::{self, Debug}, - mem, + iter, mem, net::{IpAddr, SocketAddr}, ops::RangeInclusive, rc::{Rc, Weak}, @@ -978,9 +978,7 @@ impl Connection { /// Process new input datagrams on the connection. pub fn process_input(&mut self, d: &Datagram, now: Instant) { - self.input(d, now, now); - self.process_saved(now); - self.streams.cleanup_closed_streams(); + self.process_multiple_input(iter::once(d), now); } /// Process new input datagrams on the connection. From e38e3cfa0c688725e88f5cdd7fe850833c910f0a Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 8 Apr 2024 07:01:23 +0200 Subject: [PATCH 300/321] fix(bin/bench): benchmark handshakes per second (HPS) without zero RTT (#1795) The "100-seq-conn/1-1b-resp (aka. HPS)" benchmark supposedly measures the time for 100 uniform handshakes. 
Though `neqo-client` by default uses a resumption token for zero RTT if available. Thus the first is a normal and all following 99 handshakes are a zero RTT handshake. To simplify the benchmark and to measure the same handshake type, establish a single connection within the benchmark function and have criterion do all the iterating. --- neqo-bin/benches/main.rs | 12 +++--------- neqo-bin/src/client/mod.rs | 4 ++-- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs index 6bb8b3161d..3afcc5d127 100644 --- a/neqo-bin/benches/main.rs +++ b/neqo-bin/benches/main.rs @@ -13,8 +13,6 @@ use tokio::runtime::Runtime; struct Benchmark { name: String, requests: Vec, - /// Download resources in series using separate connections. - download_in_series: bool, sample_size: Option, } @@ -27,25 +25,21 @@ fn transfer(c: &mut Criterion) { for Benchmark { name, requests, - download_in_series, sample_size, } in [ Benchmark { name: "1-conn/1-100mb-resp (aka. Download)".to_string(), requests: vec![100 * 1024 * 1024], - download_in_series: false, sample_size: Some(10), }, Benchmark { name: "1-conn/10_000-1b-seq-resp (aka. RPS)".to_string(), requests: vec![1; 10_000], - download_in_series: false, sample_size: None, }, Benchmark { - name: "100-seq-conn/1-1b-resp (aka. HPS)".to_string(), - requests: vec![1; 100], - download_in_series: true, + name: "1-conn/1-1b-resp (aka. 
HPS)".to_string(), + requests: vec![1; 1], sample_size: None, }, ] { @@ -61,7 +55,7 @@ fn transfer(c: &mut Criterion) { } group.bench_function("client", |b| { b.to_async(Runtime::new().unwrap()).iter_batched( - || client::client(client::Args::new(&requests, download_in_series)), + || client::client(client::Args::new(&requests)), |client| async move { client.await.unwrap(); }, diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index 81721802e1..ad6e34e15b 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -200,7 +200,7 @@ impl Args { #[must_use] #[cfg(feature = "bench")] #[allow(clippy::missing_panics_doc)] - pub fn new(requests: &[u64], download_in_series: bool) -> Self { + pub fn new(requests: &[u64]) -> Self { use std::str::FromStr; Self { verbose: clap_verbosity_flag::Verbosity::::default(), @@ -212,7 +212,7 @@ impl Args { method: "GET".into(), header: vec![], max_concurrent_push_streams: 10, - download_in_series, + download_in_series: false, concurrency: 100, output_read_data: false, output_dir: Some("/dev/null".into()), From aca1352b6bf93d8fe2cf0f627dc2fdf83a0bf95d Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 8 Apr 2024 07:01:58 +0200 Subject: [PATCH 301/321] fix(bin/bench): rename 1-conn/10_000-1b-seq-resp to parallel (#1796) The benchmark is using the default concurrency factor. By default `neqo-client` runs up to `100` requests in parallel. https://github.com/mozilla/neqo/blob/5dfe106669ccb695187511305c21b8e8a8775e91/neqo-bin/src/client/mod.rs#L151-L153 Thus the benchmark name is wrong, i.e. the requests are run in parallel and not sequentially. 
--- neqo-bin/benches/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs index 3afcc5d127..59927ebe0c 100644 --- a/neqo-bin/benches/main.rs +++ b/neqo-bin/benches/main.rs @@ -33,7 +33,7 @@ fn transfer(c: &mut Criterion) { sample_size: Some(10), }, Benchmark { - name: "1-conn/10_000-1b-seq-resp (aka. RPS)".to_string(), + name: "1-conn/10_000-parallel-1b-resp (aka. RPS)".to_string(), requests: vec![1; 10_000], sample_size: None, }, From 992d588ea09b896b7877692e95e2c561d22aa907 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:05:32 +0300 Subject: [PATCH 302/321] build(deps): bump github/codeql-action from 3.24.9 to 3.24.10 (#1797) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.24.9 to 3.24.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/1b1aada464948af03b950897e5eb522f92603cc2...4355270be187e1b672a7a1c7c7bae5afdc1ab94a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 01e5fe16a8..2c0b04d09e 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + uses: github/codeql-action/upload-sarif@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 with: sarif_file: results.sarif From a65d945344a2dee7ddb2e81f0875bdb24c00ebec Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 8 Apr 2024 14:31:53 +0300 Subject: [PATCH 303/321] chore: Fix clippy `usage of a legacy numeric method` (#1798) `u64::max_value()` -> `u64::MAX` --- neqo-crypto/src/time.rs | 4 ++-- neqo-qpack/src/table.rs | 2 +- neqo-transport/src/crypto.rs | 2 +- neqo-transport/src/packet/mod.rs | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index 0e59c4f5e2..359436a854 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -258,11 +258,11 @@ mod test { #[test] // We allow replace_consts here because - // std::u64::max_value() isn't available + // std::u64::MAX isn't available // in all of our targets fn overflow_interval() { init(); - let interval = Interval::from(Duration::from_micros(u64::max_value())); + let interval = Interval::from(Duration::from_micros(u64::MAX)); let res: Res = interval.try_into(); assert!(res.is_err()); } diff --git a/neqo-qpack/src/table.rs b/neqo-qpack/src/table.rs index 517e98db09..d5275ec98f 100644 --- a/neqo-qpack/src/table.rs +++ b/neqo-qpack/src/table.rs @@ -94,7 +94,7 @@ impl HeaderTable { capacity: 0, used: 0, base: 0, - acked_inserts_cnt: if encoder { 0 } else { u64::max_value() }, + acked_inserts_cnt: if encoder { 0 } else { u64::MAX }, } } diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 54bfe622cf..60d056f2d2 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -758,7 +758,7 @@ impl CryptoDxAppData { } pub fn next(&self) -> Res { - if self.dx.epoch == usize::max_value() { + if self.dx.epoch == usize::MAX { // Guard against too many key updates. 
return Err(Error::KeysExhausted); } diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index d435ac0dd8..f5e8320ccb 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -158,7 +158,7 @@ impl PacketBuilder { } Self { encoder, - pn: u64::max_value(), + pn: u64::MAX, header: header_start..header_start, offsets: PacketBuilderOffsets { first_byte_mask: PACKET_HP_MASK_SHORT, @@ -201,7 +201,7 @@ impl PacketBuilder { Self { encoder, - pn: u64::max_value(), + pn: u64::MAX, header: header_start..header_start, offsets: PacketBuilderOffsets { first_byte_mask: PACKET_HP_MASK_LONG, From 32a2a59e4ea0c09bbb5ddb0c0a42fc6f0658dc70 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 8 Apr 2024 15:49:03 +0200 Subject: [PATCH 304/321] fix(client): exit with non-zero on error (#1786) * fix(client): exit with non-zero on error When a connection closes with an error, surface the error to the user and exit with non-zero. * Trigger CI --- neqo-bin/src/client/http09.rs | 11 +++++++---- neqo-bin/src/client/http3.rs | 10 +++++++--- neqo-bin/src/client/mod.rs | 26 ++++++++++++++++++++++---- neqo-transport/src/lib.rs | 4 ++-- 4 files changed, 38 insertions(+), 13 deletions(-) diff --git a/neqo-bin/src/client/http09.rs b/neqo-bin/src/client/http09.rs index 9bdb6dca85..b157a6a13f 100644 --- a/neqo-bin/src/client/http09.rs +++ b/neqo-bin/src/client/http09.rs @@ -20,8 +20,8 @@ use std::{ use neqo_common::{event::Provider, qdebug, qinfo, qwarn, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ - Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, - StreamType, + Connection, ConnectionError, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, + StreamId, StreamType, }; use url::Url; @@ -149,8 +149,11 @@ impl super::Client for Connection { self.close(now, app_error, msg); } - fn is_closed(&self) -> bool { - matches!(self.state(), 
State::Closed(..)) + fn is_closed(&self) -> Option { + if let State::Closed(err) = self.state() { + return Some(err.clone()); + } + None } fn stats(&self) -> neqo_transport::Stats { diff --git a/neqo-bin/src/client/http3.rs b/neqo-bin/src/client/http3.rs index c88a8448f6..d56af5eda9 100644 --- a/neqo-bin/src/client/http3.rs +++ b/neqo-bin/src/client/http3.rs @@ -22,7 +22,8 @@ use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_http3::{Error, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; use neqo_transport::{ - AppError, Connection, EmptyConnectionIdGenerator, Error as TransportError, Output, StreamId, + AppError, Connection, ConnectionError, EmptyConnectionIdGenerator, Error as TransportError, + Output, StreamId, }; use url::Url; @@ -111,8 +112,11 @@ pub(crate) fn create_client( } impl super::Client for Http3Client { - fn is_closed(&self) -> bool { - matches!(self.state(), Http3State::Closed(..)) + fn is_closed(&self) -> Option { + if let Http3State::Closed(err) = self.state() { + return Some(err); + } + None } fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index ad6e34e15b..49c116aa95 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -27,7 +27,7 @@ use neqo_crypto::{ init, Cipher, ResumptionToken, }; use neqo_http3::Output; -use neqo_transport::{AppError, ConnectionId, Error as TransportError, Version}; +use neqo_transport::{AppError, ConnectionError, ConnectionId, Error as TransportError, Version}; use qlog::{events::EventImportance, streamer::QlogStreamer}; use tokio::time::Sleep; use url::{Origin, Url}; @@ -46,6 +46,7 @@ pub enum Error { IoError(io::Error), QlogError, TransportError(neqo_transport::Error), + ApplicationError(neqo_transport::AppError), CryptoError(neqo_crypto::Error), } @@ -79,6 +80,15 @@ impl From for 
Error { } } +impl From for Error { + fn from(err: neqo_transport::ConnectionError) -> Self { + match err { + ConnectionError::Transport(e) => Self::TransportError(e), + ConnectionError::Application(e) => Self::ApplicationError(e), + } + } +} + impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {self:?}")?; @@ -371,7 +381,11 @@ trait Client { fn close(&mut self, now: Instant, app_error: AppError, msg: S) where S: AsRef + Display; - fn is_closed(&self) -> bool; + /// Returns [`Some(_)`] if the connection is closed. + /// + /// Note that connection was closed without error on + /// [`Some(ConnectionError::Transport(TransportError::NoError))`]. + fn is_closed(&self) -> Option; fn stats(&self) -> neqo_transport::Stats; } @@ -406,11 +420,15 @@ impl<'a, H: Handler> Runner<'a, H> { self.process(None).await?; - if self.client.is_closed() { + if let Some(reason) = self.client.is_closed() { if self.args.stats { qinfo!("{:?}", self.client.stats()); } - return Ok(self.handler.take_token()); + return match reason { + ConnectionError::Transport(TransportError::NoError) + | ConnectionError::Application(0) => Ok(self.handler.take_token()), + _ => Err(reason.into()), + }; } match ready(self.socket, self.timeout.as_mut()).await? { diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index 8fabbeb9a3..5488472b58 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -70,8 +70,8 @@ const ERROR_AEAD_LIMIT_REACHED: TransportError = 15; #[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)] pub enum Error { NoError, - // Each time tihe error is return a different parameter is supply. - // This will be use to distinguish each occurance of this error. + // Each time this error is returned a different parameter is supplied. + // This will be used to distinguish each occurance of this error. 
InternalError, ConnectionRefused, FlowControlError, From 6daede078b19c99a53ea5f4405c124683723b08e Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 9 Apr 2024 08:49:51 +0300 Subject: [PATCH 305/321] chore: Fix various QNS issues (#1799) * Record client and server logs during run * Fix result comment table formatting * Fix docker image build (on macOS at least) --- .dockerignore | 1 + .../actions/quic-interop-runner/action.yml | 11 ++++- qns/Dockerfile | 4 +- qns/interop.sh | 45 +++++++++---------- 4 files changed, 33 insertions(+), 28 deletions(-) diff --git a/.dockerignore b/.dockerignore index cc95fda49e..324520383b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,5 +5,6 @@ !**/*.rs !**/*.h !**/*.hpp +!neqo-crypto/min_version.txt !qns !Cargo.lock diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index 4d88191646..7fd055f184 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -91,15 +91,22 @@ runs: - name: Format GitHub comment if: always() run: | + if [ -s quic-interop-runner/summary ]; then + exit 0 + fi echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment echo '' >> comment # Ignore all, but table, which starts with "|". 
- grep -E '^\|' quic-interop-runner/summary >> comment + grep -E '^\|' quic-interop-runner/summary |\ + awk '(!/^\| *:-/ || (d++ && d < 3))' |\ + sed -E -e 's/✓/:white_check_mark:/gi' -e 's/✕/:x:/gi' -e 's/\?/:grey_question:/gi' \ + >> comment echo '' >> comment + echo "EXPORT_COMMENT=1" >> "$GITHUB_ENV" shell: bash - name: Export PR comment data - if: always() + if: env.EXPORT_COMMENT == '1' uses: ./.github/actions/pr-comment-data-export with: name: qns diff --git a/qns/Dockerfile b/qns/Dockerfile index eed7d3f986..cdb192f203 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -1,7 +1,7 @@ FROM martenseemann/quic-network-simulator-endpoint:latest AS buildimage RUN apt-get update && apt-get install -y --no-install-recommends \ - curl git mercurial \ + curl git mercurial coreutils \ build-essential libclang-dev lld \ gyp ninja-build zlib1g-dev python \ && apt-get autoremove -y && apt-get clean -y \ @@ -30,7 +30,7 @@ ADD . /neqo RUN set -eux; \ cd /neqo; \ - RUSTFLAGS="-g -C link-arg=-fuse-ld=lld" cargo build --release \ + RUSTFLAGS="-C link-arg=-fuse-ld=lld" cargo build --release \ --bin neqo-client --bin neqo-server # Copy only binaries to the final image to keep it small. 
diff --git a/qns/interop.sh b/qns/interop.sh index 4baa6b7e8f..e216e49866 100755 --- a/qns/interop.sh +++ b/qns/interop.sh @@ -10,30 +10,27 @@ export PATH="${PATH}:/neqo/bin" [ -n "$QLOGDIR" ] case "$ROLE" in - client) - /wait-for-it.sh sim:57832 -s -t 30 - sleep 5 - neqo-client --help | head -n 1 - RUST_LOG=debug RUST_BACKTRACE=1 neqo-client --cc cubic --qns-test "$TESTCASE" \ - --qlog-dir "$QLOGDIR" --output-dir /downloads $REQUESTS - ;; +client) + /wait-for-it.sh sim:57832 -s -t 30 + RUST_LOG=debug RUST_BACKTRACE=1 neqo-client --cc cubic --qns-test "$TESTCASE" \ + --qlog-dir "$QLOGDIR" --output-dir /downloads $REQUESTS 2> >(tee -i -a "/logs/$ROLE.log" >&2) + ;; - server) - DB=/neqo/db - CERT=cert - P12CERT=$(mktemp) - mkdir -p "$DB" - certutil -N -d "sql:$DB" --empty-password - openssl pkcs12 -export -nodes -in /certs/cert.pem -inkey /certs/priv.key \ - -name "$CERT" -passout pass: -out "$P12CERT" - pk12util -d "sql:$DB" -i "$P12CERT" -W '' - certutil -L -d "sql:$DB" -n "$CERT" - neqo-server --help | head -n 1 - RUST_LOG=info RUST_BACKTRACE=1 neqo-server --cc cubic --qns-test "$TESTCASE" \ - --qlog-dir "$QLOGDIR" -d "$DB" -k "$CERT" [::]:443 - ;; +server) + DB=/neqo/db + CERT=cert + P12CERT=$(mktemp) + mkdir -p "$DB" + certutil -N -d "sql:$DB" --empty-password + openssl pkcs12 -export -nodes -in /certs/cert.pem -inkey /certs/priv.key \ + -name "$CERT" -passout pass: -out "$P12CERT" + pk12util -d "sql:$DB" -i "$P12CERT" -W '' + certutil -L -d "sql:$DB" -n "$CERT" + RUST_LOG=info RUST_BACKTRACE=1 neqo-server --cc cubic --qns-test "$TESTCASE" \ + --qlog-dir "$QLOGDIR" -d "$DB" -k "$CERT" '[::]:443' 2> >(tee -i -a "/logs/$ROLE.log" >&2) + ;; - *) - exit 1 - ;; +*) + exit 1 + ;; esac From 342e4e785e9edbe0ec4d4c6c6f51716a293e27a6 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Tue, 9 Apr 2024 07:46:47 +0200 Subject: [PATCH 306/321] Revert "perf(transport): remove Server::timers (#1784)" (#1800) This reverts commit 61fcd282c420da03a566cf3324b4dc86b04415e4. 
--- neqo-common/Cargo.toml | 4 + neqo-common/benches/timer.rs | 39 ++++ neqo-common/src/lib.rs | 1 + neqo-common/src/timer.rs | 420 +++++++++++++++++++++++++++++++++++ neqo-transport/src/server.rs | 87 ++++---- 5 files changed, 509 insertions(+), 42 deletions(-) create mode 100644 neqo-common/benches/timer.rs create mode 100644 neqo-common/src/timer.rs diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 0cb4bcbf4f..069d67b834 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -34,3 +34,7 @@ features = ["timeapi"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false + +[[bench]] +name = "timer" +harness = false diff --git a/neqo-common/benches/timer.rs b/neqo-common/benches/timer.rs new file mode 100644 index 0000000000..5ac8019db4 --- /dev/null +++ b/neqo-common/benches/timer.rs @@ -0,0 +1,39 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::time::{Duration, Instant}; + +use criterion::{criterion_group, criterion_main, Criterion}; +use neqo_common::timer::Timer; +use test_fixture::now; + +fn benchmark_timer(c: &mut Criterion) { + c.bench_function("drain a timer quickly", |b| { + b.iter_batched_ref( + make_timer, + |(_now, timer)| { + while let Some(t) = timer.next_time() { + assert!(timer.take_next(t).is_some()); + } + }, + criterion::BatchSize::SmallInput, + ); + }); +} + +fn make_timer() -> (Instant, Timer<()>) { + const TIMES: &[u64] = &[1, 2, 3, 5, 8, 13, 21, 34]; + + let now = now(); + let mut timer = Timer::new(now, Duration::from_millis(777), 100); + for &t in TIMES { + timer.add(now + Duration::from_secs(t), ()); + } + (now, timer) +} + +criterion_group!(benches, benchmark_timer); +criterion_main!(benches); diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index f3e8e63023..e988c6071d 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -14,6 +14,7 @@ pub mod hrtime; mod incrdecoder; pub mod log; pub mod qlog; +pub mod timer; pub mod tos; use std::fmt::Write; diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs new file mode 100644 index 0000000000..3feddb2226 --- /dev/null +++ b/neqo-common/src/timer.rs @@ -0,0 +1,420 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{ + collections::VecDeque, + mem, + time::{Duration, Instant}, +}; + +/// Internal structure for a timer item. +struct TimerItem { + time: Instant, + item: T, +} + +impl TimerItem { + fn time(ti: &Self) -> Instant { + ti.time + } +} + +/// A timer queue. +/// This uses a classic timer wheel arrangement, with some characteristics that might be considered +/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut +/// points). 
Time is relative, the wheel has an origin time and it is unable to represent times that +/// are more than `granularity * capacity` past that time. +pub struct Timer { + items: Vec>>, + now: Instant, + granularity: Duration, + cursor: usize, +} + +impl Timer { + /// Construct a new wheel at the given granularity, starting at the given time. + /// + /// # Panics + /// + /// When `capacity` is too large to fit in `u32` or `granularity` is zero. + pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self { + assert!(u32::try_from(capacity).is_ok()); + assert!(granularity.as_nanos() > 0); + let mut items = Vec::with_capacity(capacity); + items.resize_with(capacity, Default::default); + Self { + items, + now, + granularity, + cursor: 0, + } + } + + /// Return a reference to the time of the next entry. + #[must_use] + pub fn next_time(&self) -> Option { + let idx = self.bucket(0); + for i in idx..self.items.len() { + if let Some(t) = self.items[i].front() { + return Some(t.time); + } + } + for i in 0..idx { + if let Some(t) = self.items[i].front() { + return Some(t.time); + } + } + None + } + + /// Get the full span of time that this can cover. + /// Two timers cannot be more than this far apart. + /// In practice, this value is less by one amount of the timer granularity. + #[inline] + #[allow(clippy::cast_possible_truncation)] // guarded by assertion + #[must_use] + pub fn span(&self) -> Duration { + self.granularity * (self.items.len() as u32) + } + + /// For the given `time`, get the number of whole buckets in the future that is. + #[inline] + #[allow(clippy::cast_possible_truncation)] // guarded by assertion + fn delta(&self, time: Instant) -> usize { + // This really should use Duration::div_duration_f??(), but it can't yet. 
+ ((time - self.now).as_nanos() / self.granularity.as_nanos()) as usize + } + + #[inline] + fn time_bucket(&self, time: Instant) -> usize { + self.bucket(self.delta(time)) + } + + #[inline] + fn bucket(&self, delta: usize) -> usize { + debug_assert!(delta < self.items.len()); + (self.cursor + delta) % self.items.len() + } + + /// Slide forward in time by `n * self.granularity`. + #[allow(clippy::cast_possible_truncation, clippy::reversed_empty_ranges)] + // cast_possible_truncation is ok because we have an assertion guard. + // reversed_empty_ranges is to avoid different types on the if/else. + fn tick(&mut self, n: usize) { + let new = self.bucket(n); + let iter = if new < self.cursor { + (self.cursor..self.items.len()).chain(0..new) + } else { + (self.cursor..new).chain(0..0) + }; + for i in iter { + assert!(self.items[i].is_empty()); + } + self.now += self.granularity * (n as u32); + self.cursor = new; + } + + /// Asserts if the time given is in the past or too far in the future. + /// + /// # Panics + /// + /// When `time` is in the past relative to previous calls. + pub fn add(&mut self, time: Instant, item: T) { + assert!(time >= self.now); + // Skip forward quickly if there is too large a gap. + let short_span = self.span() - self.granularity; + if time >= (self.now + self.span() + short_span) { + // Assert that there aren't any items. + for i in &self.items { + debug_assert!(i.is_empty()); + } + self.now = time.checked_sub(short_span).unwrap(); + self.cursor = 0; + } + + // Adjust time forward the minimum amount necessary. + let mut d = self.delta(time); + if d >= self.items.len() { + self.tick(1 + d - self.items.len()); + d = self.items.len() - 1; + } + + let bucket = self.bucket(d); + let ins = match self.items[bucket].binary_search_by_key(&time, TimerItem::time) { + Ok(j) | Err(j) => j, + }; + self.items[bucket].insert(ins, TimerItem { time, item }); + } + + /// Given knowledge of the time an item was added, remove it. 
+ /// This requires use of a predicate that identifies matching items. + /// + /// # Panics + /// Impossible, I think. + pub fn remove(&mut self, time: Instant, mut selector: F) -> Option + where + F: FnMut(&T) -> bool, + { + if time < self.now { + return None; + } + if time > self.now + self.span() { + return None; + } + let bucket = self.time_bucket(time); + let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time) + else { + return None; + }; + // start_index is just one of potentially many items with the same time. + // Search backwards for a match, ... + for i in (0..=start_index).rev() { + if self.items[bucket][i].time != time { + break; + } + if selector(&self.items[bucket][i].item) { + return Some(self.items[bucket].remove(i).unwrap().item); + } + } + // ... then forwards. + for i in (start_index + 1)..self.items[bucket].len() { + if self.items[bucket][i].time != time { + break; + } + if selector(&self.items[bucket][i].item) { + return Some(self.items[bucket].remove(i).unwrap().item); + } + } + None + } + + /// Take the next item, unless there are no items with + /// a timeout in the past relative to `until`. + pub fn take_next(&mut self, until: Instant) -> Option { + fn maybe_take(v: &mut VecDeque>, until: Instant) -> Option { + if !v.is_empty() && v[0].time <= until { + Some(v.pop_front().unwrap().item) + } else { + None + } + } + + let idx = self.bucket(0); + for i in idx..self.items.len() { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; + } + } + for i in 0..idx { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; + } + } + None + } + + /// Create an iterator that takes all items until the given time. + /// Note: Items might be removed even if the iterator is not fully exhausted. 
+ pub fn take_until(&mut self, until: Instant) -> impl Iterator { + let get_item = move |x: TimerItem| x.item; + if until >= self.now + self.span() { + // Drain everything, so a clean sweep. + let mut empty_items = Vec::with_capacity(self.items.len()); + empty_items.resize_with(self.items.len(), VecDeque::default); + let mut items = mem::replace(&mut self.items, empty_items); + self.now = until; + self.cursor = 0; + + let tail = items.split_off(self.cursor); + return tail.into_iter().chain(items).flatten().map(get_item); + } + + // Only returning a partial span, so do it bucket at a time. + let delta = self.delta(until); + let mut buckets = Vec::with_capacity(delta + 1); + + // First, the whole buckets. + for i in 0..delta { + let idx = self.bucket(i); + buckets.push(mem::take(&mut self.items[idx])); + } + self.tick(delta); + + // Now we need to split the last bucket, because there might be + // some items with `item.time > until`. + let bucket = &mut self.items[self.cursor]; + let last_idx = match bucket.binary_search_by_key(&until, TimerItem::time) { + Ok(mut m) => { + // If there are multiple values, the search will hit any of them. + // Make sure to get them all. + while m < bucket.len() && bucket[m].time == until { + m += 1; + } + m + } + Err(ins) => ins, + }; + let tail = bucket.split_off(last_idx); + buckets.push(mem::replace(bucket, tail)); + // This tomfoolery with the empty vector ensures that + // the returned type here matches the one above precisely + // without having to invoke the `either` crate. 
+ buckets.into_iter().chain(vec![]).flatten().map(get_item) + } +} + +#[cfg(test)] +mod test { + use std::sync::OnceLock; + + use super::{Duration, Instant, Timer}; + + fn now() -> Instant { + static NOW: OnceLock = OnceLock::new(); + *NOW.get_or_init(Instant::now) + } + + const GRANULARITY: Duration = Duration::from_millis(10); + const CAPACITY: usize = 10; + #[test] + fn create() { + let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY); + assert_eq!(t.span(), Duration::from_millis(100)); + assert_eq!(None, t.next_time()); + } + + #[test] + fn immediate_entry() { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + t.add(now(), 12); + assert_eq!(now(), t.next_time().expect("should have an entry")); + let values: Vec<_> = t.take_until(now()).collect(); + assert_eq!(vec![12], values); + } + + #[test] + fn same_time() { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let v1 = 12; + let v2 = 13; + t.add(now(), v1); + t.add(now(), v2); + assert_eq!(now(), t.next_time().expect("should have an entry")); + let values: Vec<_> = t.take_until(now()).collect(); + assert!(values.contains(&v1)); + assert!(values.contains(&v2)); + } + + #[test] + fn add() { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let near_future = now() + Duration::from_millis(17); + let v = 9; + t.add(near_future, v); + assert_eq!(near_future, t.next_time().expect("should return a value")); + assert_eq!( + t.take_until(near_future.checked_sub(Duration::from_millis(1)).unwrap()) + .count(), + 0 + ); + assert!(t + .take_until(near_future + Duration::from_millis(1)) + .any(|x| x == v)); + } + + #[test] + fn add_future() { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); + let v = 9; + t.add(future, v); + assert_eq!(future, t.next_time().expect("should return a value")); + assert!(t.take_until(future).any(|x| x == v)); + } + + #[test] + fn add_far_future() { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let 
far_future = now() + Duration::from_millis(892); + let v = 9; + t.add(far_future, v); + assert_eq!(far_future, t.next_time().expect("should return a value")); + assert!(t.take_until(far_future).any(|x| x == v)); + } + + const TIMES: &[Duration] = &[ + Duration::from_millis(40), + Duration::from_millis(91), + Duration::from_millis(6), + Duration::from_millis(3), + Duration::from_millis(22), + Duration::from_millis(40), + ]; + + fn with_times() -> Timer { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + for (i, time) in TIMES.iter().enumerate() { + t.add(now() + *time, i); + } + assert_eq!( + now() + *TIMES.iter().min().unwrap(), + t.next_time().expect("should have a time") + ); + t + } + + #[test] + #[allow(clippy::needless_collect)] // false positive + fn multiple_values() { + let mut t = with_times(); + let values: Vec<_> = t.take_until(now() + *TIMES.iter().max().unwrap()).collect(); + for i in 0..TIMES.len() { + assert!(values.contains(&i)); + } + } + + #[test] + #[allow(clippy::needless_collect)] // false positive + fn take_far_future() { + let mut t = with_times(); + let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect(); + for i in 0..TIMES.len() { + assert!(values.contains(&i)); + } + } + + #[test] + fn remove_each() { + let mut t = with_times(); + for (i, time) in TIMES.iter().enumerate() { + assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i)); + } + assert_eq!(None, t.next_time()); + } + + #[test] + fn remove_future() { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); + let v = 9; + t.add(future, v); + + assert_eq!(Some(v), t.remove(future, |candidate| *candidate == v)); + } + + #[test] + fn remove_too_far_future() { + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); + let too_far_future = now() + t.span() + Duration::from_millis(117); + let v = 9; + t.add(future, v); + + assert_eq!(None, 
t.remove(too_far_future, |candidate| *candidate == v)); + } +} diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 7d3d144a09..96a6244ef1 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -15,12 +15,12 @@ use std::{ ops::{Deref, DerefMut}, path::PathBuf, rc::{Rc, Weak}, - time::Instant, + time::{Duration, Instant}, }; use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, - Datagram, Decoder, Role, + timer::Timer, Datagram, Decoder, Role, }; use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, @@ -46,6 +46,13 @@ pub enum InitialResult { /// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; +/// The size of timer buckets. This is higher than the actual timer granularity +/// as this depends on there being some distribution of events. +const TIMER_GRANULARITY: Duration = Duration::from_millis(4); +/// The number of buckets in the timer. As mentioned in the definition of `Timer`, +/// the granularity and capacity need to multiply to be larger than the largest +/// delay that might be used. That's the idle timeout (currently 30s). 
+const TIMER_CAPACITY: usize = 16384; type StateRef = Rc>; type ConnectionTableRef = Rc>>; @@ -54,21 +61,7 @@ type ConnectionTableRef = Rc>>; pub struct ServerConnectionState { c: Connection, active_attempt: Option, - wake_at: Option, -} - -impl ServerConnectionState { - fn set_wake_at(&mut self, at: Instant) { - self.wake_at = Some(at); - } - - fn needs_waking(&self, now: Instant) -> bool { - self.wake_at.map_or(false, |t| t <= now) - } - - fn woken(&mut self) { - self.wake_at = None; - } + last_timer: Instant, } impl Deref for ServerConnectionState { @@ -181,8 +174,8 @@ pub struct Server { active: HashSet, /// The set of connections that need immediate processing. waiting: VecDeque, - /// The latest [`Output::Callback`] returned from [`Server::process`]. - wake_at: Option, + /// Outstanding timers for connections. + timers: Timer, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc>, /// Directory to create qlog traces in @@ -226,10 +219,10 @@ impl Server { connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), + timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, - wake_at: None, }) } @@ -267,6 +260,11 @@ impl Server { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } + fn remove_timer(&mut self, c: &StateRef) { + let last = c.borrow().last_timer; + self.timers.remove(last, |t| Rc::ptr_eq(t, c)); + } + fn process_connection( &mut self, c: &StateRef, @@ -282,12 +280,16 @@ impl Server { } Output::Callback(delay) => { let next = now + delay; - c.borrow_mut().set_wake_at(next); - if self.wake_at.map_or(true, |c| c > next) { - self.wake_at = Some(next); + if next != c.borrow().last_timer { + qtrace!([self], "Change timer to {:?}", next); + self.remove_timer(c); + c.borrow_mut().last_timer = next; + self.timers.add(next, Rc::clone(c)); } } - Output::None => {} + Output::None => { + 
self.remove_timer(c); + } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); @@ -505,7 +507,7 @@ impl Server { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, - wake_at: None, + last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(&c); @@ -644,28 +646,24 @@ impl Server { return Some(d); } } - - qtrace!([self], "No packet to send still, check wake up times"); - loop { - let connection = self - .connections - .borrow() - .values() - .find(|c| c.borrow().needs_waking(now)) - .cloned()?; - let datagram = self.process_connection(&connection, None, now); - connection.borrow_mut().woken(); - if datagram.is_some() { - return datagram; + qtrace!([self], "No packet to send still, run timers"); + while let Some(c) = self.timers.take_next(now) { + if let Some(d) = self.process_connection(&c, None, now) { + return Some(d); } } + None } - pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { - if self.wake_at.map_or(false, |c| c <= now) { - self.wake_at = None; + fn next_time(&mut self, now: Instant) -> Option { + if self.waiting.is_empty() { + self.timers.next_time().map(|x| x - now) + } else { + Some(Duration::new(0, 0)) } + } + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { dgram .and_then(|d| self.process_input(d, now)) .or_else(|| self.process_next_output(now)) @@ -673,7 +671,12 @@ impl Server { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) }) - .or_else(|| self.wake_at.take().map(|c| Output::Callback(c - now))) + .or_else(|| { + self.next_time(now).map(|delay| { + qtrace!([self], "Wait: {:?}", delay); + Output::Callback(delay) + }) + }) .unwrap_or_else(|| { qtrace!([self], "Go dormant"); Output::None From 5f9c3e7096db0e33c20ab92bea25d2715cbbbb86 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 9 Apr 2024 07:47:49 +0200 Subject: [PATCH 307/321] 
refactor(client): use process_output and process_multiple_input (#1794) * refactor(client): use process_output and process_multiple_input `neqo_transport::Connection` offers 4 process methods: - `process` - `process_output` - `process_input` - `process_multiple_input` Where `process` is a wrapper around `process_input` and `process_output` calling both in sequence. https://github.com/mozilla/neqo/blob/5dfe106669ccb695187511305c21b8e8a8775e91/neqo-transport/src/connection/mod.rs#L1099-L1107 Where `process_input` delegates to `process_multiple_input`. https://github.com/mozilla/neqo/blob/5dfe106669ccb695187511305c21b8e8a8775e91/neqo-transport/src/connection/mod.rs#L979-L1000 Previously `neqo-client` would use `process` only. Thus continuously interleaving output and input. Say `neqo-client` would have multiple datagrams buffered through a GRO read, it could potentially have to do a write in between each `process` calls, as each call to `process` with an input datagram might return an output datagram to be written. With this commit `neqo-client` uses `process_output` and `process_multiple_input` directly, thus reducing interleaving on batch reads (GRO and in the future recvmmsg) and in the future batch writes (GSO and sendmmsg). By using `process_multiple_input` instead of `process` or `process_input`, auxiliarry logic, like `self.cleanup_closed_streams` only has to run per input datagram batch, and not for each input datagram. Extracted from https://github.com/mozilla/neqo/pull/1741. 
* process_output before handle * process_ouput after each input batch --- neqo-bin/src/client/http09.rs | 11 ++++- neqo-bin/src/client/http3.rs | 11 ++++- neqo-bin/src/client/mod.rs | 61 ++++++++++++++++------------ neqo-http3/src/connection_client.rs | 7 ++-- neqo-transport/src/connection/mod.rs | 5 +-- 5 files changed, 58 insertions(+), 37 deletions(-) diff --git a/neqo-bin/src/client/http09.rs b/neqo-bin/src/client/http09.rs index b157a6a13f..e0b254f67b 100644 --- a/neqo-bin/src/client/http09.rs +++ b/neqo-bin/src/client/http09.rs @@ -138,8 +138,15 @@ pub(crate) fn create_client( } impl super::Client for Connection { - fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { - self.process(dgram, now) + fn process_output(&mut self, now: Instant) -> Output { + self.process_output(now) + } + + fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + { + self.process_multiple_input(dgrams, now); } fn close(&mut self, now: Instant, app_error: neqo_transport::AppError, msg: S) diff --git a/neqo-bin/src/client/http3.rs b/neqo-bin/src/client/http3.rs index d56af5eda9..09a30461bf 100644 --- a/neqo-bin/src/client/http3.rs +++ b/neqo-bin/src/client/http3.rs @@ -119,8 +119,15 @@ impl super::Client for Http3Client { None } - fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { - self.process(dgram, now) + fn process_output(&mut self, now: Instant) -> Output { + self.process_output(now) + } + + fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + { + self.process_multiple_input(dgrams, now); } fn close(&mut self, now: Instant, app_error: AppError, msg: S) diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index 49c116aa95..791e2a6366 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -377,7 +377,10 @@ trait Handler { /// Network client, e.g. [`neqo_transport::Connection`] or [`neqo_http3::Http3Client`]. 
trait Client { - fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output; + fn process_output(&mut self, now: Instant) -> Output; + fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator; fn close(&mut self, now: Instant, app_error: AppError, msg: S) where S: AsRef + Display; @@ -404,21 +407,21 @@ impl<'a, H: Handler> Runner<'a, H> { let handler_done = self.handler.handle(&mut self.client)?; match (handler_done, self.args.resume, self.handler.has_token()) { - // Handler isn't done. Continue. - (false, _, _) => {}, - // Handler done. Resumption token needed but not present. Continue. - (true, true, false) => { - qdebug!("Handler done. Waiting for resumption token."); - } - // Handler is done, no resumption token needed. Close. - (true, false, _) | - // Handler is done, resumption token needed and present. Close. - (true, true, true) => { - self.client.close(Instant::now(), 0, "kthxbye!"); - } + // Handler isn't done. Continue. + (false, _, _) => {}, + // Handler done. Resumption token needed but not present. Continue. + (true, true, false) => { + qdebug!("Handler done. Waiting for resumption token."); + } + // Handler is done, no resumption token needed. Close. + (true, false, _) | + // Handler is done, resumption token needed and present. Close. + (true, true, true) => { + self.client.close(Instant::now(), 0, "kthxbye!"); } + } - self.process(None).await?; + self.process_output().await?; if let Some(reason) = self.client.is_closed() { if self.args.stats { @@ -432,16 +435,7 @@ impl<'a, H: Handler> Runner<'a, H> { } match ready(self.socket, self.timeout.as_mut()).await? 
{ - Ready::Socket => loop { - let dgrams = self.socket.recv(&self.local_addr)?; - if dgrams.is_empty() { - break; - } - for dgram in &dgrams { - self.process(Some(dgram)).await?; - } - self.handler.maybe_key_update(&mut self.client)?; - }, + Ready::Socket => self.process_multiple_input().await?, Ready::Timeout => { self.timeout = None; } @@ -449,9 +443,9 @@ impl<'a, H: Handler> Runner<'a, H> { } } - async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { + async fn process_output(&mut self) -> Result<(), io::Error> { loop { - match self.client.process(dgram.take(), Instant::now()) { + match self.client.process_output(Instant::now()) { Output::Datagram(dgram) => { self.socket.writable().await?; self.socket.send(dgram)?; @@ -470,6 +464,21 @@ impl<'a, H: Handler> Runner<'a, H> { Ok(()) } + + async fn process_multiple_input(&mut self) -> Res<()> { + loop { + let dgrams = self.socket.recv(&self.local_addr)?; + if dgrams.is_empty() { + break; + } + self.client + .process_multiple_input(dgrams.iter(), Instant::now()); + self.process_output().await?; + self.handler.maybe_key_update(&mut self.client)?; + } + + Ok(()) + } } fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index be20126353..4c8772d14a 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -880,11 +880,10 @@ impl Http3Client { pub fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) where I: IntoIterator, - I::IntoIter: ExactSizeIterator, { - let dgrams = dgrams.into_iter(); - qtrace!([self], "Process multiple datagrams, len={}", dgrams.len()); - if dgrams.len() == 0 { + let mut dgrams = dgrams.into_iter().peekable(); + qtrace!([self], "Process multiple datagrams"); + if dgrams.peek().is_none() { return; } self.conn.process_multiple_input(dgrams, now); diff --git a/neqo-transport/src/connection/mod.rs 
b/neqo-transport/src/connection/mod.rs index 9cddcdac28..e471c29c25 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -985,10 +985,9 @@ impl Connection { pub fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) where I: IntoIterator, - I::IntoIter: ExactSizeIterator, { - let dgrams = dgrams.into_iter(); - if dgrams.len() == 0 { + let mut dgrams = dgrams.into_iter().peekable(); + if dgrams.peek().is_none() { return; } From 7feb7cb19a95c83ec6531bcd7721679076ddfb17 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 9 Apr 2024 07:49:35 +0200 Subject: [PATCH 308/321] fix(http3): only add to stream_has_pending_data on has_data_to_send (#1793) * fix(http3): only add to stream_has_pending_data on has_data_to_send `::send_data` attempts to send a slice of data down into the QUIC layer, more specifically `neqo_transport::Connection::stream_send_atomic`. While it attempts to send any existing buffered data at the http3 layer first, it does not itself fill the http3 layer buffer, but instead only sends data, if the lower QUIC layer has capacity, i.e. only if it can send the data down to the QUIC layer right away. https://github.com/mozilla/neqo/blob/5dfe106669ccb695187511305c21b8e8a8775e91/neqo-http3/src/send_message.rs#L168-L221 `::send_data` is called via `Http3ServerHandler::send_data`. The wrapper first marks the stream as `stream_has_pending_data`, marks itself as `needs_processing` and then calls down into `::send_data`. https://github.com/mozilla/neqo/blob/5dfe106669ccb695187511305c21b8e8a8775e91/neqo-http3/src/connection_server.rs#L51-L74 Thus the latter always marks the former as `stream_has_pending_data` even though the former never writes into the buffer and thus might actually not have pending data. Why is this problematic? 1. Say that the buffer of the `BufferedStream` of `SendMessage` is empty. 2. Say that the user attempts to write data via `Http3ServerHandler::send_data`. 
Despite not filling the http3 layer buffer, the stream is marked as `stream_has_pending_data`. 3. Say that next the user calls `Http3Server::process`, which will call `Http3Server::process_http3`, which will call `Http3ServerHandler::process_http3`, which will call `Http3Connection::process_sending`, which will call `Http3Connection::send_non_control_streams`. `Http3Connection::send_non_control_streams` will attempt to flush all http3 layer buffers of streams marked via `stream_has_pending_data`, e.g. the stream from step (2). Thus it will call `::send` (note `send` and not the previous `send_data`). This function will attempt the stream's http3 layer buffer. In the case where the http3 layer stream buffer is empty, it will enqueue a `DataWritable` event for the user. Given that the buffer of our stream is empty (see (1)) such `DataWritable` event is always emitted. https://github.com/mozilla/neqo/blob/5dfe106669ccb695187511305c21b8e8a8775e91/neqo-http3/src/send_message.rs#L236-L264 The user, on receiving the `DataWritable` event will attempt to write to it via `Http3ServerHandler::send_data`, back to step (2), thus closing the busy loop. How to break the loop? This commit adds an additional check to the `stream_has_pending_data` function to ensure it indeed does have pending data. This breaks the above busy loop. In addition, it renames the function to `stream_might_have_pending_data`. * Address review * Revert comment but keep links --- neqo-http3/src/connection.rs | 4 ++-- neqo-http3/src/connection_server.rs | 13 ++++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index cfa78df787..dd45797baa 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -386,8 +386,8 @@ impl Http3Connection { Ok(()) } - /// Inform a `HttpConnection` that a stream has data to send and that `send` should be called - /// for the stream. 
+ /// Inform an [`Http3Connection`] that a stream has data to send and that + /// [`SendStream::send`] should be called for the stream. pub fn stream_has_pending_data(&mut self, stream_id: StreamId) { self.streams_with_pending_data.insert(stream_id); } diff --git a/neqo-http3/src/connection_server.rs b/neqo-http3/src/connection_server.rs index dcf759f177..cc887a26fc 100644 --- a/neqo-http3/src/connection_server.rs +++ b/neqo-http3/src/connection_server.rs @@ -64,13 +64,17 @@ impl Http3ServerHandler { data: &[u8], conn: &mut Connection, ) -> Res { - self.base_handler.stream_has_pending_data(stream_id); - self.needs_processing = true; - self.base_handler + let n = self + .base_handler .send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)? - .send_data(conn, data) + .send_data(conn, data)?; + if n > 0 { + self.base_handler.stream_has_pending_data(stream_id); + } + self.needs_processing = true; + Ok(n) } /// Supply response heeaders for a request. @@ -100,7 +104,6 @@ impl Http3ServerHandler { pub fn stream_close_send(&mut self, stream_id: StreamId, conn: &mut Connection) -> Res<()> { qdebug!([self], "Close sending side stream={}.", stream_id); self.base_handler.stream_close_send(conn, stream_id)?; - self.base_handler.stream_has_pending_data(stream_id); self.needs_processing = true; Ok(()) } From 3f3c5f6f66d1530c3d7e26aa3bc1b5f0618ef016 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 9 Apr 2024 11:26:51 +0300 Subject: [PATCH 309/321] test: Add my `test.sh` script (#1802) Because I keep blowing it away when I `git clean`... --- test/test.sh | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100755 test/test.sh diff --git a/test/test.sh b/test/test.sh new file mode 100755 index 0000000000..8d4cccd3d8 --- /dev/null +++ b/test/test.sh @@ -0,0 +1,41 @@ +#! /usr/bin/env bash + +# This script builds the client and server binaries and runs them in a tmux +# session side-by-side. 
The client connects to the server and the server +# responds with a simple HTTP response. The client and server are run with +# verbose logging and the qlog output is stored in a temporary directory. The +# script also runs tcpdump to capture the packets exchanged between the client +# and server. The script uses tmux to create a split terminal window to display +# the qlog output and the packet capture. + +set -e + +cargo build --bin neqo-client --bin neqo-server + +tmp=$(mktemp -d) +addr=127.0.0.1 +port=4433 +path=/20000 +flags="--verbose --qlog-dir $tmp --use-old-http --alpn hq-interop --quic-version 1" +if [ "$(uname -s)" != "Linux" ]; then + iface=lo0 +else + iface=lo +fi + +client="./target/debug/neqo-client $flags --output-dir $tmp --stats https://$addr:$port$path" +server="SSLKEYLOGFILE=$tmp/test.tlskey ./target/debug/neqo-server $flags $addr:$port" + +tcpdump -U -i "$iface" -w "$tmp/test.pcap" host $addr and port $port >/dev/null 2>&1 & +tcpdump_pid=$! + +tmux -CC \ + set-option -g default-shell "$(which bash)" \; \ + new-session "$client && kill -USR2 $tcpdump_pid && touch $tmp/done" \; \ + split-window -h "$server" \; \ + split-window -v -f "\ + until [ -e $tmp/done ]; do sleep 1; done && \ + tshark -r $tmp/test.pcap -o tls.keylog_file:$tmp/test.tlskey" \; \ + set remain-on-exit on + +# rm -rf "$tmp" From 166b84c5a3307d678f38d9994af9b56b68c6b695 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Tue, 9 Apr 2024 10:49:40 +0200 Subject: [PATCH 310/321] neqo v0.7.4 (#1804) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 48f3b7d74b..1a7fc1eeb2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ resolver = "2" homepage = "https://github.com/mozilla/neqo/" repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] -version = "0.7.3" +version = "0.7.4" # Keep in sync with `.rustfmt.toml` `edition`. 
edition = "2021" license = "MIT OR Apache-2.0" From 7ff76c7c875daf4fb1a0df25a3499d12f55da5b8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 9 Apr 2024 14:03:47 +0300 Subject: [PATCH 311/321] fix: Correctly track sent `CONNECTION_CLOSE` frames (#1805) * fix: Correctly track sent `CONNECTION_CLOSE` frames And as a side effect, remove `output_close()`. * Add comment * Address code review * Move check --- neqo-transport/src/connection/mod.rs | 97 +++++++++++----------------- 1 file changed, 36 insertions(+), 61 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index e471c29c25..8522507a69 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1826,7 +1826,7 @@ impl Connection { | State::Connected | State::Confirmed => { if let Some(path) = self.paths.select_path() { - let res = self.output_path(&path, now); + let res = self.output_path(&path, now, &None); self.capture_error(Some(path), now, 0, res) } else { Ok(SendOption::default()) @@ -1835,7 +1835,16 @@ impl Connection { State::Closing { .. } | State::Draining { .. } | State::Closed(_) => { if let Some(details) = self.state_signaling.close_frame() { let path = Rc::clone(details.path()); - let res = self.output_close(&details); + // In some error cases, we will not be able to make a new, permanent path. + // For example, if we run out of connection IDs and the error results from + // a packet on a new path, we avoid sending (and the privacy risk) rather + // than reuse a connection ID. 
+ let res = if path.borrow().is_temporary() { + assert!(!cfg!(test), "attempting to close with a temporary path"); + Err(Error::InternalError) + } else { + self.output_path(&path, now, &Some(details)) + }; self.capture_error(Some(path), now, 0, res) } else { Ok(SendOption::default()) @@ -1912,62 +1921,6 @@ impl Connection { } } - fn output_close(&mut self, close: &ClosingFrame) -> Res { - let mut encoder = Encoder::with_capacity(256); - let grease_quic_bit = self.can_grease_quic_bit(); - let version = self.version(); - for space in PacketNumberSpace::iter() { - let Some((cspace, tx)) = self.crypto.states.select_tx_mut(self.version, *space) else { - continue; - }; - - let path = close.path().borrow(); - // In some error cases, we will not be able to make a new, permanent path. - // For example, if we run out of connection IDs and the error results from - // a packet on a new path, we avoid sending (and the privacy risk) rather - // than reuse a connection ID. - if path.is_temporary() { - assert!(!cfg!(test), "attempting to close with a temporary path"); - return Err(Error::InternalError); - } - let (_, mut builder) = Self::build_packet_header( - &path, - cspace, - encoder, - tx, - &AddressValidationInfo::None, - version, - grease_quic_bit, - ); - _ = Self::add_packet_number( - &mut builder, - tx, - self.loss_recovery.largest_acknowledged_pn(*space), - ); - // The builder will set the limit to 0 if there isn't enough space for the header. - if builder.is_full() { - encoder = builder.abort(); - break; - } - builder.set_limit(min(path.amplification_limit(), path.mtu()) - tx.expansion()); - debug_assert!(builder.limit() <= 2048); - - // ConnectionError::Application is only allowed at 1RTT. 
- let sanitized = if *space == PacketNumberSpace::ApplicationData { - None - } else { - close.sanitize() - }; - sanitized - .as_ref() - .unwrap_or(close) - .write_frame(&mut builder); - encoder = builder.build(tx)?; - } - - Ok(SendOption::Yes(close.path().borrow().datagram(encoder))) - } - /// Write the frames that are exchanged in the application data space. /// The order of calls here determines the relative priority of frames. fn write_appdata_frames( @@ -2188,7 +2141,12 @@ impl Connection { /// Build a datagram, possibly from multiple packets (for different PN /// spaces) and each containing 1+ frames. #[allow(clippy::too_many_lines)] // Yeah, that's just the way it is. - fn output_path(&mut self, path: &PathRef, now: Instant) -> Res { + fn output_path( + &mut self, + path: &PathRef, + now: Instant, + closing_frame: &Option, + ) -> Res { let mut initial_sent = None; let mut needs_padding = false; let grease_quic_bit = self.can_grease_quic_bit(); @@ -2241,8 +2199,23 @@ impl Connection { // Add frames to the packet. let payload_start = builder.len(); - let (tokens, ack_eliciting, padded) = - self.write_frames(path, *space, &profile, &mut builder, now); + let (mut tokens, mut ack_eliciting, mut padded) = (Vec::new(), false, false); + if let Some(ref close) = closing_frame { + // ConnectionError::Application is only allowed at 1RTT. + let sanitized = if *space == PacketNumberSpace::ApplicationData { + None + } else { + close.sanitize() + }; + sanitized + .as_ref() + .unwrap_or(close) + .write_frame(&mut builder); + self.stats.borrow_mut().frame_tx.connection_close += 1; + } else { + (tokens, ack_eliciting, padded) = + self.write_frames(path, *space, &profile, &mut builder, now); + } if builder.packet_empty() { // Nothing to include in this packet. 
encoder = builder.abort(); @@ -2323,6 +2296,8 @@ impl Connection { mtu ); initial.size += mtu - packets.len(); + // These zeros aren't padding frames, they are an invalid all-zero coalesced + // packet, which is why we don't increase `frame_tx.padding` count here. packets.resize(mtu, 0); } self.loss_recovery.on_packet_sent(path, initial); From c6c60ba5cc3f1f536fee6ceed647396bea1da7cc Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 9 Apr 2024 16:34:25 +0300 Subject: [PATCH 312/321] ci: Delete QNS PR comment on successful run (#1807) * ci: Always add or update the QNS PR comment Because we only add one when there is a failure, and when things then later get fixed we don't update it again (because we skip the update on success). So the outdated, failed state keeps being shown. * Delete comment on success * Update .github/actions/pr-comment/action.yml Co-authored-by: Max Inden Signed-off-by: Lars Eggert * Address code review --------- Signed-off-by: Lars Eggert Co-authored-by: Max Inden --- .github/actions/pr-comment/action.yml | 4 ++++ .github/actions/quic-interop-runner/action.yml | 7 ++----- .github/workflows/qns-comment.yml | 4 ++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/actions/pr-comment/action.yml b/.github/actions/pr-comment/action.yml index b7f9bb12da..1e84aa5bb4 100644 --- a/.github/actions/pr-comment/action.yml +++ b/.github/actions/pr-comment/action.yml @@ -5,6 +5,9 @@ inputs: name: description: 'Artifact name to import comment data from.' required: true + mode: + description: 'Mode of operation (upsert/recreate/delete).' 
+ default: 'upsert' token: description: 'A Github PAT' required: true @@ -29,5 +32,6 @@ runs: - uses: thollander/actions-comment-pull-request@v2 with: filePath: contents + mode: ${{ inputs.mode }} pr_number: ${{ steps.pr-number.outputs.number }} comment_tag: ${{ inputs.name }}-comment diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index 7fd055f184..cdc617d275 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -91,12 +91,9 @@ runs: - name: Format GitHub comment if: always() run: | - if [ -s quic-interop-runner/summary ]; then - exit 0 - fi echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment echo '' >> comment - # Ignore all, but table, which starts with "|". + # Ignore all, but table, which starts with "|". Also reformat it to GitHub Markdown. grep -E '^\|' quic-interop-runner/summary |\ awk '(!/^\| *:-/ || (d++ && d < 3))' |\ sed -E -e 's/✓/:white_check_mark:/gi' -e 's/✕/:x:/gi' -e 's/\?/:grey_question:/gi' \ @@ -106,7 +103,7 @@ runs: shell: bash - name: Export PR comment data - if: env.EXPORT_COMMENT == '1' + if: always() uses: ./.github/actions/pr-comment-data-export with: name: qns diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml index 71cbcc805b..db9f74f7bf 100644 --- a/.github/workflows/qns-comment.yml +++ b/.github/workflows/qns-comment.yml @@ -18,11 +18,11 @@ jobs: pull-requests: write runs-on: ubuntu-latest if: | - github.event.workflow_run.event == 'pull_request' && - github.event.workflow_run.conclusion == 'failure' + github.event.workflow_run.event == 'pull_request' steps: - uses: actions/checkout@v4 - uses: ./.github/actions/pr-comment with: name: qns + mode: ${{ github.event.workflow_run.conclusion == 'success' && 'delete' || 'upsert' }} token: ${{ secrets.GITHUB_TOKEN }} From 98f7b26c658a9af2eec5a90aae25fa62237a7e3b Mon Sep 17 00:00:00 2001 From: 
Lars Eggert Date: Tue, 9 Apr 2024 16:37:11 +0300 Subject: [PATCH 313/321] ci: Disable `ossf/scorecard-action` until it's allowlisted (#1809) --- .github/workflows/scorecard.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 2c0b04d09e..34bad4d875 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -6,13 +6,13 @@ name: Scorecard supply-chain security on: # For Branch-Protection check. Only the default branch is supported. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection - branch_protection_rule: + # branch_protection_rule: # To guarantee Maintained check is occasionally updated. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained - schedule: - - cron: '26 8 * * 6' - push: - branches: [ "main" ] + # schedule: + # - cron: '26 8 * * 6' + # push: + # branches: [ "main" ] # Declare default permissions as read only. permissions: read-all From 087e095c6d2fb5e6f52bae4bc4dd9c91beff3655 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 9 Apr 2024 16:46:20 +0300 Subject: [PATCH 314/321] `on:` can't be empty Signed-off-by: Lars Eggert --- .github/workflows/scorecard.yml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 34bad4d875..651a30be01 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -4,15 +4,16 @@ name: Scorecard supply-chain security on: - # For Branch-Protection check. Only the default branch is supported. See - # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection - # branch_protection_rule: - # To guarantee Maintained check is occasionally updated. 
See - # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained - # schedule: - # - cron: '26 8 * * 6' - # push: - # branches: [ "main" ] + workflow_dispatch: +# # For Branch-Protection check. Only the default branch is supported. See +# # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection +# branch_protection_rule: +# # To guarantee Maintained check is occasionally updated. See +# # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained +# schedule: +# - cron: '26 8 * * 6' +# push: +# branches: [ "main" ] # Declare default permissions as read only. permissions: read-all From 1cc44f0920d53357aa528a7da6ed1ae1aae281f4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 9 Apr 2024 15:33:01 +0200 Subject: [PATCH 315/321] fix(server): log error when failing to read file (#1788) * fix(server): log error when failing to read file As part of the QUIC Interop testcases, a server needs to be able to respond to a request with the content of a local file. Previously, when failing to read a file, the server would simply reply with the predefined `SELF::MESSAGE`. Now the server logs the error and moves on. 
* Send 404 --- neqo-bin/src/server/mod.rs | 75 +++++++++++++------------------- neqo-bin/src/server/old_https.rs | 33 ++++++++------ 2 files changed, 50 insertions(+), 58 deletions(-) diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs index e3067ecdf0..3490b3e9b3 100644 --- a/neqo-bin/src/server/mod.rs +++ b/neqo-bin/src/server/mod.rs @@ -10,8 +10,7 @@ use std::{ cmp::min, collections::HashMap, fmt::{self, Display}, - fs::OpenOptions, - io::{self, Read}, + fs, io, net::{SocketAddr, ToSocketAddrs}, path::PathBuf, pin::Pin, @@ -188,28 +187,11 @@ impl Args { } } -fn qns_read_response(filename: &str) -> Option> { - let mut file_path = PathBuf::from("/www"); - file_path.push(filename.trim_matches(|p| p == '/')); - - OpenOptions::new() - .read(true) - .open(&file_path) - .map_err(|_e| qerror!("Could not open {}", file_path.display())) - .ok() - .and_then(|mut f| { - let mut data = Vec::new(); - match f.read_to_end(&mut data) { - Ok(sz) => { - qinfo!("{} bytes read from {}", sz, file_path.display()); - Some(data) - } - Err(e) => { - qerror!("Error reading data: {e:?}"); - None - } - } - }) +fn qns_read_response(filename: &str) -> Result, io::Error> { + let path: PathBuf = ["/www", filename.trim_matches(|p| p == '/')] + .iter() + .collect(); + fs::read(path) } trait HttpServer: Display { @@ -344,27 +326,32 @@ impl HttpServer for SimpleServer { continue; } - let mut response = - if let Some(path) = headers.iter().find(|&h| h.name() == ":path") { - if args.shared.qns_test.is_some() { - if let Some(data) = qns_read_response(path.value()) { - ResponseData::from(data) - } else { - ResponseData::from(Self::MESSAGE) - } - } else if let Ok(count) = - path.value().trim_matches(|p| p == '/').parse::() - { - ResponseData::repeat(Self::MESSAGE, count) - } else { - ResponseData::from(Self::MESSAGE) + let Some(path) = headers.iter().find(|&h| h.name() == ":path") else { + stream + .cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code()) + .unwrap(); + continue; + 
}; + + let mut response = if args.shared.qns_test.is_some() { + match qns_read_response(path.value()) { + Ok(data) => ResponseData::from(data), + Err(e) => { + qerror!("Failed to read {}: {e}", path.value()); + stream + .send_headers(&[Header::new(":status", "404")]) + .unwrap(); + stream.stream_close_send().unwrap(); + continue; } - } else { - stream - .cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code()) - .unwrap(); - continue; - }; + } + } else if let Ok(count) = + path.value().trim_matches(|p| p == '/').parse::() + { + ResponseData::repeat(Self::MESSAGE, count) + } else { + ResponseData::from(Self::MESSAGE) + }; stream .send_headers(&[ diff --git a/neqo-bin/src/server/old_https.rs b/neqo-bin/src/server/old_https.rs index 505a16578f..38f3fdc3a7 100644 --- a/neqo-bin/src/server/old_https.rs +++ b/neqo-bin/src/server/old_https.rs @@ -8,7 +8,7 @@ use std::{ cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, }; -use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram}; +use neqo_common::{event::Provider, hex, qdebug, qerror, qinfo, qwarn, Datagram}; use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay, Cipher}; use neqo_http3::Error; use neqo_transport::{ @@ -142,20 +142,25 @@ impl Http09Server { Regex::new(r"GET +/(\d+)(?:\r)?\n").unwrap() }; let m = re.captures(msg); - let resp = match m.and_then(|m| m.get(1)) { - None => { - self.save_partial(stream_id, buf, conn); - return; - } - Some(path) => { - let path = path.as_str(); - qdebug!("Path = '{path}'"); - if args.shared.qns_test.is_some() { - qns_read_response(path) - } else { - let count = path.parse().unwrap(); - Some(vec![b'a'; count]) + let Some(path) = m.and_then(|m| m.get(1)) else { + self.save_partial(stream_id, buf, conn); + return; + }; + + let resp = { + let path = path.as_str(); + qdebug!("Path = '{path}'"); + if args.shared.qns_test.is_some() { + match qns_read_response(path) { + Ok(data) => Some(data), + Err(e) => { + 
qerror!("Failed to read {path}: {e}"); + Some(b"404".to_vec()) + } } + } else { + let count = path.parse().unwrap(); + Some(vec![b'a'; count]) } }; self.write(stream_id, resp, conn); From 329af2fdc07a153d1335e08f748b1560ce25dc19 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 9 Apr 2024 17:08:06 +0300 Subject: [PATCH 316/321] test: Clean up tmp directory on script exit (#1810) --- test/test.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/test.sh b/test/test.sh index 8d4cccd3d8..dc02b2161c 100755 --- a/test/test.sh +++ b/test/test.sh @@ -9,10 +9,11 @@ # the qlog output and the packet capture. set -e +tmp=$(mktemp -d) +trap 'rm -rf "$tmp"' EXIT cargo build --bin neqo-client --bin neqo-server -tmp=$(mktemp -d) addr=127.0.0.1 port=4433 path=/20000 @@ -37,5 +38,3 @@ tmux -CC \ until [ -e $tmp/done ]; do sleep 1; done && \ tshark -r $tmp/test.pcap -o tls.keylog_file:$tmp/test.tlskey" \; \ set remain-on-exit on - -# rm -rf "$tmp" From c44e53647d922359ef859d18d86dd9eb7fe4891c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 10 Apr 2024 07:46:42 +0200 Subject: [PATCH 317/321] refactor(client): simplify keyupdate testcase implementation (#1808) The QUIC Interop Runner `keyupdate` testcase has a client establish a connection to the server and then trigger a key update. https://github.com/quic-interop/quic-interop-runner/blob/2a2534a1284d50d99ff92884d4f1ecf98fb41e4c/testcases.py#L889 This testcase always uses the `http09` client and server implementation. This commit simplifies the testcase implementation: - Given that it is only used with `http09`, move it to `http09.rs`. - Reduce the `KeyUpdateState` `struct` to a single `bool`. - Mark the `--key-update` command line argument as hidden, given that it is only set indirectly through the `-t keyupdate` flag. - Try to run `client.initiate_key_update` on events only, not on ever new received datagram. In addition it enables the `keyupdate` test on the Neqo `qns.yml` CI workflow. 
--- .github/workflows/qns.yml | 2 +- neqo-bin/src/client/http09.rs | 31 ++++++++++++---------- neqo-bin/src/client/http3.rs | 16 ++---------- neqo-bin/src/client/mod.rs | 48 ++++++----------------------------- 4 files changed, 28 insertions(+), 69 deletions(-) diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index caadb022df..17cd584a26 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -71,6 +71,6 @@ jobs: name: 'neqo-latest' image: ${{ steps.docker_build_and_push.outputs.imageID }} url: https://github.com/mozilla/neqo - test: handshake + test: handshake,keyupdate client: neqo-latest,quic-go,ngtcp2,neqo,msquic server: neqo-latest,quic-go,ngtcp2,neqo,msquic diff --git a/neqo-bin/src/client/http09.rs b/neqo-bin/src/client/http09.rs index e0b254f67b..a9ed12b157 100644 --- a/neqo-bin/src/client/http09.rs +++ b/neqo-bin/src/client/http09.rs @@ -25,7 +25,7 @@ use neqo_transport::{ }; use url::Url; -use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; +use super::{get_output_file, qlog_new, Args, Res}; pub struct Handler<'a> { streams: HashMap>>, @@ -33,7 +33,7 @@ pub struct Handler<'a> { all_paths: Vec, args: &'a Args, token: Option, - key_update: KeyUpdateState, + needs_key_update: bool, } impl<'a> super::Handler for Handler<'a> { @@ -41,6 +41,18 @@ impl<'a> super::Handler for Handler<'a> { fn handle(&mut self, client: &mut Self::Client) -> Res { while let Some(event) = client.next_event() { + if self.needs_key_update { + match client.initiate_key_update() { + Ok(()) => { + qdebug!("Keys updated"); + self.needs_key_update = false; + self.download_urls(client); + } + Err(neqo_transport::Error::KeyUpdateBlocked) => (), + Err(e) => return Err(e.into()), + } + } + match event { ConnectionEvent::AuthenticationNeeded => { client.authenticated(AuthenticationStatus::Ok, Instant::now()); @@ -66,9 +78,6 @@ impl<'a> super::Handler for Handler<'a> { qdebug!("{event:?}"); self.download_urls(client); } - 
ConnectionEvent::StateChange(State::Confirmed) => { - self.maybe_key_update(client)?; - } ConnectionEvent::ResumptionToken(token) => { self.token = Some(token); } @@ -86,12 +95,6 @@ impl<'a> super::Handler for Handler<'a> { Ok(false) } - fn maybe_key_update(&mut self, c: &mut Self::Client) -> Res<()> { - self.key_update.maybe_update(|| c.initiate_key_update())?; - self.download_urls(c); - Ok(()) - } - fn take_token(&mut self) -> Option { self.token.take() } @@ -169,14 +172,14 @@ impl super::Client for Connection { } impl<'b> Handler<'b> { - pub fn new(url_queue: VecDeque, args: &'b Args, key_update: KeyUpdateState) -> Self { + pub fn new(url_queue: VecDeque, args: &'b Args) -> Self { Self { streams: HashMap::new(), url_queue, all_paths: Vec::new(), args, token: None, - key_update, + needs_key_update: args.key_update, } } @@ -195,7 +198,7 @@ impl<'b> Handler<'b> { } fn download_next(&mut self, client: &mut Connection) -> bool { - if self.key_update.needed() { + if self.needs_key_update { qdebug!("Deferring requests until after first key update"); return false; } diff --git a/neqo-bin/src/client/http3.rs b/neqo-bin/src/client/http3.rs index 09a30461bf..b3f577127e 100644 --- a/neqo-bin/src/client/http3.rs +++ b/neqo-bin/src/client/http3.rs @@ -27,7 +27,7 @@ use neqo_transport::{ }; use url::Url; -use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; +use super::{get_output_file, qlog_new, Args, Res}; pub(crate) struct Handler<'a> { #[allow( @@ -36,17 +36,12 @@ pub(crate) struct Handler<'a> { clippy::redundant_field_names )] url_handler: UrlHandler<'a>, - key_update: KeyUpdateState, token: Option, output_read_data: bool, } impl<'a> Handler<'a> { - pub(crate) fn new( - url_queue: VecDeque, - args: &'a Args, - key_update: KeyUpdateState, - ) -> Self { + pub(crate) fn new(url_queue: VecDeque, args: &'a Args) -> Self { let url_handler = UrlHandler { url_queue, stream_handlers: HashMap::new(), @@ -61,7 +56,6 @@ impl<'a> Handler<'a> { Self { url_handler, - 
key_update, token: None, output_read_data: args.output_read_data, } @@ -225,12 +219,6 @@ impl<'a> super::Handler for Handler<'a> { Ok(self.url_handler.done()) } - fn maybe_key_update(&mut self, c: &mut Http3Client) -> Res<()> { - self.key_update.maybe_update(|| c.initiate_key_update())?; - self.url_handler.process_urls(c); - Ok(()) - } - fn take_token(&mut self) -> Option { self.token.take() } diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index 791e2a6366..61e43c00d1 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -100,39 +100,6 @@ impl std::error::Error for Error {} type Res = Result; -/// Track whether a key update is needed. -#[derive(Debug, PartialEq, Eq)] -struct KeyUpdateState(bool); - -impl KeyUpdateState { - pub fn maybe_update(&mut self, update_fn: F) -> Res<()> - where - F: FnOnce() -> Result<(), E>, - E: Into, - { - if self.0 { - if let Err(e) = update_fn() { - let e = e.into(); - match e { - Error::TransportError(TransportError::KeyUpdateBlocked) - | Error::Http3Error(neqo_http3::Error::TransportError( - TransportError::KeyUpdateBlocked, - )) => (), - _ => return Err(e), - } - } else { - qerror!("Keys updated"); - self.0 = false; - } - } - Ok(()) - } - - fn needed(&self) -> bool { - self.0 - } -} - #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] #[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. @@ -176,7 +143,7 @@ pub struct Args { /// Use this for 0-RTT: the stack always attempts 0-RTT on resumption. resume: bool, - #[arg(name = "key-update", long)] + #[arg(name = "key-update", long, hide = true)] /// Attempt to initiate a key update immediately after confirming the connection. key_update: bool, @@ -255,6 +222,11 @@ impl Args { return; }; + if self.key_update { + qerror!("internal option key_update set by user"); + exit(127) + } + // Only use v1 for most QNS tests. 
self.shared.quic_parameters.quic_version = vec![Version::Version1]; match testcase.as_str() { @@ -370,7 +342,6 @@ trait Handler { type Client: Client; fn handle(&mut self, client: &mut Self::Client) -> Res; - fn maybe_key_update(&mut self, c: &mut Self::Client) -> Res<()>; fn take_token(&mut self) -> Option; fn has_token(&self) -> bool; } @@ -474,7 +445,6 @@ impl<'a, H: Handler> Runner<'a, H> { self.client .process_multiple_input(dgrams.iter(), Instant::now()); self.process_output().await?; - self.handler.maybe_key_update(&mut self.client)?; } Ok(()) @@ -578,14 +548,12 @@ pub async fn client(mut args: Args) -> Res<()> { first = false; - let key_update = KeyUpdateState(args.key_update); - token = if args.shared.use_old_http { let client = http09::create_client(&args, real_local, remote_addr, &hostname, token) .expect("failed to create client"); - let handler = http09::Handler::new(to_request, &args, key_update); + let handler = http09::Handler::new(to_request, &args); Runner { args: &args, @@ -601,7 +569,7 @@ pub async fn client(mut args: Args) -> Res<()> { let client = http3::create_client(&args, real_local, remote_addr, &hostname, token) .expect("failed to create client"); - let handler = http3::Handler::new(to_request, &args, key_update); + let handler = http3::Handler::new(to_request, &args); Runner { args: &args, From b3cf65f39c179cbbfe4de6cf6d2e4cbad2558c7a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 11 Apr 2024 08:12:48 +0300 Subject: [PATCH 318/321] Merge pull request from GHSA-hvhj-4r52-8568 --- neqo-transport/src/packet/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index f5e8320ccb..f1112e90fa 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -555,7 +555,10 @@ impl<'a> PublicPacket<'a> { if packet_type == PacketType::Retry { let header_len = decoder.offset(); let expansion = retry::expansion(version); - let 
token = Self::opt(decoder.decode(decoder.remaining() - expansion))?; + let token = decoder + .remaining() + .checked_sub(expansion) + .map_or(Err(Error::InvalidPacket), |v| Self::opt(decoder.decode(v)))?; if token.is_empty() { return Err(Error::InvalidPacket); } From c0ca26d6fd4ff85b7c0609da6ed84623137b1a92 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 11 Apr 2024 08:15:51 +0300 Subject: [PATCH 319/321] fix: Correctly manage `bytes_in_flight` during a rebinding event (#1812) * fix: Correctly manage `bytes_in_flight` during a rebinding event Fixes #1289 * Update comment. Remove incorrect `clippy::unused_self` while I'm here. --- neqo-transport/src/cc/classic_cc.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index a63d6e0b38..f8bcee6722 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -179,8 +179,9 @@ impl CongestionControl for ClassicCongestionControl { if pkt.pn < self.first_app_limited { is_app_limited = false; } - assert!(self.bytes_in_flight >= pkt.size); - self.bytes_in_flight -= pkt.size; + // BIF is set to 0 on a path change, but in case that was because of a simple rebinding + // event, we may still get ACKs for packets sent before the rebinding. + self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.size); if !self.after_recovery_start(pkt) { // Do not increase congestion window for packets sent before @@ -271,8 +272,9 @@ impl CongestionControl for ClassicCongestionControl { pkt.pn, pkt.size ); - assert!(self.bytes_in_flight >= pkt.size); - self.bytes_in_flight -= pkt.size; + // BIF is set to 0 on a path change, but in case that was because of a simple rebinding + // event, we may still declare packets lost that were sent before the rebinding. 
+ self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.size); } qlog::metrics_updated( &mut self.qlog, @@ -516,7 +518,6 @@ impl ClassicCongestionControl { true } - #[allow(clippy::unused_self)] fn app_limited(&self) -> bool { if self.bytes_in_flight >= self.congestion_window { false From 33da3979f3a02d2a703a850c18b7c56e0275309f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 11 Apr 2024 14:40:39 +0300 Subject: [PATCH 320/321] Consolidated fix + test (#1816) --- neqo-transport/src/packet/mod.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index f1112e90fa..ce611a9664 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -1521,4 +1521,21 @@ mod tests { assert_eq!(decrypted.pn(), 654_360_564); assert_eq!(&decrypted[..], &[0x01]); } + + #[test] + fn decode_empty() { + neqo_crypto::init().unwrap(); + let res = PublicPacket::decode(&[], &EmptyConnectionIdGenerator::default()); + assert!(res.is_err()); + } + + #[test] + fn decode_too_short() { + neqo_crypto::init().unwrap(); + let res = PublicPacket::decode( + &[179, 255, 0, 0, 32, 0, 0], + &EmptyConnectionIdGenerator::default(), + ); + assert!(res.is_err()); + } } From c004359a817ffdea33394e94944d2f882e7e78af Mon Sep 17 00:00:00 2001 From: Kershaw Date: Thu, 11 Apr 2024 14:06:25 +0200 Subject: [PATCH 321/321] neqo v0.7.5 (#1817) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 1a7fc1eeb2..cddc19c190 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ resolver = "2" homepage = "https://github.com/mozilla/neqo/" repository = "https://github.com/mozilla/neqo/" authors = ["The Neqo Authors "] -version = "0.7.4" +version = "0.7.5" # Keep in sync with `.rustfmt.toml` `edition`. edition = "2021" license = "MIT OR Apache-2.0"