From e2f9369296488c6d60c778e19aa77aca6df4fbf1 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 21 Mar 2024 22:53:23 +0100 Subject: [PATCH 01/28] ci(check.yml): add sleep i.e. client wait for server (#1765) Windows CI oftentimes fails with: ``` 0s 0ms INFO H3 Client connecting: [::]:65093 -> [::1]:4433 Error: IoError(Os { code: 10054, kind: ConnectionReset, message: "An existing connection was forcibly closed by the remote host." }) 0s 0ms INFO Server waiting for connection on: [::1]:4433 0s 0ms INFO Server waiting for connection on: 127.0.0.1:4433 ``` https://github.com/mozilla/neqo/actions/runs/8374577016/job/22930136500?pr=1692 This suggests that the client connects to the server before the server is ready to accept connections. This commit adds a sleep, thus giving the server time to start up. Tracked in https://github.com/mozilla/neqo/issues/1759. Sleep was previously introduced in https://github.com/mozilla/neqo/pull/1713 but later removed in https://github.com/mozilla/neqo/pull/1717 --- .github/workflows/check.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 10085ffda6..a89e4859d3 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -122,6 +122,8 @@ jobs: cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server "target/$BUILD_DIR/neqo-server" "$HOST:4433" & PID=$! + # Give the server time to start. + sleep 1 "target/$BUILD_DIR/neqo-client" --output-dir . "https://$HOST:4433/$SIZE" kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 From 56f53bb6b3fde59cf4e7d0daa54c8c206d11d3d6 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 21 Mar 2024 23:53:51 +0200 Subject: [PATCH 02/28] feat: Extend `Frame::Padding` with length (#1762) * feat: Extend `Frame::Padding` with a `length` field This is preparation of qlog supporting the logging of runs of padding frames via `payload_length`, instead of each one individually. * Use `dv` more * Fix test * Address code review * Add TODO --- neqo-transport/src/connection/dump.rs | 3 +- neqo-transport/src/connection/mod.rs | 25 +- .../src/connection/tests/handshake.rs | 2 +- neqo-transport/src/frame.rs | 60 +-- neqo-transport/src/packet/mod.rs | 4 +- neqo-transport/src/qlog.rs | 347 +++++++++--------- 6 files changed, 219 insertions(+), 222 deletions(-) diff --git a/neqo-transport/src/connection/dump.rs b/neqo-transport/src/connection/dump.rs index 8a4f34dbb8..34ac58f55e 100644 --- a/neqo-transport/src/connection/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -38,7 +38,8 @@ pub fn dump_packet( s.push_str(" [broken]..."); break; }; - if let Some(x) = f.dump() { + let x = f.dump(); + if !x.is_empty() { write!(&mut s, "\n {} {}", dir, &x).unwrap(); } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index c81a3727c6..8d1c106358 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -461,7 +461,7 @@ impl Connection { } /// # Errors - /// When the operation fails. + /// When the operation fails. 
pub fn client_enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { self.crypto.client_enable_ech(ech_config_list) } @@ -1560,24 +1560,8 @@ impl Connection { let mut ack_eliciting = false; let mut probing = true; let mut d = Decoder::from(&packet[..]); - let mut consecutive_padding = 0; while d.remaining() > 0 { - let mut f = Frame::decode(&mut d)?; - - // Skip padding - while f == Frame::Padding && d.remaining() > 0 { - consecutive_padding += 1; - f = Frame::decode(&mut d)?; - } - if consecutive_padding > 0 { - qdebug!( - [self], - "PADDING frame repeated {} times", - consecutive_padding - ); - consecutive_padding = 0; - } - + let f = Frame::decode(&mut d)?; ack_eliciting |= f.ack_eliciting(); probing &= f.path_probing(); let t = f.get_type(); @@ -2694,9 +2678,8 @@ impl Connection { .input_frame(&frame, &mut self.stats.borrow_mut().frame_rx); } match frame { - Frame::Padding => { - // Note: This counts contiguous padding as a single frame. - self.stats.borrow_mut().frame_rx.padding += 1; + Frame::Padding(length) => { + self.stats.borrow_mut().frame_rx.padding += usize::from(length); } Frame::Ping => { // If we get a PING and there are outstanding CRYPTO frames, diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index af0352ce90..cfb6d99166 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -458,7 +458,7 @@ fn coalesce_05rtt() { assert_eq!(client.stats().dropped_rx, 0); // No Initial padding. assert_eq!(client.stats().packets_rx, 4); assert_eq!(client.stats().saved_datagrams, 1); - assert_eq!(client.stats().frame_rx.padding, 1); // Padding uses frames. + assert!(client.stats().frame_rx.padding > 0); // Padding uses frames. // Allow the handshake to complete. now += RTT / 2; diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index b3bb024a2c..5a86a07108 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -20,7 +20,7 @@ use crate::{ #[allow(clippy::module_name_repetitions)] pub type FrameType = u64; -const FRAME_TYPE_PADDING: FrameType = 0x0; +pub const FRAME_TYPE_PADDING: FrameType = 0x0; pub const FRAME_TYPE_PING: FrameType = 0x1; pub const FRAME_TYPE_ACK: FrameType = 0x2; const FRAME_TYPE_ACK_ECN: FrameType = 0x3; @@ -103,7 +103,7 @@ pub struct AckRange { #[derive(PartialEq, Eq, Debug, Clone)] pub enum Frame<'a> { - Padding, + Padding(u16), Ping, Ack { largest_acknowledged: u64, @@ -215,7 +215,7 @@ impl<'a> Frame<'a> { pub fn get_type(&self) -> FrameType { match self { - Self::Padding => FRAME_TYPE_PADDING, + Self::Padding { .. } => FRAME_TYPE_PADDING, Self::Ping => FRAME_TYPE_PING, Self::Ack { .. } => FRAME_TYPE_ACK, // We don't do ACK ECN. Self::ResetStream { .. } => FRAME_TYPE_RESET_STREAM, @@ -288,7 +288,7 @@ impl<'a> Frame<'a> { pub fn ack_eliciting(&self) -> bool { !matches!( self, - Self::Ack { .. } | Self::Padding | Self::ConnectionClose { .. } + Self::Ack { .. } | Self::Padding { .. } | Self::ConnectionClose { .. } ) } @@ -297,7 +297,7 @@ impl<'a> Frame<'a> { pub fn path_probing(&self) -> bool { matches!( self, - Self::Padding + Self::Padding { .. } | Self::NewConnectionId { .. } | Self::PathChallenge { .. } | Self::PathResponse { .. 
} @@ -347,36 +347,34 @@ impl<'a> Frame<'a> { Ok(acked_ranges) } - pub fn dump(&self) -> Option { + pub fn dump(&self) -> String { match self { - Self::Crypto { offset, data } => Some(format!( - "Crypto {{ offset: {}, len: {} }}", - offset, - data.len() - )), + Self::Crypto { offset, data } => { + format!("Crypto {{ offset: {}, len: {} }}", offset, data.len()) + } Self::Stream { stream_id, offset, fill, data, fin, - } => Some(format!( + } => format!( "Stream {{ stream_id: {}, offset: {}, len: {}{}, fin: {} }}", stream_id.as_u64(), offset, if *fill { ">>" } else { "" }, data.len(), fin, - )), - Self::Padding => None, - Self::Datagram { data, .. } => Some(format!("Datagram {{ len: {} }}", data.len())), - _ => Some(format!("{self:?}")), + ), + Self::Padding(length) => format!("Padding {{ len: {length} }}"), + Self::Datagram { data, .. } => format!("Datagram {{ len: {} }}", data.len()), + _ => format!("{self:?}"), } } pub fn is_allowed(&self, pt: PacketType) -> bool { match self { - Self::Padding | Self::Ping => true, + Self::Padding { .. } | Self::Ping => true, Self::Crypto { .. } | Self::Ack { .. } | Self::ConnectionClose { @@ -409,13 +407,23 @@ impl<'a> Frame<'a> { } // TODO(ekr@rtfm.com): check for minimal encoding - let t = d(dec.decode_varint())?; + let t = dv(dec)?; match t { - FRAME_TYPE_PADDING => Ok(Self::Padding), + FRAME_TYPE_PADDING => { + let mut length: u16 = 1; + while let Some(b) = dec.peek_byte() { + if u64::from(b) != FRAME_TYPE_PADDING { + break; + } + length += 1; + dec.skip(1); + } + Ok(Self::Padding(length)) + } FRAME_TYPE_PING => Ok(Self::Ping), FRAME_TYPE_RESET_STREAM => Ok(Self::ResetStream { stream_id: StreamId::from(dv(dec)?), - application_error_code: d(dec.decode_varint())?, + application_error_code: dv(dec)?, final_size: match dec.decode_varint() { Some(v) => v, _ => return Err(Error::NoMoreData), @@ -457,7 +465,7 @@ impl<'a> Frame<'a> { } FRAME_TYPE_STOP_SENDING => Ok(Self::StopSending { stream_id: StreamId::from(dv(dec)?), - application_error_code: d(dec.decode_varint())?, + application_error_code: dv(dec)?, }), FRAME_TYPE_CRYPTO => { let offset = dv(dec)?; @@ -563,7 +571,7 @@ impl<'a> Frame<'a> { Ok(Self::PathResponse { data: datav }) } FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT | FRAME_TYPE_CONNECTION_CLOSE_APPLICATION => { - let error_code = CloseError::from_type_bit(t, d(dec.decode_varint())?); + let error_code = CloseError::from_type_bit(t, dv(dec)?); let frame_type = if t == FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT { dv(dec)? } else { @@ -631,8 +639,10 @@ mod tests { #[test] fn padding() { - let f = Frame::Padding; + let f = Frame::Padding(1); just_dec(&f, "00"); + let f = Frame::Padding(2); + just_dec(&f, "0000"); } #[test] @@ -888,8 +898,8 @@ mod tests { #[test] fn test_compare() { - let f1 = Frame::Padding; - let f2 = Frame::Padding; + let f1 = Frame::Padding(1); + let f2 = Frame::Padding(1); let f3 = Frame::Crypto { offset: 0, data: &[1, 2, 3], diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 8458f69779..d11b3423a4 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -18,6 +18,7 @@ use neqo_crypto::random; use crate::{ cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, crypto::{CryptoDxState, CryptoSpace, CryptoStates}, + frame::FRAME_TYPE_PADDING, version::{Version, WireVersion}, Error, Res, }; @@ -257,7 +258,8 @@ impl PacketBuilder { /// Returns true if padding was added. 
pub fn pad(&mut self) -> bool { if self.padding && !self.is_long() { - self.encoder.pad_to(self.limit, 0); + self.encoder + .pad_to(self.limit, FRAME_TYPE_PADDING.try_into().unwrap()); true } else { false diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 2572966104..a8ad986d2a 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -195,7 +195,7 @@ pub fn packet_sent( ) { qlog.add_event_with_stream(|stream| { let mut d = Decoder::from(body); - let header = PacketHeader::with_type(to_qlog_pkt_type(pt), Some(pn), None, None, None); + let header = PacketHeader::with_type(pt.into(), Some(pn), None, None, None); let raw = RawInfo { length: Some(plen as u64), payload_length: None, @@ -205,7 +205,7 @@ pub fn packet_sent( let mut frames = SmallVec::new(); while d.remaining() > 0 { if let Ok(f) = Frame::decode(&mut d) { - frames.push(frame_to_qlogframe(&f)); + frames.push(QuicFrame::from(&f)); } else { qinfo!("qlog: invalid frame"); break; @@ -231,13 +231,8 @@ pub fn packet_sent( pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { qlog.add_event_data(|| { - let header = PacketHeader::with_type( - to_qlog_pkt_type(public_packet.packet_type()), - None, - None, - None, - None, - ); + let header = + PacketHeader::with_type(public_packet.packet_type().into(), None, None, None, None); let raw = RawInfo { length: Some(public_packet.len() as u64), payload_length: None, @@ -259,8 +254,7 @@ pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { qlog.add_event_with_stream(|stream| { for pkt in pkts { - let header = - PacketHeader::with_type(to_qlog_pkt_type(pkt.pt), Some(pkt.pn), None, None, None); + let header = PacketHeader::with_type(pkt.pt.into(), Some(pkt.pn), None, None, None); let ev_data = EventData::PacketLost(PacketLost { header: Some(header), @@ -283,7 +277,7 @@ pub fn packet_received( let mut d = Decoder::from(&payload[..]); let header = PacketHeader::with_type( - to_qlog_pkt_type(public_packet.packet_type()), + public_packet.packet_type().into(), Some(payload.pn()), None, None, @@ -299,7 +293,7 @@ pub fn packet_received( while d.remaining() > 0 { if let Ok(f) = Frame::decode(&mut d) { - frames.push(frame_to_qlogframe(&f)); + frames.push(QuicFrame::from(&f)); } else { qinfo!("qlog: invalid frame"); break; @@ -393,173 +387,180 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match. #[allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] // No choice here. -fn frame_to_qlogframe(frame: &Frame) -> QuicFrame { - match frame { - Frame::Padding => QuicFrame::Padding, - Frame::Ping => QuicFrame::Ping, - Frame::Ack { - largest_acknowledged, - ack_delay, - first_ack_range, - ack_ranges, - } => { - let ranges = - Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges).ok(); - - let acked_ranges = ranges.map(|all| { - AckedRanges::Double( - all.into_iter() - .map(RangeInclusive::into_inner) - .collect::>(), - ) - }); - - QuicFrame::Ack { - ack_delay: Some(*ack_delay as f32 / 1000.0), - acked_ranges, - ect1: None, - ect0: None, - ce: None, +impl From<&Frame<'_>> for QuicFrame { + fn from(frame: &Frame) -> Self { + match frame { + // TODO: Add payload length to `QuicFrame::Padding` once + // https://github.com/cloudflare/quiche/pull/1745 is available via the qlog crate. + Frame::Padding { .. 
} => QuicFrame::Padding, + Frame::Ping => QuicFrame::Ping, + Frame::Ack { + largest_acknowledged, + ack_delay, + first_ack_range, + ack_ranges, + } => { + let ranges = + Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges) + .ok(); + + let acked_ranges = ranges.map(|all| { + AckedRanges::Double( + all.into_iter() + .map(RangeInclusive::into_inner) + .collect::>(), + ) + }); + + QuicFrame::Ack { + ack_delay: Some(*ack_delay as f32 / 1000.0), + acked_ranges, + ect1: None, + ect0: None, + ce: None, + } } - } - Frame::ResetStream { - stream_id, - application_error_code, - final_size, - } => QuicFrame::ResetStream { - stream_id: stream_id.as_u64(), - error_code: *application_error_code, - final_size: *final_size, - }, - Frame::StopSending { - stream_id, - application_error_code, - } => QuicFrame::StopSending { - stream_id: stream_id.as_u64(), - error_code: *application_error_code, - }, - Frame::Crypto { offset, data } => QuicFrame::Crypto { - offset: *offset, - length: data.len() as u64, - }, - Frame::NewToken { token } => QuicFrame::NewToken { - token: qlog::Token { - ty: Some(qlog::TokenType::Retry), - details: None, - raw: Some(RawInfo { - data: Some(hex(token)), - length: Some(token.len() as u64), - payload_length: None, - }), + Frame::ResetStream { + stream_id, + application_error_code, + final_size, + } => QuicFrame::ResetStream { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, + final_size: *final_size, + }, + Frame::StopSending { + stream_id, + application_error_code, + } => QuicFrame::StopSending { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, + }, + Frame::Crypto { offset, data } => QuicFrame::Crypto { + offset: *offset, + length: data.len() as u64, + }, + Frame::NewToken { token } => QuicFrame::NewToken { + token: qlog::Token { + ty: Some(qlog::TokenType::Retry), + details: None, + raw: Some(RawInfo { + data: Some(hex(token)), + length: Some(token.len() as u64), + payload_length: None, + }), + }, }, - }, - Frame::Stream { - fin, - stream_id, - offset, - data, - .. - } => QuicFrame::Stream { - stream_id: stream_id.as_u64(), - offset: *offset, - length: data.len() as u64, - fin: Some(*fin), - raw: None, - }, - Frame::MaxData { maximum_data } => QuicFrame::MaxData { - maximum: *maximum_data, - }, - Frame::MaxStreamData { - stream_id, - maximum_stream_data, - } => QuicFrame::MaxStreamData { - stream_id: stream_id.as_u64(), - maximum: *maximum_stream_data, - }, - Frame::MaxStreams { - stream_type, - maximum_streams, - } => QuicFrame::MaxStreams { - stream_type: match stream_type { - NeqoStreamType::BiDi => StreamType::Bidirectional, - NeqoStreamType::UniDi => StreamType::Unidirectional, + Frame::Stream { + fin, + stream_id, + offset, + data, + .. 
+ } => QuicFrame::Stream { + stream_id: stream_id.as_u64(), + offset: *offset, + length: data.len() as u64, + fin: Some(*fin), + raw: None, }, - maximum: *maximum_streams, - }, - Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit }, - Frame::StreamDataBlocked { - stream_id, - stream_data_limit, - } => QuicFrame::StreamDataBlocked { - stream_id: stream_id.as_u64(), - limit: *stream_data_limit, - }, - Frame::StreamsBlocked { - stream_type, - stream_limit, - } => QuicFrame::StreamsBlocked { - stream_type: match stream_type { - NeqoStreamType::BiDi => StreamType::Bidirectional, - NeqoStreamType::UniDi => StreamType::Unidirectional, + Frame::MaxData { maximum_data } => QuicFrame::MaxData { + maximum: *maximum_data, }, - limit: *stream_limit, - }, - Frame::NewConnectionId { - sequence_number, - retire_prior, - connection_id, - stateless_reset_token, - } => QuicFrame::NewConnectionId { - sequence_number: *sequence_number as u32, - retire_prior_to: *retire_prior as u32, - connection_id_length: Some(connection_id.len() as u8), - connection_id: hex(connection_id), - stateless_reset_token: Some(hex(stateless_reset_token)), - }, - Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId { - sequence_number: *sequence_number as u32, - }, - Frame::PathChallenge { data } => QuicFrame::PathChallenge { - data: Some(hex(data)), - }, - Frame::PathResponse { data } => QuicFrame::PathResponse { - data: Some(hex(data)), - }, - Frame::ConnectionClose { - error_code, - frame_type, - reason_phrase, - } => QuicFrame::ConnectionClose { - error_space: match error_code { - CloseError::Transport(_) => Some(ErrorSpace::TransportError), - CloseError::Application(_) => Some(ErrorSpace::ApplicationError), + Frame::MaxStreamData { + stream_id, + maximum_stream_data, + } => QuicFrame::MaxStreamData { + stream_id: stream_id.as_u64(), + maximum: *maximum_stream_data, }, - error_code: Some(error_code.code()), - error_code_value: Some(0), - reason: Some(String::from_utf8_lossy(reason_phrase).to_string()), - trigger_frame_type: Some(*frame_type), - }, - Frame::HandshakeDone => QuicFrame::HandshakeDone, - Frame::AckFrequency { .. } => QuicFrame::Unknown { - frame_type_value: None, - raw_frame_type: frame.get_type(), - raw: None, - }, - Frame::Datagram { data, .. 
} => QuicFrame::Datagram { - length: data.len() as u64, - raw: None, - }, + Frame::MaxStreams { + stream_type, + maximum_streams, + } => QuicFrame::MaxStreams { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, + }, + maximum: *maximum_streams, + }, + Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit }, + Frame::StreamDataBlocked { + stream_id, + stream_data_limit, + } => QuicFrame::StreamDataBlocked { + stream_id: stream_id.as_u64(), + limit: *stream_data_limit, + }, + Frame::StreamsBlocked { + stream_type, + stream_limit, + } => QuicFrame::StreamsBlocked { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, + }, + limit: *stream_limit, + }, + Frame::NewConnectionId { + sequence_number, + retire_prior, + connection_id, + stateless_reset_token, + } => QuicFrame::NewConnectionId { + sequence_number: *sequence_number as u32, + retire_prior_to: *retire_prior as u32, + connection_id_length: Some(connection_id.len() as u8), + connection_id: hex(connection_id), + stateless_reset_token: Some(hex(stateless_reset_token)), + }, + Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId { + sequence_number: *sequence_number as u32, + }, + Frame::PathChallenge { data } => QuicFrame::PathChallenge { + data: Some(hex(data)), + }, + Frame::PathResponse { data } => QuicFrame::PathResponse { + data: Some(hex(data)), + }, + Frame::ConnectionClose { + error_code, + frame_type, + reason_phrase, + } => QuicFrame::ConnectionClose { + error_space: match error_code { + CloseError::Transport(_) => Some(ErrorSpace::TransportError), + CloseError::Application(_) => Some(ErrorSpace::ApplicationError), + }, + error_code: Some(error_code.code()), + error_code_value: Some(0), + reason: Some(String::from_utf8_lossy(reason_phrase).to_string()), + trigger_frame_type: Some(*frame_type), + }, + Frame::HandshakeDone => QuicFrame::HandshakeDone, + Frame::AckFrequency { .. } => QuicFrame::Unknown { + frame_type_value: None, + raw_frame_type: frame.get_type(), + raw: None, + }, + Frame::Datagram { data, .. 
} => QuicFrame::Datagram { + length: data.len() as u64, + raw: None, + }, + } } } -fn to_qlog_pkt_type(ptype: PacketType) -> qlog::events::quic::PacketType { - match ptype { - PacketType::Initial => qlog::events::quic::PacketType::Initial, - PacketType::Handshake => qlog::events::quic::PacketType::Handshake, - PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt, - PacketType::Short => qlog::events::quic::PacketType::OneRtt, - PacketType::Retry => qlog::events::quic::PacketType::Retry, - PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation, - PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown, +impl From for qlog::events::quic::PacketType { + fn from(value: PacketType) -> Self { + match value { + PacketType::Initial => qlog::events::quic::PacketType::Initial, + PacketType::Handshake => qlog::events::quic::PacketType::Handshake, + PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt, + PacketType::Short => qlog::events::quic::PacketType::OneRtt, + PacketType::Retry => qlog::events::quic::PacketType::Retry, + PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation, + PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown, + } } } From a80db4759ed63c7a314e624a35a5055b5de916f4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 21 Mar 2024 23:47:22 +0100 Subject: [PATCH 03/28] feat(bin/client): add --stats flag printing connection statistics (#1766) This commit introduces the `--stats` flag to `neqo-client`. It will print the `neqo_transport::Stats` of the `Connection` after close. ``` $ cargo run --bin neqo-client -- --stats http://127.0.0.1:12345/10000000 stats for Client ... rx: 7728 drop 1 dup 0 saved 1 tx: 819 lost 12 lateack 0 ptoack 4 resumed: false frames rx: crypto 3 done 1 token 1 close 0 ack 40 (max 805) ping 0 padding 0 stream 7704 reset 0 stop 0 max: stream 0 data 0 stream_data 0 blocked: stream 0 data 0 stream_data 19 datagram 0 ncid 7 rcid 0 pchallenge 0 presponse 0 ack_frequency 4 frames tx: crypto 2 done 0 token 0 close 0 ack 783 (max 7769) ping 5 padding 0 stream 5 reset 0 stop 0 max: stream 0 data 0 stream_data 30 blocked: stream 0 data 0 stream_data 0 datagram 0 ncid 0 rcid 0 pchallenge 0 presponse 0 ack_frequency 2 ``` Co-authored-by: Lars Eggert --- neqo-bin/src/bin/client/http09.rs | 4 ++++ neqo-bin/src/bin/client/http3.rs | 4 ++++ neqo-bin/src/bin/client/main.rs | 8 ++++++++ neqo-transport/src/stats.rs | 2 +- 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/bin/client/http09.rs index 6d9a26fec2..9135eb5bb8 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/bin/client/http09.rs @@ -153,6 +153,10 @@ impl super::Client for Connection { fn is_closed(&self) -> bool { matches!(self.state(), State::Closed(..)) } + + fn stats(&self) -> neqo_transport::Stats { + self.stats() + } } impl<'b> Handler<'b> { diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/bin/client/http3.rs index 07cc0e4cde..21637fb3d6 100644 --- a/neqo-bin/src/bin/client/http3.rs +++ b/neqo-bin/src/bin/client/http3.rs @@ -125,6 +125,10 @@ impl super::Client for Http3Client { { self.close(now, app_error, msg); } + + fn stats(&self) -> neqo_transport::Stats { + self.transport_stats() + } } impl<'a> super::Handler for Handler<'a> { diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/bin/client/main.rs index 7b1a5928a6..3332d79438 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/bin/client/main.rs 
@@ -179,6 +179,10 @@ pub struct Args { /// The request size that will be used for upload test. #[arg(name = "upload-size", long, default_value = "100")] upload_size: usize, + + /// Print connection stats after close. + #[arg(name = "stats", long)] + stats: bool, } impl Args { @@ -327,6 +331,7 @@ trait Client { where S: AsRef + Display; fn is_closed(&self) -> bool; + fn stats(&self) -> neqo_transport::Stats; } struct Runner<'a, H: Handler> { @@ -361,6 +366,9 @@ impl<'a, H: Handler> Runner<'a, H> { self.process(None).await?; if self.client.is_closed() { + if self.args.stats { + qinfo!("{:?}", self.client.stats()); + } return Ok(self.handler.take_token()); } diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index 9eff503dcf..cdc378d71b 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -206,7 +206,7 @@ impl Debug for Stats { " tx: {} lost {} lateack {} ptoack {}", self.packets_tx, self.lost, self.late_ack, self.pto_ack )?; - writeln!(f, " resumed: {} ", self.resumed)?; + writeln!(f, " resumed: {}", self.resumed)?; writeln!(f, " frames rx:")?; self.frame_rx.fmt(f)?; writeln!(f, " frames tx:")?; From e4bc0c1ed4b3075d1a5d3ebff488d4ed74d4d260 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 21 Mar 2024 23:47:58 +0100 Subject: [PATCH 04/28] feat(client,server): rework logging (#1692) * feat(client,server): rework logging - In `neqo-client` and `neqo-server` use `neqo_common::log` instead of `println!` and `eprintln!`. - Add `-q`, `-v`, `-vv`, `-vvv`, `-vvvv` log level flags via `clap_verbosity_flag`. - Set default log level to INFO. Demote many `qinfo!` to `qdebug!`. * fix(upload_test.sh): set RUST_LOG debug for neqo_transport::cc Needed in order for mozlog_neqo_cwnd.py to analyze cc. * Additional level reductions * Trigger CI --------- Co-authored-by: Lars Eggert --- neqo-bin/Cargo.toml | 1 + neqo-bin/src/bin/client/http09.rs | 26 ++++++++++++------------ neqo-bin/src/bin/client/http3.rs | 26 ++++++++++++------------ neqo-bin/src/bin/client/main.rs | 30 ++++++++++++++++------------ neqo-bin/src/bin/server/main.rs | 22 +++++++++++--------- neqo-bin/src/bin/server/old_https.rs | 12 +++++------ neqo-common/src/log.rs | 21 ++++++++++--------- neqo-crypto/src/agent.rs | 4 ++-- neqo-http3/src/connection.rs | 16 +++++++-------- neqo-http3/src/connection_client.rs | 4 ++-- neqo-http3/src/connection_server.rs | 4 ++-- neqo-http3/src/recv_message.rs | 2 +- neqo-http3/src/send_message.rs | 6 +++--- neqo-http3/src/server_events.rs | 8 ++++---- neqo-transport/src/cc/classic_cc.rs | 16 +++++++-------- neqo-transport/src/connection/mod.rs | 18 ++++++++--------- neqo-transport/src/crypto.rs | 8 ++++---- neqo-transport/src/lib.rs | 4 ++-- neqo-transport/src/path.rs | 2 +- neqo-transport/src/stats.rs | 4 ++-- test/upload_test.sh | 2 ++ 21 files changed, 125 insertions(+), 111 deletions(-) diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index d36d2ecdca..04210e00db 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -25,6 +25,7 @@ workspace = true [dependencies] # neqo-bin is not used in Firefox, so we can be liberal with dependency versions clap = { version = "4.4", default-features = false, features = ["std", "color", "help", "usage", "error-context", "suggestions", "derive"] } +clap-verbosity-flag = { version = "2.2", default-features = false } futures = { version = "0.3", default-features = false, features = ["alloc"] } hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false } 
diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/bin/client/http09.rs index 9135eb5bb8..372a112853 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/bin/client/http09.rs @@ -17,7 +17,7 @@ use std::{ time::Instant, }; -use neqo_common::{event::Provider, Datagram}; +use neqo_common::{event::Provider, qdebug, qinfo, qwarn, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, @@ -50,13 +50,13 @@ impl<'a> super::Handler for Handler<'a> { self.read(client, stream_id)?; } ConnectionEvent::SendStreamWritable { stream_id } => { - println!("stream {stream_id} writable"); + qdebug!("stream {stream_id} writable"); } ConnectionEvent::SendStreamComplete { stream_id } => { - println!("stream {stream_id} complete"); + qdebug!("stream {stream_id} complete"); } ConnectionEvent::SendStreamCreatable { stream_type } => { - println!("stream {stream_type:?} creatable"); + qdebug!("stream {stream_type:?} creatable"); if stream_type == StreamType::BiDi { self.download_urls(client); } @@ -64,7 +64,7 @@ impl<'a> super::Handler for Handler<'a> { ConnectionEvent::StateChange( State::WaitInitial | State::Handshaking | State::Connected, ) => { - println!("{event:?}"); + qdebug!("{event:?}"); self.download_urls(client); } ConnectionEvent::StateChange(State::Confirmed) => { @@ -74,7 +74,7 @@ impl<'a> super::Handler for Handler<'a> { self.token = Some(token); } _ => { - println!("Unhandled event {event:?}"); + qwarn!("Unhandled event {event:?}"); } } } @@ -187,7 +187,7 @@ impl<'b> Handler<'b> { fn download_next(&mut self, client: &mut Connection) -> bool { if self.key_update.needed() { - println!("Deferring requests until after first key update"); + qdebug!("Deferring requests until after first key update"); return false; } let url = self @@ -196,7 +196,7 @@ impl<'b> Handler<'b> { .expect("download_next called with empty queue"); match client.stream_create(StreamType::BiDi) { Ok(client_stream_id) => { - println!("Created stream {client_stream_id} for {url}"); + qinfo!("Created stream {client_stream_id} for {url}"); let req = format!("GET {}\r\n", url.path()); _ = client .stream_send(client_stream_id, req.as_bytes()) @@ -207,7 +207,7 @@ impl<'b> Handler<'b> { true } Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => { - println!("Cannot create stream {e:?}"); + qwarn!("Cannot create stream {e:?}"); self.url_queue.push_front(url); false } @@ -235,9 +235,9 @@ impl<'b> Handler<'b> { if let Some(out_file) = maybe_out_file { out_file.write_all(&data[..sz])?; } else if !output_read_data { - println!("READ[{stream_id}]: {sz} bytes"); + qdebug!("READ[{stream_id}]: {sz} bytes"); } else { - println!( + qdebug!( "READ[{}]: {}", stream_id, String::from_utf8(data.clone()).unwrap() @@ -252,7 +252,7 @@ impl<'b> Handler<'b> { fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> { match self.streams.get_mut(&stream_id) { None => { - println!("Data on unexpected stream: {stream_id}"); + qwarn!("Data on unexpected stream: {stream_id}"); return Ok(()); } Some(maybe_out_file) => { @@ -267,7 +267,7 @@ impl<'b> Handler<'b> { if let Some(mut out_file) = maybe_out_file.take() { out_file.flush()?; } else { - println!(""); + qinfo!(""); } self.streams.remove(&stream_id); self.download_urls(client); diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/bin/client/http3.rs index 21637fb3d6..e9f5e406a5 100644 --- a/neqo-bin/src/bin/client/http3.rs +++ 
b/neqo-bin/src/bin/client/http3.rs @@ -18,7 +18,7 @@ use std::{ time::Instant, }; -use neqo_common::{event::Provider, hex, Datagram, Header}; +use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_http3::{Error, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; use neqo_transport::{ @@ -149,7 +149,7 @@ impl<'a> super::Handler for Handler<'a> { if let Some(handler) = self.url_handler.stream_handler(stream_id) { handler.process_header_ready(stream_id, fin, headers); } else { - println!("Data on unexpected stream: {stream_id}"); + qwarn!("Data on unexpected stream: {stream_id}"); } if fin { self.url_handler.on_stream_fin(client, stream_id); @@ -159,7 +159,7 @@ impl<'a> super::Handler for Handler<'a> { let mut stream_done = false; match self.url_handler.stream_handler(stream_id) { None => { - println!("Data on unexpected stream: {stream_id}"); + qwarn!("Data on unexpected stream: {stream_id}"); } Some(handler) => loop { let mut data = vec![0; 4096]; @@ -193,7 +193,7 @@ impl<'a> super::Handler for Handler<'a> { Http3ClientEvent::DataWritable { stream_id } => { match self.url_handler.stream_handler(stream_id) { None => { - println!("Data on unexpected stream: {stream_id}"); + qwarn!("Data on unexpected stream: {stream_id}"); } Some(handler) => { handler.process_data_writable(client, stream_id); @@ -206,7 +206,7 @@ impl<'a> super::Handler for Handler<'a> { } Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), _ => { - println!("Unhandled event {event:?}"); + qwarn!("Unhandled event {event:?}"); } } } @@ -279,7 +279,7 @@ struct DownloadStreamHandler { impl StreamHandler for DownloadStreamHandler { fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { if self.out_file.is_none() { - println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); } } @@ -297,18 +297,18 @@ impl StreamHandler for DownloadStreamHandler { } return Ok(true); } else if !output_read_data { - println!("READ[{stream_id}]: {sz} bytes"); + qdebug!("READ[{stream_id}]: {sz} bytes"); } else if let Ok(txt) = String::from_utf8(data.clone()) { - println!("READ[{stream_id}]: {txt}"); + qdebug!("READ[{stream_id}]: {txt}"); } else { - println!("READ[{}]: 0x{}", stream_id, hex(&data)); + qdebug!("READ[{}]: 0x{}", stream_id, hex(&data)); } if fin { if let Some(mut out_file) = self.out_file.take() { out_file.flush()?; } else { - println!(""); + qdebug!(""); } } @@ -327,7 +327,7 @@ struct UploadStreamHandler { impl StreamHandler for UploadStreamHandler { fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { - println!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); } fn process_data_readable( @@ -343,7 +343,7 @@ impl StreamHandler for UploadStreamHandler { let parsed: usize = trimmed_txt.parse().unwrap(); if parsed == self.data.len() { let upload_time = Instant::now().duration_since(self.start); - println!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); + qinfo!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); } } else { panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data)); @@ -411,7 +411,7 @@ impl<'a> UrlHandler<'a> { Priority::default(), ) { Ok(client_stream_id) => { - println!("Successfully created stream id {client_stream_id} for {url}"); + qdebug!("Successfully created stream id {client_stream_id} for {url}"); let handler: Box = StreamHandlerType::make_handler( &self.handler_type, diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/bin/client/main.rs index 3332d79438..63aa12db13 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/bin/client/main.rs @@ -22,7 +22,7 @@ use futures::{ FutureExt, TryFutureExt, }; use neqo_bin::udp; -use neqo_common::{self as common, qdebug, qinfo, qlog::NeqoQlog, Datagram, Role}; +use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, Cipher, ResumptionToken, @@ -103,7 +103,7 @@ impl KeyUpdateState { _ => return Err(e), } } else { - println!("Keys updated"); + qerror!("Keys updated"); self.0 = false; } } @@ -119,6 +119,9 @@ impl KeyUpdateState { #[command(author, version, about, long_about = None)] #[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. 
pub struct Args { + #[command(flatten)] + verbose: clap_verbosity_flag::Verbosity, + #[command(flatten)] shared: neqo_bin::SharedArgs, @@ -211,7 +214,7 @@ impl Args { "http3" => { if let Some(testcase) = &self.test { if testcase.as_str() != "upload" { - eprintln!("Unsupported test case: {testcase}"); + qerror!("Unsupported test case: {testcase}"); exit(127) } @@ -223,7 +226,7 @@ impl Args { } "zerortt" | "resumption" => { if self.urls.len() < 2 { - eprintln!("Warning: resumption tests won't work without >1 URL"); + qerror!("Warning: resumption tests won't work without >1 URL"); exit(127); } self.shared.use_old_http = true; @@ -272,11 +275,11 @@ fn get_output_file( out_path.push(url_path); if all_paths.contains(&out_path) { - eprintln!("duplicate path {}", out_path.display()); + qerror!("duplicate path {}", out_path.display()); return None; } - eprintln!("Saving {url} to {out_path:?}"); + qinfo!("Saving {url} to {out_path:?}"); if let Some(parent) = out_path.parent() { create_dir_all(parent).ok()?; @@ -398,7 +401,7 @@ impl<'a, H: Handler> Runner<'a, H> { self.socket.send(dgram)?; } Output::Callback(new_timeout) => { - qinfo!("Setting timeout of {:?}", new_timeout); + qdebug!("Setting timeout of {:?}", new_timeout); self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); break; } @@ -444,11 +447,12 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { #[tokio::main] async fn main() -> Res<()> { - init(); - let mut args = Args::parse(); + neqo_common::log::init(Some(args.verbose.log_level_filter())); args.update_for_tests(); + init(); + let urls_by_origin = args .urls .clone() @@ -461,14 +465,14 @@ async fn main() -> Res<()> { .filter_map(|(origin, urls)| match origin { Origin::Tuple(_scheme, h, p) => Some(((h, p), urls)), Origin::Opaque(x) => { - eprintln!("Opaque origin {x:?}"); + qwarn!("Opaque origin {x:?}"); None } }); for ((host, port), mut urls) in urls_by_origin { if args.resume && urls.len() < 2 { - eprintln!("Resumption to {host} cannot work without at least 2 URLs."); + qerror!("Resumption to {host} cannot work without at least 2 URLs."); exit(127); } @@ -479,7 +483,7 @@ async fn main() -> Res<()> { ) }); let Some(remote_addr) = remote_addr else { - eprintln!("No compatible address found for: {host}"); + qerror!("No compatible address found for: {host}"); exit(1); }; @@ -490,7 +494,7 @@ async fn main() -> Res<()> { let mut socket = udp::Socket::bind(local_addr)?; let real_local = socket.local_addr().unwrap(); - println!( + qinfo!( "{} Client connecting: {:?} -> {:?}", if args.shared.use_old_http { "H9" } else { "H3" }, real_local, diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/bin/server/main.rs index f694cf98c1..753794d6f6 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/bin/server/main.rs @@ -25,7 +25,7 @@ use futures::{ FutureExt, }; use neqo_bin::udp; -use neqo_common::{hex, qinfo, qwarn, Datagram, Header}; +use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, @@ -89,6 +89,9 @@ impl std::error::Error for ServerError {} #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] struct Args { + #[command(flatten)] + verbose: clap_verbosity_flag::Verbosity, + #[command(flatten)] shared: neqo_bin::SharedArgs, @@ -166,17 +169,17 @@ fn qns_read_response(filename: &str) -> Option> { OpenOptions::new() .read(true) 
.open(&file_path) - .map_err(|_e| eprintln!("Could not open {}", file_path.display())) + .map_err(|_e| qerror!("Could not open {}", file_path.display())) .ok() .and_then(|mut f| { let mut data = Vec::new(); match f.read_to_end(&mut data) { Ok(sz) => { - println!("{} bytes read from {}", sz, file_path.display()); + qinfo!("{} bytes read from {}", sz, file_path.display()); Some(data) } Err(e) => { - eprintln!("Error reading data: {e:?}"); + qerror!("Error reading data: {e:?}"); None } } @@ -312,7 +315,7 @@ impl HttpServer for SimpleServer { headers, fin, } => { - println!("Headers (request={stream} fin={fin}): {headers:?}"); + qdebug!("Headers (request={stream} fin={fin}): {headers:?}"); let post = if let Some(method) = headers.iter().find(|&h| h.name() == ":method") { @@ -428,7 +431,7 @@ impl ServersRunner { pub fn new(args: Args) -> Result { let hosts = args.listen_addresses(); if hosts.is_empty() { - eprintln!("No valid hosts defined"); + qerror!("No valid hosts defined"); return Err(io::Error::new(io::ErrorKind::InvalidInput, "No hosts")); } let sockets = hosts @@ -436,7 +439,7 @@ impl ServersRunner { .map(|host| { let socket = udp::Socket::bind(host)?; let local_addr = socket.local_addr()?; - println!("Server waiting for connection on: {local_addr:?}"); + qinfo!("Server waiting for connection on: {local_addr:?}"); Ok((host, socket)) }) @@ -479,7 +482,7 @@ impl ServersRunner { } if args.ech { let cfg = svr.enable_ech(); - println!("ECHConfigList: {}", hex(cfg)); + qinfo!("ECHConfigList: {}", hex(cfg)); } svr } @@ -507,7 +510,7 @@ impl ServersRunner { socket.send(dgram)?; } Output::Callback(new_timeout) => { - qinfo!("Setting timeout of {:?}", new_timeout); + qdebug!("Setting timeout of {:?}", new_timeout); self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); break; } @@ -573,6 +576,7 @@ async fn main() -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; let mut args = Args::parse(); + neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); init_db(args.db.clone()); diff --git a/neqo-bin/src/bin/server/old_https.rs b/neqo-bin/src/bin/server/old_https.rs index f36c99c484..ec32032a05 100644 --- a/neqo-bin/src/bin/server/old_https.rs +++ b/neqo-bin/src/bin/server/old_https.rs @@ -8,7 +8,7 @@ use std::{ cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, }; -use neqo_common::{event::Provider, hex, qdebug, Datagram}; +use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram}; use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay, Cipher}; use neqo_http3::Error; use neqo_transport::{ @@ -149,7 +149,7 @@ impl Http09Server { } Some(path) => { let path = path.as_str(); - eprintln!("Path = '{path}'"); + qdebug!("Path = '{path}'"); if args.shared.qns_test.is_some() { qns_read_response(path) } else { @@ -164,7 +164,7 @@ impl Http09Server { fn stream_writable(&mut self, stream_id: StreamId, conn: &mut ActiveConnectionRef) { match self.write_state.get_mut(&stream_id) { None => { - eprintln!("Unknown stream {stream_id}, ignoring event"); + qwarn!("Unknown stream {stream_id}, ignoring event"); } Some(stream_state) => { stream_state.writable = true; @@ -177,7 +177,7 @@ impl Http09Server { *offset += sent; self.server.add_to_waiting(conn); if *offset == data.len() { - eprintln!("Sent {sent} on {stream_id}, closing"); + qinfo!("Sent {sent} on {stream_id}, closing"); conn.borrow_mut().stream_close_send(stream_id).unwrap(); 
self.write_state.remove(&stream_id); } else { @@ -202,7 +202,7 @@ impl HttpServer for Http09Server { None => break, Some(e) => e, }; - eprintln!("Event {event:?}"); + qdebug!("Event {event:?}"); match event { ConnectionEvent::NewStream { stream_id } => { self.write_state @@ -222,7 +222,7 @@ impl HttpServer for Http09Server { } ConnectionEvent::StateChange(_) | ConnectionEvent::SendStreamComplete { .. } => (), - e => eprintln!("unhandled event {e:?}"), + e => qwarn!("unhandled event {e:?}"), } } } diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index c5b89be8a6..04028a26bd 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -50,7 +50,7 @@ fn since_start() -> Duration { START_TIME.get_or_init(Instant::now).elapsed() } -pub fn init() { +pub fn init(level_filter: Option) { static INIT_ONCE: Once = Once::new(); if ::log::STATIC_MAX_LEVEL == ::log::LevelFilter::Off { @@ -59,6 +59,9 @@ pub fn init() { INIT_ONCE.call_once(|| { let mut builder = Builder::from_env("RUST_LOG"); + if let Some(filter) = level_filter { + builder.filter_level(filter); + } builder.format(|buf, record| { let elapsed = since_start(); writeln!( @@ -71,9 +74,9 @@ pub fn init() { ) }); if let Err(e) = builder.try_init() { - do_log!(::log::Level::Info, "Logging initialization error {:?}", e); + do_log!(::log::Level::Warn, "Logging initialization error {:?}", e); } else { - do_log!(::log::Level::Info, "Logging initialized"); + do_log!(::log::Level::Debug, "Logging initialized"); } }); } @@ -81,32 +84,32 @@ pub fn init() { #[macro_export] macro_rules! log_invoke { ($lvl:expr, $ctx:expr, $($arg:tt)*) => ( { - ::neqo_common::log::init(); + ::neqo_common::log::init(None); ::neqo_common::do_log!($lvl, "[{}] {}", $ctx, format!($($arg)*)); } ) } #[macro_export] macro_rules! qerror { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Error, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } ); } #[macro_export] macro_rules! qwarn { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Warn, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } ); } #[macro_export] macro_rules! qinfo { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Info, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } ); } #[macro_export] macro_rules! qdebug { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Debug, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } ); } #[macro_export] macro_rules! 
qtrace { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Trace, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } ); } diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 82a6dacd48..90085cb759 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -670,7 +670,7 @@ impl SecretAgent { let info = self.capture_error(SecretAgentInfo::new(self.fd))?; HandshakeState::Complete(info) }; - qinfo!([self], "state -> {:?}", self.state); + qdebug!([self], "state -> {:?}", self.state); Ok(()) } @@ -898,7 +898,7 @@ impl Client { let len = usize::try_from(len).unwrap(); let mut v = Vec::with_capacity(len); v.extend_from_slice(null_safe_slice(token, len)); - qinfo!( + qdebug!( [format!("{fd:p}")], "Got resumption token {}", hex_snip_middle(&v) diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index 287ea2c2af..cfa78df787 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -354,7 +354,7 @@ impl Http3Connection { /// This function creates and initializes, i.e. send stream type, the control and qpack /// streams. fn initialize_http3_connection(&mut self, conn: &mut Connection) -> Res<()> { - qinfo!([self], "Initialize the http3 connection."); + qdebug!([self], "Initialize the http3 connection."); self.control_stream_local.create(conn)?; self.send_settings(); @@ -704,7 +704,7 @@ impl Http3Connection { ); } NewStreamType::Decoder => { - qinfo!([self], "A new remote qpack encoder stream {}", stream_id); + qdebug!([self], "A new remote qpack encoder stream {}", stream_id); self.check_stream_exists(Http3StreamType::Decoder)?; self.recv_streams.insert( stream_id, @@ -715,7 +715,7 @@ impl Http3Connection { ); } NewStreamType::Encoder => { - qinfo!([self], "A new remote qpack decoder stream {}", stream_id); + qdebug!([self], "A new remote qpack decoder stream {}", stream_id); self.check_stream_exists(Http3StreamType::Encoder)?; self.recv_streams.insert( stream_id, @@ -766,7 +766,7 @@ impl Http3Connection { /// This is called when an application closes the connection. pub fn close(&mut self, error: AppError) { - qinfo!([self], "Close connection error {:?}.", error); + qdebug!([self], "Close connection error {:?}.", error); self.state = Http3State::Closing(ConnectionError::Application(error)); if (!self.send_streams.is_empty() || !self.recv_streams.is_empty()) && (error == 0) { qwarn!("close(0) called when streams still active"); @@ -952,7 +952,7 @@ impl Http3Connection { stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self .recv_streams .get_mut(&stream_id) @@ -1091,7 +1091,7 @@ impl Http3Connection { /// This is called when an application wants to close the sending side of a stream. pub fn stream_close_send(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> { - qinfo!([self], "Close the sending side for stream {}.", stream_id); + qdebug!([self], "Close the sending side for stream {}.", stream_id); debug_assert!(self.state.active()); let send_stream = self .send_streams @@ -1402,7 +1402,7 @@ impl Http3Connection { /// `PriorityUpdateRequestPush` which handling is specific to the client and server, we must /// give them to the specific client/server handler. 
fn handle_control_frame(&mut self, f: HFrame) -> Res> { - qinfo!([self], "Handle a control frame {:?}", f); + qdebug!([self], "Handle a control frame {:?}", f); if !matches!(f, HFrame::Settings { .. }) && !matches!( self.settings_state, @@ -1433,7 +1433,7 @@ impl Http3Connection { } fn handle_settings(&mut self, new_settings: HSettings) -> Res<()> { - qinfo!([self], "Handle SETTINGS frame."); + qdebug!([self], "Handle SETTINGS frame."); match &self.settings_state { Http3RemoteSettingsState::NotReceived => { self.set_qpack_settings(&new_settings)?; diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 52572a760d..836816b337 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -590,7 +590,7 @@ impl Http3Client { /// /// An error will be return if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> { - qinfo!([self], "Close sending side stream={}.", stream_id); + qdebug!([self], "Close sending side stream={}.", stream_id); self.base_handler .stream_close_send(&mut self.conn, stream_id) } @@ -652,7 +652,7 @@ impl Http3Client { stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self.base_handler.read_data(&mut self.conn, stream_id, buf); if let Err(e) = &res { if e.connection_error() { diff --git a/neqo-http3/src/connection_server.rs b/neqo-http3/src/connection_server.rs index 097209a226..dcf759f177 100644 --- a/neqo-http3/src/connection_server.rs +++ b/neqo-http3/src/connection_server.rs @@ -98,7 +98,7 @@ impl Http3ServerHandler { /// /// An error will be returned if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId, conn: &mut Connection) -> Res<()> { - qinfo!([self], "Close sending side stream={}.", stream_id); + qdebug!([self], "Close sending side stream={}.", stream_id); self.base_handler.stream_close_send(conn, stream_id)?; self.base_handler.stream_has_pending_data(stream_id); self.needs_processing = true; @@ -408,7 +408,7 @@ impl Http3ServerHandler { stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self.base_handler.read_data(conn, stream_id, buf); if let Err(e) = &res { if e.connection_error() { diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index be58b7e47c..55970849ef 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -271,7 +271,7 @@ impl RecvMessage { } (None, false) => break Ok(()), (Some(frame), fin) => { - qinfo!( + qdebug!( [self], "A new frame has been received: {:?}; state={:?} fin={}", frame, diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index c50e3e056a..15965c44f6 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -6,7 +6,7 @@ use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; -use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; +use neqo_common::{qdebug, qtrace, Encoder, Header, MessageType}; use neqo_qpack::encoder::QPackEncoder; use neqo_transport::{Connection, StreamId}; @@ -119,7 +119,7 @@ impl SendMessage { encoder: Rc>, conn_events: Box, ) -> Self { - qinfo!("Create a request stream_id={}", stream_id); + qdebug!("Create a request stream_id={}", stream_id); Self { state: 
MessageState::WaitingForHeaders, message_type, @@ -193,7 +193,7 @@ impl SendStream for SendMessage { min(buf.len(), available - 9) }; - qinfo!( + qdebug!( [self], "send_request_body: available={} to_send={}.", available, diff --git a/neqo-http3/src/server_events.rs b/neqo-http3/src/server_events.rs index a85ece0bfb..214a48c757 100644 --- a/neqo-http3/src/server_events.rs +++ b/neqo-http3/src/server_events.rs @@ -13,7 +13,7 @@ use std::{ rc::Rc, }; -use neqo_common::{qdebug, qinfo, Encoder, Header}; +use neqo_common::{qdebug, Encoder, Header}; use neqo_transport::{ server::ActiveConnectionRef, AppError, Connection, DatagramTracking, StreamId, StreamType, }; @@ -189,7 +189,7 @@ impl Http3OrWebTransportStream { /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_data(&mut self, data: &[u8]) -> Res { - qinfo!([self], "Set new response."); + qdebug!([self], "Set new response."); self.stream_handler.send_data(data) } @@ -199,7 +199,7 @@ impl Http3OrWebTransportStream { /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_close_send(&mut self) -> Res<()> { - qinfo!([self], "Set new response."); + qdebug!([self], "Set new response."); self.stream_handler.stream_close_send() } } @@ -270,7 +270,7 @@ impl WebTransportRequest { /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn response(&mut self, accept: &WebTransportSessionAcceptAction) -> Res<()> { - qinfo!([self], "Set a response for a WebTransport session."); + qdebug!([self], "Set a response for a WebTransport session."); self.stream_handler .handler .borrow_mut() diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 89be6c4b0f..a63d6e0b38 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -164,7 +164,7 @@ impl CongestionControl for ClassicCongestionControl { let mut is_app_limited = true; let mut new_acked = 0; for pkt in acked_pkts { - qinfo!( + qdebug!( "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}", self, pkt.pn, @@ -198,7 +198,7 @@ impl CongestionControl for ClassicCongestionControl { if is_app_limited { self.cc_algorithm.on_app_limited(); - qinfo!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); + qdebug!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); return; } @@ -208,7 +208,7 @@ impl CongestionControl for ClassicCongestionControl { let increase = min(self.ssthresh - self.congestion_window, self.acked_bytes); self.congestion_window += increase; self.acked_bytes -= increase; - qinfo!([self], "slow start += {}", increase); + qdebug!([self], "slow start += {}", increase); if self.congestion_window == self.ssthresh { // This doesn't look like it is necessary, but it can happen // after persistent congestion. 
@@ -249,7 +249,7 @@ impl CongestionControl for ClassicCongestionControl { QlogMetric::BytesInFlight(self.bytes_in_flight), ], ); - qinfo!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); + qdebug!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); } /// Update congestion controller state based on lost packets. @@ -265,7 +265,7 @@ impl CongestionControl for ClassicCongestionControl { } for pkt in lost_packets.iter().filter(|pkt| pkt.cc_in_flight()) { - qinfo!( + qdebug!( "packet_lost this={:p}, pn={}, ps={}", self, pkt.pn, @@ -286,7 +286,7 @@ impl CongestionControl for ClassicCongestionControl { pto, lost_packets, ); - qinfo!( + qdebug!( "on_packets_lost this={:p}, bytes_in_flight={}, cwnd={}, state={:?}", self, self.bytes_in_flight, @@ -335,7 +335,7 @@ impl CongestionControl for ClassicCongestionControl { } self.bytes_in_flight += pkt.size; - qinfo!( + qdebug!( "packet_sent this={:p}, pn={}, ps={}", self, pkt.pn, @@ -498,7 +498,7 @@ impl ClassicCongestionControl { self.congestion_window = max(cwnd, CWND_MIN); self.acked_bytes = acked_bytes; self.ssthresh = self.congestion_window; - qinfo!( + qdebug!( [self], "Cong event -> recovery; cwnd {}, ssthresh {}", self.congestion_window, diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 8d1c106358..75c3490cba 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -778,7 +778,7 @@ impl Connection { }); enc.encode(extra); let records = s.send_ticket(now, enc.as_ref())?; - qinfo!([self], "send session ticket {}", hex(&enc)); + qdebug!([self], "send session ticket {}", hex(&enc)); self.crypto.buffer_records(records)?; } else { unreachable!(); @@ -824,7 +824,7 @@ impl Connection { /// the connection to fail. However, if no packets have been /// exchanged, it's not OK. pub fn authenticated(&mut self, status: AuthenticationStatus, now: Instant) { - qinfo!([self], "Authenticated {:?}", status); + qdebug!([self], "Authenticated {:?}", status); self.crypto.tls.authenticated(status); let res = self.handshake(now, self.version, PacketNumberSpace::Handshake, None); self.absorb_error(now, res); @@ -1154,7 +1154,7 @@ impl Connection { fn discard_keys(&mut self, space: PacketNumberSpace, now: Instant) { if self.crypto.discard(space) { - qinfo!([self], "Drop packet number space {}", space); + qdebug!([self], "Drop packet number space {}", space); let primary = self.paths.primary(); self.loss_recovery.discard(&primary, space, now); self.acks.drop_space(space); @@ -2307,7 +2307,7 @@ impl Connection { } if encoder.is_empty() { - qinfo!("TX blocked, profile={:?} ", profile); + qdebug!("TX blocked, profile={:?} ", profile); Ok(SendOption::No(profile.paced())) } else { // Perform additional padding for Initial packets as necessary. 
@@ -2351,7 +2351,7 @@ impl Connection { } fn client_start(&mut self, now: Instant) -> Res<()> { - qinfo!([self], "client_start"); + qdebug!([self], "client_start"); debug_assert_eq!(self.role, Role::Client); qlog::client_connection_started(&mut self.qlog, &self.paths.primary()); qlog::client_version_information_initiated(&mut self.qlog, self.conn_params.get_versions()); @@ -2583,7 +2583,7 @@ impl Connection { fn confirm_version(&mut self, v: Version) { if self.version != v { - qinfo!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v); + qdebug!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v); } self.crypto.confirm_version(v); self.version = v; @@ -2882,7 +2882,7 @@ impl Connection { R: IntoIterator> + Debug, R::IntoIter: ExactSizeIterator, { - qinfo!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); + qdebug!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); let (acked_packets, lost_packets) = self.loss_recovery.on_ack_received( &self.paths.primary(), @@ -2936,7 +2936,7 @@ impl Connection { } fn set_connected(&mut self, now: Instant) -> Res<()> { - qinfo!([self], "TLS connection complete"); + qdebug!([self], "TLS connection complete"); if self.crypto.tls.info().map(SecretAgentInfo::alpn).is_none() { qwarn!([self], "No ALPN. Closing connection."); // 120 = no_application_protocol @@ -2979,7 +2979,7 @@ impl Connection { fn set_state(&mut self, state: State) { if state > self.state { - qinfo!([self], "State change from {:?} -> {:?}", self.state, state); + qdebug!([self], "State change from {:?} -> {:?}", self.state, state); self.state = state.clone(); if self.state.closed() { self.streams.clear_streams(); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 9840eaa1e1..acc02172d5 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -317,7 +317,7 @@ impl Crypto { } pub fn acked(&mut self, token: &CryptoRecoveryToken) { - qinfo!( + qdebug!( "Acked crypto frame space={} offset={} length={}", token.space, token.offset, @@ -367,7 +367,7 @@ impl Crypto { }); enc.encode_vvec(new_token.unwrap_or(&[])); enc.encode(t.as_ref()); - qinfo!("resumption token {}", hex_snip_middle(enc.as_ref())); + qdebug!("resumption token {}", hex_snip_middle(enc.as_ref())); Some(ResumptionToken::new(enc.into(), t.expiration_time())) } else { None @@ -433,7 +433,7 @@ impl CryptoDxState { cipher: Cipher, fuzzing: bool, ) -> Self { - qinfo!( + qdebug!( "Making {:?} {} CryptoDxState, v={:?} cipher={}", direction, epoch, @@ -980,7 +980,7 @@ impl CryptoStates { }; for v in versions { - qinfo!( + qdebug!( [self], "Creating initial cipher state v={:?}, role={:?} dcid={}", v, diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index be482c466f..8fabbeb9a3 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -6,7 +6,7 @@ #![allow(clippy::module_name_repetitions)] // This lint doesn't work here. 
-use neqo_common::qinfo; +use neqo_common::qwarn; use neqo_crypto::Error as CryptoError; mod ackrate; @@ -165,7 +165,7 @@ impl Error { impl From for Error { fn from(err: CryptoError) -> Self { - qinfo!("Crypto operation failed {:?}", err); + qwarn!("Crypto operation failed {:?}", err); match err { CryptoError::EchRetry(config) => Self::EchRetry(config), _ => Self::CryptoError(err), diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 4e8d9958ab..50e458ff36 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -216,7 +216,7 @@ impl Paths { /// to a migration from a peer, in which case the old path needs to be probed. #[must_use] fn select_primary(&mut self, path: &PathRef) -> Option { - qinfo!([path.borrow()], "set as primary path"); + qdebug!([path.borrow()], "set as primary path"); let old_path = self.primary.replace(Rc::clone(path)).map(|old| { old.borrow_mut().set_primary(false); old diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index cdc378d71b..0a61097010 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -14,7 +14,7 @@ use std::{ time::Duration, }; -use neqo_common::qinfo; +use neqo_common::qwarn; use crate::packet::PacketNumber; @@ -168,7 +168,7 @@ impl Stats { pub fn pkt_dropped(&mut self, reason: impl AsRef) { self.dropped_rx += 1; - qinfo!( + qwarn!( [self.info], "Dropped received packet: {}; Total: {}", reason.as_ref(), diff --git a/test/upload_test.sh b/test/upload_test.sh index 685a6a926c..8edb55e75d 100755 --- a/test/upload_test.sh +++ b/test/upload_test.sh @@ -2,6 +2,8 @@ set -e +export RUST_LOG=neqo_transport::cc=debug + server_address=127.0.0.1 server_port=4433 upload_size=8388608 From 8775c593e6bfeb0dea92889e99797480fcfb6967 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 22 Mar 2024 14:15:02 +1000 Subject: [PATCH 05/28] No `--all-targets` during test --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index a89e4859d3..e17d563905 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -113,7 +113,7 @@ jobs: - name: Run tests and determine coverage run: | # shellcheck disable=SC2086 - cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --all-targets --features ci --no-fail-fast --lcov --output-path lcov.info + cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --features ci --no-fail-fast --lcov --output-path lcov.info cargo +${{ matrix.rust-toolchain }} bench --features bench --no-run - name: Run client/server transfer From bb3ab602481c0ec61597ef76242a3008108d524b Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Sat, 23 Mar 2024 17:13:01 +1000 Subject: [PATCH 06/28] Don't fail if cached main-branch results don't exist --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 81ef297a9e..134f78f559 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -115,7 +115,7 @@ jobs: echo "### Benchmark results" echo } > results.md - SHA=$(cat target/criterion/baseline-sha.txt) + SHA=$(cat target/criterion/baseline-sha.txt || true) if [ -n "$SHA" ]; then { echo "Performance differences relative to $SHA." 
From 9814bcd907039349fdf08fad39209a38515414d4 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 24 Mar 2024 18:20:27 +0100 Subject: [PATCH 07/28] fix(transport/bench): increase transfer bench noise threshold (#1771) The default Criterion noise threshold is 0.01, i.e. ``` rust /// Changes the noise threshold for benchmarks in this group. The noise threshold /// is used to filter out small changes in performance from one run to the next, even if they /// are statistically significant. Sometimes benchmarking the same code twice will result in /// small but statistically significant differences solely because of noise. This provides a way /// to filter out some of these false positives at the cost of making it harder to detect small /// changes to the true performance of the benchmark. /// /// The default is 0.01, meaning that changes smaller than 1% will be ignored. /// /// # Panics /// /// Panics if the threshold is set to a negative value pub fn noise_threshold(&mut self, threshold: f64) -> &mut Self { ``` https://bheisler.github.io/criterion.rs/criterion/struct.BenchmarkGroup.html#method.noise_threshold Multiple runs of the `neqo-transport` `transfer/*` benchmarks showed between 2% and 3% performance regression on unrelated changes. - https://github.com/mozilla/neqo/actions/runs/8402727182/job/23012699901 - https://github.com/mozilla/neqo/actions/runs/8408164062/job/23024217712 To improve the signal-to-noise ratio of the benchmarks, set the noise threshold to 3%. --- neqo-transport/benches/transfer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs index b13075a4ff..98bd29ff05 100644 --- a/neqo-transport/benches/transfer.rs +++ b/neqo-transport/benches/transfer.rs @@ -23,6 +23,7 @@ const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option>) { let mut group = c.benchmark_group("transfer"); group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap())); + group.noise_threshold(0.03); group.bench_function(label, |b| { b.iter_batched( || { From 6691c1a570a37a388dc2495b0e592c572b4129b6 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 25 Mar 2024 02:38:50 +0200 Subject: [PATCH 08/28] ci: Run a daily baseline benchmark of `main` for the cache (#1770) --- .github/workflows/bench.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 134f78f559..0e215f571f 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -2,6 +2,9 @@ name: Bench on: workflow_call: workflow_dispatch: + schedule: + # Run at 1 AM each day, so there is a `main`-branch baseline in the cache. 
+ - cron: '0 1 * * *' env: CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true CARGO_PROFILE_RELEASE_DEBUG: true From d8eeddaad505710bb60361a4da66bf2743e64e1f Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 25 Mar 2024 17:16:18 +1100 Subject: [PATCH 09/28] Maybe make the timer wheel faster (#1763) * Maybe make the timer wheel faster * Add some panics docs * Work around mozpkix issue * Add a simple benchmark for the timer wheel * Remove workaround * Make clippy happy --------- Co-authored-by: Lars Eggert --- neqo-common/Cargo.toml | 5 ++++ neqo-common/benches/timer.rs | 39 ++++++++++++++++++++++++++++++ neqo-common/src/timer.rs | 46 +++++++++++++++++++++++++++--------- 3 files changed, 79 insertions(+), 11 deletions(-) create mode 100644 neqo-common/benches/timer.rs diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 89eaa53890..069d67b834 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -21,6 +21,7 @@ qlog = { version = "0.12", default-features = false } time = { version = "0.3", default-features = false, features = ["formatting"] } [dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports"] } test-fixture = { path = "../test-fixture" } [features] @@ -33,3 +34,7 @@ features = ["timeapi"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false + +[[bench]] +name = "timer" +harness = false diff --git a/neqo-common/benches/timer.rs b/neqo-common/benches/timer.rs new file mode 100644 index 0000000000..5ac8019db4 --- /dev/null +++ b/neqo-common/benches/timer.rs @@ -0,0 +1,39 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::time::{Duration, Instant}; + +use criterion::{criterion_group, criterion_main, Criterion}; +use neqo_common::timer::Timer; +use test_fixture::now; + +fn benchmark_timer(c: &mut Criterion) { + c.bench_function("drain a timer quickly", |b| { + b.iter_batched_ref( + make_timer, + |(_now, timer)| { + while let Some(t) = timer.next_time() { + assert!(timer.take_next(t).is_some()); + } + }, + criterion::BatchSize::SmallInput, + ); + }); +} + +fn make_timer() -> (Instant, Timer<()>) { + const TIMES: &[u64] = &[1, 2, 3, 5, 8, 13, 21, 34]; + + let now = now(); + let mut timer = Timer::new(now, Duration::from_millis(777), 100); + for &t in TIMES { + timer.add(now + Duration::from_secs(t), ()); + } + (now, timer) +} + +criterion_group!(benches, benchmark_timer); +criterion_main!(benches); diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index a413252e08..3feddb2226 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -5,6 +5,7 @@ // except according to those terms. use std::{ + collections::VecDeque, mem, time::{Duration, Instant}, }; @@ -27,7 +28,7 @@ impl TimerItem { /// points). Time is relative, the wheel has an origin time and it is unable to represent times that /// are more than `granularity * capacity` past that time. pub struct Timer { - items: Vec>>, + items: Vec>>, now: Instant, granularity: Duration, cursor: usize, @@ -55,9 +56,14 @@ impl Timer { /// Return a reference to the time of the next entry. 
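As an aside, here is a self-contained sketch of the wrap-around scan that the rewritten `next_time` and `take_next` below use: start at the cursor's bucket, run to the end of the bucket array, then wrap to the start, instead of recomputing the bucket index on every step. The `first_item` helper and the test data are hypothetical and only illustrate the pattern, not the actual `Timer` API.

```rust
use std::collections::VecDeque;

/// Scan ring-buffer buckets starting at `cursor`, wrapping around, and
/// return the first front item. This mirrors the two-loop scan in the
/// rewritten `next_time`/`take_next`.
fn first_item<T>(buckets: &[VecDeque<T>], cursor: usize) -> Option<&T> {
    buckets[cursor..]
        .iter()
        .chain(&buckets[..cursor])
        .find_map(VecDeque::front)
}

fn main() {
    let mut buckets: Vec<VecDeque<u32>> = vec![VecDeque::new(); 4];
    buckets[1].push_back(9);
    buckets[3].push_back(7);
    // With the cursor on bucket 2, bucket 3 is visited before bucket 1.
    assert_eq!(first_item(&buckets, 2), Some(&7));
}
```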
#[must_use] pub fn next_time(&self) -> Option { - for i in 0..self.items.len() { - let idx = self.bucket(i); - if let Some(t) = self.items[idx].first() { + let idx = self.bucket(0); + for i in idx..self.items.len() { + if let Some(t) = self.items[i].front() { + return Some(t.time); + } + } + for i in 0..idx { + if let Some(t) = self.items[i].front() { return Some(t.time); } } @@ -145,6 +151,9 @@ impl Timer { /// Given knowledge of the time an item was added, remove it. /// This requires use of a predicate that identifies matching items. + /// + /// # Panics + /// Impossible, I think. pub fn remove(&mut self, time: Instant, mut selector: F) -> Option where F: FnMut(&T) -> bool, @@ -167,7 +176,7 @@ impl Timer { break; } if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).item); + return Some(self.items[bucket].remove(i).unwrap().item); } } // ... then forwards. @@ -176,7 +185,7 @@ impl Timer { break; } if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).item); + return Some(self.items[bucket].remove(i).unwrap().item); } } None @@ -185,10 +194,25 @@ impl Timer { /// Take the next item, unless there are no items with /// a timeout in the past relative to `until`. pub fn take_next(&mut self, until: Instant) -> Option { - for i in 0..self.items.len() { - let idx = self.bucket(i); - if !self.items[idx].is_empty() && self.items[idx][0].time <= until { - return Some(self.items[idx].remove(0).item); + fn maybe_take(v: &mut VecDeque>, until: Instant) -> Option { + if !v.is_empty() && v[0].time <= until { + Some(v.pop_front().unwrap().item) + } else { + None + } + } + + let idx = self.bucket(0); + for i in idx..self.items.len() { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; + } + } + for i in 0..idx { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; } } None @@ -201,7 +225,7 @@ impl Timer { if until >= self.now + self.span() { // Drain everything, so a clean sweep. let mut empty_items = Vec::with_capacity(self.items.len()); - empty_items.resize_with(self.items.len(), Vec::default); + empty_items.resize_with(self.items.len(), VecDeque::default); let mut items = mem::replace(&mut self.items, empty_items); self.now = until; self.cursor = 0; From 50876af998b97b4a249be814b32f675704f9714a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 25 Mar 2024 09:50:45 +0200 Subject: [PATCH 10/28] ci: Run `clippy` for all features except `gecko` (#1768) * ci: Run `clippy` for all features except `gecko` * Make clippy happy * Update .github/workflows/check.yml Co-authored-by: Max Inden Signed-off-by: Lars Eggert --------- Signed-off-by: Lars Eggert Co-authored-by: Max Inden --- .github/workflows/check.yml | 2 +- neqo-crypto/src/aead_fuzzing.rs | 3 +++ neqo-crypto/src/lib.rs | 2 +- neqo-transport/benches/range_tracker.rs | 16 +++++++++------- neqo-transport/benches/rx_stream_orderer.rs | 4 ++-- neqo-transport/benches/transfer.rs | 8 ++++---- neqo-transport/src/connection/tests/fuzzing.rs | 2 +- neqo-transport/src/connection/tests/handshake.rs | 5 +++-- 8 files changed, 24 insertions(+), 18 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index e17d563905..9dc8ff2b7f 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -148,7 +148,7 @@ jobs: # respective default features only. Can reveal warnings otherwise # hidden given that a plain cargo clippy combines all features of the # workspace. See e.g. 
https://github.com/mozilla/neqo/pull/1695. - cargo +${{ matrix.rust-toolchain }} hack clippy --all-targets -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} + cargo +${{ matrix.rust-toolchain }} hack clippy --all-targets --feature-powerset --exclude-features gecko -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} if: success() || failure() - name: Check rustdoc links diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_fuzzing.rs index 4e5a6de07f..1f3bfb14bd 100644 --- a/neqo-crypto/src/aead_fuzzing.rs +++ b/neqo-crypto/src/aead_fuzzing.rs @@ -20,6 +20,7 @@ pub struct FuzzingAead { } impl FuzzingAead { + #[allow(clippy::missing_errors_doc)] pub fn new( fuzzing: bool, version: Version, @@ -44,6 +45,7 @@ impl FuzzingAead { } } + #[allow(clippy::missing_errors_doc)] pub fn encrypt<'a>( &self, count: u64, @@ -61,6 +63,7 @@ impl FuzzingAead { Ok(&output[..l + 16]) } + #[allow(clippy::missing_errors_doc)] pub fn decrypt<'a>( &self, count: u64, diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 2ec1b4a3ea..45f61f6127 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -9,7 +9,7 @@ mod aead; #[cfg(feature = "fuzzing")] -mod aead_fuzzing; +pub mod aead_fuzzing; pub mod agent; mod agentio; mod auth; diff --git a/neqo-transport/benches/range_tracker.rs b/neqo-transport/benches/range_tracker.rs index c2f78f4874..ee611cf4ea 100644 --- a/neqo-transport/benches/range_tracker.rs +++ b/neqo-transport/benches/range_tracker.rs @@ -11,30 +11,32 @@ const CHUNK: u64 = 1000; const END: u64 = 100_000; fn build_coalesce(len: u64) -> RangeTracker { let mut used = RangeTracker::default(); - used.mark_acked(0, CHUNK as usize); - used.mark_sent(CHUNK, END as usize); + let chunk = usize::try_from(CHUNK).expect("should fit"); + used.mark_acked(0, chunk); + used.mark_sent(CHUNK, usize::try_from(END).expect("should fit")); // leave a gap or it will coalesce here for i in 2..=len { // These do not get immediately coalesced when marking since they're not at the end or start - used.mark_acked(i * CHUNK, CHUNK as usize); + used.mark_acked(i * CHUNK, chunk); } used } fn coalesce(c: &mut Criterion, count: u64) { + let chunk = usize::try_from(CHUNK).expect("should fit"); c.bench_function( &format!("coalesce_acked_from_zero {count}+1 entries"), |b| { b.iter_batched_ref( || build_coalesce(count), |used| { - used.mark_acked(CHUNK, CHUNK as usize); + used.mark_acked(CHUNK, chunk); let tail = (count + 1) * CHUNK; - used.mark_sent(tail, CHUNK as usize); - used.mark_acked(tail, CHUNK as usize); + used.mark_sent(tail, chunk); + used.mark_acked(tail, chunk); }, criterion::BatchSize::SmallInput, - ) + ); }, ); } diff --git a/neqo-transport/benches/rx_stream_orderer.rs b/neqo-transport/benches/rx_stream_orderer.rs index 0a1e763e97..d58e11ee86 100644 --- a/neqo-transport/benches/rx_stream_orderer.rs +++ b/neqo-transport/benches/rx_stream_orderer.rs @@ -11,14 +11,14 @@ fn rx_stream_orderer() { let mut rx = RxStreamOrderer::new(); let data: &[u8] = &[0; 1337]; - for i in 0..100000 { + for i in 0..100_000 { rx.inbound_frame(i * 1337, data); } } fn criterion_benchmark(c: &mut Criterion) { c.bench_function("RxStreamOrderer::inbound_frame()", |b| { - b.iter(rx_stream_orderer) + b.iter(rx_stream_orderer); }); } diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs index 98bd29ff05..32959f6cb5 100644 --- a/neqo-transport/benches/transfer.rs +++ b/neqo-transport/benches/transfer.rs @@ -20,7 +20,7 @@ const ZERO: Duration = Duration::from_millis(0); const 
JITTER: Duration = Duration::from_millis(10); const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte -fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option>) { +fn benchmark_transfer(c: &mut Criterion, label: &str, seed: &Option>) { let mut group = c.benchmark_group("transfer"); group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap())); group.noise_threshold(0.03); @@ -45,7 +45,7 @@ fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option Date: Mon, 25 Mar 2024 13:33:08 +0200 Subject: [PATCH 11/28] ci: Compare performance to msquic (#1750) * ci: Compare performance to msquic In progress * Make it run * Fixes * Fix path * Use msquic `secnetperf` * Fix actionlint * Sigh * Retry * Better reporting * Debug * Retry * Sigh^2 * Fix * Fixes * Fixes * Update bench.yml * Fix * Squashed commit of the following: commit 9ca5ecc434bcd2609845b3f27d72960bd5c7cb0a Merge: f50f4148 f4083215 Author: Lars Eggert Date: Fri Mar 15 00:12:29 2024 +0200 Merge branch 'main' into ci-bench-cc Signed-off-by: Lars Eggert commit f50f4148bdb04c21e1e0165d72a8181cbbb39274 Merge: 8e5290b2 bc262a53 Author: Lars Eggert Date: Wed Mar 13 07:59:01 2024 +0200 Merge branch 'main' into ci-bench-cc commit 8e5290b213ae4ea9459cb1b71a73a7118d4bdcc4 Merge: f0cd19ec 2ff9742e Author: Lars Eggert Date: Tue Mar 12 22:42:54 2024 +0200 Merge branch 'main' into ci-bench-cc commit f0cd19ecb6196ab861658b675146b57cf20a1f9a Merge: b2bb855b 17c4175b Author: Lars Eggert Date: Tue Mar 12 21:54:08 2024 +0200 Merge branch 'main' into ci-bench-cc commit b2bb855b25349d6c51d5877b18ce631b1c800250 Merge: d072504e 4ea2c566 Author: Lars Eggert Date: Tue Mar 12 17:25:13 2024 +0200 Merge branch 'ci-bench-cc' of github.com:larseggert/neqo into ci-bench-cc commit d072504e5e80e2bb541f948c6c7733c78e93cb4a Author: Lars Eggert Date: Tue Mar 12 17:24:52 2024 +0200 Reorder things so `results.ms` is included in the exported artifact commit 4ea2c5669cfc6bbb9d4b7eddb66ad2f5ab4d6aac Merge: c82ff3af 5c728907 Author: Lars Eggert Date: Tue Mar 12 17:18:37 2024 +0200 Merge branch 'main' into ci-bench-cc commit c82ff3af7dd92aa4b2218992f85faaeff0b7cecb Author: Lars Eggert Date: Tue Mar 12 16:41:59 2024 +0200 `killall` -> `pkill` commit d37e7068e65ed407e0dc38b15e67707dfc628fd5 Author: Lars Eggert Date: Tue Mar 12 16:37:50 2024 +0200 Go back to `killall` commit 11320d0095e4a8d637c40ed7eb57886582ce4a21 Author: Lars Eggert Date: Tue Mar 12 16:11:38 2024 +0200 No -INT commit 407bd4ff8cc34767063b234af1737dfc2ea06339 Author: Lars Eggert Date: Tue Mar 12 14:33:52 2024 +0200 kill -> killall Also reduce test transfer size. 
commit 9d3a8b792e8fed127503fd93ff0406c0b462768d Author: Lars Eggert Date: Tue Mar 12 13:57:51 2024 +0200 Use temp dir, and fix path error commit 84e22060c89c88ca699c185dc21d86d0902c3be9 Merge: 925cc120 b0d816a8 Author: Lars Eggert Date: Tue Mar 12 11:10:41 2024 +0200 Merge branch 'main' into ci-bench-cc commit 925cc120fb1c1330a9b12c9cf3ed6e5107251158 Merge: 3241f931 58890383 Author: Lars Eggert Date: Tue Mar 12 11:05:42 2024 +0200 Merge branch 'main' into ci-bench-cc commit 3241f9312951f205c827397a7fc0c59e5ef0b1ee Merge: 02620a7c d48fbed7 Author: Lars Eggert Date: Tue Mar 12 09:59:24 2024 +0200 Merge branch 'main' into ci-bench-cc Signed-off-by: Lars Eggert commit 02620a7c48bdfcad703a47b25f324554edb5de7c Author: Lars Eggert Date: Tue Mar 12 09:57:33 2024 +0200 Try to kill via `$!` commit b32ce9ef7b93c0960c071594b91cddc9fa674292 Merge: 9ea3a991 db1dbb29 Author: Lars Eggert Date: Tue Mar 12 09:15:18 2024 +0200 Merge branch 'ci-bench-cc' of github.com:larseggert/neqo into ci-bench-cc commit 9ea3a99119e9ad19c0e7157b8218d0a631d1de95 Author: Lars Eggert Date: Tue Mar 12 09:15:05 2024 +0200 Address comments from @martinthomson commit db1dbb29af26054d14a12c8d5b7f074bdc9c0c2b Merge: 681bbb7c 869afeaa Author: Lars Eggert Date: Mon Mar 11 19:33:53 2024 +0200 Merge branch 'main' into ci-bench-cc commit 681bbb7c6787094ff085aa6a65edff1dd0e2dd38 Merge: bd742af1 532dcc5f Author: Lars Eggert Date: Mon Mar 11 18:21:06 2024 +0200 Merge branch 'main' into ci-bench-cc Signed-off-by: Lars Eggert commit bd742af1b5b56422294a01a43ed50388d1bc31f5 Author: Lars Eggert Date: Mon Mar 11 17:00:34 2024 +0200 mkdir -p commit bc7b99fe0b156294590dda561318283693b88df6 Author: Lars Eggert Date: Mon Mar 11 16:29:14 2024 +0200 Fix commit e7bf50959d1c24b430414f38f242dd086b483a83 Merge: de64b3e1 cbd44418 Author: Lars Eggert Date: Mon Mar 11 16:27:56 2024 +0200 Merge branch 'main' into ci-bench-cc commit de64b3e1a0108a35af58cb8d6d433a576eaa13c4 Author: Lars Eggert Date: Mon Mar 11 16:00:19 2024 +0200 Wait for output before continuing commit 12386a32c2179727a6b4779d0ed96373571cfafd Author: Lars Eggert Date: Mon Mar 11 15:25:40 2024 +0200 ci: Benchmark NewReno and Cubic * Fixes * Fix * Fixes * Tweaks * Debug * actionlint * Fix sed * Pacing changed * Fixes * msquic server exits with non-zero * echo * Again * Again * A new hope * Finalize * Fixes * ls * Add comments * Report transfer size * Again * Quiet the server down * Remove debug output * Reformat table * Fix * Fix table * Mac sed != GNU sed * Avoid piping commands into themselves * Fix comment --------- Signed-off-by: Lars Eggert --- .github/actions/rust/action.yml | 2 +- .github/workflows/bench.yml | 158 +++++++++++++++++++-------- neqo-bin/src/bin/server/old_https.rs | 2 +- 3 files changed, 117 insertions(+), 45 deletions(-) diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index bfb09d332d..4b03b37b8d 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -30,7 +30,7 @@ runs: - name: Install Rust tools shell: bash - run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack cargo-mutants + run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack cargo-mutants hyperfine # sccache slows CI down, so we leave it disabled. # Leaving the steps below commented out, so we can re-evaluate enabling it later. 
diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 0e215f571f..80c51c236d 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -12,7 +12,7 @@ env: RUST_BACKTRACE: 1 TOOLCHAIN: nightly RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment, -C force-frame-pointers=yes - PERF_CMD: record -o perf.data -F997 --call-graph fp -g + PERF_OPT: record -F997 --call-graph fp -g jobs: bench: @@ -23,9 +23,17 @@ jobs: shell: bash steps: - - name: Checkout + - name: Checkout neqo uses: actions/checkout@v4 + - name: Checkout msquic + uses: actions/checkout@v4 + with: + repository: microsoft/msquic + ref: main + path: msquic + submodules: true + - name: Set PATH run: echo "/home/bench/.cargo/bin" >> "${GITHUB_PATH}" @@ -38,10 +46,17 @@ jobs: - name: Fetch and build NSS and NSPR uses: ./.github/actions/nss - - name: Build + - name: Build neqo run: | cargo "+$TOOLCHAIN" bench --features bench --no-run - cargo "+$TOOLCHAIN" build --release --bin neqo-client --bin neqo-server + cargo "+$TOOLCHAIN" build --release + + - name: Build msquic + run: | + mkdir -p msquic/build + cd msquic/build + cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DQUIC_BUILD_TOOLS=1 -DQUIC_BUILD_PERF=1 .. + cmake --build . - name: Download cached main-branch results id: criterion-cache @@ -61,56 +76,107 @@ jobs: taskset -c 0 nice -n -20 \ cargo "+$TOOLCHAIN" bench --features bench -- --noplot | tee results.txt - # Pin the transfer benchmark to core 0 and run it at elevated priority inside perf. - # Work around https://github.com/flamegraph-rs/flamegraph/issues/248 by passing explicit perf arguments. - - name: Profile cargo bench transfer - run: | - taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" --features bench --bench transfer -- \ - --bench --exact "Run multiple transfers with varying seeds" --noplot - - - name: Profile client/server transfer - run: | - { mkdir server; \ - cd server; \ - taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" \ - --bin neqo-server -- --db ../test-fixture/db "$HOST:4433" || true; } & - mkdir client; \ - cd client; \ - time taskset -c 1 nice -n -20 \ - cargo "+$TOOLCHAIN" flamegraph -v -c "$PERF_CMD" \ - --bin neqo-client -- --output-dir . "https://$HOST:4433/$SIZE" - killall -INT neqo-server - cd ${{ github.workspace }} - [ "$(wc -c < client/"$SIZE")" -eq "$SIZE" ] || exit 1 + # Compare various configurations of neqo against msquic, and gather perf data + # during the hyperfine runs. + - name: Compare neqo and msquic env: - HOST: localhost - SIZE: 1073741824 # 1 GB + HOST: 127.0.0.1 + PORT: 4433 + SIZE: 134217728 # 128 MB + run: | + TMP=$(mktemp -d) + # Make a cert and key for msquic. + openssl req -nodes -new -x509 -keyout "$TMP/key" -out "$TMP/cert" -subj "/CN=DOMAIN" 2>/dev/null + # Make a test file for msquic to serve. + truncate -s "$SIZE" "$TMP/$SIZE" + # Define the commands to run for each client and server. + declare -A client_cmd=( + ["neqo"]="target/release/neqo-client _cc _pacing --output-dir . 
-o -a hq-interop -Q 1 https://$HOST:$PORT/$SIZE" + ["msquic"]="msquic/build/bin/Release/quicinterop -test:D -custom:$HOST -port:$PORT -urls:https://$HOST:$PORT/$SIZE" + ) + declare -A server_cmd=( + ["neqo"]="target/release/neqo-server _cc _pacing -o -a hq-interop -Q 1 $HOST:$PORT 2> /dev/null" + ["msquic"]="msquic/build/bin/Release/quicinteropserver -root:$TMP -listen:$HOST -port:$PORT -file:$TMP/cert -key:$TMP/key -noexit > /dev/null || true" + ) + + # Replace various placeholders in the commands with the actual values. + # Also generate an extension to append to the file name. + function transmogrify { + CMD=$1 + local cc=$2 + local pacing=$3 + if [ "$cc" != "" ]; then + CMD=${CMD//_cc/--cc $cc} + EXT="-$cc" + fi + if [ "$pacing" == "on" ]; then + CMD=${CMD//_pacing/} + EXT="$EXT-pacing" + else + CMD=${CMD//_pacing/--no-pacing} + EXT="$EXT-nopacing" + fi + } + + for server in msquic neqo; do + for client in msquic neqo; do + # msquic doesn't let us configure the congestion control or pacing. + if [ "$client" == "msquic" ] && [ "$server" == "msquic" ]; then + cc_opt=("") + pacing_opt=("") + else + cc_opt=("reno" "cubic") + pacing_opt=("on" "") + fi + for cc in "${cc_opt[@]}"; do + for pacing in "${pacing_opt[@]}"; do + # Make a tag string for this test, for the results. + TAG="$client,$server,$cc,$pacing" + echo "Running benchmarks for $TAG" | tee -a comparison.txt + transmogrify "${server_cmd[$server]}" "$cc" "$pacing" + # shellcheck disable=SC2086 + taskset -c 0 nice -n -20 \ + perf $PERF_OPT -o "$client-$server$EXT.server.perf" $CMD & + PID=$! + transmogrify "${client_cmd[$client]}" "$cc" "$pacing" + # shellcheck disable=SC2086 + taskset -c 1 nice -n -20 \ + perf $PERF_OPT -o "$client-$server$EXT.client.perf" \ + hyperfine -N --output null -w 1 -s "sleep 1" -n "$TAG" -u millisecond --export-markdown step.md "$CMD" | + tee -a comparison.txt + echo >> comparison.txt + kill $PID + cat step.md >> steps.md + # Sanity check the size of the last retrieved file. + [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 + done + done + done + done + # Merge the results tables generated by hyperfine into a single table. + echo "Transfer of $SIZE bytes over loopback." > comparison.md + awk '(!/^\| Command/ || !c++) && (!/^\|:/ || !d++)' < steps.md |\ + sed -E 's/`//g; s/^\|:/\|:---\|:---\|:---\|:/g; s/,/ \| /g; s/^\| Command/\| Client \| Server \| CC \| Pacing/g' >> comparison.md + rm -r "$TMP" # Re-enable turboboost, hyperthreading and use powersave governor. 
- name: Restore machine run: sudo /root/bin/unprep.sh if: success() || failure() || cancelled() - - name: Convert for profiler.firefox.com + - name: Post-process perf data run: | - perf script -i perf.data -F +pid > transfer.perf & - perf script -i client/perf.data -F +pid > client.perf & - perf script -i server/perf.data -F +pid > server.perf & + for f in *.perf; do + # Convert for profiler.firefox.com + perf script -i "$f" -F +pid > "$f.fx" & + # Generate perf reports + perf report -i "$f" --no-children --stdio > "$f.txt" & + # Generate flamegraphs + flamegraph --perfdata "$f" --palette rust -o "${f//.perf/.svg}" & + done wait - mv flamegraph.svg transfer.svg - mv client/flamegraph.svg client.svg - mv server/flamegraph.svg server.svg rm neqo.svg - - name: Generate perf reports - run: | - perf report -i perf.data --no-children --stdio > transfer.perf.txt & - perf report -i client/perf.data --no-children --stdio > client.perf.txt & - perf report -i server/perf.data --no-children --stdio > server.perf.txt & - wait - - name: Format results as Markdown id: results run: | @@ -132,6 +198,11 @@ jobs: -e 's/^([a-z0-9].*)$/* **\1**/gi' \ -e 's/(change:[^%]*% )([^%]*%)(.*)/\1**\2**\3/gi' \ >> results.md + { + echo "### Client/server transfer results" + cat comparison.md + } >> results.md + cat results.md > "$GITHUB_STEP_SUMMARY" - name: Remember main-branch push URL if: github.ref == 'refs/heads/main' @@ -158,6 +229,7 @@ jobs: path: | *.svg *.perf + *.perf.fx *.txt results.* target/criterion* diff --git a/neqo-bin/src/bin/server/old_https.rs b/neqo-bin/src/bin/server/old_https.rs index ec32032a05..505a16578f 100644 --- a/neqo-bin/src/bin/server/old_https.rs +++ b/neqo-bin/src/bin/server/old_https.rs @@ -202,7 +202,6 @@ impl HttpServer for Http09Server { None => break, Some(e) => e, }; - qdebug!("Event {event:?}"); match event { ConnectionEvent::NewStream { stream_id } => { self.write_state @@ -221,6 +220,7 @@ impl HttpServer for Http09Server { .unwrap(); } ConnectionEvent::StateChange(_) + | ConnectionEvent::SendStreamCreatable { .. } | ConnectionEvent::SendStreamComplete { .. } => (), e => qwarn!("unhandled event {e:?}"), } From 57dd4ec34e817f3edeb243ee6ea4934982f4147a Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 26 Mar 2024 16:57:32 +0100 Subject: [PATCH 12/28] refactor(github/interop): use grep instead of awk (#1747) * refactor(github/interop): use grep instead of awk * Simplify regex --- .github/actions/quic-interop-runner/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml index ef4865bde6..30c7f0d8d6 100644 --- a/.github/actions/quic-interop-runner/action.yml +++ b/.github/actions/quic-interop-runner/action.yml @@ -92,8 +92,8 @@ runs: run: | echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment echo '' >> comment - # Ignore all, but table, which starts with "|:--". - cat quic-interop-runner/summary | awk '/^\|:--/{flag=1} flag' >> comment + # Ignore all, but table, which starts with "|". + grep -E '^\|' quic-interop-runner/summary >> comment echo '' >> comment shell: bash From 1af91f506120a3d68d6c938ddc42ea3a73218507 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 02:37:19 +0200 Subject: [PATCH 13/28] test: Let packets be modified with a closure during tests (#1773) * test: Let packets be modified with a closure during tests Broken out of #1678 to reduce the size of that PR. 
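A minimal, self-contained sketch of the closure-based modifier pattern this change introduces is below. The `Datagram` stand-in and the `deliver` helper are invented for illustration; in the real tests the closure is handed to helpers such as `handshake_with_modifier` or `send_something_with_modifier` from the diff, with `Some` serving as the identity modifier.

```rust
// Stand-in type; the real code uses neqo_common::Datagram.
struct Datagram(Vec<u8>);

// The blanket-impl pattern from the diff: any closure with the right
// signature automatically implements the trait, so tests can pass `Some`
// (identity), a mutating closure, or one that returns `None` to drop the
// packet.
trait DatagramModifier: FnMut(Datagram) -> Option<Datagram> {}
impl<T> DatagramModifier for T where T: FnMut(Datagram) -> Option<Datagram> {}

// Hypothetical helper standing in for the `*_with_modifier` test functions.
fn deliver(mut modifier: impl DatagramModifier, d: Datagram) -> Option<Datagram> {
    modifier(d)
}

fn main() {
    // Identity: keep every packet.
    assert!(deliver(Some, Datagram(vec![1])).is_some());
    // Drop every packet.
    assert!(deliver(|_| None, Datagram(vec![2])).is_none());
    // Drop every second packet (hypothetical, e.g. for loss tests).
    let mut counter = 0;
    let mut drop_odd = |d: Datagram| {
        counter += 1;
        (counter % 2 == 0).then_some(d)
    };
    assert!(deliver(&mut drop_odd, Datagram(vec![3])).is_none());
}
```

Because of the blanket impl, any closure with the right signature qualifies, so individual tests never need to name the trait explicitly.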
* Support dropping packets via `DatagramModifier` --- neqo-transport/src/connection/tests/mod.rs | 83 +++++++++++++++++++--- 1 file changed, 73 insertions(+), 10 deletions(-) diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index b6ce08f8d1..6f598fb23e 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -170,12 +170,17 @@ impl crate::connection::test_internal::FrameWriter for PingWriter { } } +trait DatagramModifier: FnMut(Datagram) -> Option {} + +impl DatagramModifier for T where T: FnMut(Datagram) -> Option {} + /// Drive the handshake between the client and server. -fn handshake( +fn handshake_with_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + mut modifier: impl DatagramModifier, ) -> Instant { let mut a = client; let mut b = server; @@ -212,7 +217,11 @@ fn handshake( did_ping[a.role()] = true; } assert!(had_input || output.is_some()); - input = output; + if let Some(d) = output { + input = modifier(d); + } else { + input = output; + } qtrace!("handshake: t += {:?}", rtt / 2); now += rtt / 2; mem::swap(&mut a, &mut b); @@ -223,6 +232,15 @@ fn handshake( now } +fn handshake( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + handshake_with_modifier(client, server, now, rtt, Some) +} + fn connect_fail( client: &mut Connection, server: &mut Connection, @@ -234,11 +252,12 @@ fn connect_fail( assert_error(server, &ConnectionError::Transport(server_error)); } -fn connect_with_rtt( +fn connect_with_rtt_and_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + modifier: impl DatagramModifier, ) -> Instant { fn check_rtt(stats: &Stats, rtt: Duration) { assert_eq!(stats.rtt, rtt); @@ -246,7 +265,7 @@ fn connect_with_rtt( let n = stats.frame_rx.ack + usize::from(stats.rtt_init_guess); assert_eq!(stats.rttvar, rttvar_after_n_updates(n, rtt)); } - let now = handshake(client, server, now, rtt); + let now = handshake_with_modifier(client, server, now, rtt, modifier); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); @@ -255,6 +274,15 @@ fn connect_with_rtt( now } +fn connect_with_rtt( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + connect_with_rtt_and_modifier(client, server, now, rtt, Some) +} + fn connect(client: &mut Connection, server: &mut Connection) { connect_with_rtt(client, server, now(), Duration::new(0, 0)); } @@ -301,8 +329,13 @@ fn assert_idle(client: &mut Connection, server: &mut Connection, rtt: Duration, } /// Connect with an RTT and then force both peers to be idle. -fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { - let now = connect_with_rtt(client, server, now(), rtt); +fn connect_rtt_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + rtt: Duration, + modifier: impl DatagramModifier, +) -> Instant { + let now = connect_with_rtt_and_modifier(client, server, now(), rtt, modifier); assert_idle(client, server, rtt, now); // Drain events from both as well. 
_ = client.events().count(); @@ -311,8 +344,20 @@ fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Durat now } +fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { + connect_rtt_idle_with_modifier(client, server, rtt, Some) +} + +fn connect_force_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + modifier: impl DatagramModifier, +) { + connect_rtt_idle_with_modifier(client, server, Duration::new(0, 0), modifier); +} + fn connect_force_idle(client: &mut Connection, server: &mut Connection) { - connect_rtt_idle(client, server, Duration::new(0, 0)); + connect_force_idle_with_modifier(client, server, Some); } fn fill_stream(c: &mut Connection, stream: StreamId) { @@ -524,12 +569,14 @@ fn assert_full_cwnd(packets: &[Datagram], cwnd: usize) { } /// Send something on a stream from `sender` to `receiver`, maybe allowing for pacing. +/// Takes a modifier function that can be used to modify the datagram before it is sent. /// Return the resulting datagram and the new time. #[must_use] -fn send_something_paced( +fn send_something_paced_with_modifier( sender: &mut Connection, mut now: Instant, allow_pacing: bool, + mut modifier: impl DatagramModifier, ) -> (Datagram, Instant) { let stream_id = sender.stream_create(StreamType::UniDi).unwrap(); assert!(sender.stream_send(stream_id, DEFAULT_STREAM_DATA).is_ok()); @@ -544,16 +591,32 @@ fn send_something_paced( .dgram() .expect("send_something: should have something to send") } - Output::Datagram(d) => d, + Output::Datagram(d) => modifier(d).unwrap(), Output::None => panic!("send_something: got Output::None"), }; (dgram, now) } +fn send_something_paced( + sender: &mut Connection, + now: Instant, + allow_pacing: bool, +) -> (Datagram, Instant) { + send_something_paced_with_modifier(sender, now, allow_pacing, Some) +} + +fn send_something_with_modifier( + sender: &mut Connection, + now: Instant, + modifier: impl DatagramModifier, +) -> Datagram { + send_something_paced_with_modifier(sender, now, false, modifier).0 +} + /// Send something on a stream from `sender` to `receiver`. /// Return the resulting datagram. fn send_something(sender: &mut Connection, now: Instant) -> Datagram { - send_something_paced(sender, now, false).0 + send_something_with_modifier(sender, now, Some) } /// Send something on a stream from `sender` to `receiver`. From 37be89473d72a1504894be36d387cda339c342af Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 09:25:00 +0200 Subject: [PATCH 14/28] feat: TOS improvements (#1774) * feat: TOS improvements Broken out of #1678 to reduce the size of that PR. 
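A short usage sketch of the new setters, assuming the `neqo-common` crate as modified by this patch is available as a dependency; the expected byte values follow the unit tests added in the diff below. The resulting `IpTos` could then be stamped onto a packet with the new `Datagram::set_tos`.

```rust
use neqo_common::{IpTos, IpTosDscp, IpTosEcn};

fn main() {
    // Build a TOS byte from a DSCP/ECN pair, then flip only the ECN bits,
    // which is what the new `set_ecn`/`set_dscp` setters are for.
    let mut tos: IpTos = (IpTosDscp::Af41, IpTosEcn::NotEct).into();
    tos.set_ecn(IpTosEcn::Ce);
    assert_eq!(u8::from(tos), 0b1000_1011); // AF41 (0b1000_10xx) plus CE (0b11)

    // The DSCP half can be updated independently; the CE marking is kept.
    tos.set_dscp(IpTosDscp::Le);
    assert_eq!(u8::from(tos), 0b0000_0111);
}
```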
* Remove function --- neqo-common/src/datagram.rs | 7 ++-- neqo-common/src/tos.rs | 48 ++++++++++++++++++++------- neqo-transport/src/connection/dump.rs | 13 ++++++-- neqo-transport/src/connection/mod.rs | 4 ++- 4 files changed, 55 insertions(+), 17 deletions(-) diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index 9cebb64ea5..cc2cb7d113 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -53,6 +53,10 @@ impl Datagram { pub fn ttl(&self) -> Option { self.ttl } + + pub fn set_tos(&mut self, tos: IpTos) { + self.tos = tos; + } } impl Deref for Datagram { @@ -90,8 +94,7 @@ use test_fixture::datagram; fn fmt_datagram() { let d = datagram([0; 1].to_vec()); assert_eq!( - format!("{d:?}"), + &format!("{d:?}"), "Datagram IpTos(Cs0, NotEct) TTL Some(128) [fe80::1]:443->[fe80::1]:443: [1]: 00" - .to_string() ); } diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs index 3610f72750..533c5447e2 100644 --- a/neqo-common/src/tos.rs +++ b/neqo-common/src/tos.rs @@ -36,7 +36,7 @@ impl From for u8 { impl From for IpTosEcn { fn from(v: u8) -> Self { - match v & 0b11 { + match v & 0b0000_0011 { 0b00 => IpTosEcn::NotEct, 0b01 => IpTosEcn::Ect1, 0b10 => IpTosEcn::Ect0, @@ -47,8 +47,8 @@ impl From for IpTosEcn { } impl From for IpTosEcn { - fn from(value: IpTos) -> Self { - IpTosEcn::from(value.0 & 0x3) + fn from(v: IpTos) -> Self { + IpTosEcn::from(u8::from(v)) } } @@ -166,14 +166,13 @@ impl From for IpTosDscp { } impl From for IpTosDscp { - fn from(value: IpTos) -> Self { - IpTosDscp::from(value.0 & 0xfc) + fn from(v: IpTos) -> Self { + IpTosDscp::from(u8::from(v)) } } /// The type-of-service field in an IP packet. -#[allow(clippy::module_name_repetitions)] -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, PartialEq, Eq, Default)] pub struct IpTos(u8); impl From for IpTos { @@ -215,15 +214,19 @@ impl From for IpTos { impl Debug for IpTos { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("IpTos") - .field(&IpTosDscp::from(self.0 & 0xfc)) - .field(&IpTosEcn::from(self.0 & 0x3)) + .field(&IpTosDscp::from(*self)) + .field(&IpTosEcn::from(*self)) .finish() } } -impl Default for IpTos { - fn default() -> Self { - (IpTosDscp::default(), IpTosEcn::default()).into() +impl IpTos { + pub fn set_ecn(&mut self, ecn: IpTosEcn) { + self.0 = u8::from(IpTosDscp::from(*self)) | u8::from(ecn); + } + + pub fn set_dscp(&mut self, dscp: IpTosDscp) { + self.0 = u8::from(IpTosEcn::from(*self)) | u8::from(dscp); } } @@ -322,4 +325,25 @@ mod tests { assert_eq!(tos, u8::from(iptos)); assert_eq!(IpTos::from(tos), iptos); } + + #[test] + fn iptos_to_iptosdscp() { + let tos = IpTos::from((IpTosDscp::Af41, IpTosEcn::NotEct)); + let dscp = IpTosDscp::from(tos); + assert_eq!(dscp, IpTosDscp::Af41); + } + + #[test] + fn tos_modify_ecn() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::NotEct).into(); + iptos.set_ecn(IpTosEcn::Ce); + assert_eq!(u8::from(iptos), 0b1000_1011); + } + + #[test] + fn tos_modify_dscp() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::Ect1).into(); + iptos.set_dscp(IpTosDscp::Le); + assert_eq!(u8::from(iptos), 0b0000_0101); + } } diff --git a/neqo-transport/src/connection/dump.rs b/neqo-transport/src/connection/dump.rs index 34ac58f55e..12d337c570 100644 --- a/neqo-transport/src/connection/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -9,7 +9,7 @@ use std::fmt::Write; -use neqo_common::{qdebug, Decoder}; +use neqo_common::{qdebug, Decoder, IpTos}; use crate::{ connection::Connection, @@ -26,6 
+26,7 @@ pub fn dump_packet( pt: PacketType, pn: PacketNumber, payload: &[u8], + tos: IpTos, ) { if log::STATIC_MAX_LEVEL == log::LevelFilter::Off || !log::log_enabled!(log::Level::Debug) { return; @@ -43,5 +44,13 @@ pub fn dump_packet( write!(&mut s, "\n {} {}", dir, &x).unwrap(); } } - qdebug!([conn], "pn={} type={:?} {}{}", pn, pt, path.borrow(), s); + qdebug!( + [conn], + "pn={} type={:?} {} {:?}{}", + pn, + pt, + path.borrow(), + tos, + s + ); } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 75c3490cba..535d3f4084 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -19,7 +19,7 @@ use std::{ use neqo_common::{ event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, - qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, + qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, IpTos, Role, }; use neqo_crypto::{ agent::CertificateInfo, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, @@ -1492,6 +1492,7 @@ impl Connection { payload.packet_type(), payload.pn(), &payload[..], + d.tos(), ); qlog::packet_received(&mut self.qlog, &packet, &payload); @@ -2255,6 +2256,7 @@ impl Connection { pt, pn, &builder.as_ref()[payload_start..], + IpTos::default(), // TODO: set from path ); qlog::packet_sent( &mut self.qlog, From 6a51a35dd63aa9833f5a4bcf31900acb7bbf64d6 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 27 Mar 2024 08:39:30 +0100 Subject: [PATCH 15/28] perf(bin/server): increase msg size and don't allocate msg per resp (#1772) * perf(bin/server): increase msg size and don't allocate msg per resp Previously `neqo-server` would respond to a request by repeatedly sending a static 440 byte message (Major-General's Song). Instead of sending 440 bytes, increase the batch size to 4096 bytes. This also matches the `neqo-client` receive buffer size. https://github.com/mozilla/neqo/blob/76630a5ebb6c6b94de6a40cf3f439b9a846f6ab7/neqo-bin/src/bin/client/http3.rs#L165 Previously `ResponseData::repeat` would convert the provided `buf: &[u8]` to ` Vec`, i.e. re-allocate the buf. Instead keep a reference to the original buf, thus removing the allocation. * Remove unnecessary into() --- neqo-bin/src/bin/server/main.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/bin/server/main.rs index 753794d6f6..62eb19e78c 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/bin/server/main.rs @@ -5,6 +5,7 @@ // except according to those terms. 
use std::{ + borrow::Cow, cell::RefCell, cmp::min, collections::HashMap, @@ -196,7 +197,7 @@ trait HttpServer: Display { } struct ResponseData { - data: Vec, + data: Cow<'static, [u8]>, offset: usize, remaining: usize, } @@ -211,7 +212,7 @@ impl From> for ResponseData { fn from(data: Vec) -> Self { let remaining = data.len(); Self { - data, + data: Cow::Owned(data), offset: 0, remaining, } @@ -219,9 +220,9 @@ impl From> for ResponseData { } impl ResponseData { - fn repeat(buf: &[u8], total: usize) -> Self { + fn repeat(buf: &'static [u8], total: usize) -> Self { Self { - data: buf.to_owned(), + data: Cow::Borrowed(buf), offset: 0, remaining: total, } @@ -260,14 +261,7 @@ struct SimpleServer { } impl SimpleServer { - const MESSAGE: &'static [u8] = b"I am the very model of a modern Major-General,\n\ - I've information vegetable, animal, and mineral,\n\ - I know the kings of England, and I quote the fights historical\n\ - From Marathon to Waterloo, in order categorical;\n\ - I'm very well acquainted, too, with matters mathematical,\n\ - I understand equations, both the simple and quadratical,\n\ - About binomial theorem, I'm teeming with a lot o' news,\n\ - With many cheerful facts about the square of the hypotenuse.\n"; + const MESSAGE: &'static [u8] = &[0; 4096]; pub fn new( args: &Args, From 47dfb3baa50ef89751cfc6818d6e2ee79616a66f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 09:52:23 +0200 Subject: [PATCH 16/28] build: Use system NSS when possible (#1739) * build: Use system-installed NSS instead of building our own Fixes #1711 * Update CI * Fix docs * Fix Dockerfile * Fix * build-essential * Try and search for nss * Try to get newest versions * More fixes * Restore Windows link.exe fix * Install pkg-config * Remove MSYS2 linker * Retain ability to build NSS from source * Update Linux instructions * Try and find MSYS2 library path * Retry * Again * Again * Again * Again * Again * Again * Again * Again * Again * Again * Revert many things, keep building NSS from source unless system version is OK * Fixes * Fixes * debug * Debug * Fixes * Compare versions with the `semver` crate * Use NSS version from code in CI * File has other name * Update .github/actions/nss/action.yml Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review comments. Not ready yet. Need to determine what to do in `nss_dir()`. See comments. 
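A hedged sketch, not the actual `build.rs` logic: how a minimum-version gate could be expressed with the `semver` crate that this patch adds as a build dependency. The `MINIMUM_NSS_VERSION` constant and its `"3.98"` value are assumptions standing in for the new `neqo-crypto/src/min_version.rs`, and `system_nss_is_new_enough` is an invented helper; the version string would come from `pkg-config --modversion nss`, as in the workflow change above.

```rust
use semver::{Version, VersionReq};

const MINIMUM_NSS_VERSION: &str = "3.98"; // assumed value for illustration

fn system_nss_is_new_enough(reported: &str) -> bool {
    // pkg-config reports e.g. "3.98" or "3.98.1"; pad to MAJOR.MINOR.PATCH
    // so it parses as a full semver Version.
    let req = VersionReq::parse(&format!(">={MINIMUM_NSS_VERSION}"))
        .expect("minimum version must be valid");
    let mut v = reported.trim().to_string();
    while v.split('.').count() < 3 {
        v.push_str(".0");
    }
    Version::parse(&v).map_or(false, |v| req.matches(&v))
}

fn main() {
    assert!(system_nss_is_new_enough("3.99"));
    assert!(!system_nss_is_new_enough("3.90.2"));
}
```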
* Update neqo-crypto/build.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review * Updates to README * Remove `nss_dir()` --------- Signed-off-by: Lars Eggert Co-authored-by: Martin Thomson --- .github/actions/nss/action.yml | 33 +++++++++ .github/workflows/check.yml | 22 ++---- README.md | 114 ++++++++++++++++++----------- neqo-crypto/Cargo.toml | 1 + neqo-crypto/bindings/bindings.toml | 5 -- neqo-crypto/bindings/mozpkix.hpp | 1 - neqo-crypto/build.rs | 105 +++++++++++++++----------- neqo-crypto/min_version.txt | 1 + neqo-crypto/src/err.rs | 30 +++++++- neqo-crypto/src/lib.rs | 3 +- neqo-crypto/src/min_version.rs | 9 +++ 11 files changed, 214 insertions(+), 110 deletions(-) delete mode 100644 neqo-crypto/bindings/mozpkix.hpp create mode 100644 neqo-crypto/min_version.txt create mode 100644 neqo-crypto/src/min_version.rs diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 23232ebc13..ec6f13eaf8 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -16,16 +16,46 @@ inputs: runs: using: composite steps: + - name: Check system NSS version + shell: bash + run: | + if ! command -v pkg-config &> /dev/null; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + if ! pkg-config --exists nss; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_VERSION="$(pkg-config --modversion nss)" + if [ "$?" -ne 0 ]; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_MAJOR=$(echo "$NSS_VERSION" | cut -d. -f1) + NSS_MINOR=$(echo "$NSS_VERSION" | cut -d. -f2) + REQ_NSS_MAJOR=$(cat neqo-crypto/min_version.txt | cut -d. -f1) + REQ_NSS_MINOR=$(cat neqo-crypto/min_version.txt | cut -d. -f2) + if [ "$NSS_MAJOR" -lt "REQ_NSS_MAJOR" ] || [ "$NSS_MAJOR" -eq "REQ_NSS_MAJOR" -a "$NSS_MINOR" -lt "REQ_NSS_MINOR"]; then + echo "System NSS is too old: $NSS_VERSION" + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + echo "System NSS is suitable: $NSS_VERSION" + echo "BUILD_NSS=0" >> "$GITHUB_ENV" + # Ideally, we'd use this. But things are sufficiently flaky that we're better off # trying both hg and git. Leaving this here in case we want to re-try in the future. 
# # - name: Checkout NSPR + # if: env.BUILD_NSS == '1' # uses: actions/checkout@v4 # with: # repository: "nss-dev/nspr" # path: ${{ github.workspace }}/nspr # - name: Checkout NSS + # if: env.BUILD_NSS == '1' # uses: actions/checkout@v4 # with: # repository: "nss-dev/nss" @@ -33,18 +63,21 @@ runs: - name: Checkout NSPR shell: bash + if: env.BUILD_NSS == '1' run: | hg clone https://hg.mozilla.org/projects/nspr "${{ github.workspace }}/nspr" || \ git clone --depth=1 https://github.com/nss-dev/nspr "${{ github.workspace }}/nspr" - name: Checkout NSS shell: bash + if: env.BUILD_NSS == '1' run: | hg clone https://hg.mozilla.org/projects/nss "${{ github.workspace }}/nss" || \ git clone --depth=1 https://github.com/nss-dev/nss "${{ github.workspace }}/nss" - name: Build shell: bash + if: env.BUILD_NSS == '1' run: | if [ "${{ inputs.type }}" != "Debug" ]; then # We want to do an optimized build for accurate CPU profiling, but diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 9dc8ff2b7f..4e47961d8e 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -49,33 +49,21 @@ jobs: sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - # In addition to installing dependencies, first make sure System Integrity Protection (SIP) - # is disabled on this MacOS runner. This is needed to allow the NSS libraries to be loaded - # from the build directory and avoid various other test failures. This seems to always be - # the case on any macos-13 runner, but not consistently on macos-latest (which is currently - # macos-12, FWIW). - name: Install dependencies (MacOS) if: runner.os == 'MacOS' run: | - csrutil status | grep disabled - brew install ninja mercurial llvm + brew update + brew install llvm nss echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" - ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python - # python3 -m pip install gyp-next - # Above does not work, since pypi only has gyp 0.15.0, which is too old - # for the homebrew python3. Install from source instead. - python3 -m pip install git+https://github.com/nodejs/gyp-next - python3 -m pip install packaging - echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - - name: Use MSYS2 environment and install more dependencies (Windows) + - name: Install dependencies (Windows) if: runner.os == 'Windows' run: | # shellcheck disable=SC2028 { - echo "C:\\msys64\\usr\\bin" - echo "C:\\msys64\\mingw64\\bin" + echo C:/msys64/usr/bin + echo C:/msys64/mingw64/bin } >> "$GITHUB_PATH" /c/msys64/usr/bin/pacman -S --noconfirm nsinstall python3 -m pip install git+https://github.com/nodejs/gyp-next diff --git a/README.md b/README.md index 31d6ab9e94..beadf22ecf 100644 --- a/README.md +++ b/README.md @@ -1,82 +1,102 @@ -# Neqo, an Implementation of QUIC written in Rust +# Neqo, an Implementation of QUIC in Rust ![neqo logo](https://github.com/mozilla/neqo/raw/main/neqo.png "neqo logo") -To run test HTTP/3 programs (neqo-client and neqo-server): +To build Neqo: -* `cargo build` -* `./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db` -* `./target/debug/neqo-client http://127.0.0.1:12345/` - -If a "Failure to load dynamic library" error happens at runtime, do ```shell -export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" +cargo build ``` -On a macOS, do +This will use a system-installed [NSS][NSS] library if it is new enough. 
(See "Build with Separate NSS/NSPR" below if NSS is not installed or it is deemed too old.) + +To run test HTTP/3 programs (`neqo-client` and `neqo-server`): + ```shell -export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +./target/debug/neqo-server '[::]:12345' +./target/debug/neqo-client 'https://[::]:12345/' ``` -## Faster Builds with Separate NSS/NSPR +## Build with separate NSS/NSPR -You can clone NSS (https://hg.mozilla.org/projects/nss) and NSPR -(https://hg.mozilla.org/projects/nspr) into the same directory and export an +You can clone [NSS][NSS] and [NSPR][NSPR] into the same directory and export an environment variable called `NSS_DIR` pointing to NSS. This causes the build to use the existing NSS checkout. However, in order to run anything that depends -on NSS, you need to set `$\[DY]LD\_LIBRARY\_PATH` to point to -`$NSS_DIR/../dist/Debug/lib`. +on NSS, you need to set an environment as follows: + +### Linux + +```shell +export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" +``` + +### macOS + +```shell +export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +``` -Note: If you did not compile NSS separately, you need to have mercurial (hg), installed. -NSS builds require gyp, and ninja (or ninja-build) to be present also. +Note: If you did not already compile NSS separately, you need to have +[Mercurial (hg)][HG], installed. NSS builds require [GYP][GYP] and +[Ninja][NINJA] to be installed. ## Debugging Neqo -### QUIC Logging +### QUIC logging -Enable [QLOG](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/) with: +Enable generation of [QLOG][QLOG] logs with: -``` -$ mkdir "$logdir" -$ ./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db --qlog-dir "$logdir" -$ ./target/debug/neqo-client 'https://[::]:12345/' --qlog-dir "$logdir" +```shell +target/debug/neqo-server '[::]:12345' --qlog-dir . +target/debug/neqo-client 'https://[::]:12345/' --qlog-dir . ``` -You may use https://qvis.quictools.info/ by uploading the QLOG files and visualize the flows. +You can of course specify a different directory for the QLOG files. +You can upload QLOG files to [qvis][QVIS] to visualize the flows. -### Using SSLKEYLOGFILE to decrypt Wireshark logs +### Using `SSLKEYLOGFILE` to decrypt Wireshark logs -[Info here](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format) - -TODO: What is the minimum Wireshark version needed? -TODO: Above link may be incorrect, protocol now called TLS instead of SSL? +You can export TLS keys by setting the `SSLKEYLOGFILE` environment variable +to a filename to instruct NSS to dump keys in the +[standard format](https://datatracker.ietf.org/doc/draft-ietf-tls-keylogfile/) +to enable decryption by [Wireshark](https://wiki.wireshark.org/TLS) and other tools. ### Using RUST_LOG effectively As documented in the [env_logger documentation](https://docs.rs/env_logger/), the `RUST_LOG` environment variable can be used to selectively enable log messages -from Rust code. This works for Neqo's cmdline tools, as well as for when Neqo is +from Rust code. This works for Neqo's command line tools, as well as for when Neqo is incorporated into Gecko, although [Gecko needs to be built in debug mode](https://developer.mozilla.org/en-US/docs/Mozilla/Developer_guide/Build_Instructions/Configuring_Build_Options). Some examples: -1. `RUST_LOG=neqo_transport::dump ./mach run` lists sent and received QUIC - packets and their frames' contents only. 
-1. `RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run` sets a - 'debug' log level for transport, 'trace' level for http3, and 'info' log + +1. ```shell + RUST_LOG=neqo_transport::dump ./mach run + ``` + + lists sent and received QUIC packets and their frames' contents only. + +1. ```shell + RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run + ``` + + sets a `debug` log level for `transport`, `trace` level for `http3`, and `info` log level for all other Rust crates, both Neqo and others used by Gecko. -1. `RUST_LOG=neqo=trace,error ./mach run` sets `trace` level for all modules - starting with "neqo", and sets `error` as minimum log level for other - unrelated Rust log messages. +1. ```shell + RUST_LOG=neqo=trace,error ./mach run + ``` + + sets `trace` level for all modules starting with `neqo`, and sets `error` as minimum log level for other unrelated Rust log messages. -### Trying In-development Neqo code in Gecko +### Trying in-development Neqo code in Gecko In a checked-out copy of Gecko source, set `[patches.*]` values for the four Neqo crates to local versions in the root `Cargo.toml`. For example, if Neqo was checked out to `/home/alice/git/neqo`, add the following lines to the root `Cargo.toml`. -``` +```toml [patch."https://github.com/mozilla/neqo"] neqo-http3 = { path = "/home/alice/git/neqo/neqo-http3" } neqo-transport = { path = "/home/alice/git/neqo/neqo-transport" } @@ -87,11 +107,23 @@ neqo-crypto = { path = "/home/alice/git/neqo/neqo-crypto" } Then run the following: -``` +```shell ./mach vendor rust ``` -Compile Gecko as usual with `./mach build`. +Compile Gecko as usual with + +```shell +./mach build +``` Note: Using newer Neqo code with Gecko may also require changes (likely to `neqo_glue`) if something has changed. 
+ +[NSS]: https://hg.mozilla.org/projects/nss +[NSPR]: https://hg.mozilla.org/projects/nspr +[GYP]: https://github.com/nodejs/gyp-next +[HG]: https://www.mercurial-scm.org/ +[NINJA]: https://ninja-build.org/ +[QLOG]: https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/ +[QVIS]: https://qvis.quictools.info/ diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 588d084741..d2f70a5714 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -21,6 +21,7 @@ neqo-common = { path = "../neqo-common" } # Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 bindgen = { version = "0.69", default-features = false, features = ["runtime"] } mozbuild = { version = "0.1", default-features = false, optional = true } +semver = { version = "1.0", default-features = false } serde = { version = "1.0", default-features = false } serde_derive = { version = "1.0", default-features = false } toml = { version = "0.5", default-features = false } diff --git a/neqo-crypto/bindings/bindings.toml b/neqo-crypto/bindings/bindings.toml index 3e5c1fdf7d..72c6d524d5 100644 --- a/neqo-crypto/bindings/bindings.toml +++ b/neqo-crypto/bindings/bindings.toml @@ -265,8 +265,3 @@ enums = [ [nspr_time] types = ["PRTime"] functions = ["PR_Now"] - -[mozpkix] -cplusplus = true -types = ["mozilla::pkix::ErrorCode"] -enums = ["mozilla::pkix::ErrorCode"] diff --git a/neqo-crypto/bindings/mozpkix.hpp b/neqo-crypto/bindings/mozpkix.hpp deleted file mode 100644 index d0a6cb5861..0000000000 --- a/neqo-crypto/bindings/mozpkix.hpp +++ /dev/null @@ -1 +0,0 @@ -#include "mozpkix/pkixnss.h" \ No newline at end of file diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index c4c2a73e75..2dd4543797 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -12,8 +12,13 @@ use std::{ }; use bindgen::Builder; +use semver::{Version, VersionReq}; use serde_derive::Deserialize; +#[path = "src/min_version.rs"] +mod min_version; +use min_version::MINIMUM_NSS_VERSION; + const BINDINGS_DIR: &str = "bindings"; const BINDINGS_CONFIG: &str = "bindings.toml"; @@ -90,46 +95,6 @@ fn setup_clang() { } } -fn nss_dir() -> PathBuf { - let dir = if let Ok(dir) = env::var("NSS_DIR") { - let path = PathBuf::from(dir.trim()); - assert!( - !path.is_relative(), - "The NSS_DIR environment variable is expected to be an absolute path." - ); - path - } else { - let out_dir = env::var("OUT_DIR").unwrap(); - let dir = Path::new(&out_dir).join("nss"); - if !dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nss", - dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nss"); - } - let nspr_dir = Path::new(&out_dir).join("nspr"); - if !nspr_dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nspr", - nspr_dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nspr"); - } - dir - }; - assert!(dir.is_dir(), "NSS_DIR {dir:?} doesn't exist"); - // Note that this returns a relative path because UNC - // paths on windows cause certain tools to explode. - dir -} - fn get_bash() -> PathBuf { // If BASH is set, use that. 
if let Ok(bash) = env::var("BASH") { @@ -295,11 +260,63 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool .expect("couldn't write bindings"); } -fn setup_standalone() -> Vec { +fn pkg_config() -> Vec { + let modversion = Command::new("pkg-config") + .args(["--modversion", "nss"]) + .output() + .expect("pkg-config reports NSS as absent") + .stdout; + let modversion = String::from_utf8(modversion).expect("non-UTF8 from pkg-config"); + let modversion = modversion.trim(); + // The NSS version number does not follow semver numbering, because it omits the patch version + // when that's 0. Deal with that. + let modversion_for_cmp = if modversion.chars().filter(|c| *c == '.').count() == 1 { + modversion.to_owned() + ".0" + } else { + modversion.to_owned() + }; + let modversion_for_cmp = + Version::parse(&modversion_for_cmp).expect("NSS version not in semver format"); + let version_req = VersionReq::parse(&format!(">={}", MINIMUM_NSS_VERSION.trim())).unwrap(); + assert!( + version_req.matches(&modversion_for_cmp), + "neqo has NSS version requirement {version_req}, found {modversion}" + ); + + let cfg = Command::new("pkg-config") + .args(["--cflags", "--libs", "nss"]) + .output() + .expect("NSS flags not returned by pkg-config") + .stdout; + let cfg_str = String::from_utf8(cfg).expect("non-UTF8 from pkg-config"); + + let mut flags: Vec = Vec::new(); + for f in cfg_str.split(' ') { + if let Some(include) = f.strip_prefix("-I") { + flags.push(String::from(f)); + println!("cargo:include={include}"); + } else if let Some(path) = f.strip_prefix("-L") { + println!("cargo:rustc-link-search=native={path}"); + } else if let Some(lib) = f.strip_prefix("-l") { + println!("cargo:rustc-link-lib=dylib={lib}"); + } else { + println!("Warning: Unknown flag from pkg-config: {f}"); + } + } + + flags +} + +fn setup_standalone(nss: &str) -> Vec { setup_clang(); println!("cargo:rerun-if-env-changed=NSS_DIR"); - let nss = nss_dir(); + let nss = PathBuf::from(nss); + assert!( + !nss.is_relative(), + "The NSS_DIR environment variable is expected to be an absolute path." 
+ ); + build_nss(nss.clone()); // $NSS_DIR/../dist/ @@ -406,8 +423,10 @@ fn setup_for_gecko() -> Vec { fn main() { let flags = if cfg!(feature = "gecko") { setup_for_gecko() + } else if let Ok(nss_dir) = env::var("NSS_DIR") { + setup_standalone(nss_dir.trim()) } else { - setup_standalone() + pkg_config() }; let config_file = PathBuf::from(BINDINGS_DIR).join(BINDINGS_CONFIG); diff --git a/neqo-crypto/min_version.txt b/neqo-crypto/min_version.txt new file mode 100644 index 0000000000..422c9c7093 --- /dev/null +++ b/neqo-crypto/min_version.txt @@ -0,0 +1 @@ +3.98 diff --git a/neqo-crypto/src/err.rs b/neqo-crypto/src/err.rs index 187303d2a9..8d4f239a0b 100644 --- a/neqo-crypto/src/err.rs +++ b/neqo-crypto/src/err.rs @@ -16,13 +16,39 @@ mod codes { #![allow(non_snake_case)] include!(concat!(env!("OUT_DIR"), "/nss_secerr.rs")); include!(concat!(env!("OUT_DIR"), "/nss_sslerr.rs")); - include!(concat!(env!("OUT_DIR"), "/mozpkix.rs")); } -pub use codes::{mozilla_pkix_ErrorCode as mozpkix, SECErrorCodes as sec, SSLErrorCodes as ssl}; +pub use codes::{SECErrorCodes as sec, SSLErrorCodes as ssl}; pub mod nspr { include!(concat!(env!("OUT_DIR"), "/nspr_err.rs")); } +pub mod mozpkix { + // These are manually extracted from the many bindings generated + // by bindgen when provided with the simple header: + // #include "mozpkix/pkixnss.h" + + #[allow(non_camel_case_types)] + pub type mozilla_pkix_ErrorCode = ::std::os::raw::c_int; + pub const MOZILLA_PKIX_ERROR_KEY_PINNING_FAILURE: mozilla_pkix_ErrorCode = -16384; + pub const MOZILLA_PKIX_ERROR_CA_CERT_USED_AS_END_ENTITY: mozilla_pkix_ErrorCode = -16383; + pub const MOZILLA_PKIX_ERROR_INADEQUATE_KEY_SIZE: mozilla_pkix_ErrorCode = -16382; + pub const MOZILLA_PKIX_ERROR_V1_CERT_USED_AS_CA: mozilla_pkix_ErrorCode = -16381; + pub const MOZILLA_PKIX_ERROR_NO_RFC822NAME_MATCH: mozilla_pkix_ErrorCode = -16380; + pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_CERTIFICATE: mozilla_pkix_ErrorCode = -16379; + pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_ISSUER_CERTIFICATE: mozilla_pkix_ErrorCode = -16378; + pub const MOZILLA_PKIX_ERROR_SIGNATURE_ALGORITHM_MISMATCH: mozilla_pkix_ErrorCode = -16377; + pub const MOZILLA_PKIX_ERROR_OCSP_RESPONSE_FOR_CERT_MISSING: mozilla_pkix_ErrorCode = -16376; + pub const MOZILLA_PKIX_ERROR_VALIDITY_TOO_LONG: mozilla_pkix_ErrorCode = -16375; + pub const MOZILLA_PKIX_ERROR_REQUIRED_TLS_FEATURE_MISSING: mozilla_pkix_ErrorCode = -16374; + pub const MOZILLA_PKIX_ERROR_INVALID_INTEGER_ENCODING: mozilla_pkix_ErrorCode = -16373; + pub const MOZILLA_PKIX_ERROR_EMPTY_ISSUER_NAME: mozilla_pkix_ErrorCode = -16372; + pub const MOZILLA_PKIX_ERROR_ADDITIONAL_POLICY_CONSTRAINT_FAILED: mozilla_pkix_ErrorCode = + -16371; + pub const MOZILLA_PKIX_ERROR_SELF_SIGNED_CERT: mozilla_pkix_ErrorCode = -16370; + pub const MOZILLA_PKIX_ERROR_MITM_DETECTED: mozilla_pkix_ErrorCode = -16369; + pub const END_OF_LIST: mozilla_pkix_ErrorCode = -16368; +} + pub type Res = Result; #[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)] diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 45f61f6127..33fe623b17 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -59,7 +59,8 @@ pub use self::{ ssl::Opt, }; -const MINIMUM_NSS_VERSION: &str = "3.97"; +mod min_version; +use min_version::MINIMUM_NSS_VERSION; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] #[allow(clippy::upper_case_acronyms)] diff --git a/neqo-crypto/src/min_version.rs b/neqo-crypto/src/min_version.rs new file mode 100644 index 0000000000..4386371b1b --- /dev/null 
+++ b/neqo-crypto/src/min_version.rs @@ -0,0 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The minimum version of NSS that is required by this version of neqo. +/// Note that the string may contain whitespace at the beginning and/or end. +pub(crate) const MINIMUM_NSS_VERSION: &str = include_str!("../min_version.txt"); From f1560abfeea51a309bcff70da2325de6e07a8e89 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 10:39:57 +0200 Subject: [PATCH 17/28] chore: Rename feature `fuzzing` to `disable-encryption` (#1767) * chore: Rename feature `fuzzing` to `disable-encryption` Because `cargo fuzz` relies on being able to use `fuzzing` * WIP * More * Add `disable-encryption` feature to CI, to make sure it doesn't rot * shellcheck * Undo * Fix * Address code review and rename `fuzzing` -> `null` * Fix clippy * Address code review --------- Signed-off-by: Lars Eggert --- neqo-crypto/Cargo.toml | 2 +- neqo-crypto/src/aead.rs | 8 +- .../src/{aead_fuzzing.rs => aead_null.rs} | 64 ++++---------- neqo-crypto/src/lib.rs | 12 +-- neqo-crypto/src/selfencrypt.rs | 2 +- neqo-crypto/tests/aead.rs | 3 +- neqo-crypto/tests/selfencrypt.rs | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- neqo-transport/src/connection/mod.rs | 1 - neqo-transport/src/connection/params.rs | 13 --- .../src/connection/tests/handshake.rs | 6 +- neqo-transport/src/connection/tests/mod.rs | 2 +- .../connection/tests/{fuzzing.rs => null.rs} | 8 +- neqo-transport/src/crypto.rs | 88 +++---------------- neqo-transport/src/packet/mod.rs | 2 +- neqo-transport/src/packet/retry.rs | 1 - neqo-transport/tests/common/mod.rs | 9 +- neqo-transport/tests/conn_vectors.rs | 2 +- neqo-transport/tests/retry.rs | 2 +- 20 files changed, 53 insertions(+), 178 deletions(-) rename neqo-crypto/src/{aead_fuzzing.rs => aead_null.rs} (55%) rename neqo-transport/src/connection/tests/{fuzzing.rs => null.rs} (84%) diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index d2f70a5714..47337d99c0 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -31,7 +31,7 @@ test-fixture = { path = "../test-fixture" } [features] gecko = ["mozbuild"] -fuzzing = [] +disable-encryption = [] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index bf7d7fe9d7..21027d55b2 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -63,13 +63,7 @@ impl RealAead { /// # Errors /// /// Returns `Error` when the supporting NSS functions fail. - pub fn new( - _fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { + pub fn new(version: Version, cipher: Cipher, secret: &SymKey, prefix: &str) -> Res { let s: *mut PK11SymKey = **secret; unsafe { Self::from_raw(version, cipher, s, prefix) } } diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_null.rs similarity index 55% rename from neqo-crypto/src/aead_fuzzing.rs rename to neqo-crypto/src/aead_null.rs index 1f3bfb14bd..2d5656de73 100644 --- a/neqo-crypto/src/aead_fuzzing.rs +++ b/neqo-crypto/src/aead_null.rs @@ -4,87 +4,63 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#![cfg(feature = "disable-encryption")] + use std::fmt; use crate::{ constants::{Cipher, Version}, err::{sec::SEC_ERROR_BAD_DATA, Error, Res}, p11::SymKey, - RealAead, }; -pub const FIXED_TAG_FUZZING: &[u8] = &[0x0a; 16]; +pub const AEAD_NULL_TAG: &[u8] = &[0x0a; 16]; -pub struct FuzzingAead { - real: Option, -} +pub struct AeadNull {} -impl FuzzingAead { +impl AeadNull { #[allow(clippy::missing_errors_doc)] - pub fn new( - fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { - let real = if fuzzing { - None - } else { - Some(RealAead::new(false, version, cipher, secret, prefix)?) - }; - Ok(Self { real }) + pub fn new(_version: Version, _cipher: Cipher, _secret: &SymKey, _prefix: &str) -> Res { + Ok(Self {}) } #[must_use] pub fn expansion(&self) -> usize { - if let Some(aead) = &self.real { - aead.expansion() - } else { - FIXED_TAG_FUZZING.len() - } + AEAD_NULL_TAG.len() } #[allow(clippy::missing_errors_doc)] pub fn encrypt<'a>( &self, - count: u64, - aad: &[u8], + _count: u64, + _aad: &[u8], input: &[u8], output: &'a mut [u8], ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.encrypt(count, aad, input, output); - } - let l = input.len(); output[..l].copy_from_slice(input); - output[l..l + 16].copy_from_slice(FIXED_TAG_FUZZING); + output[l..l + 16].copy_from_slice(AEAD_NULL_TAG); Ok(&output[..l + 16]) } #[allow(clippy::missing_errors_doc)] pub fn decrypt<'a>( &self, - count: u64, - aad: &[u8], + _count: u64, + _aad: &[u8], input: &[u8], output: &'a mut [u8], ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.decrypt(count, aad, input, output); - } - - if input.len() < FIXED_TAG_FUZZING.len() { + if input.len() < AEAD_NULL_TAG.len() { return Err(Error::from(SEC_ERROR_BAD_DATA)); } - let len_encrypted = input.len() - FIXED_TAG_FUZZING.len(); + let len_encrypted = input.len() - AEAD_NULL_TAG.len(); // Check that: // 1) expansion is all zeros and // 2) if the encrypted data is also supplied that at least some values are no zero // (otherwise padding will be interpreted as a valid packet) - if &input[len_encrypted..] == FIXED_TAG_FUZZING + if &input[len_encrypted..] == AEAD_NULL_TAG && (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0)) { output[..len_encrypted].copy_from_slice(&input[..len_encrypted]); @@ -95,12 +71,8 @@ impl FuzzingAead { } } -impl fmt::Debug for FuzzingAead { +impl fmt::Debug for AeadNull { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if let Some(a) = &self.real { - a.fmt(f) - } else { - write!(f, "[FUZZING AEAD]") - } + write!(f, "[NULL AEAD]") } } diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 33fe623b17..b82b225d40 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -8,8 +8,8 @@ #![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code. 
mod aead; -#[cfg(feature = "fuzzing")] -pub mod aead_fuzzing; +#[cfg(feature = "disable-encryption")] +pub mod aead_null; pub mod agent; mod agentio; mod auth; @@ -33,12 +33,12 @@ mod time; use std::{ffi::CString, path::PathBuf, ptr::null, sync::OnceLock}; -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] pub use self::aead::RealAead as Aead; -#[cfg(feature = "fuzzing")] +#[cfg(feature = "disable-encryption")] pub use self::aead::RealAead; -#[cfg(feature = "fuzzing")] -pub use self::aead_fuzzing::FuzzingAead as Aead; +#[cfg(feature = "disable-encryption")] +pub use self::aead_null::AeadNull as Aead; pub use self::{ agent::{ Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken, diff --git a/neqo-crypto/src/selfencrypt.rs b/neqo-crypto/src/selfencrypt.rs index 1130c35250..d0a85830b0 100644 --- a/neqo-crypto/src/selfencrypt.rs +++ b/neqo-crypto/src/selfencrypt.rs @@ -47,7 +47,7 @@ impl SelfEncrypt { debug_assert_eq!(salt.len(), Self::SALT_LENGTH); let salt = hkdf::import_key(self.version, salt)?; let secret = hkdf::extract(self.version, self.cipher, Some(&salt), k)?; - Aead::new(false, self.version, self.cipher, &secret, "neqo self") + Aead::new(self.version, self.cipher, &secret, "neqo self") } /// Rotate keys. This causes any previous key that is being held to be replaced by the current diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index 5cf0034aec..f8416ed9a7 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -5,7 +5,7 @@ // except according to those terms. #![warn(clippy::pedantic)] -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use neqo_crypto::{ constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, @@ -40,7 +40,6 @@ fn make_aead(cipher: Cipher) -> Aead { ) .expect("make a secret"); Aead::new( - false, TLS_VERSION_1_3, cipher, &secret, diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 4c574a3ae9..b20aa27ee6 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 32e3ae7e35..27f43fd93f 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -28,7 +28,7 @@ url = { version = "2.5", default-features = false } test-fixture = { path = "../test-fixture" } [features] -fuzzing = ["neqo-transport/fuzzing", "neqo-crypto/fuzzing"] +disable-encryption = ["neqo-transport/disable-encryption", "neqo-crypto/disable-encryption"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 3da60bdabb..125da11508 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -27,7 +27,7 @@ test-fixture = { path = "../test-fixture" } [features] bench = [] -fuzzing = ["neqo-crypto/fuzzing"] +disable-encryption = ["neqo-crypto/disable-encryption"] [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 535d3f4084..3bf6b91263 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -383,7 +383,6 @@ impl Connection { agent, protocols.iter().map(P::as_ref).map(String::from).collect(), Rc::clone(&tphandler), - conn_params.is_fuzzing(), )?; let stats = StatsCell::default(); diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index 72d1efa3ee..d8aa617024 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -77,7 +77,6 @@ pub struct ConnectionParameters { outgoing_datagram_queue: usize, incoming_datagram_queue: usize, fast_pto: u8, - fuzzing: bool, grease: bool, pacing: bool, } @@ -100,7 +99,6 @@ impl Default for ConnectionParameters { outgoing_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, incoming_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, fast_pto: FAST_PTO_SCALE, - fuzzing: false, grease: true, pacing: true, } @@ -324,17 +322,6 @@ impl ConnectionParameters { self } - #[must_use] - pub fn is_fuzzing(&self) -> bool { - self.fuzzing - } - - #[must_use] - pub fn fuzzing(mut self, enable: bool) -> Self { - self.fuzzing = enable; - self - } - #[must_use] pub fn is_greasing(&self) -> bool { self.grease diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 4b2a18642f..f2103523ec 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -16,7 +16,7 @@ use neqo_common::{event::Provider, qdebug, Datagram}; use neqo_crypto::{ constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, }; -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] use test_fixture::datagram; use test_fixture::{ assertions, assertions::assert_coalesced_0rtt, fixture_init, now, split_datagram, DEFAULT_ADDR, @@ -606,7 +606,7 @@ fn reorder_1rtt() { } } -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn corrupted_initial() { let mut client = default_client(); @@ -809,7 +809,7 @@ fn anti_amplification() { assert_eq!(*server.state(), State::Confirmed); } -#[cfg(not(feature = 
"fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn garbage_initial() { let mut client = default_client(); diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index 6f598fb23e..c8c87a0df0 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -37,11 +37,11 @@ mod ackrate; mod cc; mod close; mod datagram; -mod fuzzing; mod handshake; mod idle; mod keys; mod migration; +mod null; mod priority; mod recovery; mod resumption; diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/null.rs similarity index 84% rename from neqo-transport/src/connection/tests/fuzzing.rs rename to neqo-transport/src/connection/tests/null.rs index b12100f8ad..e4d60445c6 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/null.rs @@ -4,9 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg(feature = "fuzzing")] +#![cfg(feature = "disable-encryption")] -use neqo_crypto::aead_fuzzing::FIXED_TAG_FUZZING; +use neqo_crypto::aead_null::AEAD_NULL_TAG; use test_fixture::now; use super::{connect_force_idle, default_client, default_server}; @@ -24,7 +24,7 @@ fn no_encryption() { client.stream_send(stream_id, DATA_CLIENT).unwrap(); let client_pkt = client.process_output(now()).dgram().unwrap(); - assert!(client_pkt[..client_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_CLIENT)); + assert!(client_pkt[..client_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_CLIENT)); server.process_input(&client_pkt, now()); let mut buf = vec![0; 100]; @@ -33,7 +33,7 @@ fn no_encryption() { assert_eq!(&buf[..len], DATA_CLIENT); server.stream_send(stream_id, DATA_SERVER).unwrap(); let server_pkt = server.process_output(now()).dgram().unwrap(); - assert!(server_pkt[..server_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_SERVER)); + assert!(server_pkt[..server_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_SERVER)); client.process_input(&server_pkt, now()); let (len, _) = client.stream_recv(stream_id, &mut buf).unwrap(); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index acc02172d5..54bfe622cf 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -69,7 +69,6 @@ impl Crypto { mut agent: Agent, protocols: Vec, tphandler: TpHandler, - fuzzing: bool, ) -> Res { agent.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?; agent.set_ciphers(&[ @@ -102,7 +101,6 @@ impl Crypto { tls: agent, streams: CryptoStreams::default(), states: CryptoStates { - fuzzing, ..CryptoStates::default() }, }) @@ -420,7 +418,6 @@ pub struct CryptoDxState { /// The total number of operations that are remaining before the keys /// become exhausted and can't be used any more. 
invocations: PacketNumber, - fuzzing: bool, } impl CryptoDxState { @@ -431,7 +428,6 @@ impl CryptoDxState { epoch: Epoch, secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Self { qdebug!( "Making {:?} {} CryptoDxState, v={:?} cipher={}", @@ -445,19 +441,11 @@ impl CryptoDxState { version, direction, epoch: usize::from(epoch), - aead: Aead::new( - fuzzing, - TLS_VERSION_1_3, - cipher, - secret, - version.label_prefix(), - ) - .unwrap(), + aead: Aead::new(TLS_VERSION_1_3, cipher, secret, version.label_prefix()).unwrap(), hpkey: HpKey::extract(TLS_VERSION_1_3, cipher, secret, &hplabel).unwrap(), used_pn: 0..0, min_pn: 0, invocations: Self::limit(direction, cipher), - fuzzing, } } @@ -466,7 +454,6 @@ impl CryptoDxState { direction: CryptoDxDirection, label: &str, dcid: &[u8], - fuzzing: bool, ) -> Self { qtrace!("new_initial {:?} {}", version, ConnectionIdRef::from(dcid)); let salt = version.initial_salt(); @@ -482,14 +469,7 @@ impl CryptoDxState { let secret = hkdf::expand_label(TLS_VERSION_1_3, cipher, &initial_secret, &[], label).unwrap(); - Self::new( - version, - direction, - TLS_EPOCH_INITIAL, - &secret, - cipher, - fuzzing, - ) + Self::new(version, direction, TLS_EPOCH_INITIAL, &secret, cipher) } /// Determine the confidentiality and integrity limits for the cipher. @@ -549,7 +529,6 @@ impl CryptoDxState { direction: self.direction, epoch: self.epoch + 1, aead: Aead::new( - self.fuzzing, TLS_VERSION_1_3, cipher, next_secret, @@ -560,7 +539,6 @@ impl CryptoDxState { used_pn: pn..pn, min_pn: pn, invocations, - fuzzing: self.fuzzing, } } @@ -696,7 +674,7 @@ impl CryptoDxState { Ok(res.to_vec()) } - #[cfg(all(test, not(feature = "fuzzing")))] + #[cfg(all(test, not(feature = "disable-encryption")))] pub(crate) fn test_default() -> Self { // This matches the value in packet.rs const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; @@ -705,7 +683,6 @@ impl CryptoDxState { CryptoDxDirection::Write, "server in", CLIENT_CID, - false, ) } @@ -759,7 +736,6 @@ pub(crate) struct CryptoDxAppData { cipher: Cipher, // Not the secret used to create `self.dx`, but the one needed for the next iteration. next_secret: SymKey, - fuzzing: bool, } impl CryptoDxAppData { @@ -768,20 +744,11 @@ impl CryptoDxAppData { dir: CryptoDxDirection, secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Res { Ok(Self { - dx: CryptoDxState::new( - version, - dir, - TLS_EPOCH_APPLICATION_DATA, - secret, - cipher, - fuzzing, - ), + dx: CryptoDxState::new(version, dir, TLS_EPOCH_APPLICATION_DATA, secret, cipher), cipher, next_secret: Self::update_secret(cipher, secret)?, - fuzzing, }) } @@ -800,7 +767,6 @@ impl CryptoDxAppData { dx: self.dx.next(&self.next_secret, self.cipher), cipher: self.cipher, next_secret, - fuzzing: self.fuzzing, }) } @@ -834,7 +800,6 @@ pub struct CryptoStates { // If this is set, then we have noticed a genuine update. // Once this time passes, we should switch in new keys. 
read_update_time: Option, - fuzzing: bool, } impl CryptoStates { @@ -989,20 +954,8 @@ impl CryptoStates { ); let mut initial = CryptoState { - tx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Write, - write, - dcid, - self.fuzzing, - ), - rx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Read, - read, - dcid, - self.fuzzing, - ), + tx: CryptoDxState::new_initial(*v, CryptoDxDirection::Write, write, dcid), + rx: CryptoDxState::new_initial(*v, CryptoDxDirection::Read, read, dcid), }; if let Some(prev) = self.initials.get(v) { qinfo!( @@ -1056,7 +1009,6 @@ impl CryptoStates { TLS_EPOCH_ZERO_RTT, secret, cipher, - self.fuzzing, )); } @@ -1097,7 +1049,6 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, write_secret, cipher, - self.fuzzing, ), rx: CryptoDxState::new( version, @@ -1105,7 +1056,6 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, read_secret, cipher, - self.fuzzing, ), }); } @@ -1113,13 +1063,7 @@ impl CryptoStates { pub fn set_application_write_key(&mut self, version: Version, secret: &SymKey) -> Res<()> { debug_assert!(self.app_write.is_none()); debug_assert_ne!(self.cipher, 0); - let mut app = CryptoDxAppData::new( - version, - CryptoDxDirection::Write, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Write, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Write { app.dx.continuation(z)?; @@ -1138,13 +1082,7 @@ impl CryptoStates { ) -> Res<()> { debug_assert!(self.app_write.is_some(), "should have write keys installed"); debug_assert!(self.app_read.is_none()); - let mut app = CryptoDxAppData::new( - version, - CryptoDxDirection::Read, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Read, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Read { app.dx.continuation(z)?; @@ -1286,7 +1224,7 @@ impl CryptoStates { } /// Make some state for removing protection in tests. 
- #[cfg(not(feature = "fuzzing"))] + #[cfg(not(feature = "disable-encryption"))] #[cfg(test)] pub(crate) fn test_default() -> Self { let read = |epoch| { @@ -1299,7 +1237,6 @@ impl CryptoStates { dx: read(epoch), cipher: TLS_AES_128_GCM_SHA256, next_secret: hkdf::import_key(TLS_VERSION_1_3, &[0xaa; 32]).unwrap(), - fuzzing: false, }; let mut initials = HashMap::new(); initials.insert( @@ -1319,11 +1256,10 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } - #[cfg(all(not(feature = "fuzzing"), test))] + #[cfg(all(not(feature = "disable-encryption"), test))] pub(crate) fn test_chacha() -> Self { const SECRET: &[u8] = &[ 0x9a, 0xc3, 0x12, 0xa7, 0xf8, 0x77, 0x46, 0x8e, 0xbe, 0x69, 0x42, 0x27, 0x48, 0xad, @@ -1337,7 +1273,6 @@ impl CryptoStates { direction: CryptoDxDirection::Read, epoch, aead: Aead::new( - false, TLS_VERSION_1_3, TLS_CHACHA20_POLY1305_SHA256, &secret, @@ -1354,11 +1289,9 @@ impl CryptoStates { used_pn: 0..645_971_972, min_pn: 0, invocations: 10, - fuzzing: false, }, cipher: TLS_CHACHA20_POLY1305_SHA256, next_secret: secret.clone(), - fuzzing: false, }; Self { initials: HashMap::new(), @@ -1369,7 +1302,6 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } } diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index d11b3423a4..552e50a1f9 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -868,7 +868,7 @@ impl Deref for DecryptedPacket { } } -#[cfg(all(test, not(feature = "fuzzing")))] +#[cfg(all(test, not(feature = "disable-encryption")))] mod tests { use neqo_common::Encoder; use test_fixture::{fixture_init, now}; diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index 72036d3b49..71193b9100 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -18,7 +18,6 @@ fn make_aead(version: Version) -> Aead { let secret = hkdf::import_key(TLS_VERSION_1_3, version.retry_secret()).unwrap(); Aead::new( - false, TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index faff216eb9..e36e66f753 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -146,14 +146,7 @@ pub fn initial_aead_and_hp(dcid: &[u8], role: Role) -> (Aead, HpKey) { ) .unwrap(); ( - Aead::new( - false, - TLS_VERSION_1_3, - TLS_AES_128_GCM_SHA256, - &secret, - "quic ", - ) - .unwrap(), + Aead::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic ").unwrap(), HpKey::extract(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic hp").unwrap(), ) } diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index f478883075..86fe9d36fc 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -6,7 +6,7 @@ // Tests with the test vectors from the spec. -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] use std::{cell::RefCell, rc::Rc}; diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index e583fcae0f..36eff71e7b 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] mod common; From 20ef1be046769efaeb151b21b6cde22af67ee712 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 12:10:59 +0200 Subject: [PATCH 18/28] feat: Turn on TLS greasing (#1760) * feat: Turn on TLS greasing Fixes #1391 Needs #1739 * Make clippy happy --- neqo-crypto/src/agent.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 90085cb759..3d5a8b9f35 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -16,7 +16,7 @@ use std::{ time::Instant, }; -use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; +use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qtrace, qwarn}; pub use crate::{ agentio::{as_c_void, Record, RecordList}, @@ -406,10 +406,7 @@ impl SecretAgent { self.set_option(ssl::Opt::Locking, false)?; self.set_option(ssl::Opt::Tickets, false)?; self.set_option(ssl::Opt::OcspStapling, true)?; - if let Err(e) = self.set_option(ssl::Opt::Grease, grease) { - // Until NSS supports greasing, it's OK to fail here. - qinfo!([self], "Failed to enable greasing {:?}", e); - } + self.set_option(ssl::Opt::Grease, grease)?; Ok(()) } From efc4813affbb8077e2f7f7fb2799be0452fc52ed Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 27 Mar 2024 12:39:04 +0200 Subject: [PATCH 19/28] fix: Address more clippy warnings (#1777) * fix: Address more clippy warnings PR #1764 exports `frame` and `packet` if feature `fuzzing` is enabled. This apparently turns on a bunch of clippy checks that are not on by default? This PR fixes them. Made this separate from #1764 to reduce that PR's size. * Fix clippy * Remove comment --- neqo-transport/src/frame.rs | 26 ++++++++++++-- neqo-transport/src/packet/mod.rs | 61 +++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 8 deletions(-) diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 5a86a07108..d84eb61ce8 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -95,6 +95,12 @@ impl From for CloseError { } } +impl From for Error { + fn from(_err: std::array::TryFromSliceError) -> Self { + Self::FrameEncodingError + } +} + #[derive(PartialEq, Eq, Debug, Default, Clone)] pub struct AckRange { pub(crate) gap: u64, @@ -213,6 +219,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn get_type(&self) -> FrameType { match self { Self::Padding { .. } => FRAME_TYPE_PADDING, @@ -254,6 +261,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn is_stream(&self) -> bool { matches!( self, @@ -269,6 +277,7 @@ impl<'a> Frame<'a> { ) } + #[must_use] pub fn stream_type(fin: bool, nonzero_offset: bool, fill: bool) -> u64 { let mut t = FRAME_TYPE_STREAM; if fin { @@ -285,6 +294,7 @@ impl<'a> Frame<'a> { /// If the frame causes a recipient to generate an ACK within its /// advertised maximum acknowledgement delay. + #[must_use] pub fn ack_eliciting(&self) -> bool { !matches!( self, @@ -294,6 +304,7 @@ impl<'a> Frame<'a> { /// If the frame can be sent in a path probe /// without initiating migration to that path. + #[must_use] pub fn path_probing(&self) -> bool { matches!( self, @@ -307,6 +318,10 @@ impl<'a> Frame<'a> { /// Converts `AckRanges` as encoded in a ACK frame (see -transport /// 19.3.1) into ranges of acked packets (end, start), inclusive of /// start and end values. + /// + /// # Errors + /// + /// Returns an error if the ranges are invalid. 
pub fn decode_ack_frame( largest_acked: u64, first_ack_range: u64, @@ -347,6 +362,7 @@ impl<'a> Frame<'a> { Ok(acked_ranges) } + #[must_use] pub fn dump(&self) -> String { match self { Self::Crypto { offset, data } => { @@ -372,6 +388,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn is_allowed(&self, pt: PacketType) -> bool { match self { Self::Padding { .. } | Self::Ping => true, @@ -386,6 +403,9 @@ impl<'a> Frame<'a> { } } + /// # Errors + /// + /// Returns an error if the frame cannot be decoded. #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match statement. pub fn decode(dec: &mut Decoder<'a>) -> Res { /// Maximum ACK Range Count in ACK Frame @@ -470,7 +490,7 @@ impl<'a> Frame<'a> { FRAME_TYPE_CRYPTO => { let offset = dv(dec)?; let data = d(dec.decode_vvec())?; - if offset + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if offset + u64::try_from(data.len())? > ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Crypto { offset, data }) @@ -497,7 +517,7 @@ impl<'a> Frame<'a> { qtrace!("STREAM frame, with length"); d(dec.decode_vvec())? }; - if o + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if o + u64::try_from(data.len())? > ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Stream { @@ -546,7 +566,7 @@ impl<'a> Frame<'a> { return Err(Error::DecodingFrame); } let srt = d(dec.decode(16))?; - let stateless_reset_token = <&[_; 16]>::try_from(srt).unwrap(); + let stateless_reset_token = <&[_; 16]>::try_from(srt)?; Ok(Self::NewConnectionId { sequence_number, diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 552e50a1f9..0843d050ab 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -256,6 +256,10 @@ impl PacketBuilder { /// Maybe pad with "PADDING" frames. /// Only does so if padding was needed and this is a short packet. /// Returns true if padding was added. + /// + /// # Panics + /// + /// Cannot happen. pub fn pad(&mut self) -> bool { if self.padding && !self.is_long() { self.encoder @@ -290,6 +294,10 @@ impl PacketBuilder { /// The length is filled in after calling `build`. /// Does nothing if there isn't 4 bytes available other than render this builder /// unusable; if `remaining()` returns 0 at any point, call `abort()`. + /// + /// # Panics + /// + /// This will panic if the packet number length is too large. pub fn pn(&mut self, pn: PacketNumber, pn_len: usize) { if self.remaining() < 4 { self.limit = 0; @@ -354,6 +362,10 @@ impl PacketBuilder { } /// Build the packet and return the encoder. + /// + /// # Errors + /// + /// This will return an error if the packet is too large. pub fn build(mut self, crypto: &mut CryptoDxState) -> Res { if self.len() > self.limit { qwarn!("Packet contents are more than the limit"); @@ -378,7 +390,9 @@ impl PacketBuilder { // Calculate the mask. let offset = SAMPLE_OFFSET - self.offsets.pn.len(); - assert!(offset + SAMPLE_SIZE <= ciphertext.len()); + if offset + SAMPLE_SIZE > ciphertext.len() { + return Err(Error::InternalError); + } let sample = &ciphertext[offset..offset + SAMPLE_SIZE]; let mask = crypto.compute_mask(sample)?; @@ -412,6 +426,10 @@ impl PacketBuilder { /// As this is a simple packet, this is just an associated function. /// As Retry is odd (it has to be constructed with leading bytes), /// this returns a [`Vec`] rather than building on an encoder. + /// + /// # Errors + /// + /// This will return an error if AEAD encrypt fails. #[allow(clippy::similar_names)] // scid and dcid are fine here. 
pub fn retry( version: Version, @@ -445,6 +463,7 @@ impl PacketBuilder { /// Make a Version Negotiation packet. #[allow(clippy::similar_names)] // scid and dcid are fine here. + #[must_use] pub fn version_negotiation( dcid: &[u8], scid: &[u8], @@ -556,6 +575,10 @@ impl<'a> PublicPacket<'a> { /// Decode the common parts of a packet. This provides minimal parsing and validation. /// Returns a tuple of a `PublicPacket` and a slice with any remainder from the datagram. + /// + /// # Errors + /// + /// This will return an error if the packet could not be decoded. #[allow(clippy::similar_names)] // For dcid and scid, which are fine. pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> { let mut decoder = Decoder::new(data); @@ -587,7 +610,7 @@ impl<'a> PublicPacket<'a> { } // Generic long header. - let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?).unwrap(); + let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?)?; let dcid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); let scid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); @@ -647,11 +670,14 @@ impl<'a> PublicPacket<'a> { } /// Validate the given packet as though it were a retry. + #[must_use] pub fn is_valid_retry(&self, odcid: &ConnectionId) -> bool { if self.packet_type != PacketType::Retry { return false; } - let version = self.version().unwrap(); + let Some(version) = self.version() else { + return false; + }; let expansion = retry::expansion(version); if self.data.len() <= expansion { return false; @@ -667,6 +693,7 @@ impl<'a> PublicPacket<'a> { .unwrap_or(false) } + #[must_use] pub fn is_valid_initial(&self) -> bool { // Packet has to be an initial, with a DCID of 8 bytes, or a token. // Note: the Server class validates the token and checks the length. @@ -674,32 +701,42 @@ impl<'a> PublicPacket<'a> { && (self.dcid().len() >= 8 || !self.token.is_empty()) } + #[must_use] pub fn packet_type(&self) -> PacketType { self.packet_type } + #[must_use] pub fn dcid(&self) -> ConnectionIdRef<'a> { self.dcid } + /// # Panics + /// + /// This will panic if called for a short header packet. + #[must_use] pub fn scid(&self) -> ConnectionIdRef<'a> { self.scid .expect("should only be called for long header packets") } + #[must_use] pub fn token(&self) -> &'a [u8] { self.token } + #[must_use] pub fn version(&self) -> Option { self.version.and_then(|v| Version::try_from(v).ok()) } + #[must_use] pub fn wire_version(&self) -> WireVersion { debug_assert!(self.version.is_some()); self.version.unwrap_or(0) } + #[must_use] pub fn len(&self) -> usize { self.data.len() } @@ -778,6 +815,9 @@ impl<'a> PublicPacket<'a> { )) } + /// # Errors + /// + /// This will return an error if the packet cannot be decrypted. pub fn decrypt(&self, crypto: &mut CryptoStates, release_at: Instant) -> Res { let cspace: CryptoSpace = self.packet_type.into(); // When we don't have a version, the crypto code doesn't need a version @@ -792,7 +832,9 @@ impl<'a> PublicPacket<'a> { // too small (which is public information). let (key_phase, pn, header, body) = self.decrypt_header(rx)?; qtrace!([rx], "decoded header: {:?}", header); - let rx = crypto.rx(version, cspace, key_phase).unwrap(); + let Some(rx) = crypto.rx(version, cspace, key_phase) else { + return Err(Error::DecryptError); + }; let version = rx.version(); // Version fixup; see above. 
let d = rx.decrypt(pn, &header, body)?; // If this is the first packet ever successfully decrypted @@ -815,8 +857,14 @@ impl<'a> PublicPacket<'a> { } } + /// # Errors + /// + /// This will return an error if the packet is not a version negotiation packet + /// or if the versions cannot be decoded. pub fn supported_versions(&self) -> Res> { - assert_eq!(self.packet_type, PacketType::VersionNegotiation); + if self.packet_type != PacketType::VersionNegotiation { + return Err(Error::InvalidPacket); + } let mut decoder = Decoder::new(&self.data[self.header_len..]); let mut res = Vec::new(); while decoder.remaining() > 0 { @@ -847,14 +895,17 @@ pub struct DecryptedPacket { } impl DecryptedPacket { + #[must_use] pub fn version(&self) -> Version { self.version } + #[must_use] pub fn packet_type(&self) -> PacketType { self.pt } + #[must_use] pub fn pn(&self) -> PacketNumber { self.pn } From 36fae6282b2214e4fea425ee4a952c08acf1c445 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 27 Mar 2024 16:50:21 +0100 Subject: [PATCH 20/28] perf(bin): add criterion benchmarks (#1758) * perf(bin): add criterion benchmarks Wraps the `neqo-client` and `neqo-server` code, starts the server and runs various benchmarks through the client. Benchmarks: - single-request-1gb - single-request-1mb - requests-per-second - handshakes-per-second * Rename benchmark instances * Turn off logging * Remove 100mb sample size restriction * Use v6 * Remove 1gb It just takes too long on the bench machine * Use /dev/null * rework imports * Fix import * Test without CPU pinning * Revert "Test without CPU pinning" This reverts commit a0ef46a3d321aa00aae0d3835ef44533dded46bf. * Pin all but neqo-bin to CPU 0 * Quote package * Add rational for neqo-bin handling * Rework tuples * Pin first * Just duplicate the two calls * Add --workspace flag * Remove taskset from neqo-bin --------- Co-authored-by: Martin Thomson --- .github/workflows/bench.yml | 10 +- neqo-bin/Cargo.toml | 16 +++- neqo-bin/benches/main.rs | 92 +++++++++++++++++++ neqo-bin/src/bin/client.rs | 14 +++ neqo-bin/src/bin/server.rs | 14 +++ neqo-bin/src/{bin => }/client/http09.rs | 3 +- neqo-bin/src/{bin => }/client/http3.rs | 2 +- .../src/{bin/client/main.rs => client/mod.rs} | 65 +++++++++---- neqo-bin/src/lib.rs | 40 +++++++- .../src/{bin/server/main.rs => server/mod.rs} | 46 ++++++---- neqo-bin/src/{bin => }/server/old_https.rs | 0 neqo-bin/src/udp.rs | 2 + 12 files changed, 262 insertions(+), 42 deletions(-) create mode 100644 neqo-bin/benches/main.rs create mode 100644 neqo-bin/src/bin/client.rs create mode 100644 neqo-bin/src/bin/server.rs rename neqo-bin/src/{bin => }/client/http09.rs (99%) rename neqo-bin/src/{bin => }/client/http3.rs (99%) rename neqo-bin/src/{bin/client/main.rs => client/mod.rs} (91%) rename neqo-bin/src/{bin/server/main.rs => server/mod.rs} (94%) rename neqo-bin/src/{bin => }/server/old_https.rs (100%) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 80c51c236d..5df8bcfd91 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -70,11 +70,15 @@ jobs: - name: Prepare machine run: sudo /root/bin/prep.sh - # Pin the benchmark run to core 0 and run all benchmarks at elevated priority. - name: Run cargo bench run: | - taskset -c 0 nice -n -20 \ - cargo "+$TOOLCHAIN" bench --features bench -- --noplot | tee results.txt + # Pin all but neqo-bin benchmarks to CPU 0. neqo-bin benchmarks run + # both a client and a server, thus benefiting from multiple CPU cores. 
+ # + # Run all benchmarks at elevated priority. + taskset -c 0 nice -n -20 cargo "+$TOOLCHAIN" bench --workspace --exclude neqo-bin --features bench -- --noplot | tee results.txt + nice -n -20 cargo "+$TOOLCHAIN" bench --package neqo-bin --features bench -- --noplot | tee -a results.txt + # Compare various configurations of neqo against msquic, and gather perf data # during the hyperfine runs. diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml index 04210e00db..a165a4ac32 100644 --- a/neqo-bin/Cargo.toml +++ b/neqo-bin/Cargo.toml @@ -11,12 +11,12 @@ license.workspace = true [[bin]] name = "neqo-client" -path = "src/bin/client/main.rs" +path = "src/bin/client.rs" bench = false [[bin]] name = "neqo-server" -path = "src/bin/server/main.rs" +path = "src/bin/server.rs" bench = false [lints] @@ -40,6 +40,18 @@ regex = { version = "1.9", default-features = false, features = ["unicode-perl"] tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = { version = "2.5", default-features = false } +[dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports", "async_tokio"] } +tokio = { version = "1", default-features = false, features = ["sync"] } + +[features] +bench = [] + [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options bench = false + +[[bench]] +name = "main" +harness = false +required-features = ["bench"] diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs new file mode 100644 index 0000000000..fe3aba2714 --- /dev/null +++ b/neqo-bin/benches/main.rs @@ -0,0 +1,92 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{path::PathBuf, str::FromStr}; + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use neqo_bin::{client, server}; +use tokio::runtime::Runtime; + +struct Benchmark { + name: String, + requests: Vec, + /// Download resources in series using separate connections. + download_in_series: bool, + sample_size: Option, +} + +fn transfer(c: &mut Criterion) { + neqo_common::log::init(Some(log::LevelFilter::Off)); + neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()); + + let done_sender = spawn_server(); + + for Benchmark { + name, + requests, + download_in_series, + sample_size, + } in [ + Benchmark { + name: "1-conn/1-100mb-resp (aka. Download)".to_string(), + requests: vec![100 * 1024 * 1024], + download_in_series: false, + sample_size: Some(10), + }, + Benchmark { + name: "1-conn/10_000-1b-seq-resp (aka. RPS)".to_string(), + requests: vec![1; 10_000], + download_in_series: false, + sample_size: None, + }, + Benchmark { + name: "100-seq-conn/1-1b-resp (aka. 
HPS)".to_string(), + requests: vec![1; 100], + download_in_series: true, + sample_size: None, + }, + ] { + let mut group = c.benchmark_group(name); + group.throughput(if requests[0] > 1 { + assert_eq!(requests.len(), 1); + Throughput::Bytes(requests[0]) + } else { + Throughput::Elements(requests.len() as u64) + }); + if let Some(size) = sample_size { + group.sample_size(size); + } + group.bench_function("client", |b| { + b.to_async(Runtime::new().unwrap()).iter_batched( + || client::client(client::Args::new(&requests, download_in_series)), + |client| async move { + client.await.unwrap(); + }, + BatchSize::PerIteration, + ); + }); + group.finish(); + } + + done_sender.send(()).unwrap(); +} + +fn spawn_server() -> tokio::sync::oneshot::Sender<()> { + let (done_sender, mut done_receiver) = tokio::sync::oneshot::channel(); + std::thread::spawn(move || { + Runtime::new().unwrap().block_on(async { + let mut server = Box::pin(server::server(server::Args::default())); + tokio::select! { + _ = &mut done_receiver => {} + _ = &mut server => {} + } + }); + }); + done_sender +} + +criterion_group!(benches, transfer); +criterion_main!(benches); diff --git a/neqo-bin/src/bin/client.rs b/neqo-bin/src/bin/client.rs new file mode 100644 index 0000000000..25c0e8753f --- /dev/null +++ b/neqo-bin/src/bin/client.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), neqo_bin::client::Error> { + let args = neqo_bin::client::Args::parse(); + + neqo_bin::client::client(args).await +} diff --git a/neqo-bin/src/bin/server.rs b/neqo-bin/src/bin/server.rs new file mode 100644 index 0000000000..8d166c7487 --- /dev/null +++ b/neqo-bin/src/bin/server.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), std::io::Error> { + let args = neqo_bin::server::Args::parse(); + + neqo_bin::server::server(args).await +} diff --git a/neqo-bin/src/bin/client/http09.rs b/neqo-bin/src/client/http09.rs similarity index 99% rename from neqo-bin/src/bin/client/http09.rs rename to neqo-bin/src/client/http09.rs index 372a112853..9bdb6dca85 100644 --- a/neqo-bin/src/bin/client/http09.rs +++ b/neqo-bin/src/client/http09.rs @@ -25,8 +25,7 @@ use neqo_transport::{ }; use url::Url; -use super::{get_output_file, Args, KeyUpdateState, Res}; -use crate::qlog_new; +use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; pub struct Handler<'a> { streams: HashMap>>, diff --git a/neqo-bin/src/bin/client/http3.rs b/neqo-bin/src/client/http3.rs similarity index 99% rename from neqo-bin/src/bin/client/http3.rs rename to neqo-bin/src/client/http3.rs index e9f5e406a5..c88a8448f6 100644 --- a/neqo-bin/src/bin/client/http3.rs +++ b/neqo-bin/src/client/http3.rs @@ -26,7 +26,7 @@ use neqo_transport::{ }; use url::Url; -use crate::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; +use super::{get_output_file, qlog_new, Args, KeyUpdateState, Res}; pub(crate) struct Handler<'a> { #[allow( diff --git a/neqo-bin/src/bin/client/main.rs b/neqo-bin/src/client/mod.rs similarity index 91% rename from neqo-bin/src/bin/client/main.rs rename to neqo-bin/src/client/mod.rs index 63aa12db13..e0169e3f24 100644 --- a/neqo-bin/src/bin/client/main.rs +++ b/neqo-bin/src/client/mod.rs @@ -21,25 +21,26 @@ use futures::{ future::{select, Either}, FutureExt, TryFutureExt, }; -use neqo_bin::udp; use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, Cipher, ResumptionToken, }; -use neqo_http3::{Error, Output}; +use neqo_http3::Output; use neqo_transport::{AppError, ConnectionId, Error as TransportError, Version}; use qlog::{events::EventImportance, streamer::QlogStreamer}; use tokio::time::Sleep; use url::{Origin, Url}; +use crate::{udp, SharedArgs}; + mod http09; mod http3; const BUFWRITER_BUFFER_SIZE: usize = 64 * 1024; #[derive(Debug)] -pub enum ClientError { +pub enum Error { ArgumentError(&'static str), Http3Error(neqo_http3::Error), IoError(io::Error), @@ -47,40 +48,40 @@ pub enum ClientError { TransportError(neqo_transport::Error), } -impl From for ClientError { +impl From for Error { fn from(err: io::Error) -> Self { Self::IoError(err) } } -impl From for ClientError { +impl From for Error { fn from(err: neqo_http3::Error) -> Self { Self::Http3Error(err) } } -impl From for ClientError { +impl From for Error { fn from(_err: qlog::Error) -> Self { Self::QlogError } } -impl From for ClientError { +impl From for Error { fn from(err: neqo_transport::Error) -> Self { Self::TransportError(err) } } -impl Display for ClientError { +impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {self:?}")?; Ok(()) } } -impl std::error::Error for ClientError {} +impl std::error::Error for Error {} -type Res = Result; +type Res = Result; /// Track whether a key update is needed. 
#[derive(Debug, PartialEq, Eq)] @@ -90,14 +91,14 @@ impl KeyUpdateState { pub fn maybe_update(&mut self, update_fn: F) -> Res<()> where F: FnOnce() -> Result<(), E>, - E: Into, + E: Into, { if self.0 { if let Err(e) = update_fn() { let e = e.into(); match e { - ClientError::TransportError(TransportError::KeyUpdateBlocked) - | ClientError::Http3Error(Error::TransportError( + Error::TransportError(TransportError::KeyUpdateBlocked) + | Error::Http3Error(neqo_http3::Error::TransportError( TransportError::KeyUpdateBlocked, )) => (), _ => return Err(e), @@ -123,7 +124,7 @@ pub struct Args { verbose: clap_verbosity_flag::Verbosity, #[command(flatten)] - shared: neqo_bin::SharedArgs, + shared: SharedArgs, urls: Vec, @@ -189,6 +190,36 @@ pub struct Args { } impl Args { + #[must_use] + #[cfg(feature = "bench")] + #[allow(clippy::missing_panics_doc)] + pub fn new(requests: &[u64], download_in_series: bool) -> Self { + use std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + urls: requests + .iter() + .map(|r| Url::from_str(&format!("http://[::1]:12345/{r}")).unwrap()) + .collect(), + method: "GET".into(), + header: vec![], + max_concurrent_push_streams: 10, + download_in_series, + concurrency: 100, + output_read_data: false, + output_dir: Some("/dev/null".into()), + resume: false, + key_update: false, + ech: None, + ipv4_only: false, + ipv6_only: false, + test: None, + upload_size: 100, + stats: false, + } + } + fn get_ciphers(&self) -> Vec { self.shared .ciphers @@ -445,10 +476,10 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { } } -#[tokio::main] -async fn main() -> Res<()> { - let mut args = Args::parse(); +pub async fn client(mut args: Args) -> Res<()> { neqo_common::log::init(Some(args.verbose.log_level_filter())); + init(); + args.update_for_tests(); init(); diff --git a/neqo-bin/src/lib.rs b/neqo-bin/src/lib.rs index b7bc158245..380c56ddce 100644 --- a/neqo-bin/src/lib.rs +++ b/neqo-bin/src/lib.rs @@ -4,6 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#![allow(clippy::missing_panics_doc)] +#![allow(clippy::missing_errors_doc)] + use std::{ fmt::{self, Display}, net::{SocketAddr, ToSocketAddrs}, @@ -17,7 +20,9 @@ use neqo_transport::{ Version, }; -pub mod udp; +pub mod client; +pub mod server; +mod udp; #[derive(Debug, Parser)] pub struct SharedArgs { @@ -57,6 +62,23 @@ pub struct SharedArgs { pub quic_parameters: QuicParameters, } +#[cfg(feature = "bench")] +impl Default for SharedArgs { + fn default() -> Self { + Self { + alpn: "h3".into(), + qlog_dir: None, + max_table_size_encoder: 16384, + max_table_size_decoder: 16384, + max_blocked_streams: 10, + ciphers: vec![], + qns_test: None, + use_old_http: false, + quic_parameters: QuicParameters::default(), + } + } +} + #[derive(Debug, Parser)] pub struct QuicParameters { #[arg( @@ -102,6 +124,22 @@ pub struct QuicParameters { pub preferred_address_v6: Option, } +#[cfg(feature = "bench")] +impl Default for QuicParameters { + fn default() -> Self { + Self { + quic_version: vec![], + max_streams_bidi: 16, + max_streams_uni: 16, + idle_timeout: 30, + congestion_control: CongestionControlAlgorithm::NewReno, + no_pacing: false, + preferred_address_v4: None, + preferred_address_v6: None, + } + } +} + impl QuicParameters { fn get_sock_addr(opt: &Option, v: &str, f: F) -> Option where diff --git a/neqo-bin/src/bin/server/main.rs b/neqo-bin/src/server/mod.rs similarity index 94% rename from neqo-bin/src/bin/server/main.rs rename to neqo-bin/src/server/mod.rs index 62eb19e78c..f89d6620de 100644 --- a/neqo-bin/src/bin/server/main.rs +++ b/neqo-bin/src/server/mod.rs @@ -25,28 +25,28 @@ use futures::{ future::{select, select_all, Either}, FutureExt, }; -use neqo_bin::udp; use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, generate_ech_keys, init_db, random, AntiReplay, Cipher, }; use neqo_http3::{ - Error, Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, + Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, }; use neqo_transport::{ server::ValidateAddress, ConnectionIdGenerator, Output, RandomConnectionIdGenerator, Version, }; +use old_https::Http09Server; use tokio::time::Sleep; -use crate::old_https::Http09Server; +use crate::{udp, SharedArgs}; const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10); mod old_https; #[derive(Debug)] -pub enum ServerError { +pub enum Error { ArgumentError(&'static str), Http3Error(neqo_http3::Error), IoError(io::Error), @@ -54,47 +54,47 @@ pub enum ServerError { TransportError(neqo_transport::Error), } -impl From for ServerError { +impl From for Error { fn from(err: io::Error) -> Self { Self::IoError(err) } } -impl From for ServerError { +impl From for Error { fn from(err: neqo_http3::Error) -> Self { Self::Http3Error(err) } } -impl From for ServerError { +impl From for Error { fn from(_err: qlog::Error) -> Self { Self::QlogError } } -impl From for ServerError { +impl From for Error { fn from(err: neqo_transport::Error) -> Self { Self::TransportError(err) } } -impl Display for ServerError { +impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Error: {self:?}")?; Ok(()) } } -impl std::error::Error for ServerError {} +impl std::error::Error for Error {} #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] -struct Args { +pub struct Args { #[command(flatten)] verbose: clap_verbosity_flag::Verbosity, 
#[command(flatten)] - shared: neqo_bin::SharedArgs, + shared: SharedArgs, /// List of IP:port to listen on #[arg(default_value = "[::]:4433")] @@ -119,6 +119,22 @@ struct Args { ech: bool, } +#[cfg(feature = "bench")] +impl Default for Args { + fn default() -> Self { + use std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + hosts: vec!["[::]:12345".to_string()], + db: PathBuf::from_str("../test-fixture/db").unwrap(), + key: "key".to_string(), + retry: false, + ech: false, + } + } +} + impl Args { fn get_ciphers(&self) -> Vec { self.shared @@ -339,7 +355,7 @@ impl HttpServer for SimpleServer { } } else { stream - .cancel_fetch(Error::HttpRequestIncomplete.code()) + .cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code()) .unwrap(); continue; }; @@ -565,11 +581,9 @@ enum Ready { Timeout, } -#[tokio::main] -async fn main() -> Result<(), io::Error> { +pub async fn server(mut args: Args) -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; - let mut args = Args::parse(); neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); diff --git a/neqo-bin/src/bin/server/old_https.rs b/neqo-bin/src/server/old_https.rs similarity index 100% rename from neqo-bin/src/bin/server/old_https.rs rename to neqo-bin/src/server/old_https.rs diff --git a/neqo-bin/src/udp.rs b/neqo-bin/src/udp.rs index f4ede0b5c2..7ccfa1f36f 100644 --- a/neqo-bin/src/udp.rs +++ b/neqo-bin/src/udp.rs @@ -23,6 +23,8 @@ use tokio::io::Interest; const RECV_BUF_SIZE: usize = u16::MAX as usize; pub struct Socket { + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] socket: tokio::net::UdpSocket, state: UdpSocketState, recv_buf: Vec, From f7492045743b5017e7d24051c15c21afe321c2a4 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:02:19 +0200 Subject: [PATCH 21/28] Enable the ossf/scorecard action (#1776) Let's see what this will report on our code... Signed-off-by: Lars Eggert --- .github/workflows/scorecard.yml | 72 +++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 .github/workflows/scorecard.yml diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 0000000000..338c9239c3 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,72 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '26 8 * * 6' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. 
+ # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: false + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + with: + sarif_file: results.sarif From 3e12eae25da4ef3ac45363e512e89b538216d005 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:15:54 +0200 Subject: [PATCH 22/28] ci: Fix shell script bugs in `nss/action.yml` (#1778) --- .github/actions/nss/action.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index ec6f13eaf8..051b54143b 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -34,9 +34,9 @@ runs: fi NSS_MAJOR=$(echo "$NSS_VERSION" | cut -d. -f1) NSS_MINOR=$(echo "$NSS_VERSION" | cut -d. -f2) - REQ_NSS_MAJOR=$(cat neqo-crypto/min_version.txt | cut -d. -f1) - REQ_NSS_MINOR=$(cat neqo-crypto/min_version.txt | cut -d. -f2) - if [ "$NSS_MAJOR" -lt "REQ_NSS_MAJOR" ] || [ "$NSS_MAJOR" -eq "REQ_NSS_MAJOR" -a "$NSS_MINOR" -lt "REQ_NSS_MINOR"]; then + REQ_NSS_MAJOR=$(cut -d. -f1 < neqo-crypto/min_version.txt) + REQ_NSS_MINOR=$(cut -d. -f2 < neqo-crypto/min_version.txt) + if [[ "$NSS_MAJOR" -lt "$REQ_NSS_MAJOR" || "$NSS_MAJOR" -eq "$REQ_NSS_MAJOR" && "$NSS_MINOR" -lt "$REQ_NSS_MINOR" ]]; then echo "System NSS is too old: $NSS_VERSION" echo "BUILD_NSS=1" >> "$GITHUB_ENV" exit 0 From 3151adc53e71273eed1319114380119c70e169a2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 28 Mar 2024 08:44:37 +0200 Subject: [PATCH 23/28] fix: Don't panic in `neqo_crypto::init()` (#1775) * fix: Don't panic in `neqo_crypto::init()` Return `Res<()>` instead. 
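A minimal caller-side sketch of the new fallible API (illustrative only, not part of this patch; the `setup` helper and its error handling are assumptions based on the diff below):

```rust
// Sketch: with this change, callers propagate initialization failures
// instead of aborting. `setup` is a hypothetical helper, not neqo code.
use std::path::PathBuf;

fn setup(db_dir: Option<PathBuf>) -> Result<(), neqo_crypto::Error> {
    match db_dir {
        Some(dir) => neqo_crypto::init_db(dir)?, // NSS with a certificate database
        None => neqo_crypto::init()?,            // NSS without a database
    }
    Ok(())
}
```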
Fixes #1675 * Update neqo-bin/src/bin/client/main.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/src/lib.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Update neqo-crypto/tests/init.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * Address code review * Try and improve coverage * Fix the `nss_nodb` case * Fail tests if `init()` fails * Address code review * Update neqo-bin/src/bin/client/main.rs Co-authored-by: Martin Thomson Signed-off-by: Lars Eggert * InternalError -> CryptoError * Fix merge * clippy --------- Signed-off-by: Lars Eggert Co-authored-by: Martin Thomson --- neqo-bin/benches/main.rs | 2 +- neqo-bin/src/bin/server.rs | 2 +- neqo-bin/src/client/mod.rs | 11 +++++-- neqo-bin/src/server/mod.rs | 15 +++++++-- neqo-crypto/src/lib.rs | 53 ++++++++++++++++---------------- neqo-crypto/tests/init.rs | 51 +++++++++++++++++++++++------- neqo-crypto/tests/selfencrypt.rs | 4 +-- test-fixture/src/lib.rs | 6 +++- 8 files changed, 96 insertions(+), 48 deletions(-) diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs index fe3aba2714..6bb8b3161d 100644 --- a/neqo-bin/benches/main.rs +++ b/neqo-bin/benches/main.rs @@ -20,7 +20,7 @@ struct Benchmark { fn transfer(c: &mut Criterion) { neqo_common::log::init(Some(log::LevelFilter::Off)); - neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()); + neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()).unwrap(); let done_sender = spawn_server(); diff --git a/neqo-bin/src/bin/server.rs b/neqo-bin/src/bin/server.rs index 8d166c7487..e9b30261e4 100644 --- a/neqo-bin/src/bin/server.rs +++ b/neqo-bin/src/bin/server.rs @@ -7,7 +7,7 @@ use clap::Parser; #[tokio::main] -async fn main() -> Result<(), std::io::Error> { +async fn main() -> Result<(), neqo_bin::server::Error> { let args = neqo_bin::server::Args::parse(); neqo_bin::server::server(args).await diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index e0169e3f24..81721802e1 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -46,6 +46,13 @@ pub enum Error { IoError(io::Error), QlogError, TransportError(neqo_transport::Error), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } } impl From for Error { @@ -478,11 +485,11 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { pub async fn client(mut args: Args) -> Res<()> { neqo_common::log::init(Some(args.verbose.log_level_filter())); - init(); + init()?; args.update_for_tests(); - init(); + init()?; let urls_by_origin = args .urls diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs index f89d6620de..38eb766f5f 100644 --- a/neqo-bin/src/server/mod.rs +++ b/neqo-bin/src/server/mod.rs @@ -52,6 +52,13 @@ pub enum Error { IoError(io::Error), QlogError, TransportError(neqo_transport::Error), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } } impl From for Error { @@ -87,6 +94,8 @@ impl Display for Error { impl std::error::Error for Error {} +type Res = Result; + #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] pub struct Args { @@ -551,7 +560,7 @@ impl ServersRunner { select(sockets_ready, timeout_ready).await.factor_first().0 } - async fn run(&mut self) -> Result<(), io::Error> { + async fn run(&mut self) -> Res<()> { loop { match self.ready().await? 
{ Ready::Socket(inx) => loop { @@ -581,13 +590,13 @@ enum Ready { Timeout, } -pub async fn server(mut args: Args) -> Result<(), io::Error> { +pub async fn server(mut args: Args) -> Res<()> { const HQ_INTEROP: &str = "hq-interop"; neqo_common::log::init(Some(args.verbose.log_level_filter())); assert!(!args.key.is_empty(), "Need at least one key"); - init_db(args.db.clone()); + init_db(args.db.clone())?; if let Some(testcase) = args.shared.qns_test.as_ref() { if args.shared.quic_parameters.quic_version.is_empty() { diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index b82b225d40..2db985e8ee 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -90,7 +90,7 @@ impl Drop for NssLoaded { } } -static INITIALIZED: OnceLock = OnceLock::new(); +static INITIALIZED: OnceLock> = OnceLock::new(); fn already_initialized() -> bool { unsafe { nss::NSS_IsInitialized() != 0 } @@ -108,24 +108,24 @@ fn version_check() { /// Initialize NSS. This only executes the initialization routines once, so if there is any chance /// that /// -/// # Panics +/// # Errors /// /// When NSS initialization fails. -pub fn init() { +pub fn init() -> Res<()> { // Set time zero. time::init(); - _ = INITIALIZED.get_or_init(|| { + let res = INITIALIZED.get_or_init(|| { version_check(); if already_initialized() { - return NssLoaded::External; + return Ok(NssLoaded::External); } - secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) }).expect("NSS_NoDB_Init failed"); - secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) - .expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?; + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; - NssLoaded::NoDb + Ok(NssLoaded::NoDb) }); + res.as_ref().map(|_| ()).map_err(Clone::clone) } /// This enables SSLTRACE by calling a simple, harmless function to trigger its @@ -133,31 +133,32 @@ pub fn init() { /// global options are accessed. Reading an option is the least impact approach. /// This allows us to use SSLTRACE in all of our unit tests and programs. #[cfg(debug_assertions)] -fn enable_ssl_trace() { +fn enable_ssl_trace() -> Res<()> { let opt = ssl::Opt::Locking.as_int(); let mut v: ::std::os::raw::c_int = 0; secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) }) - .expect("SSL_OptionGetDefault failed"); } /// Initialize with a database. /// -/// # Panics +/// # Errors /// /// If NSS cannot be initialized. 
-pub fn init_db>(dir: P) { +pub fn init_db>(dir: P) -> Res<()> { time::init(); - _ = INITIALIZED.get_or_init(|| { + let res = INITIALIZED.get_or_init(|| { version_check(); if already_initialized() { - return NssLoaded::External; + return Ok(NssLoaded::External); } let path = dir.into(); - assert!(path.is_dir()); - let pathstr = path.to_str().expect("path converts to string").to_string(); - let dircstr = CString::new(pathstr).unwrap(); - let empty = CString::new("").unwrap(); + if !path.is_dir() { + return Err(Error::InternalError); + } + let pathstr = path.to_str().ok_or(Error::InternalError)?; + let dircstr = CString::new(pathstr)?; + let empty = CString::new("")?; secstatus_to_res(unsafe { nss::NSS_Initialize( dircstr.as_ptr(), @@ -166,21 +167,19 @@ pub fn init_db>(dir: P) { nss::SECMOD_DB.as_ptr().cast(), nss::NSS_INIT_READONLY, ) - }) - .expect("NSS_Initialize failed"); + })?; - secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() }) - .expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; secstatus_to_res(unsafe { ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr()) - }) - .expect("SSL_ConfigServerSessionIDCache failed"); + })?; #[cfg(debug_assertions)] - enable_ssl_trace(); + enable_ssl_trace()?; - NssLoaded::Db + Ok(NssLoaded::Db) }); + res.as_ref().map(|_| ()).map_err(Clone::clone) } /// # Panics diff --git a/neqo-crypto/tests/init.rs b/neqo-crypto/tests/init.rs index 13218cc340..ee7d808e29 100644 --- a/neqo-crypto/tests/init.rs +++ b/neqo-crypto/tests/init.rs @@ -15,13 +15,7 @@ use neqo_crypto::{assert_initialized, init_db}; // Pull in the NSS internals so that we can ask NSS if it thinks that // it is properly initialized. -#[allow( - dead_code, - non_upper_case_globals, - clippy::redundant_static_lifetimes, - clippy::unseparated_literal_suffix, - clippy::upper_case_acronyms -)] +#[allow(dead_code, non_upper_case_globals)] mod nss { include!(concat!(env!("OUT_DIR"), "/nss_init.rs")); } @@ -29,19 +23,54 @@ mod nss { #[cfg(nss_nodb)] #[test] fn init_nodb() { - init(); + neqo_crypto::init().unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); } } +#[cfg(nss_nodb)] +#[test] +fn init_twice_nodb() { + unsafe { + nss::NSS_NoDB_Init(std::ptr::null()); + assert_ne!(nss::NSS_IsInitialized(), 0); + } + // Now do it again + init_nodb(); +} + #[cfg(not(nss_nodb))] #[test] fn init_withdb() { - init_db(::test_fixture::NSS_DB_PATH); + init_db(::test_fixture::NSS_DB_PATH).unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); + } +} + +#[cfg(not(nss_nodb))] +#[test] +fn init_twice_withdb() { + use std::{ffi::CString, path::PathBuf}; + + let empty = CString::new("").unwrap(); + let path: PathBuf = ::test_fixture::NSS_DB_PATH.into(); + assert!(path.is_dir()); + let pathstr = path.to_str().unwrap(); + let dircstr = CString::new(pathstr).unwrap(); + unsafe { + nss::NSS_Initialize( + dircstr.as_ptr(), + empty.as_ptr(), + empty.as_ptr(), + nss::SECMOD_DB.as_ptr().cast(), + nss::NSS_INIT_READONLY, + ); + assert_ne!(nss::NSS_IsInitialized(), 0); } + // Now do it again + init_withdb(); } diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index b20aa27ee6..9fc2162fe2 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -15,7 +15,7 @@ use neqo_crypto::{ #[test] fn se_create() { - init(); + init().unwrap(); SelfEncrypt::new(TLS_VERSION_1_3, 
TLS_AES_128_GCM_SHA256).expect("constructor works"); } @@ -23,7 +23,7 @@ const PLAINTEXT: &[u8] = b"PLAINTEXT"; const AAD: &[u8] = b"AAD"; fn sealed() -> (SelfEncrypt, Vec) { - init(); + init().unwrap(); let se = SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).unwrap(); let sealed = se.seal(AAD, PLAINTEXT).expect("sealing works"); (se, sealed) diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index a6043cd974..e34fb522ff 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -41,8 +41,12 @@ pub const NSS_DB_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/db"); /// Initialize the test fixture. Only call this if you aren't also calling a /// fixture function that depends on setup. Other functions in the fixture /// that depend on this setup call the function for you. +/// +/// # Panics +/// +/// When the NSS initialization fails. pub fn fixture_init() { - init_db(NSS_DB_PATH); + init_db(NSS_DB_PATH).unwrap(); } // This needs to be > 2ms to avoid it being rounded to zero. From 92ed07f1b7a537c1638a3a6fce8b0baa66e64ac4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 09:53:07 +0300 Subject: [PATCH 24/28] build(deps): bump github/codeql-action from 2.2.4 to 3.24.9 (#1782) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.2.4 to 3.24.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/17573ee1cc1b9d061760f3a006fc4aac4f944fd5...1b1aada464948af03b950897e5eb522f92603cc2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 338c9239c3..dfbda87b58 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 with: sarif_file: results.sarif From 94f8e687ebb0eff9656f556b38edb1a4b43fcb9d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 09:53:27 +0300 Subject: [PATCH 25/28] build(deps): bump ossf/scorecard-action from 2.1.2 to 2.3.1 (#1781) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.1.2 to 2.3.1. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/e38b1902ae4f44df626f11ba0734b14fb91f8f86...0864cf19026789058feabb7e87baa5f140aac736) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index dfbda87b58..01e5fe16a8 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -37,7 +37,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif From 27a7250dd321e60bd2f68564cf7c2fd58fac0e27 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 2 Apr 2024 10:43:29 +0300 Subject: [PATCH 26/28] fix: Exit with `ProtocolViolation` if a packet has no frames (#1779) * fix: Exit with `ProtocolViolation` if a packet has no frames Fixes #1476 * Fix comment * Cleanup * Simplify * Address code review * Clippy --- neqo-transport/src/connection/mod.rs | 4 ++ neqo-transport/src/packet/mod.rs | 6 +-- neqo-transport/tests/connection.rs | 70 ++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 5 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 3bf6b91263..06d6cab9e1 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1552,6 +1552,10 @@ impl Connection { packet: &DecryptedPacket, now: Instant, ) -> Res { + (!packet.is_empty()) + .then_some(()) + .ok_or(Error::ProtocolViolation)?; + // TODO(ekr@rtfm.com): Have the server blow away the initial // crypto state if this fails? Otherwise, we will get a panic // on the assert for doesn't exist. diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 0843d050ab..d435ac0dd8 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -764,14 +764,10 @@ impl<'a> PublicPacket<'a> { assert_ne!(self.packet_type, PacketType::Retry); assert_ne!(self.packet_type, PacketType::VersionNegotiation); - qtrace!( - "unmask hdr={}", - hex(&self.data[..self.header_len + SAMPLE_OFFSET]) - ); - let sample_offset = self.header_len + SAMPLE_OFFSET; let mask = if let Some(sample) = self.data.get(sample_offset..(sample_offset + SAMPLE_SIZE)) { + qtrace!("unmask hdr={}", hex(&self.data[..sample_offset])); crypto.compute_mask(sample) } else { Err(Error::NoMoreData) diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 0b91fcf306..b8877b946d 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -127,6 +127,76 @@ fn reorder_server_initial() { assert_eq!(*client.state(), State::Confirmed); } +fn set_payload(server_packet: &Option, client_dcid: &[u8], payload: &[u8]) -> Datagram { + let (server_initial, _server_hs) = split_datagram(server_packet.as_ref().unwrap()); + let (protected_header, _, _, orig_payload) = + decode_initial_header(&server_initial, Role::Server); + + // Now decrypt the packet. + let (aead, hp) = initial_aead_and_hp(client_dcid, Role::Server); + let (mut header, pn) = remove_header_protection(&hp, protected_header, orig_payload); + assert_eq!(pn, 0); + // Re-encode the packet number as four bytes, so we have enough material for the header + // protection sample if payload is empty. 
+ let pn_pos = header.len() - 2; + header[pn_pos] = u8::try_from(4 + aead.expansion()).unwrap(); + header.resize(header.len() + 3, 0); + header[0] |= 0b0000_0011; // Set the packet number length to 4. + + // And build a packet containing the given payload. + let mut packet = header.clone(); + packet.resize(header.len() + payload.len() + aead.expansion(), 0); + aead.encrypt(pn, &header, payload, &mut packet[header.len()..]) + .unwrap(); + apply_header_protection(&hp, &mut packet, protected_header.len()..header.len()); + Datagram::new( + server_initial.source(), + server_initial.destination(), + server_initial.tos(), + server_initial.ttl(), + packet, + ) +} + +/// Test that the stack treats a packet without any frames as a protocol violation. +#[test] +fn packet_without_frames() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[]); + client.process_input(&modified, now()); + assert_eq!( + client.state(), + &State::Closed(ConnectionError::Transport(Error::ProtocolViolation)) + ); +} + +/// Test that the stack permits a packet containing only padding. +#[test] +fn packet_with_only_padding() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[0]); + client.process_input(&modified, now()); + assert_eq!(client.state(), &State::WaitInitial); +} + /// Overflow the crypto buffer. #[allow(clippy::similar_names)] // For ..._scid and ..._dcid, which are fine. #[test] From 68606871ca9f1a8435f2472bcaf49e83a06a823b Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Apr 2024 14:23:59 +0200 Subject: [PATCH 27/28] refactor: print current `StateSignaling` variant in `debug_assert` (#1783) * refactor: print current `StateSignaling` variant in debug_assert CI paniced in `StateSignaling::handshake_done`. Though failure hasn't been reproducible locally. To ease debugging future CI failures, print the current state on panic. ``` thread 'idle_timeout_crazy_rtt' panicked at neqo-transport\src\connection\state.rs:212:13: StateSignaling must be in Idle state. 
stack backtrace: 0: std::panicking::begin_panic_handler at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\std\src\panicking.rs:646 1: core::panicking::panic_fmt at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\core\src\panicking.rs:72 2: enum2$::handshake_done at .\src\connection\state.rs:212 3: neqo_transport::connection::Connection::handle_lost_packets at .\src\connection\mod.rs:2847 4: neqo_transport::connection::Connection::process_timer at .\src\connection\mod.rs:966 5: neqo_transport::connection::Connection::process_output at .\src\connection\mod.rs:1085 6: neqo_transport::connection::Connection::process at .\src\connection\mod.rs:1108 7: test_fixture::sim::connection::impl$1::process at D:\a\neqo\neqo\test-fixture\src\sim\connection.rs:146 8: test_fixture::sim::Simulator::process_loop at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:193 9: test_fixture::sim::ReadySimulator::run at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:284 10: test_fixture::sim::Simulator::run at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:265 11: network::idle_timeout_crazy_rtt at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:69 12: network::idle_timeout_crazy_rtt::closure$0 at D:\a\neqo\neqo\test-fixture\src\sim\mod.rs:58 13: core::ops::function::FnOnce::call_once > at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373\library\core\src\ops\function.rs:250 14: core::ops::function::FnOnce::call_once at /rustc/8df7e723ea729a7f917501cc2d91d640b7021373/library\core\src\ops\function.rs:250 note: Some details are omitted, run with `RUST_BACKTRACE=full` for a verbose backtrace. ``` https://github.com/mozilla/neqo/actions/runs/8496770595/job/23274538553 * clippy --- neqo-transport/src/connection/state.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index 9789151d3f..cc2f6e30d2 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -209,7 +209,10 @@ pub enum StateSignaling { impl StateSignaling { pub fn handshake_done(&mut self) { if !matches!(self, Self::Idle) { - debug_assert!(false, "StateSignaling must be in Idle state."); + debug_assert!( + false, + "StateSignaling must be in Idle state but is in {self:?} state.", + ); return; } *self = Self::HandshakeDone; From 07514299a70a8aea8d28a25544e85cf96620d976 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 2 Apr 2024 18:25:10 +0200 Subject: [PATCH 28/28] refactor(server): simplify :method POST check (#1787) --- neqo-bin/src/server/mod.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs index 38eb766f5f..e3067ecdf0 100644 --- a/neqo-bin/src/server/mod.rs +++ b/neqo-bin/src/server/mod.rs @@ -336,13 +336,10 @@ impl HttpServer for SimpleServer { } => { qdebug!("Headers (request={stream} fin={fin}): {headers:?}"); - let post = if let Some(method) = headers.iter().find(|&h| h.name() == ":method") + if headers + .iter() + .any(|h| h.name() == ":method" && h.value() == "POST") { - method.value() == "POST" - } else { - false - }; - if post { self.posts.insert(stream, 0); continue; }
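The same predicate-style check in isolation, as a self-contained sketch (the `Header` struct below is a stand-in with `name()`/`value()` accessors, not the `neqo_common::Header` type used in the patch):

```rust
// Standalone illustration of the simplified :method check above.
struct Header {
    name: String,
    value: String,
}

impl Header {
    fn name(&self) -> &str {
        &self.name
    }

    fn value(&self) -> &str {
        &self.value
    }
}

fn is_post(headers: &[Header]) -> bool {
    // A single pass over the headers; no intermediate `Option` handling.
    headers
        .iter()
        .any(|h| h.name() == ":method" && h.value() == "POST")
}

fn main() {
    let req = vec![
        Header { name: ":method".into(), value: "POST".into() },
        Header { name: ":path".into(), value: "/upload".into() },
    ];
    assert!(is_post(&req));
}
```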