From 85bd9534d93978e6c3d815d4258138fb817516e1 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Thu, 30 Nov 2023 23:06:05 -0800 Subject: [PATCH 01/17] Initial commit on refactor to support ephemeral streams * Resolves #85 * Resolves #88 - Need to still add code for replacing a stream * Resolves #94 * Resolves #95 * Resolves #96 * Resolves #97 --- cmd/client.cc | 43 +- cmd/echoServer.cc | 45 +- include/transport/priority_queue.h | 13 + include/transport/safe_queue.h | 20 +- include/transport/time_queue.h | 4 +- include/transport/transport.h | 108 +-- src/transport_picoquic.cpp | 1408 +++++++++++++++------------- src/transport_picoquic.h | 218 +++-- src/transport_udp.cpp | 44 +- src/transport_udp.h | 50 +- 10 files changed, 1083 insertions(+), 870 deletions(-) diff --git a/cmd/client.cc b/cmd/client.cc index 290e557..7e0aca6 100644 --- a/cmd/client.cc +++ b/cmd/client.cc @@ -17,7 +17,7 @@ struct Delegate : public ITransport::TransportDelegate private: std::shared_ptr client; uint64_t msgcount; - TransportContextId tcid; + TransportConnId conn_id; cantina::LoggerPointer logger; public: @@ -25,7 +25,7 @@ struct Delegate : public ITransport::TransportDelegate : logger(std::make_shared("CMD", logger)) { msgcount = 0; - tcid = 0; + conn_id = 0; } void stop() { @@ -34,21 +34,22 @@ struct Delegate : public ITransport::TransportDelegate void setClientTransport(std::shared_ptr client) { this->client = client; } - TransportContextId getContextId() { return tcid; } + TransportConnId getContextId() { return conn_id; } - void on_connection_status(const TransportContextId& context_id, const TransportStatus status) + void on_connection_status(const TransportConnId& conn_id, const TransportStatus status) { - tcid = context_id; - logger->info << "Connection state change context: " << context_id << ", " << int(status) << std::flush; + logger->info << "Connection state change conn_id: " << conn_id << ", " << int(status) << std::flush; } - void on_new_connection(const TransportContextId& /* context_id */, const TransportRemote& /* remote */) {} - void on_recv_notify(const TransportContextId& context_id, const StreamId& streamId) + void on_new_connection(const TransportConnId& , const TransportRemote&) {} + + void on_recv_notify(const TransportConnId& conn_id, const DataContextId& data_ctx_id) { static uint32_t prev_msg_num = 0; + static uint32_t prev_msgcount = 0; while (true) { - auto data = client->dequeue(context_id, streamId); + auto data = client->dequeue(conn_id, data_ctx_id); if (data.has_value()) { msgcount++; @@ -56,7 +57,7 @@ struct Delegate : public ITransport::TransportDelegate uint32_t* msg_num = (uint32_t*)data.value().data(); if (prev_msg_num && (*msg_num - prev_msg_num) > 1) { - logger->info << "cid: " << context_id << " sid: " << streamId << " length: " << data->size() + logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " length: " << data->size() << " RecvMsg (" << msgcount << ")" << " msg_num: " << *msg_num << " prev_num: " << prev_msg_num << "(" << *msg_num - prev_msg_num << ")" << std::flush; @@ -65,11 +66,16 @@ struct Delegate : public ITransport::TransportDelegate prev_msg_num = *msg_num; } else { + if (msgcount % 2000 == 0 && prev_msgcount != msgcount) { + logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " msgcount: " << msgcount << std::flush; + } + break; } } } - void on_new_stream(const TransportContextId& /* context_id */, const StreamId& /* streamId */) {} + + void on_new_data_context(const TransportConnId&, const DataContextId&) 
{} }; cantina::LoggerPointer logger = std::make_shared(); @@ -95,14 +101,19 @@ main() if ((envVar = getenv("RELAY_PORT"))) server.port = atoi(envVar); + bool bidir = true; + if (getenv("RELAY_UNIDIR")) + bidir = false; + auto client = ITransport::make_client_transport(server, tconfig, d, logger); + logger->info << "bidir is " << (bidir ? "True" : "False") << std::flush; logger->info << "client use_count: " << client.use_count() << std::flush; d.setClientTransport(client); logger->info << "after set client transport client use_count: " << client.use_count() << std::flush; - auto tcid = client->start(); + auto conn_id = client->start(); uint8_t data_buf[4200]{ 0 }; while (client->status() != TransportStatus::Ready) { @@ -110,16 +121,16 @@ main() std::this_thread::sleep_for(std::chrono::milliseconds(250)); } - StreamId stream_id = client->createStream(tcid, true); + DataContextId data_ctx_id = client->createDataContext(conn_id, true, 1, bidir); uint32_t* msg_num = (uint32_t*)&data_buf; - while (true) { + while (client->status() != TransportStatus::Shutdown && client->status() != TransportStatus::Disconnected) { for (int i = 0; i < 10; i++) { (*msg_num)++; auto data = bytes(data_buf, data_buf + sizeof(data_buf)); - client->enqueue(tcid, server.proto == TransportProtocol::UDP ? 1 : stream_id, std::move(data)); + client->enqueue(conn_id, server.proto == TransportProtocol::UDP ? 1 : data_ctx_id, std::move(data)); } // Increase delay if using UDP, need to pace more @@ -130,7 +141,7 @@ main() } } - client->closeStream(tcid, stream_id); + client->deleteDataContext(conn_id, data_ctx_id); logger->Log("Done with transport, closing"); client.reset(); diff --git a/cmd/echoServer.cc b/cmd/echoServer.cc index 396698d..8233a86 100644 --- a/cmd/echoServer.cc +++ b/cmd/echoServer.cc @@ -11,14 +11,17 @@ using namespace qtransport; struct Delegate : public ITransport::TransportDelegate { private: std::shared_ptr server; - uint64_t msgcount; cantina::LoggerPointer logger; + uint64_t msgcount {0}; + uint64_t prev_msgcount {0}; + uint32_t prev_msg_num {0}; + DataContextId out_data_ctx {0}; + public: Delegate(const cantina::LoggerPointer& logger) : logger(std::make_shared("ECHO", logger)) { - msgcount = 0; } void stop() { @@ -29,24 +32,25 @@ struct Delegate : public ITransport::TransportDelegate { this->server = server; } - void on_connection_status(const TransportContextId &context_id, + void on_connection_status(const TransportConnId &conn_id, const TransportStatus status) { - logger->info << "Connection state change context: " << context_id << ", " + logger->info << "Connection state change conn_id: " << conn_id << ", " << int(status) << std::flush; } - void on_new_connection(const TransportContextId &context_id, + void on_new_connection(const TransportConnId &conn_id, const TransportRemote &remote) { - logger->info << "New connection cid: " << context_id << " from " + logger->info << "New connection conn_id: " << conn_id << " from " << remote.host_or_ip << ":" << remote.port << std::flush; + + out_data_ctx = this->server->createDataContext(conn_id, true, 10); } - void on_recv_notify(const TransportContextId &context_id, - const StreamId &streamId) { - static uint32_t prev_msg_num = 0; + void on_recv_notify(const TransportConnId &conn_id, + const DataContextId &data_ctx_id) { while (true) { - auto data = server->dequeue(context_id, streamId); + auto data = server->dequeue(conn_id, data_ctx_id); if (data.has_value()) { msgcount++; @@ -54,7 +58,7 @@ struct Delegate : public ITransport::TransportDelegate { uint32_t 
*msg_num = (uint32_t *)data->data(); if (prev_msg_num && (*msg_num - prev_msg_num) > 1) { - logger->info << "cid: " << context_id << " sid: " << streamId << " length: " << data->size() + logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " length: " << data->size() << " RecvMsg (" << msgcount << ")" << " msg_num: " << *msg_num << " prev_num: " << prev_msg_num << "(" << *msg_num - prev_msg_num << ")" << std::flush; @@ -62,15 +66,24 @@ struct Delegate : public ITransport::TransportDelegate { prev_msg_num = *msg_num; - server->enqueue(context_id, streamId, std::move(data.value())); + server->enqueue(conn_id, out_data_ctx, std::move(data.value())); + } else { + if (msgcount % 2000 == 0 && prev_msgcount != msgcount) { + prev_msgcount = msgcount; + logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " msgcount: " << msgcount << std::flush; + } + break; } } } - void on_new_stream(const TransportContextId & /* context_id */, - const StreamId & /* streamId */) {} + void on_new_data_context(const TransportConnId& conn_id, const DataContextId& data_ctx_id) + { + logger->info << "Callback for new data context conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id + << std::flush; + } }; int main() { @@ -94,8 +107,8 @@ int main() { d.setServerTransport(server); - while (true) { - std::this_thread::sleep_for(std::chrono::seconds(60)); + while (server->status() != TransportStatus::Shutdown) { + std::this_thread::sleep_for(std::chrono::seconds(3)); } server.reset(); diff --git a/include/transport/priority_queue.h b/include/transport/priority_queue.h index f9ac6cb..7e70817 100644 --- a/include/transport/priority_queue.h +++ b/include/transport/priority_queue.h @@ -151,6 +151,19 @@ namespace qtransport { } } + /** + * @brief Clear queue + */ + void clear() { + std::lock_guard _(_mutex); + + for (auto& tqueue: _queue) { + if (tqueue && !tqueue->empty()) { + tqueue->clear(); + } + } + } + // TODO: Consider changing empty/size to look at timeQueue sizes - maybe support blocking pops size_t size() const { diff --git a/include/transport/safe_queue.h b/include/transport/safe_queue.h index 6df8915..d8cc547 100644 --- a/include/transport/safe_queue.h +++ b/include/transport/safe_queue.h @@ -110,8 +110,6 @@ class safe_queue pop_front_internal(); } - - /** * @brief Block waiting for data in queue, then remove the first object from * queue (oldest object) @@ -145,6 +143,15 @@ class safe_queue return queue.size(); } + /** + * @brief Clear the queue + */ + void clear() { + std::lock_guard _(mutex); + std::queue empty; + std::swap(queue, empty); + } + /** * @brief Check if queue is empty * @@ -189,6 +196,10 @@ class safe_queue auto elem = queue.front(); queue.pop(); + if (queue.empty()) { + _empty = true; + } + return std::move(elem); } @@ -200,10 +211,15 @@ class safe_queue void pop_front_internal() { if (queue.empty()) { + _empty = true; return; } queue.pop(); + + if (queue.empty()) { + _empty = true; + } } std::atomic _empty { true }; diff --git a/include/transport/time_queue.h b/include/transport/time_queue.h index e6c3110..0b9050b 100644 --- a/include/transport/time_queue.h +++ b/include/transport/time_queue.h @@ -308,7 +308,6 @@ namespace qtransport { size_t size() const noexcept { return _queue.size() - _queue_index; } bool empty() const noexcept { return _queue.empty() || _queue_index >= _queue.size(); } - private: /** * @brief Clear/reset the queue to no objects */ @@ -322,6 +321,9 @@ namespace qtransport { } } + private: + + /** * @brief Based on current 
time, adjust and move the bucket index with time * (sliding window) diff --git a/include/transport/transport.h b/include/transport/transport.h index 94f2b31..2e2657d 100644 --- a/include/transport/transport.h +++ b/include/transport/transport.h @@ -11,9 +11,8 @@ namespace qtransport { -using TransportContextId = uint64_t; ///< Context Id is a 64bit number that is used as a key to maps -using StreamId = uint64_t; ///< stream Id is a 64bit number that is - ///< used as a key to maps +using TransportConnId = uint64_t; ///< Connection Id is a 64bit number that is used as a key to maps +using DataContextId = uint64_t; ///< Data Context 64bit number that identifies a data flow/track/stream /** * Transport status/state values */ @@ -37,8 +36,8 @@ enum class TransportError : uint8_t PeerDisconnected, PeerUnreachable, CannotResolveHostname, - InvalidContextId, - InvalidStreamId, + InvalidConnContextId, + InvalidDataContextId, InvalidIpv4Address, InvalidIpv6Address }; @@ -59,9 +58,9 @@ enum class TransportProtocol */ struct TransportRemote { - std::string host_or_ip; // IPv4/v6 or FQDN (user input) - uint16_t port; // Port (user input) - TransportProtocol proto; // Protocol to use for the transport + std::string host_or_ip; /// IPv4/v6 or FQDN (user input) + uint16_t port; /// Port (user input) + TransportProtocol proto; /// Protocol to use for the transport }; /** @@ -117,10 +116,10 @@ class ITransport * * @details Called when the connection changes state/status * - * @param[in] context_id Transport context Id - * @param[in] status Transport Status value + * @param[in] conn_id Transport context Id + * @param[in] status Transport Status value */ - virtual void on_connection_status(const TransportContextId& context_id, + virtual void on_connection_status(const TransportConnId& conn_id, const TransportStatus status) = 0; /** @@ -129,26 +128,24 @@ class ITransport * @details Called when new connection is received. This is only used in * server mode. * - * @param[in] context_id Transport context identifier mapped to the - * connection - * @param[in] remote Transport information for the - * connection + * @param[in] conn_id Transport context identifier mapped to the connection + * @param[in] remote Transport information for the connection */ - virtual void on_new_connection(const TransportContextId& context_id, + virtual void on_new_connection(const TransportConnId& conn_id, const TransportRemote& remote) = 0; /** - * @brief Report arrival of a new stream + * @brief Report a new data context created * - * @details Called when new connection is received. This is only used in - * server mode. + * @details Report that a new data context was created for a new bi-directional + * stream that was received. This method is not called for app created + * data contexts. 
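Taken together, the delegate callbacks above now key off TransportConnId and DataContextId. A skeleton along the lines of the cmd/ examples earlier in this patch (MyDelegate and its transport member are illustrative names, not library types):

    struct MyDelegate : public ITransport::TransportDelegate
    {
        std::shared_ptr<ITransport> transport;   // set from the factory result, as in cmd/client.cc

        void on_connection_status(const TransportConnId& conn_id, const TransportStatus status) override {}
        void on_new_connection(const TransportConnId& conn_id, const TransportRemote& remote) override {}
        void on_new_data_context(const TransportConnId& conn_id, const DataContextId& data_ctx_id) override {}
        void on_recv_notify(const TransportConnId& conn_id, const DataContextId& data_ctx_id) override
        {
            while (auto data = transport->dequeue(conn_id, data_ctx_id)) { /* consume *data */ }
        }
    };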
* - * @param[in] context_id Transport context identifier mapped to the - * connection - * @param[in] streamId A new stream id created + * @param[in] conn_id Transport context identifier mapped to the connection + * @param[in] data_ctx_id Data context id for a new data context received by the transport */ - virtual void on_new_stream(const TransportContextId& context_id, - const StreamId & streamId) = 0; + virtual void on_new_data_context(const TransportConnId& conn_id, + const DataContextId& data_ctx_id) = 0; /** * @brief Event reporting transport has some data over @@ -157,13 +154,11 @@ class ITransport * @details Applications must invoke ITransport::deqeue() to obtain * the data by passing the transport context id * - * @param[in] context_id Transport context identifier mapped to the - * connection - * @param[in] streamId Stream id that the data was - * received on + * @param[in] conn_id Transport context identifier mapped to the connection + * @param[in] data_ctx_id Data context id that the data was received on */ - virtual void on_recv_notify(const TransportContextId& context_id, - const StreamId & streamId) = 0; + virtual void on_recv_notify(const TransportConnId& conn_id, + const DataContextId& data_ctx_id) = 0; }; /* Factory APIs */ @@ -222,34 +217,40 @@ class ITransport * * @return TransportContextId: identifying the connection */ - virtual TransportContextId start() = 0; + virtual TransportConnId start() = 0; /** - * @brief Create a stream in the context - * - * @todo change to generic stream + * @brief Create a data context + * @details Data context is flow of data (track, namespace). This is similar to a pipe of data to be transmitted. + * Metrics, shaping, etc. maintained at the data context level. * - * @param[in] context_id Identifying the connection + * @param[in] conn_id Connection ID to create data context * @param[in] use_reliable_transport Indicates a reliable stream is * preferred for transporting data * @param[in] priority Priority for stream (default is 1) + * @param[in] bidir Set context to be bi-directional or unidirectional * - * @return StreamId identifying the stream via the connection + * @return DataContextId identifying the data context via the connection */ - virtual StreamId createStream(const TransportContextId& context_id, - bool use_reliable_transport, - uint8_t priority=1) = 0; + virtual DataContextId createDataContext(const TransportConnId conn_id, + bool use_reliable_transport, + uint8_t priority = 1, + bool bidir = false) = 0; /** * @brief Close a transport context */ - virtual void close(const TransportContextId& context_id) = 0; + virtual void close(const TransportConnId& context_id) = 0; /** - * @brief Close/end a stream within context + * @brief Delete data context + * @details Deletes a data context for the given connection id. If reliable, the stream will + * be closed by FIN (graceful). 
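The caller-side flow, condensed from cmd/client.cc earlier in this patch (error handling omitted; data stands in for an application byte vector):

    auto conn_id = client->start();                                        // returns a TransportConnId
    auto data_ctx_id = client->createDataContext(conn_id, true, 1, true);  // reliable, priority 1, bidir
    client->enqueue(conn_id, data_ctx_id, std::move(data), 1, 350);        // priority 1, 350 ms TTL
    if (auto rx = client->dequeue(conn_id, data_ctx_id)) { /* consume *rx */ }
    client->deleteDataContext(conn_id, data_ctx_id);                       // closes the stream with a FIN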
+ * + * @param[in] conn_id Connection ID to create data context + * @param[in] data_ctx_id Data context ID to delete */ - virtual void closeStream(const TransportContextId& context_id, - StreamId stream_id) = 0; + virtual void deleteDataContext(const TransportConnId& conn_id, DataContextId data_ctx_id) = 0; /** * @brief Get the peer IP address and port associated with the stream @@ -259,7 +260,7 @@ class ITransport * * @returns True if the address was successfully returned, false otherwise */ - virtual bool getPeerAddrInfo(const TransportContextId& context_id, + virtual bool getPeerAddrInfo(const TransportConnId& context_id, sockaddr_storage* addr) = 0; /** @@ -268,21 +269,21 @@ class ITransport * @details Add data to the transport queue. Data enqueued will be transmitted * when available. * - * @todo is priority missing? - * * @param[in] context_id Identifying the connection - * @param[in] streamId stream Id to send data on + * @param[in] data_ctx_id stream Id to send data on * @param[in] bytes Data to send/write * @param[in] priority Priority of the object, range should be 0 - 255 * @param[in] ttl_ms The age the object should exist in queue in milliseconds * * @returns TransportError is returned indicating status of the operation */ - virtual TransportError enqueue(const TransportContextId& context_id, - const StreamId & streamId, + virtual TransportError enqueue(const TransportConnId& context_id, + const DataContextId& data_ctx_id, std::vector&& bytes, const uint8_t priority = 1, - const uint32_t ttl_ms=350) = 0; + const uint32_t ttl_ms=350, + const bool new_stream=false, + const bool buffer_reset=false) = 0; /** * @brief Dequeue application data from transport queue @@ -291,14 +292,13 @@ class ITransport * to the caller using this method. An empty return will be * * @param[in] context_id Identifying the connection - * @param[in] streamId Stream Id to receive data - * from + * @param[in] data_ctx_id Stream Id to receive data from * * @returns std::nullopt if there is no data */ virtual std::optional> dequeue( - const TransportContextId& context_id, - const StreamId & streamId) = 0; + const TransportConnId& context_id, + const DataContextId& data_ctx_id) = 0; }; } // namespace qtransport diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 27c4591..5fa2907 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -26,20 +26,18 @@ using namespace qtransport; namespace { /** - * @brief Returns the appropriate stream id depending on if the stream is + * @brief Returns the next stream id depending on if the stream is * initiated by the client or the server, and if it bi- or uni- directional. * - * @tparam Int_t The preferred integer type to deal with. - * - * @param id The initial value to adjust - * @param is_server Flag if the initiating request is from a server or a client - * @param is_unidirectional Flag if the streams are bi- or uni- directional. + * @param id The initial value to adjust + * @param is_server Flag if the initiating request is from a server or a client + * @param is_unidirectional Flag if the streams are bi- or uni- directional. * * @return The correctly adjusted stream id value. */ - constexpr StreamId make_stream_id(StreamId id, bool is_server, bool is_unidirectional) + constexpr uint64_t get_next_stream_id(uint64_t last_stream_id, bool is_server, bool is_unidirectional) { - return (id & (~0x0u << 2)) | (is_server ? 0b01 : 0b00) | (is_unidirectional ? 0b10 : 0b00); + return ((last_stream_id + 4) & (~0x0u << 2)) | (is_server ? 
0b01 : 0b00) | (is_unidirectional ? 0b10 : 0b00); } /** @@ -51,18 +49,18 @@ namespace { * * @return The datagram stream id. */ - constexpr StreamId make_datagram_stream_id(bool is_server, bool is_unidirectional) + constexpr DataContextId make_datagram_stream_id(bool is_server, bool is_unidirectional) { return 0; } } // namespace -/* +/* ============================================================================ * PicoQuic Callbacks + * ============================================================================ */ -int -pq_event_cb(picoquic_cnx_t* cnx, +int pq_event_cb(picoquic_cnx_t* pq_cnx, uint64_t stream_id, uint8_t* bytes, size_t length, @@ -71,7 +69,8 @@ pq_event_cb(picoquic_cnx_t* cnx, void* v_stream_ctx) { PicoQuicTransport* transport = static_cast(callback_ctx); - PicoQuicTransport::StreamContext* stream_cnx = static_cast(v_stream_ctx); + PicoQuicTransport::DataContext* data_ctx = static_cast(v_stream_ctx); + const auto conn_id = reinterpret_cast(pq_cnx); bool is_fin = false; @@ -84,42 +83,46 @@ pq_event_cb(picoquic_cnx_t* cnx, switch (fin_or_event) { case picoquic_callback_prepare_datagram: { + data_ctx = &transport->getDefaultDataContext(conn_id); + // length is the max allowed data length - if (stream_cnx == NULL) { - // picoquic doesn't provide stream context for datagram/stream_id==-0 - stream_cnx = transport->getZeroStreamContext(cnx); - } - transport->metrics.dgram_prepare_send++; - transport->send_next_datagram(stream_cnx, bytes, length); + data_ctx->metrics.dgram_prepare_send++; + transport->send_next_datagram(data_ctx, bytes, length); break; } case picoquic_callback_datagram_acked: + data_ctx = &transport->getDefaultDataContext(conn_id); + // Called for each datagram - TODO: Revisit how often acked frames are coming in // bytes carries the original packet data // log_msg.str(""); // log_msg << "Got datagram ack send_time: " << stream_id // << " bytes length: " << length; // transport->logger.log(LogLevel::info, log_msg.str()); - transport->metrics.dgram_ack++; + data_ctx->metrics.dgram_ack++; break; case picoquic_callback_datagram_spurious: - transport->metrics.dgram_spurious++; + data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx->metrics.dgram_spurious++; break; case picoquic_callback_datagram_lost: - transport->metrics.dgram_lost++; + data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx->metrics.dgram_lost++; break; case picoquic_callback_datagram: { - if (stream_cnx == NULL) { - // picoquic doesn't provide stream context for datagram/stream_id==-0 - stream_cnx = transport->getZeroStreamContext(cnx); - } + data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx->metrics.dgram_received++; + transport->on_recv_datagram(data_ctx, bytes, length); + break; + } - transport->metrics.dgram_received++; - transport->on_recv_datagram(stream_cnx, bytes, length); + case picoquic_callback_prepare_to_send: { + data_ctx->metrics.stream_prepare_send++; + transport->send_stream_bytes(data_ctx, bytes, length); break; } @@ -128,37 +131,45 @@ pq_event_cb(picoquic_cnx_t* cnx, // fall through to picoquic_callback_stream_data case picoquic_callback_stream_data: { - transport->metrics.stream_rx_callbacks++; - if (stream_cnx == NULL) { - stream_cnx = transport->createStreamContext(cnx, stream_id); - picoquic_set_app_stream_ctx(cnx, stream_id, stream_cnx); + if (data_ctx == NULL) { + if (not (stream_id & 0x2) /* bidir stream */) { + + // Create the data context for new bidir streams created by remote side + data_ctx = 
transport->createDataContextBiDirRecv(conn_id, stream_id); + picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); + } + else { + data_ctx = &transport->getDefaultDataContext(conn_id); + picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); + } } + data_ctx->metrics.stream_rx_callbacks++; + // length is the amount of data received if (length) { - transport->on_recv_stream_bytes(stream_cnx, bytes, length); + transport->on_recv_stream_bytes(data_ctx, bytes, length); } if (is_fin) { transport->logger->info << "Received FIN for stream " << stream_id << std::flush; - transport->deleteStreamContext(reinterpret_cast(cnx), stream_id); + transport->deleteDataContext(conn_id, stream_id); } break; } case picoquic_callback_stream_reset: { - transport->logger->info << "Reset stream " << stream_id - << std::flush; - - if (stream_id == 0) { // close connection - picoquic_close(cnx, 0); + transport->logger->info << "Received reset stream conn_id: " << conn_id << " stream_id: " << stream_id << std::flush; + if (stream_id & 0x2 /* bidir stream */) { + transport->deleteDataContext(conn_id, stream_id); } else { - picoquic_set_callback(cnx, NULL, NULL); + // Unidirectional stream, no need to FIN/RESET or clear up data context for unidir received streams + // TODO: This should not be an issue with threads since this is the only thread that adds to this. + data_ctx->reset_rx_object(); } - transport->deleteStreamContext(reinterpret_cast(cnx), stream_id); return 0; } @@ -178,10 +189,10 @@ pq_event_cb(picoquic_cnx_t* cnx, break; case picoquic_callback_pacing_changed: { - const auto cwin_bytes = picoquic_get_cwin(cnx); - const auto rtt_us = picoquic_get_rtt(cnx); + const auto cwin_bytes = picoquic_get_cwin(pq_cnx); + const auto rtt_us = picoquic_get_rtt(pq_cnx); - transport->logger->info << "Pacing rate changed; context_id: " << reinterpret_cast(cnx) + transport->logger->info << "Pacing rate changed; conn_id: " << conn_id << " rate bps: " << stream_id * 8 << " cwin_bytes: " << cwin_bytes << " rtt_us: " << rtt_us @@ -194,28 +205,32 @@ pq_event_cb(picoquic_cnx_t* cnx, transport->logger->info << "Closing connection stream_id: " << stream_id; - switch (picoquic_get_local_error(cnx)) { + switch (picoquic_get_local_error(pq_cnx)) { case PICOQUIC_ERROR_IDLE_TIMEOUT: transport->logger->info << " Idle timeout"; + break; default: - transport->logger->info << " local_error: " << picoquic_get_local_error(cnx) - << " remote_error: " << picoquic_get_remote_error(cnx) - << " app_error: " << picoquic_get_application_error(cnx); + transport->logger->info << " local_error: " << picoquic_get_local_error(pq_cnx) + << " remote_error: " << picoquic_get_remote_error(pq_cnx) + << " app_error: " << picoquic_get_application_error(pq_cnx); } - if (stream_cnx != NULL) { - auto conn_ctx = transport->getConnContext(reinterpret_cast(cnx)); + data_ctx = &transport->getDefaultDataContext(conn_id); - transport->logger->info << " " << conn_ctx.peer_addr_text; + if (data_ctx != NULL) { + auto conn_ctx = transport->getConnContext(conn_id); + + transport->logger->info << " " << conn_ctx->peer_addr_text; + + transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } transport->logger->info << std::flush; - picoquic_set_callback(cnx, NULL, NULL); + picoquic_set_callback(pq_cnx, NULL, NULL); - transport->deleteStreamContext(reinterpret_cast(cnx), stream_id); if (not transport->_is_server_mode) { transport->setStatus(TransportStatus::Disconnected); @@ -228,29 +243,23 @@ pq_event_cb(picoquic_cnx_t* cnx, } case picoquic_callback_ready: { // 
Connection callback, not per stream - if (stream_cnx == NULL) { - stream_cnx = transport->createStreamContext(cnx, stream_id); - } - - picoquic_enable_keep_alive(cnx, 3000000); - (void)picoquic_mark_datagram_ready(cnx, 1); - if (transport->_is_server_mode) { - transport->on_new_connection(cnx); + transport->createConnContext(pq_cnx); // !! Important, must call this before getDefaultDataContext() + transport->on_new_connection(conn_id); } - else { // Client transport->setStatus(TransportStatus::Ready); - transport->on_connection_status(stream_cnx, TransportStatus::Ready); + transport->on_connection_status(conn_id, TransportStatus::Ready); } - break; - } + if (data_ctx == NULL) { + data_ctx = &transport->getDefaultDataContext(conn_id); + } + + picoquic_enable_keep_alive(pq_cnx, 3000000); + (void)picoquic_mark_datagram_ready(pq_cnx, 1); - case picoquic_callback_prepare_to_send: { - transport->metrics.stream_prepare_send++; - transport->send_stream_bytes(stream_cnx, bytes, length); break; } @@ -262,8 +271,7 @@ pq_event_cb(picoquic_cnx_t* cnx, return 0; } -int -pq_loop_cb(picoquic_quic_t* quic, picoquic_packet_loop_cb_enum cb_mode, void* callback_ctx, void* callback_arg) +int pq_loop_cb(picoquic_quic_t* quic, picoquic_packet_loop_cb_enum cb_mode, void* callback_ctx, void* callback_arg) { PicoQuicTransport* transport = static_cast(callback_ctx); int ret = 0; @@ -317,51 +325,18 @@ pq_loop_cb(picoquic_quic_t* quic, picoquic_packet_loop_cb_enum cb_mode, void* ca targ->delta_t = 4000; } - if (!transport->prev_time) { - transport->prev_time = targ->current_time; - transport->prev_metrics = transport->metrics; + if (!transport->pq_loop_prev_time) { + transport->pq_loop_prev_time = targ->current_time; } - transport->metrics.time_checks++; - - if (targ->current_time - transport->prev_time > 1000000) { + if (targ->current_time - transport->pq_loop_prev_time > 1000000) { // TODO: Debug only mode for now. 
Will remove or change based on findings if (transport->debug) { transport->check_conns_for_congestion(); } - if (transport->debug && transport->metrics != transport->prev_metrics) { -/* - LOGGER_DEBUG( - transport->logger, - "Metrics: " - << std::endl - << " time checks : " << transport->metrics.time_checks << std::endl - << " enqueued_objs : " << transport->metrics.enqueued_objs << std::endl - << " send with null ctx : " << transport->metrics.send_null_bytes_ctx << std::endl - << " ----[ Datagrams ] --------------" << std::endl - << " dgram_recv : " << transport->metrics.dgram_received << std::endl - << " dgram_sent : " << transport->metrics.dgram_sent << std::endl - << " dgram_prepare_send : " << transport->metrics.dgram_prepare_send << " (" - << transport->metrics.dgram_prepare_send - prev_metrics.dgram_prepare_send << ")" - << std::endl - << " dgram_lost : " << transport->metrics.dgram_lost << std::endl - << " dgram_ack : " << transport->metrics.dgram_ack << std::endl - << " dgram_spurious : " << transport->metrics.dgram_spurious << " (" - << transport->metrics.dgram_spurious + transport->metrics.dgram_ack << ")" << std::endl - << " ----[ Streams ] --------------" << std::endl - << " stream_prepare_send: " << transport->metrics.stream_prepare_send << std::endl - << " stream_objects_sent: " << transport->metrics.stream_objects_sent << std::endl - << " stream_bytes_sent : " << transport->metrics.stream_bytes_sent << std::endl - << " stream_rx_callbacks: " << transport->metrics.stream_rx_callbacks << std::endl - << " stream_objects_recv: " << transport->metrics.stream_objects_recv << std::endl - << " stream_bytes_recv : " << transport->metrics.stream_bytes_recv << std::endl); -*/ - transport->prev_metrics = transport->metrics; - } - - transport->prev_time = targ->current_time; + transport->pq_loop_prev_time = targ->current_time; } // Stop loop if shutting down @@ -393,97 +368,291 @@ pq_loop_cb(picoquic_quic_t* quic, picoquic_packet_loop_cb_enum cb_mode, void* ca return ret; } -void -PicoQuicTransport::deleteStreamContext(const TransportContextId& context_id, const StreamId& stream_id, bool fin_stream) +/* ============================================================================ + * Public API methods + * ============================================================================ + */ + +TransportStatus PicoQuicTransport::status() const +{ + return transportStatus; +} + +TransportConnId +PicoQuicTransport::start() +{ + uint64_t current_time = picoquic_current_time(); + + if (debug) { + picoquic_logger = std::make_shared("PQIC", logger); + debug_set_callback(&PicoQuicTransport::PicoQuicLogging, this); + } + + (void)picoquic_config_set_option(&config, picoquic_option_CC_ALGO, "bbr"); + (void)picoquic_config_set_option(&config, picoquic_option_ALPN, QUICR_ALPN); + (void)picoquic_config_set_option(&config, picoquic_option_CWIN_MIN, + std::to_string(tconfig.quic_cwin_minimum).c_str()); + (void)picoquic_config_set_option(&config, picoquic_option_MAX_CONNECTIONS, "100"); + + quic_ctx = picoquic_create_and_configure(&config, pq_event_cb, this, current_time, NULL); + + if (quic_ctx == NULL) { + logger->critical << "Unable to create picoquic context, check certificate and key filenames" + << std::flush; + throw PicoQuicException("Unable to create picoquic context"); + } + + /* + * TODO doc: Apparently need to set some value to send datagrams. If not set, + * max datagram size is zero, preventing sending of datagrams. Setting this + * also triggers PMTUD to run. 
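For context on the threading model set up a few lines below in start(): calls that must touch picoquic state are deferred through picoquic_runner_queue and executed by pq_runner(), while delegate notifications go through cbNotifyQueue and are executed by cbNotifier() on its own thread, so application callbacks never block the packet loop. A minimal sketch of that hand-off using calls that appear in this patch (conn_id, data_ctx_id and pq_cnx stand for whatever is in scope at the call site):

    // Defer a picoquic call so it runs with the rest of the picoquic work.
    picoquic_runner_queue.push([=]() {
        picoquic_mark_datagram_ready(pq_cnx, 1);
    });

    // Hand an application notification to the callback-notifier thread.
    cbNotifyQueue.push([=, this]() {
        delegate.on_recv_notify(conn_id, data_ctx_id);
    });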
This value will be the initial value. + */ + picoquic_init_transport_parameters(&local_tp_options, 1); + local_tp_options.max_datagram_frame_size = 1280; + // local_tp_options.max_packet_size = 1450; + local_tp_options.idle_timeout = 30000; // TODO: Remove when we add reconnnect change back to 10 seconds + local_tp_options.max_ack_delay = 100000; + local_tp_options.min_ack_delay = 1000; + + picoquic_set_default_tp(quic_ctx, &local_tp_options); + + picoquic_set_default_wifi_shadow_rtt(quic_ctx, tconfig.quic_wifi_shadow_rtt_us); + logger->info << "Setting wifi shadow RTT to " << tconfig.quic_wifi_shadow_rtt_us << "us" << std::flush; + + picoquic_runner_queue.set_limit(2000); + + cbNotifyQueue.set_limit(2000); + cbNotifyThread = std::thread(&PicoQuicTransport::cbNotifier, this); + + TransportConnId cid = 0; + std::ostringstream log_msg; + + if (_is_server_mode) { + + logger->info << "Starting server, listening on " << serverInfo.host_or_ip << ':' << serverInfo.port + << std::flush; + + picoQuicThread = std::thread(&PicoQuicTransport::server, this); + + } else { + logger->info << "Connecting to server " << serverInfo.host_or_ip << ':' << serverInfo.port + << std::flush; + + cid = createClient(); + picoQuicThread = std::thread(&PicoQuicTransport::client, this, cid); + } + + return cid; +} + +bool PicoQuicTransport::getPeerAddrInfo(const TransportConnId& conn_id, + sockaddr_storage* addr) { std::lock_guard lock(_state_mutex); - const auto& active_stream_it = active_streams.find(context_id); - if (active_stream_it == active_streams.end()) - return; + // Locate the specified transport connection context + auto it = conn_context.find(conn_id); - logger->info << "Delete stream context for stream " << stream_id << std::flush; + // If not found, return false + if (it == conn_context.end()) return false; - if (stream_id == 0) { - StreamContext* s_cnx = &active_streams[context_id][stream_id]; + // Copy the address + std::memcpy(addr, &it->second.peer_addr, sizeof(sockaddr_storage)); - on_connection_status(s_cnx, TransportStatus::Disconnected); + return true; +} - // Remove pointer references in picoquic for active streams - for (const auto& [sid, _]: active_streams[context_id]) { - picoquic_add_to_stream(s_cnx->cnx, stream_id, NULL, 0, 1); +TransportError +PicoQuicTransport::enqueue(const TransportConnId& conn_id, + const DataContextId& data_ctx_id, + std::vector&& bytes, + const uint8_t priority, + const uint32_t ttl_ms, + const bool new_stream, + const bool buffer_reset) +{ + if (bytes.empty()) { + std::cerr << "enqueue dropped due bytes empty" << std::endl; + return TransportError::None; + } + + std::lock_guard lock(_state_mutex); + + const auto conn_ctx_it = conn_context.find(conn_id); + if (conn_ctx_it == conn_context.end()) { + return TransportError::InvalidConnContextId; + } + + if (!data_ctx_id) { // Default data context + conn_ctx_it->second.default_data_context.metrics.enqueued_objs++; + + if (buffer_reset) { + conn_ctx_it->second.default_data_context.tx_data->clear(); + conn_ctx_it->second.default_data_context.rx_data->clear(); } - picoquic_close(s_cnx->cnx, 0); + conn_ctx_it->second.default_data_context.tx_data->push(bytes, ttl_ms, priority); - // Remove all streams if closing the root/datagram stream (closed connection) - active_streams.erase(context_id); - conn_context.erase(context_id); + picoquic_runner_queue.push([=]() { + if (conn_ctx_it->second.pq_cnx != NULL) + picoquic_mark_datagram_ready(conn_ctx_it->second.pq_cnx, 1); + }); - return; } + else { + const auto data_ctx_it = 
conn_ctx_it->second.active_data_contexts.find(data_ctx_id); + if (data_ctx_it == conn_ctx_it->second.active_data_contexts.end()) { + return TransportError::InvalidDataContextId; + } - auto& [ctx_id, stream_contexts] = *active_stream_it; - const auto& stream_iter = stream_contexts.find(stream_id); - if (stream_iter == stream_contexts.end()) - return; - - const auto& [_, ctx] = *stream_iter; - picoquic_runner_queue.push([=, cnx = ctx.cnx]() { - picoquic_mark_active_stream(cnx, stream_id, 0, NULL); - }); + data_ctx_it->second.metrics.enqueued_objs++; + data_ctx_it->second.tx_data->push(bytes, ttl_ms, priority); - if (fin_stream) { - logger->info << "Sending FIN to stream " << stream_id << std::flush; - picoquic_runner_queue.push([=, cnx = ctx.cnx]() { - picoquic_add_to_stream(cnx, stream_id, NULL, 0, 1); + picoquic_runner_queue.push([=]() { + if (conn_ctx_it->second.pq_cnx != NULL) + picoquic_mark_active_stream( + conn_ctx_it->second.pq_cnx, data_ctx_it->second.current_stream_id, 1, &data_ctx_it->second); }); } - (void)stream_contexts.erase(stream_id); + return TransportError::None; +} + +std::optional> +PicoQuicTransport::dequeue(const TransportConnId& conn_id, const DataContextId& data_ctx_id) +{ + std::lock_guard lock(_state_mutex); + + const auto conn_ctx_it = conn_context.find(conn_id); + if (conn_ctx_it == conn_context.end()) { + return std::nullopt; + } + + if (!data_ctx_id) { // Default data context + return std::move(conn_ctx_it->second.default_data_context.rx_data->pop()); + } + + const auto data_ctx_it = conn_ctx_it->second.active_data_contexts.find(data_ctx_id); + if (data_ctx_it == conn_ctx_it->second.active_data_contexts.end()) { + return std::nullopt; + } + + return std::move(data_ctx_it->second.rx_data->pop()); + + return std::nullopt; } -PicoQuicTransport::StreamContext* -PicoQuicTransport::getZeroStreamContext(picoquic_cnx_t* cnx) +DataContextId +PicoQuicTransport::createDataContext(const TransportConnId conn_id, + bool use_reliable_transport, + uint8_t priority, bool bidir) { - if (cnx == NULL) - return NULL; + std::lock_guard lock(_state_mutex); + + if (priority > 127) { + throw std::runtime_error("Create stream priority cannot be greater than 127, range is 0 - 127"); + } + + const auto conn_it = conn_context.find(conn_id); + if (conn_it == conn_context.end()) { + logger->error << "Invalid conn_id: " << conn_id << ", cannot create data context" << std::flush; + return 0; + } + + if (not use_reliable_transport) { + return 0; + } + + const auto [data_ctx_it, is_new] = conn_it->second.active_data_contexts.emplace(conn_it->second.next_data_ctx_id, + DataContext{}); - // This should be safe since it's not removed unless the connection is removed. 
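Worth noting for the enqueue()/dequeue() paths above: data_ctx_id 0 always refers to the connection's built-in default data context, which over QUIC maps to DATAGRAM frames, and createDataContext() returns 0 when use_reliable_transport is false. A hedged illustration of the two paths (dgram_bytes and stream_bytes are placeholder byte vectors):

    // Datagram path: the default context, no explicit creation needed.
    client->enqueue(conn_id, 0, std::move(dgram_bytes));

    // Reliable path: a dedicated data context carries its own stream.
    auto ctx_id = client->createDataContext(conn_id, true);
    client->enqueue(conn_id, ctx_id, std::move(stream_bytes));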
Zero is reserved for datagram - return &active_streams[reinterpret_cast(cnx)][0]; + if (is_new) { + // Init context + data_ctx_it->second.conn_id = conn_id; + data_ctx_it->second.data_ctx_id = conn_it->second.next_data_ctx_id++; // Set and bump next data_ctx_id + + data_ctx_it->second.priority = priority; + + data_ctx_it->second.rx_data = std::make_unique>(tconfig.time_queue_size_rx); + data_ctx_it->second.tx_data = std::make_unique>(tconfig.time_queue_max_duration, + tconfig.time_queue_bucket_interval, + _tick_service, + tconfig.time_queue_init_queue_size); + + // Create stream + if (use_reliable_transport) { + create_stream(conn_it->second, data_ctx_it->second, bidir); + } + } + + return data_ctx_it->second.data_ctx_id; } -PicoQuicTransport::ConnectionContext PicoQuicTransport::getConnContext(const TransportContextId& context_id) +void +PicoQuicTransport::close(const TransportConnId& conn_id) { std::lock_guard lock(_state_mutex); + const auto conn_it = conn_context.find(conn_id); + + if (conn_it == conn_context.end()) + return; + + // Only one datagram context is per connection, if it's deleted, then the connection is to be terminated + on_connection_status(conn_id, TransportStatus::Disconnected); + + // Remove pointer references in picoquic for active streams + for (const auto& [d_id, d_ctx]: conn_it->second.active_data_contexts) { + picoquic_add_to_stream(conn_it->second.pq_cnx, d_ctx.current_stream_id, NULL, 0, 1); + } + + picoquic_close(conn_it->second.pq_cnx, 0); + + conn_context.erase(conn_it); + + if (not _is_server_mode) { + setStatus(TransportStatus::Shutdown); + } +} + +/* ============================================================================ + * Public internal methods used by picoquic + * ============================================================================ + */ + +PicoQuicTransport::DataContext& +PicoQuicTransport::getDefaultDataContext(TransportConnId conn_id) +{ + return conn_context[conn_id].default_data_context; +} + +PicoQuicTransport::ConnectionContext* PicoQuicTransport::getConnContext(const TransportConnId& conn_id) +{ ConnectionContext conn_ctx { }; // Locate the specified transport connection context - auto it = conn_context.find(context_id); + auto it = conn_context.find(conn_id); - // If not found, return false - if (it == conn_context.end()) return std::move(conn_ctx); - - conn_ctx = it->second; + // If not found, return empty context + if (it == conn_context.end()) return nullptr; - return std::move(conn_ctx); + return &it->second; } -PicoQuicTransport::ConnectionContext& PicoQuicTransport::createConnContext(picoquic_cnx_t *cnx) +PicoQuicTransport::ConnectionContext& PicoQuicTransport::createConnContext(picoquic_cnx_t * pq_cnx) { - auto [conn_it, _] = conn_context.emplace( - reinterpret_cast(cnx), - ConnectionContext{ - .cnx = cnx, .peer_addr_text = { 0 }, .peer_port = 0, .is_congested = false, .total_retransmits = 0 }); + + auto [conn_it, is_new] = conn_context.emplace(reinterpret_cast(pq_cnx), pq_cnx); sockaddr* addr; auto &conn_ctx = conn_it->second; + conn_ctx.conn_id = reinterpret_cast(pq_cnx); + conn_ctx.pq_cnx = pq_cnx; - picoquic_get_peer_addr(cnx, &addr); + picoquic_get_peer_addr(pq_cnx, &addr); std::memset(conn_ctx.peer_addr_text, 0, sizeof(conn_ctx.peer_addr_text)); std::memcpy(&conn_ctx.peer_addr, addr, sizeof(conn_ctx.peer_addr)); @@ -507,40 +676,21 @@ PicoQuicTransport::ConnectionContext& PicoQuicTransport::createConnContext(picoq break; } - return conn_ctx; -} - -PicoQuicTransport::StreamContext* 
-PicoQuicTransport::createStreamContext(picoquic_cnx_t* cnx, uint64_t stream_id) -{ - if (cnx == NULL) - return NULL; - - if (stream_id == 0) { - createConnContext(cnx); - } - - std::lock_guard lock(_state_mutex); - - StreamContext* stream_cnx = &active_streams[reinterpret_cast(cnx)][stream_id]; - stream_cnx->stream_id = stream_id; - stream_cnx->context_id = reinterpret_cast(cnx); - stream_cnx->cnx = cnx; + if (is_new) { + logger->info << "Created new connection context for conn_id: " << conn_ctx.conn_id << std::flush; - stream_cnx->rx_data = std::make_unique>(tconfig.time_queue_size_rx); + // Setup default data context + conn_ctx.default_data_context.is_default_context = true; + conn_ctx.default_data_context.conn_id = conn_ctx.conn_id; + conn_ctx.default_data_context.priority = 1; + conn_ctx.default_data_context.rx_data = std::make_unique>(tconfig.time_queue_size_rx); - stream_cnx->tx_data = std::make_unique>( - tconfig.time_queue_max_duration, tconfig.time_queue_bucket_interval, _tick_service, - tconfig.time_queue_init_queue_size); - - - if (stream_id) { - picoquic_runner_queue.push([=, this]() { - picoquic_mark_active_stream(cnx, stream_id, 1, stream_cnx); - }); + conn_ctx.default_data_context.tx_data = std::make_unique>( + tconfig.time_queue_max_duration, tconfig.time_queue_bucket_interval, _tick_service, + tconfig.time_queue_init_queue_size); } - return stream_cnx; + return conn_ctx; } PicoQuicTransport::PicoQuicTransport(const TransportRemote& server, @@ -555,7 +705,6 @@ PicoQuicTransport::PicoQuicTransport(const TransportRemote& server, , serverInfo(server) , delegate(delegate) , tconfig(tcfg) - , next_stream_id{ ::make_datagram_stream_id(_is_server_mode, _is_unidirectional) } { debug = tcfg.debug; @@ -583,365 +732,136 @@ PicoQuicTransport::~PicoQuicTransport() } void -PicoQuicTransport::shutdown() +PicoQuicTransport::setStatus(TransportStatus status) { - if (stop) // Already stopped - return; + transportStatus = status; +} - stop = true; +PicoQuicTransport::DataContext* PicoQuicTransport::createDataContextBiDirRecv(TransportConnId conn_id, uint64_t stream_id) +{ + std::lock_guard lock(_state_mutex); - if (picoQuicThread.joinable()) { - logger->Log("Closing transport pico thread"); - picoQuicThread.join(); + const auto conn_it = conn_context.find(conn_id); + if (conn_it == conn_context.end()) { + logger->error << "Invalid conn_id: " << conn_id << ", cannot create data context" << std::flush; + return nullptr; } - picoquic_runner_queue.stop_waiting(); - cbNotifyQueue.stop_waiting(); + const auto [data_ctx_it, is_new] = conn_it->second.active_data_contexts.emplace(conn_it->second.next_data_ctx_id, + DataContext{}); - if (cbNotifyThread.joinable()) { - logger->Log("Closing transport callback notifier thread"); - cbNotifyThread.join(); - } + if (is_new) { + // Init context + data_ctx_it->second.conn_id = conn_id; + data_ctx_it->second.is_bidir = true; + data_ctx_it->second.data_ctx_id = conn_it->second.next_data_ctx_id++; // Set and bump next data_ctx_id - _tick_service.reset(); - logger->Log("done closing transport threads"); + data_ctx_it->second.priority = 10; // TODO: Need to get priority from remote - picoquic_config_clear(&config); + data_ctx_it->second.rx_data = std::make_unique>(tconfig.time_queue_size_rx); + data_ctx_it->second.tx_data = std::make_unique>(tconfig.time_queue_max_duration, + tconfig.time_queue_bucket_interval, + _tick_service, + tconfig.time_queue_init_queue_size); - // If logging picoquic events, stop those - if (picoquic_logger) { - debug_set_callback(NULL, 
NULL); - picoquic_logger.reset(); + data_ctx_it->second.current_stream_id = stream_id; + + cbNotifyQueue.push([=, this]() { delegate.on_new_data_context(conn_id, data_ctx_it->second.data_ctx_id); }); + + logger->info << "Created new bidir data context conn_id: " << conn_id + << " data_ctx_id: " << data_ctx_it->second.data_ctx_id + << " stream_id: " << stream_id + << std::flush; + + return &data_ctx_it->second; } -} -TransportStatus -PicoQuicTransport::status() const -{ - return transportStatus; + return nullptr; } -void -PicoQuicTransport::setStatus(TransportStatus status) -{ - transportStatus = status; -} +void PicoQuicTransport::pq_runner() { -StreamId -PicoQuicTransport::createStream(const TransportContextId& context_id, - bool use_reliable_transport, - uint8_t priority) -{ - if (priority > 127) { - throw std::runtime_error("Create stream priority cannot be greater than 127, range is 0 - 127"); + if (picoquic_runner_queue.empty()) { + return; } - const auto& iter = active_streams.find(context_id); - if (iter == active_streams.end()) { - logger->info << "Invalid context id, cannot create stream. context_id = " << context_id << std::flush; - return 0; + // note: check before running move of optional, which is more CPU taxing when empty + while (auto cb = picoquic_runner_queue.pop()) { + (*cb)(); } +} - const auto datagram_stream_id = ::make_datagram_stream_id(_is_server_mode, _is_unidirectional); - const auto& cnx_stream_iter = iter->second.find(datagram_stream_id); - if (cnx_stream_iter == iter->second.end()) { - createStreamContext(cnx_stream_iter->second.cnx, datagram_stream_id); - } +void PicoQuicTransport::deleteDataContext(const TransportConnId& conn_id, DataContextId data_ctx_id) +{ + std::lock_guard lock(_state_mutex); - if (!use_reliable_transport) - return datagram_stream_id; + const auto conn_it = conn_context.find(conn_id); - next_stream_id = ::make_stream_id(next_stream_id + 4, _is_server_mode, _is_unidirectional); + if (conn_it == conn_context.end()) + return; - PicoQuicTransport::StreamContext* stream_cnx = createStreamContext(cnx_stream_iter->second.cnx, next_stream_id); - stream_cnx->priority = priority; + const auto data_ctx_it = conn_it->second.active_data_contexts.find(data_ctx_id); - /* - * Low order bit set indicates FIFO handling of same priorities, unset is round-robin - */ - picoquic_runner_queue.push([=, this]() { - picoquic_set_app_stream_ctx(cnx_stream_iter->second.cnx, next_stream_id, stream_cnx); - picoquic_set_stream_priority(cnx_stream_iter->second.cnx, next_stream_id, (priority << 1)); - }); + if (data_ctx_it == conn_it->second.active_data_contexts.end()) + return; - cbNotifyQueue.push([&] { delegate.on_new_stream(context_id, next_stream_id); }); - - return stream_cnx->stream_id; -} - -TransportContextId -PicoQuicTransport::start() -{ - uint64_t current_time = picoquic_current_time(); - - if (debug) { - picoquic_logger = std::make_shared("PQIC", logger); - debug_set_callback(&PicoQuicTransport::PicoQuicLogging, this); - } - - (void)picoquic_config_set_option(&config, picoquic_option_CC_ALGO, "bbr"); - (void)picoquic_config_set_option(&config, picoquic_option_ALPN, QUICR_ALPN); - (void)picoquic_config_set_option(&config, picoquic_option_CWIN_MIN, - std::to_string(tconfig.quic_cwin_minimum).c_str()); - (void)picoquic_config_set_option(&config, picoquic_option_MAX_CONNECTIONS, "100"); - - quic_ctx = picoquic_create_and_configure(&config, pq_event_cb, this, current_time, NULL); - - if (quic_ctx == NULL) { - logger->critical << "Unable to create picoquic 
context, check certificate and key filenames" - << std::flush; - throw PicoQuicException("Unable to create picoquic context"); - } - - /* - * TODO doc: Apparently need to set some value to send datagrams. If not set, - * max datagram size is zero, preventing sending of datagrams. Setting this - * also triggers PMTUD to run. This value will be the initial value. - */ - picoquic_init_transport_parameters(&local_tp_options, 1); - local_tp_options.max_datagram_frame_size = 1280; - // local_tp_options.max_packet_size = 1450; - local_tp_options.idle_timeout = 30000; // TODO: Remove when we add reconnnect change back to 10 seconds - local_tp_options.max_ack_delay = 100000; - local_tp_options.min_ack_delay = 1000; - - picoquic_set_default_tp(quic_ctx, &local_tp_options); - - picoquic_set_default_wifi_shadow_rtt(quic_ctx, tconfig.quic_wifi_shadow_rtt_us); - logger->info << "Setting wifi shadow RTT to " << tconfig.quic_wifi_shadow_rtt_us << "us" << std::flush; - - picoquic_runner_queue.set_limit(2000); - - cbNotifyQueue.set_limit(2000); - cbNotifyThread = std::thread(&PicoQuicTransport::cbNotifier, this); - - TransportContextId cid = 0; - std::ostringstream log_msg; - - if (_is_server_mode) { - - logger->info << "Starting server, listening on " << serverInfo.host_or_ip << ':' << serverInfo.port - << std::flush; - - picoQuicThread = std::thread(&PicoQuicTransport::server, this); - - } else { - logger->info << "Connecting to server " << serverInfo.host_or_ip << ':' << serverInfo.port - << std::flush; - - cid = createClient(); - picoQuicThread = std::thread(&PicoQuicTransport::client, this, cid); - } - - return cid; -} - -void PicoQuicTransport::pq_runner() { + logger->info << "Delete data context " << data_ctx_id + << " in conn_id: " << conn_id + << std::flush; - if (picoquic_runner_queue.empty()) { - return; - } + if (data_ctx_it->second.is_default_context) { + logger->info << "Delete default context for conn_id: " << conn_id << ", closing connection" << std::flush; - // note: check before running move of optional, which is more CPU taxing when empty - while (auto cb = picoquic_runner_queue.pop()) { - (*cb)(); - } -} + // Only one datagram context is per connection, if it's deleted, then the connection is to be terminated + on_connection_status(conn_id, TransportStatus::Disconnected); -void -PicoQuicTransport::cbNotifier() -{ - logger->Log("Starting transport callback notifier thread"); - - while (not stop) { - auto cb = std::move(cbNotifyQueue.block_pop()); - if (cb) { - (*cb)(); - } else { - logger->Log("Notify callback is NULL"); + // Remove pointer references in picoquic for active streams + for (const auto& [d_id, d_ctx]: conn_it->second.active_data_contexts) { + picoquic_add_to_stream(conn_it->second.pq_cnx, d_ctx.current_stream_id, NULL, 0, 1); } - } - - logger->Log("Done with transport callback notifier thread"); -} - -void -PicoQuicTransport::server() -{ - int ret = picoquic_packet_loop(quic_ctx, serverInfo.port, PF_UNSPEC, 0, 2000000, 0, pq_loop_cb, this); - if (quic_ctx != NULL) { - picoquic_free(quic_ctx); - quic_ctx = NULL; - } + picoquic_close(conn_it->second.pq_cnx, 0); - logger->info << "picoquic packet loop ended with " << ret << std::flush; - - setStatus(TransportStatus::Shutdown); -} + conn_context.erase(conn_it); -TransportContextId -PicoQuicTransport::createClient() -{ - struct sockaddr_storage server_address; - char const* sni = "cisco.webex.com"; - int ret; - - int is_name = 0; - - ret = picoquic_get_server_address(serverInfo.host_or_ip.c_str(), serverInfo.port, 
&server_address, &is_name); - if (ret != 0) { - logger->error << "Failed to get server: " << serverInfo.host_or_ip << " port: " << serverInfo.port - << std::flush; - } else if (is_name) { - sni = serverInfo.host_or_ip.c_str(); - } - - picoquic_set_default_congestion_algorithm(quic_ctx, picoquic_bbr_algorithm); - - uint64_t current_time = picoquic_current_time(); - - picoquic_cnx_t* cnx = picoquic_create_cnx(quic_ctx, - picoquic_null_connection_id, - picoquic_null_connection_id, - reinterpret_cast(&server_address), - current_time, - 0, - sni, - config.alpn, - 1); - - if (cnx == NULL) { - logger->Log(cantina::LogLevel::Error, "Could not create picoquic connection client context"); - return 0; - } - - // Using default TP - picoquic_set_transport_parameters(cnx, &local_tp_options); - - picoquic_subscribe_pacing_rate_updates(cnx, tconfig.pacing_decrease_threshold_Bps, - tconfig.pacing_increase_threshold_Bps); - - (void)createStreamContext(cnx, 0); - - return reinterpret_cast(cnx); -} - -void -PicoQuicTransport::client(const TransportContextId tcid) -{ - int ret; - - picoquic_cnx_t* cnx = active_streams[tcid][0].cnx; - - logger->info << "Thread client packet loop for client context_id: " << tcid - << std::flush; - - if (cnx == NULL) { - logger->Log(cantina::LogLevel::Error, "Could not create picoquic connection client context"); - } else { - picoquic_set_callback(cnx, pq_event_cb, this); - - picoquic_enable_keep_alive(cnx, 3000000); - - ret = picoquic_start_client_cnx(cnx); - if (ret < 0) { - logger->Log(cantina::LogLevel::Error, "Could not activate connection"); - return; + if (not _is_server_mode) { + setStatus(TransportStatus::Shutdown); } - ret = picoquic_packet_loop(quic_ctx, 0, PF_UNSPEC, 0, 2000000, 0, pq_loop_cb, this); - - logger->info << "picoquic ended with " << ret << std::flush; + return; } - if (quic_ctx != NULL) { - picoquic_free(quic_ctx); - quic_ctx = NULL; - } + close_stream(conn_it->second, data_ctx_it->second, false, true); - setStatus(TransportStatus::Disconnected); + conn_it->second.active_data_contexts.erase(data_ctx_it); } void -PicoQuicTransport::closeStream(const TransportContextId& context_id, const StreamId stream_id) -{ - deleteStreamContext(context_id, stream_id, true); -} - -bool PicoQuicTransport::getPeerAddrInfo(const TransportContextId& context_id, - sockaddr_storage* addr) -{ - std::lock_guard lock(_state_mutex); - - // Locate the specified transport connection context - auto it = conn_context.find(context_id); - - // If not found, return false - if (it == conn_context.end()) return false; - - // Copy the address - std::memcpy(addr, &it->second.peer_addr, sizeof(sockaddr_storage)); - - return true; -} - -void -PicoQuicTransport::close([[maybe_unused]] const TransportContextId& context_id) -{ -} - -void -PicoQuicTransport::check_callback_delta(StreamContext* stream_cnx, bool tx) { - auto now_time = std::chrono::steady_clock::now(); - - if (!tx) return; - - const auto delta_ms = std::chrono::duration_cast( - now_time - stream_cnx->last_tx_callback_time).count(); - - stream_cnx->last_tx_callback_time = std::move(now_time); - - if (delta_ms > 50 && stream_cnx->tx_data->size() > 10) { - stream_cnx->metrics.tx_delayed_callback++; - - logger->debug << "context_id: " << reinterpret_cast(stream_cnx->cnx) - << " stream_id: " << stream_cnx->stream_id - << " pri: " << static_cast(stream_cnx->priority) - << " CB TX delta " << delta_ms << " ms" - << " count: " << stream_cnx->metrics.tx_delayed_callback - << " tx_queue_size: " - << stream_cnx->tx_data->size() << std::flush; - 
} -} - -void -PicoQuicTransport::send_next_datagram(StreamContext* stream_cnx, uint8_t* bytes_ctx, size_t max_len) +PicoQuicTransport::send_next_datagram(DataContext* data_ctx, uint8_t* bytes_ctx, size_t max_len) { if (bytes_ctx == NULL) { - metrics.send_null_bytes_ctx++; return; } - if (stream_cnx->stream_id != 0) { + if (data_ctx->current_stream_id != 0) { // Only for datagrams return; } - check_callback_delta(stream_cnx); + check_callback_delta(data_ctx); - const auto& out_data = stream_cnx->tx_data->front(); + const auto& out_data = data_ctx->tx_data->front(); if (out_data.has_value()) { if (max_len >= out_data->size()) { - stream_cnx->tx_data->pop(); + data_ctx->tx_data->pop(); - metrics.dgram_sent++; + data_ctx->metrics.dgram_sent++; uint8_t* buf = NULL; buf = picoquic_provide_datagram_buffer_ex(bytes_ctx, out_data->size(), - stream_cnx->tx_data->empty() ? picoquic_datagram_not_active : picoquic_datagram_active_any_path); + data_ctx->tx_data->empty() ? picoquic_datagram_not_active : picoquic_datagram_active_any_path); if (buf != NULL) { std::memcpy(buf, out_data->data(), out_data->size()); @@ -957,49 +877,48 @@ PicoQuicTransport::send_next_datagram(StreamContext* stream_cnx, uint8_t* bytes_ } void -PicoQuicTransport::send_stream_bytes(StreamContext* stream_cnx, uint8_t* bytes_ctx, size_t max_len) +PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, size_t max_len) { if (bytes_ctx == NULL) { - metrics.send_null_bytes_ctx++; return; } - if (stream_cnx->stream_id == 0) { + if (data_ctx->current_stream_id == 0) { return; // Only for steams } - check_callback_delta(stream_cnx); + check_callback_delta(data_ctx); uint32_t data_len = 0; /// Length of data to follow the 4 byte length size_t offset = 0; int is_still_active = 0; - if (stream_cnx->stream_tx_object == nullptr) { + if (data_ctx->stream_tx_object == nullptr) { if (max_len < 5) { // Not enough bytes to send logger->debug << "Not enough bytes to send stream size header, waiting for next callback. sid: " - << stream_cnx->stream_id << std::flush; + << data_ctx->current_stream_id << std::flush; return; } - auto obj = stream_cnx->tx_data->pop_front(); + auto obj = data_ctx->tx_data->pop_front(); if (obj.has_value()) { - metrics.stream_objects_sent++; + data_ctx->metrics.stream_objects_sent++; max_len -= 4; // Subtract out the length header that will be added - stream_cnx->stream_tx_object = new uint8_t[obj->size()]; - stream_cnx->stream_tx_object_size = obj->size(); - std::memcpy(stream_cnx->stream_tx_object, obj->data(), obj->size()); + data_ctx->stream_tx_object = new uint8_t[obj->size()]; + data_ctx->stream_tx_object_size = obj->size(); + std::memcpy(data_ctx->stream_tx_object, obj->data(), obj->size()); if (obj->size() > max_len) { - stream_cnx->stream_tx_object_offset = max_len; + data_ctx->stream_tx_object_offset = max_len; data_len = max_len; is_still_active = 1; } else { data_len = obj->size(); - stream_cnx->stream_tx_object_offset = 0; + data_ctx->stream_tx_object_offset = 0; } } else { @@ -1008,29 +927,29 @@ PicoQuicTransport::send_stream_bytes(StreamContext* stream_cnx, uint8_t* bytes_c } } else { // Have existing object with remaining bytes to send. 
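    // Worked example of the framing used by send_stream_bytes(), with assumed sizes
    // (10,000-byte object, 1,200 bytes available in the first callback):
    //   - every object is written as [4-byte length, little endian][payload], so the
    //     first callback sends 0x10 0x27 0x00 0x00 followed by 1,196 payload bytes and
    //     leaves stream_tx_object_offset at 1,196;
    //   - each later prepare_to_send callback sends up to max_len more payload bytes
    //     until the remaining 8,804 bytes are out, then the offset returns to 0 and
    //     reset_tx_object() releases the buffer;
    //   - on_recv_stream_bytes() is the receive-side counterpart, reassembling objects
    //     from this framing before queuing them to rx_data.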
- data_len = stream_cnx->stream_tx_object_size - stream_cnx->stream_tx_object_offset; - offset = stream_cnx->stream_tx_object_offset; + data_len = data_ctx->stream_tx_object_size - data_ctx->stream_tx_object_offset; + offset = data_ctx->stream_tx_object_offset; if (data_len > max_len) { - stream_cnx->stream_tx_object_offset += max_len; + data_ctx->stream_tx_object_offset += max_len; data_len = max_len; is_still_active = 1; } else { - stream_cnx->stream_tx_object_offset = 0; + data_ctx->stream_tx_object_offset = 0; } } - metrics.stream_bytes_sent += data_len; + data_ctx->metrics.stream_bytes_sent += data_len; - if (!is_still_active && !stream_cnx->tx_data->empty()) + if (!is_still_active && !data_ctx->tx_data->empty()) is_still_active = 1; uint8_t *buf = nullptr; if (offset == 0) { // New object being written, add length (4 bytes), little endian for now - uint32_t obj_length = stream_cnx->stream_tx_object_size; + uint32_t obj_length = data_ctx->stream_tx_object_size; buf = picoquic_provide_stream_data_buffer(bytes_ctx, data_len + 4, @@ -1056,167 +975,100 @@ PicoQuicTransport::send_stream_bytes(StreamContext* stream_cnx, uint8_t* bytes_c } // Write data - std::memcpy(buf, stream_cnx->stream_tx_object + offset, data_len); + std::memcpy(buf, data_ctx->stream_tx_object + offset, data_len); - if (stream_cnx->stream_tx_object_offset == 0 && stream_cnx->stream_tx_object != nullptr) { + if (data_ctx->stream_tx_object_offset == 0 && data_ctx->stream_tx_object != nullptr) { // Zero offset at this point means the object was fully sent - stream_cnx->reset_tx_object(); + data_ctx->reset_tx_object(); } } -TransportError -PicoQuicTransport::enqueue(const TransportContextId& context_id, - const StreamId& stream_id, - std::vector&& bytes, - const uint8_t priority, - const uint32_t ttl_ms) -{ - if (bytes.empty()) { - std::cerr << "enqueue dropped due bytes empty" << std::endl; - return TransportError::None; - } - - std::lock_guard lock(_state_mutex); - - const auto& ctx = active_streams.find(context_id); - - if (ctx != active_streams.end()) { - const auto& stream_cnx = ctx->second.find(stream_id); - - if (stream_cnx != ctx->second.end()) { - metrics.enqueued_objs++; - stream_cnx->second.tx_data->push(bytes, ttl_ms, priority); - - if (!stream_id) { - picoquic_runner_queue.push([=]() { - if (stream_cnx->second.cnx != NULL) - picoquic_mark_datagram_ready(stream_cnx->second.cnx, 1); - }); - - } else { - picoquic_runner_queue.push([=]() { - if (stream_cnx->second.cnx != NULL) - picoquic_mark_active_stream( - stream_cnx->second.cnx, stream_id, 1, static_cast(&stream_cnx->second)); - }); - } - } else { - std::cerr << "enqueue dropped due to invalid stream: " << stream_id << std::endl; - return TransportError::InvalidStreamId; - } - } else { - //std::cerr << "enqueue dropped due to invalid contextId: " << context_id << std::endl; - //TODO: Add metrics to report invalid context - return TransportError::InvalidContextId; - } - return TransportError::None; -} - -std::optional> -PicoQuicTransport::dequeue(const TransportContextId& context_id, const StreamId& stream_id) -{ - std::lock_guard lock(_state_mutex); - - auto cnx_it = active_streams.find(context_id); - - if (cnx_it != active_streams.end()) { - auto stream_it = cnx_it->second.find(stream_id); - if (stream_it != cnx_it->second.end()) { - return std::move(stream_it->second.rx_data->pop()); - } - } - - return std::nullopt; -} - void -PicoQuicTransport::on_connection_status(PicoQuicTransport::StreamContext* stream_cnx, const TransportStatus status) 
+PicoQuicTransport::on_connection_status(const TransportConnId conn_id, const TransportStatus status) { - TransportContextId context_id{ stream_cnx->context_id }; + std::lock_guard _(_state_mutex); if (status == TransportStatus::Ready) { - auto conn_ctx = getConnContext(stream_cnx->context_id); + auto conn_ctx = getConnContext(conn_id); logger->info << "Connection established to server " - << conn_ctx.peer_addr_text + << conn_ctx->peer_addr_text << std::flush; } - cbNotifyQueue.push([=, this]() { delegate.on_connection_status(context_id, status); }); + cbNotifyQueue.push([=, this]() { delegate.on_connection_status(conn_id, status); }); } void -PicoQuicTransport::on_new_connection(picoquic_cnx_t *cnx) +PicoQuicTransport::on_new_connection(const TransportConnId conn_id) { - const auto context_id = reinterpret_cast(cnx); + std::lock_guard _(_state_mutex); - auto conn_ctx = getConnContext(context_id); - logger->info << "New Connection " << conn_ctx.peer_addr_text << " port: " << conn_ctx.peer_port - << " context_id: " << context_id + auto conn_ctx = getConnContext(conn_id); + logger->info << "New Connection " << conn_ctx->peer_addr_text << " port: " << conn_ctx->peer_port + << " conn_id: " << conn_id << std::flush; - picoquic_subscribe_pacing_rate_updates(cnx, tconfig.pacing_decrease_threshold_Bps, + picoquic_subscribe_pacing_rate_updates(conn_ctx->pq_cnx, tconfig.pacing_decrease_threshold_Bps, tconfig.pacing_increase_threshold_Bps); - TransportRemote remote{ .host_or_ip = conn_ctx.peer_addr_text, - .port = conn_ctx.peer_port, + TransportRemote remote{ .host_or_ip = conn_ctx->peer_addr_text, + .port = conn_ctx->peer_port, .proto = TransportProtocol::QUIC }; - cbNotifyQueue.push([=, this]() { delegate.on_new_connection(context_id, remote); }); + cbNotifyQueue.push([=, this]() { delegate.on_new_connection(conn_id, remote); }); } void -PicoQuicTransport::on_recv_datagram(StreamContext* stream_cnx, uint8_t* bytes, size_t length) +PicoQuicTransport::on_recv_datagram(DataContext* data_ctx, uint8_t* bytes, size_t length) { - if (stream_cnx == NULL || length == 0) { + if (data_ctx == NULL || length == 0) { logger->Log(cantina::LogLevel::Debug, "On receive datagram has null context"); return; } std::vector data(bytes, bytes + length); - stream_cnx->rx_data->push(std::move(data)); + data_ctx->rx_data->push(std::move(data)); if (cbNotifyQueue.size() > 200) { logger->info << "on_recv_datagram cbNotifyQueue size" << cbNotifyQueue.size() << std::flush; } - if (stream_cnx->rx_data->size() < 2 || stream_cnx->in_data_cb_skip_count > 30) { - stream_cnx->in_data_cb_skip_count = 0; - TransportContextId context_id = stream_cnx->context_id; - StreamId stream_id = stream_cnx->stream_id; + if (data_ctx->rx_data->size() < 2 || data_ctx->in_data_cb_skip_count > 30) { + data_ctx->in_data_cb_skip_count = 0; - cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(context_id, stream_id); }); + cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(data_ctx->conn_id, data_ctx->current_stream_id); }); } else { - stream_cnx->in_data_cb_skip_count++; + data_ctx->in_data_cb_skip_count++; } } -void PicoQuicTransport::on_recv_stream_bytes(StreamContext* stream_cnx, uint8_t* bytes, size_t length) +void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint8_t* bytes, size_t length) { uint8_t *bytes_p = bytes; - if (stream_cnx == NULL || length == 0) { + if (data_ctx == NULL || length == 0) { logger->Log(cantina::LogLevel::Debug, "on_recv_stream_bytes has null context"); return; } bool object_complete = false; - 
if (stream_cnx->stream_rx_object == nullptr) { + if (data_ctx->stream_rx_object == nullptr) { - if (stream_cnx->stream_rx_object_hdr_size < 4) { + if (data_ctx->stream_rx_object_hdr_size < 4) { - uint16_t len_to_copy = length >= 4 ? 4 - stream_cnx->stream_rx_object_hdr_size : 4; + uint16_t len_to_copy = length >= 4 ? 4 - data_ctx->stream_rx_object_hdr_size : 4; - std::memcpy(&stream_cnx->stream_rx_object_size + stream_cnx->stream_rx_object_hdr_size, + std::memcpy(&data_ctx->stream_rx_object_size + data_ctx->stream_rx_object_hdr_size, bytes_p, len_to_copy); - stream_cnx->stream_rx_object_hdr_size += len_to_copy; + data_ctx->stream_rx_object_hdr_size += len_to_copy; - if (stream_cnx->stream_rx_object_hdr_size < 4) { - logger->debug << "Stream header not complete. hdr " << stream_cnx->stream_rx_object_hdr_size + if (data_ctx->stream_rx_object_hdr_size < 4) { + logger->debug << "Stream header not complete. hdr " << data_ctx->stream_rx_object_hdr_size << " len_to_copy: " << len_to_copy << " length: " << length << std::flush; @@ -1226,49 +1078,49 @@ void PicoQuicTransport::on_recv_stream_bytes(StreamContext* stream_cnx, uint8_t* bytes_p += len_to_copy; - if (length == 0 || stream_cnx->stream_rx_object_hdr_size < 4) { + if (length == 0 || data_ctx->stream_rx_object_hdr_size < 4) { // Either no data left to read or not enough data for the header (length) value return; } } - if (stream_cnx->stream_rx_object_size > 40000000L) { // Safety check - logger->warning << "on_recv_stream_bytes sid: " << stream_cnx->stream_id + if (data_ctx->stream_rx_object_size > 40000000L) { // Safety check + logger->warning << "on_recv_stream_bytes sid: " << data_ctx->current_stream_id << " data length is too large: " - << std::to_string(stream_cnx->stream_rx_object_size) + << std::to_string(data_ctx->stream_rx_object_size) << std::flush; - stream_cnx->reset_rx_object(); + data_ctx->reset_rx_object(); // TODO: Should reset stream in this case return; } - if (stream_cnx->stream_rx_object_size <= length) { + if (data_ctx->stream_rx_object_size <= length) { object_complete = true; - std::vector data(bytes_p, bytes_p + stream_cnx->stream_rx_object_size); - stream_cnx->rx_data->push(std::move(data)); + std::vector data(bytes_p, bytes_p + data_ctx->stream_rx_object_size); + data_ctx->rx_data->push(std::move(data)); - bytes_p += stream_cnx->stream_rx_object_size; - length -= stream_cnx->stream_rx_object_size; + bytes_p += data_ctx->stream_rx_object_size; + length -= data_ctx->stream_rx_object_size; - metrics.stream_bytes_recv += stream_cnx->stream_rx_object_size; + data_ctx->metrics.stream_bytes_recv += data_ctx->stream_rx_object_size; - stream_cnx->reset_rx_object(); + data_ctx->reset_rx_object(); length = 0; // no more data left to process } else { // Need to wait for more data, create new object buffer - stream_cnx->stream_rx_object = new uint8_t[stream_cnx->stream_rx_object_size]; + data_ctx->stream_rx_object = new uint8_t[data_ctx->stream_rx_object_size]; - stream_cnx->stream_rx_object_offset = length; - std::memcpy(stream_cnx->stream_rx_object, bytes_p, length); - metrics.stream_bytes_recv += length; + data_ctx->stream_rx_object_offset = length; + std::memcpy(data_ctx->stream_rx_object, bytes_p, length); + data_ctx->metrics.stream_bytes_recv += length; length = 0; // no more data left to process } } else { // Existing object, append - size_t remaining_len = stream_cnx->stream_rx_object_size - stream_cnx->stream_rx_object_offset; + size_t remaining_len = data_ctx->stream_rx_object_size - data_ctx->stream_rx_object_offset; 
if (remaining_len > length) { remaining_len = length; @@ -1279,46 +1131,44 @@ void PicoQuicTransport::on_recv_stream_bytes(StreamContext* stream_cnx, uint8_t* length -= remaining_len; } - metrics.stream_bytes_recv += remaining_len; + data_ctx->metrics.stream_bytes_recv += remaining_len; - std::memcpy(stream_cnx->stream_rx_object + stream_cnx->stream_rx_object_offset, bytes_p, remaining_len); + std::memcpy(data_ctx->stream_rx_object + data_ctx->stream_rx_object_offset, bytes_p, remaining_len); bytes_p += remaining_len; if (object_complete) { - std::vector data(stream_cnx->stream_rx_object, stream_cnx->stream_rx_object + stream_cnx->stream_rx_object_size); - stream_cnx->rx_data->push(std::move(data)); + std::vector data(data_ctx->stream_rx_object, + data_ctx->stream_rx_object + data_ctx->stream_rx_object_size); + data_ctx->rx_data->push(std::move(data)); - stream_cnx->reset_rx_object(); + data_ctx->reset_rx_object(); } else { - stream_cnx->stream_rx_object_offset += remaining_len; + data_ctx->stream_rx_object_offset += remaining_len; } } if (object_complete) { - metrics.stream_objects_recv++; + data_ctx->metrics.stream_objects_recv++; bool too_many_in_queue = false; if (cbNotifyQueue.size() > 200) { - logger->warning << "on_recv_stream_bytes sid: " << stream_cnx->stream_id - << "cbNotifyQueue size" << cbNotifyQueue.size() + logger->warning << "on_recv_stream_bytes sid: " << data_ctx->current_stream_id << "cbNotifyQueue size" << cbNotifyQueue.size() << std::flush; } - if (too_many_in_queue || stream_cnx->rx_data->size() < 4 || stream_cnx->in_data_cb_skip_count > 30) { - stream_cnx->in_data_cb_skip_count = 0; - TransportContextId context_id = stream_cnx->context_id; - StreamId stream_id = stream_cnx->stream_id; + if (too_many_in_queue || data_ctx->rx_data->size() < 4 || data_ctx->in_data_cb_skip_count > 30) { + data_ctx->in_data_cb_skip_count = 0; - cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(context_id, stream_id); }); + cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(data_ctx->conn_id, data_ctx->data_ctx_id); }); } else { - stream_cnx->in_data_cb_skip_count++; + data_ctx->in_data_cb_skip_count++; } } if (length > 0) { logger->debug << "on_stream_bytes has remaining bytes: " << length << std::flush; - on_recv_stream_bytes(stream_cnx, bytes_p, length); + on_recv_stream_bytes(data_ctx, bytes_p, length); } } @@ -1333,41 +1183,277 @@ void PicoQuicTransport::check_conns_for_congestion() * Check each queue size to determine if there is possible congestion */ - for (auto& [context_id, streams] : active_streams) { + for (auto& [conn_id, conn_ctx] : conn_context) { int congested_count { 0 }; - for (auto& [stream_id, stream_cnx] : streams) { - if (stream_cnx.metrics.tx_delayed_callback - stream_cnx.metrics.prev_tx_delayed_callback > 1) { + // Default context first + if (conn_ctx.default_data_context.metrics.tx_delayed_callback - conn_ctx.default_data_context.metrics.prev_tx_delayed_callback > 1) { + congested_count++; + } + + if (conn_ctx.default_data_context.metrics.tx_delayed_callback) { + conn_ctx.default_data_context.metrics.prev_tx_delayed_callback = conn_ctx.default_data_context.metrics.tx_delayed_callback; + } + + // All other data flows (streams) + for (auto& [data_ctx_id, data_ctx] : conn_ctx.active_data_contexts) { + if (data_ctx.metrics.tx_delayed_callback - data_ctx.metrics.prev_tx_delayed_callback > 1) { congested_count++; } - if (stream_cnx.metrics.tx_delayed_callback) { - stream_cnx.metrics.prev_tx_delayed_callback = stream_cnx.metrics.tx_delayed_callback; + if 
(data_ctx.metrics.tx_delayed_callback) { + data_ctx.metrics.prev_tx_delayed_callback = data_ctx.metrics.tx_delayed_callback; } } - auto& conn_ctx = conn_context[context_id]; if (congested_count && not conn_ctx.is_congested) { conn_ctx.is_congested = true; - logger->debug << "context_id: " << context_id << " has " << congested_count << " streams congested." << std::flush; + logger->info << "conn_id: " << conn_id << " has " << congested_count << " streams congested." << std::flush; } else if (conn_ctx.is_congested) { // No longer congested conn_ctx.is_congested = false; - logger->debug << "context_id: " << context_id << " c_count: " << congested_count << " is no longer congested." << std::flush; + logger->info << "conn_id: " << conn_id << " c_count: " << congested_count << " is no longer congested." << std::flush; + } + + if (conn_ctx.metrics.total_retransmits < conn_ctx.pq_cnx->nb_retransmission_total) { + logger->info << "remote: " << conn_ctx.peer_addr_text << " port: " << conn_ctx.peer_port + << " conn_id: " << conn_id << " retransmits increased, delta: " + << (conn_ctx.pq_cnx->nb_retransmission_total - conn_ctx.metrics.total_retransmits) + << " total: " << conn_ctx.pq_cnx->nb_retransmission_total << std::flush; + + conn_ctx.metrics.total_retransmits = conn_ctx.pq_cnx->nb_retransmission_total; + } + } +} + +/* ============================================================================ + * Private methods + * ============================================================================ + */ +void PicoQuicTransport::server() +{ + int ret = picoquic_packet_loop(quic_ctx, serverInfo.port, PF_UNSPEC, 0, 2000000, 0, pq_loop_cb, this); + + if (quic_ctx != NULL) { + picoquic_free(quic_ctx); + quic_ctx = NULL; + } + + logger->info << "picoquic packet loop ended with " << ret << std::flush; + + setStatus(TransportStatus::Shutdown); +} + +TransportConnId PicoQuicTransport::createClient() +{ + struct sockaddr_storage server_address; + char const* sni = "cisco.webex.com"; + int ret; + + int is_name = 0; + + ret = picoquic_get_server_address(serverInfo.host_or_ip.c_str(), serverInfo.port, &server_address, &is_name); + if (ret != 0) { + logger->error << "Failed to get server: " << serverInfo.host_or_ip << " port: " << serverInfo.port + << std::flush; + } else if (is_name) { + sni = serverInfo.host_or_ip.c_str(); + } + + picoquic_set_default_congestion_algorithm(quic_ctx, picoquic_bbr_algorithm); + + uint64_t current_time = picoquic_current_time(); + + picoquic_cnx_t* cnx = picoquic_create_cnx(quic_ctx, + picoquic_null_connection_id, + picoquic_null_connection_id, + reinterpret_cast(&server_address), + current_time, + 0, + sni, + config.alpn, + 1); + + if (cnx == NULL) { + logger->Log(cantina::LogLevel::Error, "Could not create picoquic connection client context"); + return 0; + } + + // Using default TP + picoquic_set_transport_parameters(cnx, &local_tp_options); + + picoquic_subscribe_pacing_rate_updates(cnx, tconfig.pacing_decrease_threshold_Bps, + tconfig.pacing_increase_threshold_Bps); + + auto &_ = createConnContext(cnx); + + return reinterpret_cast(cnx); +} + +void PicoQuicTransport::client(const TransportConnId conn_id) +{ + int ret; + + auto conn_ctx = getConnContext(conn_id); + + logger->info << "Thread client packet loop for client conn_id: " << conn_id << std::flush; + + if (conn_ctx->pq_cnx == NULL) { + logger->Log(cantina::LogLevel::Error, "Could not create picoquic connection client context"); + } else { + picoquic_set_callback(conn_ctx->pq_cnx, pq_event_cb, this); + + 
picoquic_enable_keep_alive(conn_ctx->pq_cnx, 3000000); + + ret = picoquic_start_client_cnx(conn_ctx->pq_cnx); + if (ret < 0) { + logger->Log(cantina::LogLevel::Error, "Could not activate connection"); + return; + } + + ret = picoquic_packet_loop(quic_ctx, 0, PF_UNSPEC, 0, 2000000, 0, pq_loop_cb, this); + + logger->info << "picoquic ended with " << ret << std::flush; + } + + if (quic_ctx != NULL) { + picoquic_free(quic_ctx); + quic_ctx = NULL; + } + + setStatus(TransportStatus::Disconnected); +} + +void PicoQuicTransport::shutdown() +{ + if (stop) // Already stopped + return; + + stop = true; + + if (picoQuicThread.joinable()) { + logger->Log("Closing transport pico thread"); + picoQuicThread.join(); + } + + picoquic_runner_queue.stop_waiting(); + cbNotifyQueue.stop_waiting(); + + if (cbNotifyThread.joinable()) { + logger->Log("Closing transport callback notifier thread"); + cbNotifyThread.join(); + } + + _tick_service.reset(); + logger->Log("done closing transport threads"); + + picoquic_config_clear(&config); + + // If logging picoquic events, stop those + if (picoquic_logger) { + debug_set_callback(NULL, NULL); + picoquic_logger.reset(); + } +} + +void PicoQuicTransport::check_callback_delta(DataContext* data_ctx, bool tx) { + auto now_time = std::chrono::steady_clock::now(); + + if (!tx) return; + + const auto delta_ms = std::chrono::duration_cast( + now_time - data_ctx->last_tx_callback_time).count(); + + data_ctx->last_tx_callback_time = std::move(now_time); + + if (delta_ms > 50 && data_ctx->tx_data->size() > 10) { + data_ctx->metrics.tx_delayed_callback++; + + logger->debug << "conn_id: " << data_ctx->conn_id + << " stream_id: " << data_ctx->current_stream_id + << " pri: " << static_cast(data_ctx->priority) + << " CB TX delta " << delta_ms << " ms" + << " count: " << data_ctx->metrics.tx_delayed_callback + << " tx_queue_size: " + << data_ctx->tx_data->size() << std::flush; + } +} + +void PicoQuicTransport::cbNotifier() +{ + logger->Log("Starting transport callback notifier thread"); + + while (not stop) { + auto cb = std::move(cbNotifyQueue.block_pop()); + if (cb) { + (*cb)(); + } else { + logger->Log("Notify callback is NULL"); } } - // TODO: Remove and add metrics per stream if possible - for (auto& [conn_id, conn_ctx]: conn_context) { - if (conn_ctx.total_retransmits < conn_ctx.cnx->nb_retransmission_total) { - logger->debug << "remote: " << conn_ctx.peer_addr_text << " port: " << conn_ctx.peer_port - << " context_id: " << conn_id << " retransmits increased, delta: " - << (conn_ctx.cnx->nb_retransmission_total - conn_ctx.total_retransmits) - << " total: " << conn_ctx.cnx->nb_retransmission_total << std::flush; + logger->Log("Done with transport callback notifier thread"); +} - conn_ctx.total_retransmits = conn_ctx.cnx->nb_retransmission_total; +void PicoQuicTransport::create_stream(ConnectionContext& conn_ctx, DataContext &data_ctx, const bool bidir) +{ + conn_ctx.last_stream_id = ::get_next_stream_id(conn_ctx.last_stream_id , + _is_server_mode, !bidir); + + logger->info << "conn_id: " << conn_ctx.conn_id << " data_ctx_id: " << data_ctx.data_ctx_id + << " create new stream with stream_id: " << conn_ctx.last_stream_id + << std::flush; + + if (data_ctx.current_stream_id) { + close_stream(conn_ctx, data_ctx, false, true); + } + + data_ctx.current_stream_id = conn_ctx.last_stream_id; + + /* + * Low order bit set indicates FIFO handling of same priorities, unset is round-robin + */ + picoquic_runner_queue.push([=, pq_cnx = conn_ctx.pq_cnx, + stream_id = data_ctx.current_stream_id, + 
d_ctx = &data_ctx, + pri = data_ctx.priority, + this]() { + picoquic_set_app_stream_ctx(pq_cnx, stream_id , d_ctx); + picoquic_set_stream_priority(pq_cnx, stream_id, (pri << 1)); + picoquic_mark_active_stream(pq_cnx, stream_id, 1, d_ctx); + }); +} + +void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataContext& data_ctx, + const bool send_reset, const bool send_fin) +{ + logger->info << "conn_id: " << conn_ctx.conn_id << " data_ctx_id: " << data_ctx.data_ctx_id + << " closing stream stream_id: " << data_ctx.current_stream_id + << std::flush; + + if (send_reset) { + logger->info << "Reset stream_id: " << data_ctx.current_stream_id << " conn_id: " << conn_ctx.conn_id + << std::flush; + + picoquic_runner_queue.push([=, cnx = conn_ctx.pq_cnx, stream_id = data_ctx.current_stream_id]() { + picoquic_reset_stream(cnx, stream_id, 0); + }); + + data_ctx.reset_tx_object(); + + if (data_ctx.is_bidir) { + data_ctx.reset_rx_object(); } + + } else if (send_fin) { + logger->info << "Sending FIN for stream_id: " << data_ctx.current_stream_id << " conn_id: " << conn_ctx.conn_id + << std::flush; + + picoquic_runner_queue.push([=, cnx = conn_ctx.pq_cnx, stream_id = data_ctx.current_stream_id]() { + picoquic_add_to_stream(cnx, stream_id, NULL, 0, 1); + }); } -} \ No newline at end of file + data_ctx.current_stream_id = 0; +} diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index d350a18..a9d1bcb 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -34,34 +34,58 @@ class PicoQuicTransport : public ITransport public: const char* QUICR_ALPN = "quicr-v1"; - struct Metrics { + struct ConnectionMetrics { + uint64_t time_checks {0}; + uint64_t total_retransmits {0}; + + auto operator<=>(const ConnectionMetrics&) const = default; + }; + + struct DataContextMetrics { uint64_t dgram_ack {0}; uint64_t dgram_spurious {0}; uint64_t dgram_prepare_send {0}; + uint64_t dgram_lost {0}; + uint64_t dgram_received {0}; uint64_t dgram_sent {0}; + + uint64_t enqueued_objs {0}; + uint64_t stream_prepare_send {0}; + uint64_t stream_rx_callbacks {0}; + + uint64_t tx_delayed_callback {0}; /// Count of times transmit callbacks were delayed + uint64_t prev_tx_delayed_callback {0}; /// Previous transmit delayed callback value, set each interval uint64_t stream_objects_sent {0}; uint64_t stream_bytes_sent {0}; uint64_t stream_bytes_recv {0}; uint64_t stream_objects_recv {0}; - uint64_t stream_rx_callbacks {0}; - uint64_t send_null_bytes_ctx {0}; - uint64_t dgram_lost {0}; - uint64_t dgram_received {0}; - uint64_t time_checks {0}; - uint64_t enqueued_objs {0 }; - auto operator<=>(const Metrics&) const = default; - } metrics; + auto operator<=>(const DataContextMetrics&) const = default; + + }; + using bytes_t = std::vector; using timeQueue = time_queue; + using DataContextId = uint64_t; + + /** + * Data context information + * Data context is intended to be a container for metrics and other state that is related to a flow of + * data that may use datagram or one or more stream QUIC frames + */ + struct DataContext { + bool is_default_context { false }; /// Indicates if the data context is the default context + bool is_bidir { false }; /// Indicates if the stream is bidir (true) or unidir (false) + + DataContextId data_ctx_id {0}; /// The ID of this context + TransportConnId conn_id {0}; /// The connection ID this context is under + + uint64_t current_stream_id {0}; /// Current active stream if the value is >= 4 - struct StreamContext { - uint64_t stream_id {0}; - TransportContextId 
context_id {0}; uint8_t priority {0}; - picoquic_cnx_t *cnx; + uint64_t in_data_cb_skip_count {0}; /// Number of times callback was skipped due to size std::unique_ptr> rx_data; /// Pending objects received from the network std::unique_ptr> tx_data; /// Pending objects to be written to the network @@ -78,12 +102,15 @@ class PicoQuicTransport : public ITransport // The last time TX callback was run std::chrono::time_point last_tx_callback_time { std::chrono::steady_clock::now() }; - struct stream_metrics { - uint64_t tx_delayed_callback {0}; /// Count of times transmit callbacks were delayed - uint64_t prev_tx_delayed_callback {0}; /// Previous transmit delayed callback value, set each interval - } metrics; + DataContextMetrics metrics; - ~StreamContext() { + DataContext() = default; + DataContext(DataContext&&) = default; + + DataContext& operator=(const DataContext&) = delete; + DataContext& operator=(DataContext&&) = delete; + + ~DataContext() { /* * Free legacy pointers if not null */ @@ -124,34 +151,56 @@ class PicoQuicTransport : public ITransport stream_tx_object_offset = 0; stream_tx_object_size = 0; } - - }; - /* - * pq event loop member vars - */ - uint64_t prev_time = 0; - qtransport::PicoQuicTransport::Metrics prev_metrics; - /** * Connection context information */ struct ConnectionContext { - picoquic_cnx_t *cnx = nullptr; - char peer_addr_text[45]; - uint16_t peer_port; + TransportConnId conn_id {0}; /// This connection ID + picoquic_cnx_t * pq_cnx = nullptr; /// Picoquic connection/path context + uint64_t last_stream_id {0}; /// last stream Id - Zero means not set/no stream yet. >=4 is the starting stream value + + DataContextId next_data_ctx_id {1}; /// Next data context ID; zero is reserved for default context + + /** + * Default data context is used for datagrams and received unidirectional streams only. Transmit + * unidirectional MUST be it's own context. 
+ */ + DataContext default_data_context; + + + /** + * Active data contexts are for transmit unidirectional data flows as well as for bi-directional (received) flows + */ + std::map active_data_contexts; + + char peer_addr_text[45] {0}; + uint16_t peer_port {0}; sockaddr_storage peer_addr; // States bool is_congested { false }; // Metrics - uint64_t total_retransmits { 0 }; + ConnectionMetrics metrics; + + ConnectionContext() { + default_data_context.is_default_context = true; + } + + ConnectionContext(picoquic_cnx_t *cnx) : ConnectionContext() { + pq_cnx = cnx; + } }; /* - * Exceptions + * pq event loop member vars + */ + uint64_t pq_loop_prev_time = 0; + + /* + * Exceptions */ struct Exception : public std::runtime_error { @@ -187,56 +236,63 @@ class PicoQuicTransport : public ITransport virtual ~PicoQuicTransport(); TransportStatus status() const override; + TransportConnId start() override; + void close(const TransportConnId& conn_id) override; - TransportContextId start() override; - - void close(const TransportContextId& context_id) override; - void closeStream(const TransportContextId& context_id, - StreamId stream_id) override; - - ConnectionContext getConnContext(const TransportContextId& context_id); - - virtual bool getPeerAddrInfo(const TransportContextId& context_id, + virtual bool getPeerAddrInfo(const TransportConnId& conn_id, sockaddr_storage* addr) override; - StreamId createStream(const TransportContextId& context_id, - bool use_reliable_transport, - uint8_t priority) override; + DataContextId createDataContext(const TransportConnId conn_id, + bool use_reliable_transport, + uint8_t priority, bool bidir) override; - TransportError enqueue(const TransportContextId& context_id, - const StreamId& stream_id, - std::vector&& bytes, - const uint8_t priority = 1, - const uint32_t ttl_ms = 300) override; + void deleteDataContext(const TransportConnId& conn_id, DataContextId data_ctx_id) override; + TransportError enqueue(const TransportConnId& conn_id, + const DataContextId& data_ctx_id, + std::vector&& bytes, + const uint8_t priority, + const uint32_t ttl_ms, + const bool new_stream, + const bool buffer_reset) override; std::optional> dequeue( - const TransportContextId& context_id, - const StreamId & stream_id) override; + const TransportConnId& conn_id, + const DataContextId& data_ctx_id) override; /* * Internal public methods */ + ConnectionContext* getConnContext(const TransportConnId& conn_id); void setStatus(TransportStatus status); - ConnectionContext& createConnContext(picoquic_cnx_t *cnx); + /** + * @brief Create bidirectional data context for received new stream + * + * @details Create a bidir data context for received bidir stream. This is only called + * for received bidirectional streams. 
+ * + * @param conn_id Connection context ID for the stream + * @param stream_id Stream ID of the new received stream + * + * @returns DataContext pointer to the created context, nullptr if invalid connection id + */ + DataContext* createDataContextBiDirRecv(TransportConnId conn_id, uint64_t stream_id); - StreamContext * getZeroStreamContext(picoquic_cnx_t* cnx); + ConnectionContext& createConnContext(picoquic_cnx_t * pq_cnx); - StreamContext * createStreamContext(picoquic_cnx_t* cnx, - uint64_t stream_id); - void deleteStreamContext(const TransportContextId& context_id, - const StreamId& stream_id, bool fin_stream=false); + DataContext& getDefaultDataContext(TransportConnId conn_id); - void send_next_datagram(StreamContext *stream_cnx, uint8_t* bytes_ctx, size_t max_len); - void send_stream_bytes(StreamContext *stream_cnx, uint8_t* bytes_ctx, size_t max_len); + void send_next_datagram(DataContext* data_ctx, uint8_t* bytes_ctx, size_t max_len); + void send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, size_t max_len); - void on_connection_status(StreamContext *stream_cnx, + void on_connection_status(const TransportConnId conn_id, const TransportStatus status); - void on_new_connection(picoquic_cnx_t* cnx); - void on_recv_datagram(StreamContext *stream_cnx, + + void on_new_connection(const TransportConnId conn_id); + void on_recv_datagram(DataContext* data_ctx, uint8_t* bytes, size_t length); - void on_recv_stream_bytes(StreamContext *stream_cnx, + void on_recv_stream_bytes(DataContext* data_ctx, uint8_t* bytes, size_t length); void check_conns_for_congestion(); @@ -261,13 +317,35 @@ class PicoQuicTransport : public ITransport private: - TransportContextId createClient(); + TransportConnId createClient(); void shutdown(); void server(); - void client(const TransportContextId tcid); + void client(const TransportConnId conn_id); void cbNotifier(); - void check_callback_delta(StreamContext* stream_cnx, bool tx=true); + + void check_callback_delta(DataContext* data_ctx, bool tx=true); + + /** + * @brief Create a new stream + * + * @param conn_ctx Connection context to create stream under + * @param data_ctx Data context in connection context to create streams + * @param bidir Stream should be created as bi-directional (true) or unidirectional (false) + */ + void create_stream(ConnectionContext&conn_ctx, DataContext &data_ctx, const bool bidir=false); + + /** + * @brief App initiated Close stream + * @details App initiated close stream. When the app deletes a context or wants to switch streams to a new stream + * this function is used to close out the current stream. A FIN will be sent. + * + * @param conn_ctx Connection context for the stream + * @param data_ctx Data context for the stream + * @param send_reset Indicates if the stream should be closed by RESET + * @param send_fin Indicates if the stream should be closed by FIN (reset will take priority) + */ + void close_stream(const ConnectionContext&conn_ctx, DataContext &data_ctx, const bool send_reset, const bool send_fin); /* @@ -290,15 +368,7 @@ class PicoQuicTransport : public ITransport TransportDelegate& delegate; TransportConfig tconfig; - /* - * RFC9000 Section 2.1 defines the stream id max value and types. - * Type is encoded in the stream id as the first 2 least significant - * bits. Stream ID is therefore incremented by 4. 
- */ - std::atomic next_stream_id; - std::map> active_streams; - std::map conn_context; - + std::map conn_context; std::shared_ptr _tick_service; }; diff --git a/src/transport_udp.cpp b/src/transport_udp.cpp index 0d5090b..b3be58f 100644 --- a/src/transport_udp.cpp +++ b/src/transport_udp.cpp @@ -62,23 +62,24 @@ UDPTransport::status() const * UDP doesn't support multiple streams for clients. This will return the same * stream ID used for the context */ -StreamId -UDPTransport::createStream( - const qtransport::TransportContextId& context_id, +DataContextId +UDPTransport::createDataContext( + const qtransport::TransportConnId conn_id, [[maybe_unused]] bool use_reliable_transport, - [[maybe_unused]] uint8_t priority) + [[maybe_unused]] uint8_t priority, + [[maybe_unused]] bool bidir) { - if (remote_contexts.count(context_id) == 0) { - logger->error << "Invalid context id: " << context_id << std::flush; + if (remote_contexts.count(conn_id) == 0) { + logger->error << "Invalid context id: " << conn_id << std::flush; return 0; // Error } - auto addr = remote_contexts[context_id]; + auto addr = remote_contexts[conn_id]; return remote_addrs[addr.key].sid; } -TransportContextId +TransportConnId UDPTransport::start() { @@ -92,8 +93,7 @@ UDPTransport::start() } void -UDPTransport::closeStream(const TransportContextId& context_id, - const StreamId streamId) +UDPTransport::deleteDataContext(const TransportConnId& context_id, DataContextId streamId) { if (dequeue_data_map[context_id].count(streamId) > 0) { @@ -102,7 +102,7 @@ UDPTransport::closeStream(const TransportContextId& context_id, } bool -UDPTransport::getPeerAddrInfo(const TransportContextId& context_id, +UDPTransport::getPeerAddrInfo(const TransportConnId& context_id, sockaddr_storage* addr) { // Locate the given transport context @@ -118,7 +118,7 @@ UDPTransport::getPeerAddrInfo(const TransportContextId& context_id, } void -UDPTransport::close(const TransportContextId& context_id) +UDPTransport::close(const TransportConnId& context_id) { if (isServerMode) { @@ -351,11 +351,13 @@ UDPTransport::fd_reader() } TransportError -UDPTransport::enqueue(const TransportContextId& context_id, - const StreamId& streamId, +UDPTransport::enqueue(const TransportConnId& context_id, + const DataContextId& streamId, std::vector&& bytes, [[maybe_unused]] const uint8_t priority, - [[maybe_unused]] const uint32_t ttl_ms) + [[maybe_unused]] const uint32_t ttl_m, + [[maybe_unused]] const bool new_stream, + [[maybe_unused]] const bool buffer_reset) { if (bytes.empty()) { return TransportError::None; @@ -363,12 +365,12 @@ UDPTransport::enqueue(const TransportContextId& context_id, if (remote_contexts.count(context_id) == 0) { // Invalid context id - return TransportError::InvalidContextId; + return TransportError::InvalidConnContextId; } if (dequeue_data_map[context_id].count(streamId) == 0) { // Invalid stream Id - return TransportError::InvalidStreamId; + return TransportError::InvalidDataContextId; } connData cd; @@ -384,8 +386,8 @@ UDPTransport::enqueue(const TransportContextId& context_id, } std::optional> -UDPTransport::dequeue(const TransportContextId& context_id, - const StreamId& streamId) +UDPTransport::dequeue(const TransportConnId& context_id, + const DataContextId& streamId) { if (remote_contexts.count(context_id) == 0) { @@ -410,7 +412,7 @@ UDPTransport::dequeue(const TransportContextId& context_id, return dq.pop().value().data; } -TransportContextId +TransportConnId UDPTransport::connect_client() { std::stringstream s_log; @@ -524,7 +526,7 @@ 
UDPTransport::connect_client() return last_context_id; } -TransportContextId +TransportConnId UDPTransport::connect_server() { std::stringstream s_log; diff --git a/src/transport_udp.h b/src/transport_udp.h index 064b404..a9d487a 100644 --- a/src/transport_udp.h +++ b/src/transport_udp.h @@ -46,8 +46,8 @@ struct addrKey struct connData { - TransportContextId contextId; - StreamId streamId; + TransportConnId contextId; + DataContextId streamId; std::vector data; }; @@ -63,34 +63,34 @@ class UDPTransport : public ITransport TransportStatus status() const override; - TransportContextId start() override; + TransportConnId start() override; - void close(const TransportContextId& context_id) override; - void closeStream(const TransportContextId& context_id, - StreamId streamId) override; + void close(const TransportConnId& context_id) override; - virtual bool getPeerAddrInfo(const TransportContextId& context_id, + virtual bool getPeerAddrInfo(const TransportConnId& context_id, sockaddr_storage* addr) override; - StreamId createStream(const TransportContextId& context_id, - bool use_reliable_transport, - uint8_t priority) override; + DataContextId createDataContext(const TransportConnId conn_id, + bool use_reliable_transport, + uint8_t priority, bool bidir) override; + void deleteDataContext(const TransportConnId& conn_id, DataContextId data_ctx_id) override; - TransportError enqueue(const TransportContextId& context_id, - const StreamId & stream_id, + TransportError enqueue(const TransportConnId& conn_id, + const DataContextId& data_ctx_id, std::vector&& bytes, - const uint8_t priority = 1, - const uint32_t ttl_ms = 300) override; - + const uint8_t priority, + const uint32_t ttl_ms, + const bool new_stream, + const bool buffer_reset) override; std::optional> dequeue( - const TransportContextId& context_id, - const StreamId &streamId) override; + const TransportConnId& context_id, + const DataContextId&streamId) override; private: - TransportContextId connect_client(); - TransportContextId connect_server(); + TransportConnId connect_client(); + TransportConnId connect_server(); void addr_to_remote(sockaddr_storage& addr, TransportRemote& remote); void addr_to_key(sockaddr_storage& addr, addrKey& key); @@ -110,8 +110,8 @@ class UDPTransport : public ITransport struct AddrStream { - TransportContextId tcid; - StreamId sid; + TransportConnId tcid; + DataContextId sid; }; cantina::LoggerPointer logger; @@ -124,14 +124,14 @@ class UDPTransport : public ITransport // NOTE: this is a map supporting multiple streams, but UDP does not have that // right now. - std::map>> + std::map>> dequeue_data_map; TransportDelegate& delegate; - TransportContextId last_context_id{ 0 }; - StreamId last_stream_id{ 0 }; - std::map remote_contexts = {}; + TransportConnId last_context_id{ 0 }; + DataContextId last_stream_id{ 0 }; + std::map remote_contexts = {}; std::map remote_addrs = {}; }; From 6cfff5e67ba5d677edb07aede48b22fe2f2d8ea1 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 1 Dec 2023 09:44:56 -0800 Subject: [PATCH 02/17] Add bidir to on_recv_notify() method Removes race/asynchronous delay issue for receiver to return packets based on bidir notification. We still get notify of new bidir data context, but now we indicate via on_recv_notify as well. 
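For reference, a rough sketch of how a receiving delegate might use the new is_bidir flag (a fragment only, not part of this patch; the `server` transport pointer and `out_data_ctx` member are assumed to exist, mirroring the echo server delegate):

    void on_recv_notify(const TransportConnId& conn_id,
                        const DataContextId& data_ctx_id,
                        const bool is_bidir) override
    {
        // Drain everything queued for this data context
        while (auto data = server->dequeue(conn_id, data_ctx_id)) {
            if (is_bidir) {
                // Bidirectional stream: echo back on the same data context
                server->enqueue(conn_id, data_ctx_id, std::move(*data));
            } else {
                // Unidirectional receive: reply on a locally created context
                server->enqueue(conn_id, out_data_ctx, std::move(*data));
            }
        }
    }
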
Resolves #88 --- cmd/client.cc | 2 +- include/transport/transport.h | 12 +++++++----- src/transport_picoquic.cpp | 5 +++-- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/cmd/client.cc b/cmd/client.cc index 7e0aca6..255b236 100644 --- a/cmd/client.cc +++ b/cmd/client.cc @@ -43,7 +43,7 @@ struct Delegate : public ITransport::TransportDelegate void on_new_connection(const TransportConnId& , const TransportRemote&) {} - void on_recv_notify(const TransportConnId& conn_id, const DataContextId& data_ctx_id) + void on_recv_notify(const TransportConnId& conn_id, const DataContextId& data_ctx_id, [[maybe_unused]] const bool is_bidir) { static uint32_t prev_msg_num = 0; static uint32_t prev_msgcount = 0; diff --git a/include/transport/transport.h b/include/transport/transport.h index 2e2657d..8bbc78c 100644 --- a/include/transport/transport.h +++ b/include/transport/transport.h @@ -156,9 +156,11 @@ class ITransport * * @param[in] conn_id Transport context identifier mapped to the connection * @param[in] data_ctx_id Data context id that the data was received on + * @param[in] is_bidir True if the message is from a bidirectional stream */ virtual void on_recv_notify(const TransportConnId& conn_id, - const DataContextId& data_ctx_id) = 0; + const DataContextId& data_ctx_id, + const bool is_bidir=false) = 0; }; /* Factory APIs */ @@ -270,10 +272,10 @@ class ITransport * when available. * * @param[in] context_id Identifying the connection - * @param[in] data_ctx_id stream Id to send data on - * @param[in] bytes Data to send/write - * @param[in] priority Priority of the object, range should be 0 - 255 - * @param[in] ttl_ms The age the object should exist in queue in milliseconds + * @param[in] data_ctx_id stream Id to send data on + * @param[in] bytes Data to send/write + * @param[in] priority Priority of the object, range should be 0 - 255 + * @param[in] ttl_ms The age the object should exist in queue in milliseconds * * @returns TransportError is returned indicating status of the operation */ diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 5fa2907..10a4f8f 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -1038,7 +1038,7 @@ PicoQuicTransport::on_recv_datagram(DataContext* data_ctx, uint8_t* bytes, size_ if (data_ctx->rx_data->size() < 2 || data_ctx->in_data_cb_skip_count > 30) { data_ctx->in_data_cb_skip_count = 0; - cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(data_ctx->conn_id, data_ctx->current_stream_id); }); + cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(data_ctx->conn_id, data_ctx->current_stream_id, true); }); } else { data_ctx->in_data_cb_skip_count++; } @@ -1160,7 +1160,8 @@ void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint8_t* byt if (too_many_in_queue || data_ctx->rx_data->size() < 4 || data_ctx->in_data_cb_skip_count > 30) { data_ctx->in_data_cb_skip_count = 0; - cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(data_ctx->conn_id, data_ctx->data_ctx_id); }); + cbNotifyQueue.push([=, this]() { delegate.on_recv_notify(data_ctx->conn_id, + data_ctx->data_ctx_id, data_ctx->is_bidir); }); } else { data_ctx->in_data_cb_skip_count++; } From 48723997fd41118b2ca8b609d3e7f5d87b086d6e Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 1 Dec 2023 15:41:28 -0800 Subject: [PATCH 03/17] Implemented stream replacement This change adds stream replacement and new flags on enqueue. Enqueue flags instruct the transport on how to handle clearing of queues and creating new streams. 
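As a usage note, a minimal sketch of driving the new flags from an application (conn_id, data_ctx_id and data_buf are assumed to come from an already established connection, similar to cmd/client.cc):

    // Ask the transport to replace the current stream before sending this
    // object: drop anything still queued and reset the old stream abruptly.
    ITransport::EncodeFlags flags { .new_stream = true,
                                    .clear_tx_queue = true,
                                    .use_reset = true };

    std::vector<uint8_t> data(data_buf, data_buf + sizeof(data_buf));
    client->enqueue(conn_id, data_ctx_id, std::move(data),
                    1,    // priority
                    350,  // ttl_ms
                    flags);

Without the flags argument, enqueue() keeps the existing stream and simply appends the object to the priority queue.
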
Resolves #88 --- cmd/client.cc | 28 ++++-- cmd/echoServer.cc | 19 +++-- include/transport/transport.h | 17 +++- src/transport_picoquic.cpp | 155 +++++++++++++++++++++++++--------- src/transport_picoquic.h | 15 ++-- src/transport_udp.cpp | 3 +- src/transport_udp.h | 3 +- 7 files changed, 179 insertions(+), 61 deletions(-) diff --git a/cmd/client.cc b/cmd/client.cc index 255b236..a53c53e 100644 --- a/cmd/client.cc +++ b/cmd/client.cc @@ -54,6 +54,10 @@ struct Delegate : public ITransport::TransportDelegate if (data.has_value()) { msgcount++; + if (msgcount % 2000 == 0 && prev_msgcount != msgcount) { + logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " msgcount: " << msgcount << std::flush; + } + uint32_t* msg_num = (uint32_t*)data.value().data(); if (prev_msg_num && (*msg_num - prev_msg_num) > 1) { @@ -66,10 +70,6 @@ struct Delegate : public ITransport::TransportDelegate prev_msg_num = *msg_num; } else { - if (msgcount % 2000 == 0 && prev_msgcount != msgcount) { - logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " msgcount: " << msgcount << std::flush; - } - break; } } @@ -124,13 +124,29 @@ main() DataContextId data_ctx_id = client->createDataContext(conn_id, true, 1, bidir); uint32_t* msg_num = (uint32_t*)&data_buf; + int period_count = 0; + + ITransport::EncodeFlags encode_flags { .new_stream = true, .clear_tx_queue = true, .use_reset = true}; while (client->status() != TransportStatus::Shutdown && client->status() != TransportStatus::Disconnected) { + period_count++; for (int i = 0; i < 10; i++) { (*msg_num)++; auto data = bytes(data_buf, data_buf + sizeof(data_buf)); - client->enqueue(conn_id, server.proto == TransportProtocol::UDP ? 1 : data_ctx_id, std::move(data)); + if (period_count > 2000) { + period_count = 0; + client->enqueue(conn_id, + server.proto == TransportProtocol::UDP ? 1 : data_ctx_id, + std::move(data), + 1, + 350, + encode_flags); + } + else { + client->enqueue(conn_id, server.proto == TransportProtocol::UDP ? 
1 : data_ctx_id, std::move(data)); + } + } // Increase delay if using UDP, need to pace more @@ -139,6 +155,8 @@ main() } else { std::this_thread::sleep_for(std::chrono::milliseconds(2)); } + + } client->deleteDataContext(conn_id, data_ctx_id); diff --git a/cmd/echoServer.cc b/cmd/echoServer.cc index 8233a86..c4b01ba 100644 --- a/cmd/echoServer.cc +++ b/cmd/echoServer.cc @@ -47,7 +47,7 @@ struct Delegate : public ITransport::TransportDelegate { } void on_recv_notify(const TransportConnId &conn_id, - const DataContextId &data_ctx_id) { + const DataContextId &data_ctx_id, const bool is_bidir) { while (true) { auto data = server->dequeue(conn_id, data_ctx_id); @@ -55,7 +55,14 @@ struct Delegate : public ITransport::TransportDelegate { if (data.has_value()) { msgcount++; + if (msgcount % 2000 == 0 && prev_msgcount != msgcount) { + prev_msgcount = msgcount; + logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " msgcount: " << msgcount << std::flush; + } + uint32_t *msg_num = (uint32_t *)data->data(); + if (msg_num == nullptr) + continue; if (prev_msg_num && (*msg_num - prev_msg_num) > 1) { logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " length: " << data->size() @@ -66,14 +73,14 @@ struct Delegate : public ITransport::TransportDelegate { prev_msg_num = *msg_num; - server->enqueue(conn_id, out_data_ctx, std::move(data.value())); + if (is_bidir) { + server->enqueue(conn_id, data_ctx_id, std::move(data.value())); - } else { - if (msgcount % 2000 == 0 && prev_msgcount != msgcount) { - prev_msgcount = msgcount; - logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " msgcount: " << msgcount << std::flush; + } else { + server->enqueue(conn_id, out_data_ctx, std::move(data.value())); } + } else { break; } } diff --git a/include/transport/transport.h b/include/transport/transport.h index 8bbc78c..6366e36 100644 --- a/include/transport/transport.h +++ b/include/transport/transport.h @@ -43,7 +43,7 @@ enum class TransportError : uint8_t }; /** - * + * Transport Protocol to use */ enum class TransportProtocol { @@ -265,6 +265,15 @@ class ITransport virtual bool getPeerAddrInfo(const TransportConnId& context_id, sockaddr_storage* addr) = 0; + /** + * Encode flags + */ + struct EncodeFlags { + bool new_stream { false }; /// Indicates that a new stream should be created to replace existing one + bool clear_tx_queue { false }; /// Indicates that the TX queue should be cleared before adding new object + bool use_reset { false }; /// Indicates new stream created will close the previous using reset/abrupt + }; + /** * @brief Enqueue application data within the transport * @@ -277,6 +286,9 @@ class ITransport * @param[in] priority Priority of the object, range should be 0 - 255 * @param[in] ttl_ms The age the object should exist in queue in milliseconds * + * @param[in] new_stream Indicates that a new stream should replace the existing on + * @param[in] clear_queue Clear the TX priority queue before enqueueing this object + * * @returns TransportError is returned indicating status of the operation */ virtual TransportError enqueue(const TransportConnId& context_id, @@ -284,8 +296,7 @@ class ITransport std::vector&& bytes, const uint8_t priority = 1, const uint32_t ttl_ms=350, - const bool new_stream=false, - const bool buffer_reset=false) = 0; + const EncodeFlags flags={false, false, false}) = 0; /** * @brief Dequeue application data from transport queue diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 
10a4f8f..818aec1 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -121,6 +121,14 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, } case picoquic_callback_prepare_to_send: { + + if (data_ctx == NULL) { + // For some reason picoquic calls this again even after reset, here we ignore it + transport->logger->info << "conn_id: " << conn_id << " stream_id: " << stream_id + << " context is null" << std::flush; + break; + } + data_ctx->metrics.stream_prepare_send++; transport->send_stream_bytes(data_ctx, bytes, length); break; @@ -154,23 +162,41 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, if (is_fin) { transport->logger->info << "Received FIN for stream " << stream_id << std::flush; - transport->deleteDataContext(conn_id, stream_id); + + if (data_ctx->is_default_context) { + data_ctx->reset_rx_object(); + picoquic_set_app_stream_ctx(pq_cnx, stream_id, NULL); + data_ctx->current_stream_id = 0; + } + else { + transport->logger->info << "delete data context" << std::flush; + transport->deleteDataContext(conn_id, stream_id); + } } break; } case picoquic_callback_stream_reset: { - transport->logger->info << "Received reset stream conn_id: " << conn_id << " stream_id: " << stream_id << std::flush; + transport->logger->info << "Received reset stream conn_id: " << conn_id + << " stream_id: " << stream_id + << std::flush; - if (stream_id & 0x2 /* bidir stream */) { - transport->deleteDataContext(conn_id, stream_id); - } else { - // Unidirectional stream, no need to FIN/RESET or clear up data context for unidir received streams - // TODO: This should not be an issue with threads since this is the only thread that adds to this. + if (data_ctx == NULL) { + data_ctx = transport->createDataContextBiDirRecv(conn_id, stream_id); + } + + if (data_ctx->is_default_context) { data_ctx->reset_rx_object(); + data_ctx->current_stream_id = 0; + } + else { + transport->logger->info << "delete data context" << std::flush; + transport->deleteDataContext(conn_id, stream_id); } - return 0; + picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); + + break; } case picoquic_callback_almost_ready: @@ -230,8 +256,6 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, picoquic_set_callback(pq_cnx, NULL, NULL); - - if (not transport->_is_server_mode) { transport->setStatus(TransportStatus::Disconnected); @@ -468,8 +492,7 @@ PicoQuicTransport::enqueue(const TransportConnId& conn_id, std::vector&& bytes, const uint8_t priority, const uint32_t ttl_ms, - const bool new_stream, - const bool buffer_reset) + const EncodeFlags flags) { if (bytes.empty()) { std::cerr << "enqueue dropped due bytes empty" << std::endl; @@ -483,17 +506,15 @@ PicoQuicTransport::enqueue(const TransportConnId& conn_id, return TransportError::InvalidConnContextId; } - if (!data_ctx_id) { // Default data context + if (!data_ctx_id) { // Default data context (using datagram) conn_ctx_it->second.default_data_context.metrics.enqueued_objs++; - if (buffer_reset) { + if (flags.clear_tx_queue) { conn_ctx_it->second.default_data_context.tx_data->clear(); - conn_ctx_it->second.default_data_context.rx_data->clear(); } conn_ctx_it->second.default_data_context.tx_data->push(bytes, ttl_ms, priority); - picoquic_runner_queue.push([=]() { if (conn_ctx_it->second.pq_cnx != NULL) picoquic_mark_datagram_ready(conn_ctx_it->second.pq_cnx, 1); @@ -507,6 +528,19 @@ PicoQuicTransport::enqueue(const TransportConnId& conn_id, } data_ctx_it->second.metrics.enqueued_objs++; + + if (flags.new_stream) { + if (flags.use_reset) { + data_ctx_it->second.stream_action = 
DataContext::StreamAction::REPLACE_STREAM_USE_RESET; + } else { + data_ctx_it->second.stream_action = DataContext::StreamAction::REPLACE_STREAM_USE_FIN; + } + } + + if (flags.clear_tx_queue) { + data_ctx_it->second.tx_data->clear(); + } + data_ctx_it->second.tx_data->push(bytes, ttl_ms, priority); picoquic_runner_queue.push([=]() { @@ -561,7 +595,7 @@ PicoQuicTransport::createDataContext(const TransportConnId conn_id, } if (not use_reliable_transport) { - return 0; + return 0; // Default data context, which is datagram } const auto [data_ctx_it, is_new] = conn_it->second.active_data_contexts.emplace(conn_it->second.next_data_ctx_id, @@ -831,7 +865,7 @@ void PicoQuicTransport::deleteDataContext(const TransportConnId& conn_id, DataCo return; } - close_stream(conn_it->second, data_ctx_it->second, false, true); + close_stream(conn_it->second, data_ctx_it->second, false); conn_it->second.active_data_contexts.erase(data_ctx_it); } @@ -883,7 +917,7 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, return; } - if (data_ctx->current_stream_id == 0) { + if (data_ctx->is_default_context) { return; // Only for steams } @@ -893,6 +927,57 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, size_t offset = 0; int is_still_active = 0; + if (data_ctx->current_stream_id == 0) { + logger->info << "Creating unset stream in conn_id: " << data_ctx->conn_id + << std::flush; + const auto conn_ctx = getConnContext(data_ctx->conn_id); + create_stream(*conn_ctx, *data_ctx, data_ctx->is_bidir); + + } else { + switch (data_ctx->stream_action) { + case DataContext::StreamAction::NO_ACTION: + default: + break; + + case DataContext::StreamAction::REPLACE_STREAM_USE_RESET: { + if (data_ctx->stream_tx_object != nullptr) { + data_ctx->metrics.tx_write_buffer_drops++; + } + + logger->info << "Replacing stream using RESET; conn_id: " << data_ctx->conn_id + << " existing_stream: " << data_ctx->current_stream_id + << " write buf drops: " << data_ctx->metrics.tx_write_buffer_drops << std::flush; + + const auto conn_ctx = getConnContext(data_ctx->conn_id); + + close_stream(*conn_ctx, *data_ctx, true); + + data_ctx->reset_tx_object(); + + create_stream(*conn_ctx, *data_ctx, data_ctx->is_bidir); + + std::lock_guard _(_state_mutex); + data_ctx->stream_action = DataContext::StreamAction::NO_ACTION; + + break; + } + + case DataContext::StreamAction::REPLACE_STREAM_USE_FIN: { + if (data_ctx->stream_tx_object == nullptr) { + logger->info << "Replacing stream using FIN; conn_id: " << data_ctx->conn_id + << " existing_stream: " << data_ctx->conn_id << std::flush; + + const auto conn_ctx = getConnContext(data_ctx->conn_id); + create_stream(*conn_ctx, *data_ctx, data_ctx->is_bidir); + + std::lock_guard _(_state_mutex); + data_ctx->stream_action = DataContext::StreamAction::NO_ACTION; + } + break; + } + } + } + if (data_ctx->stream_tx_object == nullptr) { if (max_len < 5) { @@ -903,6 +988,7 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, } auto obj = data_ctx->tx_data->pop_front(); + if (obj.has_value()) { data_ctx->metrics.stream_objects_sent++; max_len -= 4; // Subtract out the length header that will be added @@ -1407,7 +1493,7 @@ void PicoQuicTransport::create_stream(ConnectionContext& conn_ctx, DataContext & << std::flush; if (data_ctx.current_stream_id) { - close_stream(conn_ctx, data_ctx, false, true); + close_stream(conn_ctx, data_ctx, false); } data_ctx.current_stream_id = conn_ctx.last_stream_id; @@ -1415,20 +1501,17 @@ void 
PicoQuicTransport::create_stream(ConnectionContext& conn_ctx, DataContext & /* * Low order bit set indicates FIFO handling of same priorities, unset is round-robin */ - picoquic_runner_queue.push([=, pq_cnx = conn_ctx.pq_cnx, - stream_id = data_ctx.current_stream_id, - d_ctx = &data_ctx, - pri = data_ctx.priority, - this]() { - picoquic_set_app_stream_ctx(pq_cnx, stream_id , d_ctx); - picoquic_set_stream_priority(pq_cnx, stream_id, (pri << 1)); - picoquic_mark_active_stream(pq_cnx, stream_id, 1, d_ctx); - }); + picoquic_set_app_stream_ctx(conn_ctx.pq_cnx, data_ctx.current_stream_id , &data_ctx); + picoquic_set_stream_priority(conn_ctx.pq_cnx, data_ctx.current_stream_id, (data_ctx.priority << 1)); + picoquic_mark_active_stream(conn_ctx.pq_cnx, data_ctx.current_stream_id, 1, &data_ctx); } -void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataContext& data_ctx, - const bool send_reset, const bool send_fin) +void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataContext& data_ctx, const bool send_reset) { + if (data_ctx.current_stream_id == 0) { + return; // stream already closed + } + logger->info << "conn_id: " << conn_ctx.conn_id << " data_ctx_id: " << data_ctx.data_ctx_id << " closing stream stream_id: " << data_ctx.current_stream_id << std::flush; @@ -1437,9 +1520,7 @@ void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataCont logger->info << "Reset stream_id: " << data_ctx.current_stream_id << " conn_id: " << conn_ctx.conn_id << std::flush; - picoquic_runner_queue.push([=, cnx = conn_ctx.pq_cnx, stream_id = data_ctx.current_stream_id]() { - picoquic_reset_stream(cnx, stream_id, 0); - }); + picoquic_reset_stream(conn_ctx.pq_cnx, data_ctx.current_stream_id, 0); data_ctx.reset_tx_object(); @@ -1447,13 +1528,11 @@ void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataCont data_ctx.reset_rx_object(); } - } else if (send_fin) { + } else { logger->info << "Sending FIN for stream_id: " << data_ctx.current_stream_id << " conn_id: " << conn_ctx.conn_id << std::flush; - picoquic_runner_queue.push([=, cnx = conn_ctx.pq_cnx, stream_id = data_ctx.current_stream_id]() { - picoquic_add_to_stream(cnx, stream_id, NULL, 0, 1); - }); + picoquic_add_to_stream(conn_ctx.pq_cnx, data_ctx.current_stream_id, NULL, 0, 1); } data_ctx.current_stream_id = 0; diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index a9d1bcb..d39055d 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -54,6 +54,7 @@ class PicoQuicTransport : public ITransport uint64_t stream_prepare_send {0}; uint64_t stream_rx_callbacks {0}; + uint64_t tx_write_buffer_drops {0}; /// Count of write buffer drops of data due to RESET request uint64_t tx_delayed_callback {0}; /// Count of times transmit callbacks were delayed uint64_t prev_tx_delayed_callback {0}; /// Previous transmit delayed callback value, set each interval uint64_t stream_objects_sent {0}; @@ -82,6 +83,12 @@ class PicoQuicTransport : public ITransport DataContextId data_ctx_id {0}; /// The ID of this context TransportConnId conn_id {0}; /// The connection ID this context is under + enum class StreamAction : uint8_t { /// Stream action that should be done by send/receive processing + NO_ACTION=0, + REPLACE_STREAM_USE_RESET, + REPLACE_STREAM_USE_FIN, + } stream_action {StreamAction::NO_ACTION}; + uint64_t current_stream_id {0}; /// Current active stream if the value is >= 4 uint8_t priority {0}; @@ -253,8 +260,7 @@ class PicoQuicTransport : public ITransport 
std::vector&& bytes, const uint8_t priority, const uint32_t ttl_ms, - const bool new_stream, - const bool buffer_reset) override; + const EncodeFlags flags) override; std::optional> dequeue( const TransportConnId& conn_id, @@ -342,10 +348,9 @@ class PicoQuicTransport : public ITransport * * @param conn_ctx Connection context for the stream * @param data_ctx Data context for the stream - * @param send_reset Indicates if the stream should be closed by RESET - * @param send_fin Indicates if the stream should be closed by FIN (reset will take priority) + * @param send_reset Indicates if the stream should be closed by RESET, otherwise FIN */ - void close_stream(const ConnectionContext&conn_ctx, DataContext &data_ctx, const bool send_reset, const bool send_fin); + void close_stream(const ConnectionContext&conn_ctx, DataContext &data_ctx, const bool send_reset); /* diff --git a/src/transport_udp.cpp b/src/transport_udp.cpp index b3be58f..7e881bf 100644 --- a/src/transport_udp.cpp +++ b/src/transport_udp.cpp @@ -356,8 +356,7 @@ UDPTransport::enqueue(const TransportConnId& context_id, std::vector&& bytes, [[maybe_unused]] const uint8_t priority, [[maybe_unused]] const uint32_t ttl_m, - [[maybe_unused]] const bool new_stream, - [[maybe_unused]] const bool buffer_reset) + [[maybe_unused]] const EncodeFlags flags) { if (bytes.empty()) { return TransportError::None; diff --git a/src/transport_udp.h b/src/transport_udp.h index a9d487a..0ba8db7 100644 --- a/src/transport_udp.h +++ b/src/transport_udp.h @@ -81,8 +81,7 @@ class UDPTransport : public ITransport std::vector&& bytes, const uint8_t priority, const uint32_t ttl_ms, - const bool new_stream, - const bool buffer_reset) override; + const EncodeFlags flags) override; std::optional> dequeue( const TransportConnId& context_id, From 7b1d52368f492c267634b33095a42bb7e8c0d57e Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 1 Dec 2023 15:55:35 -0800 Subject: [PATCH 04/17] build error fix --- Makefile | 2 +- src/transport_picoquic.cpp | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index b381de4..e4f3c52 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ all: ${BUILD_DIR} cmake --build ${BUILD_DIR} --parallel 8 ${BUILD_DIR}: CMakeLists.txt cmd/CMakeLists.txt - cmake -B${BUILD_DIR} -DBUILD_TESTING=TRUE -DQTRANSPORT_BUILD_TESTS=TRUE -DCMAKE_BUILD_TYPE=Release . + cmake -B${BUILD_DIR} -DBUILD_TESTING=TRUE -DQTRANSPORT_BUILD_TESTS=TRUE -DCMAKE_BUILD_TYPE=Debug . 
clean: cmake --build ${BUILD_DIR} --target clean diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 818aec1..38ef415 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -800,7 +800,8 @@ PicoQuicTransport::DataContext* PicoQuicTransport::createDataContextBiDirRecv(Tr data_ctx_it->second.current_stream_id = stream_id; - cbNotifyQueue.push([=, this]() { delegate.on_new_data_context(conn_id, data_ctx_it->second.data_ctx_id); }); + cbNotifyQueue.push([=,data_ctx_id = data_ctx_it->second.data_ctx_id, this]() { + delegate.on_new_data_context(conn_id, data_ctx_id); }); logger->info << "Created new bidir data context conn_id: " << conn_id << " data_ctx_id: " << data_ctx_it->second.data_ctx_id From db31ff8e94f8077635e15446963f1ab859436ccf Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 1 Dec 2023 17:10:18 -0800 Subject: [PATCH 05/17] Fixes for bidir replace --- cmd/client.cc | 3 ++- src/transport_picoquic.cpp | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/cmd/client.cc b/cmd/client.cc index a53c53e..4b4886a 100644 --- a/cmd/client.cc +++ b/cmd/client.cc @@ -75,6 +75,7 @@ struct Delegate : public ITransport::TransportDelegate } } + void on_new_data_context(const TransportConnId&, const DataContextId&) {} }; @@ -126,7 +127,7 @@ main() uint32_t* msg_num = (uint32_t*)&data_buf; int period_count = 0; - ITransport::EncodeFlags encode_flags { .new_stream = true, .clear_tx_queue = true, .use_reset = true}; + ITransport::EncodeFlags encode_flags { .new_stream = true, .clear_tx_queue = false, .use_reset = false}; while (client->status() != TransportStatus::Shutdown && client->status() != TransportStatus::Disconnected) { period_count++; diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 38ef415..01f6b47 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -163,14 +163,14 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, if (is_fin) { transport->logger->info << "Received FIN for stream " << stream_id << std::flush; + data_ctx->current_stream_id = 0; + if (data_ctx->is_default_context) { data_ctx->reset_rx_object(); picoquic_set_app_stream_ctx(pq_cnx, stream_id, NULL); - data_ctx->current_stream_id = 0; - } - else { - transport->logger->info << "delete data context" << std::flush; - transport->deleteDataContext(conn_id, stream_id); + + } else { + transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } } break; @@ -185,13 +185,13 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, data_ctx = transport->createDataContextBiDirRecv(conn_id, stream_id); } + data_ctx->current_stream_id = 0; + if (data_ctx->is_default_context) { data_ctx->reset_rx_object(); - data_ctx->current_stream_id = 0; } else { - transport->logger->info << "delete data context" << std::flush; - transport->deleteDataContext(conn_id, stream_id); + transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); @@ -800,7 +800,7 @@ PicoQuicTransport::DataContext* PicoQuicTransport::createDataContextBiDirRecv(Tr data_ctx_it->second.current_stream_id = stream_id; - cbNotifyQueue.push([=,data_ctx_id = data_ctx_it->second.data_ctx_id, this]() { + cbNotifyQueue.push([=, data_ctx_id = data_ctx_it->second.data_ctx_id, this]() { delegate.on_new_data_context(conn_id, data_ctx_id); }); logger->info << "Created new bidir data context conn_id: " << conn_id @@ -966,7 +966,7 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, case 
DataContext::StreamAction::REPLACE_STREAM_USE_FIN: { if (data_ctx->stream_tx_object == nullptr) { logger->info << "Replacing stream using FIN; conn_id: " << data_ctx->conn_id - << " existing_stream: " << data_ctx->conn_id << std::flush; + << " existing_stream: " << data_ctx->current_stream_id << std::flush; const auto conn_ctx = getConnContext(data_ctx->conn_id); create_stream(*conn_ctx, *data_ctx, data_ctx->is_bidir); From c8dd6172ee0e418238529adcd7870dcb0d02aae5 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Sat, 2 Dec 2023 13:24:07 -0800 Subject: [PATCH 06/17] Fix issue with replace in same callback --- cmd/client.cc | 3 +- include/transport/transport.h | 7 +- src/transport_picoquic.cpp | 150 ++++++++++++++++++---------------- src/transport_picoquic.h | 7 +- src/transport_udp.cpp | 2 +- src/transport_udp.h | 2 +- 6 files changed, 89 insertions(+), 82 deletions(-) diff --git a/cmd/client.cc b/cmd/client.cc index 4b4886a..9498b1e 100644 --- a/cmd/client.cc +++ b/cmd/client.cc @@ -59,6 +59,7 @@ struct Delegate : public ITransport::TransportDelegate } uint32_t* msg_num = (uint32_t*)data.value().data(); + if (msg_num == NULL) break; if (prev_msg_num && (*msg_num - prev_msg_num) > 1) { logger->info << "conn_id: " << conn_id << " data_ctx_id: " << data_ctx_id << " length: " << data->size() @@ -127,7 +128,7 @@ main() uint32_t* msg_num = (uint32_t*)&data_buf; int period_count = 0; - ITransport::EncodeFlags encode_flags { .new_stream = true, .clear_tx_queue = false, .use_reset = false}; + ITransport::EnqueueFlags encode_flags { .new_stream = true, .clear_tx_queue = true, .use_reset = true}; while (client->status() != TransportStatus::Shutdown && client->status() != TransportStatus::Disconnected) { period_count++; diff --git a/include/transport/transport.h b/include/transport/transport.h index 6366e36..a3b63c9 100644 --- a/include/transport/transport.h +++ b/include/transport/transport.h @@ -266,9 +266,10 @@ class ITransport sockaddr_storage* addr) = 0; /** - * Encode flags + * Enqueue flags */ - struct EncodeFlags { + struct EnqueueFlags + { bool new_stream { false }; /// Indicates that a new stream should be created to replace existing one bool clear_tx_queue { false }; /// Indicates that the TX queue should be cleared before adding new object bool use_reset { false }; /// Indicates new stream created will close the previous using reset/abrupt @@ -296,7 +297,7 @@ class ITransport std::vector&& bytes, const uint8_t priority = 1, const uint32_t ttl_ms=350, - const EncodeFlags flags={false, false, false}) = 0; + const EnqueueFlags flags={false, false, false}) = 0; /** * @brief Dequeue application data from transport queue diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 01f6b47..3eb7c64 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -80,11 +80,13 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, transport->pq_runner(); + if (data_ctx == NULL && stream_id == 0) { + data_ctx = &transport->getDefaultDataContext(conn_id); + } + switch (fin_or_event) { case picoquic_callback_prepare_datagram: { - data_ctx = &transport->getDefaultDataContext(conn_id); - // length is the max allowed data length data_ctx->metrics.dgram_prepare_send++; transport->send_next_datagram(data_ctx, bytes, length); @@ -92,8 +94,6 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, } case picoquic_callback_datagram_acked: - data_ctx = &transport->getDefaultDataContext(conn_id); - // Called for each datagram - TODO: Revisit how often acked frames are coming in // bytes carries the original 
packet data // log_msg.str(""); @@ -104,26 +104,22 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, break; case picoquic_callback_datagram_spurious: - data_ctx = &transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_spurious++; break; case picoquic_callback_datagram_lost: - data_ctx = &transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_lost++; break; case picoquic_callback_datagram: { - data_ctx = &transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_received++; transport->on_recv_datagram(data_ctx, bytes, length); break; } case picoquic_callback_prepare_to_send: { - if (data_ctx == NULL) { - // For some reason picoquic calls this again even after reset, here we ignore it + // picoquic calls this again even after reset/fin, here we ignore it transport->logger->info << "conn_id: " << conn_id << " stream_id: " << stream_id << " context is null" << std::flush; break; @@ -136,16 +132,24 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, case picoquic_callback_stream_fin: is_fin = true; - // fall through to picoquic_callback_stream_data + // fallthrough to picoquic_callback_stream_data case picoquic_callback_stream_data: { - if (data_ctx == NULL) { - if (not (stream_id & 0x2) /* bidir stream */) { + if (!((stream_id & 0x2) == 2) /* not unidir stream */) { - // Create the data context for new bidir streams created by remote side - data_ctx = transport->createDataContextBiDirRecv(conn_id, stream_id); - picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); + // Do not create bidir data context if it wasn't initiated by this instance + if ( ( (stream_id & 0x1) == 1 && !transport->_is_server_mode) + || ( (stream_id & 0x0) == 0 && transport->_is_server_mode)) { + + // Create the data context for new bidir streams created by remote side + data_ctx = transport->createDataContextBiDirRecv(conn_id, stream_id); + picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); + + } else { + // No data context, ignore stream that is not valid + break; + } } else { data_ctx = &transport->getDefaultDataContext(conn_id); @@ -164,11 +168,10 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, transport->logger->info << "Received FIN for stream " << stream_id << std::flush; data_ctx->current_stream_id = 0; + picoquic_reset_stream_ctx(pq_cnx, data_ctx->current_stream_id); if (data_ctx->is_default_context) { data_ctx->reset_rx_object(); - picoquic_set_app_stream_ctx(pq_cnx, stream_id, NULL); - } else { transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } @@ -177,16 +180,17 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, } case picoquic_callback_stream_reset: { - transport->logger->info << "Received reset stream conn_id: " << conn_id + transport->logger->info << "Received RESET stream conn_id: " << conn_id << " stream_id: " << stream_id << std::flush; + picoquic_reset_stream_ctx(pq_cnx, stream_id); + data_ctx->current_stream_id = 0; + if (data_ctx == NULL) { - data_ctx = transport->createDataContextBiDirRecv(conn_id, stream_id); + break; } - data_ctx->current_stream_id = 0; - if (data_ctx->is_default_context) { data_ctx->reset_rx_object(); } @@ -194,8 +198,6 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } - picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); - break; } @@ -277,10 +279,6 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, transport->on_connection_status(conn_id, TransportStatus::Ready); } - if (data_ctx == NULL) { - data_ctx = &transport->getDefaultDataContext(conn_id); - } - picoquic_enable_keep_alive(pq_cnx, 3000000); 
(void)picoquic_mark_datagram_ready(pq_cnx, 1); @@ -472,7 +470,7 @@ PicoQuicTransport::start() bool PicoQuicTransport::getPeerAddrInfo(const TransportConnId& conn_id, sockaddr_storage* addr) { - std::lock_guard lock(_state_mutex); + std::lock_guard _(_state_mutex); // Locate the specified transport connection context auto it = conn_context.find(conn_id); @@ -492,14 +490,16 @@ PicoQuicTransport::enqueue(const TransportConnId& conn_id, std::vector&& bytes, const uint8_t priority, const uint32_t ttl_ms, - const EncodeFlags flags) + const EnqueueFlags flags) { if (bytes.empty()) { - std::cerr << "enqueue dropped due bytes empty" << std::endl; + logger->error << "enqueue dropped due bytes empty, conn_id: " << conn_id + << " data_ctx_id: " << data_ctx_id + << std::flush; return TransportError::None; } - std::lock_guard lock(_state_mutex); + std::lock_guard _(_state_mutex); const auto conn_ctx_it = conn_context.find(conn_id); if (conn_ctx_it == conn_context.end()) { @@ -521,7 +521,7 @@ PicoQuicTransport::enqueue(const TransportConnId& conn_id, }); } - else { + else { // is stream const auto data_ctx_it = conn_ctx_it->second.active_data_contexts.find(data_ctx_id); if (data_ctx_it == conn_ctx_it->second.active_data_contexts.end()) { return TransportError::InvalidDataContextId; @@ -556,7 +556,7 @@ PicoQuicTransport::enqueue(const TransportConnId& conn_id, std::optional> PicoQuicTransport::dequeue(const TransportConnId& conn_id, const DataContextId& data_ctx_id) { - std::lock_guard lock(_state_mutex); + std::lock_guard _(_state_mutex); const auto conn_ctx_it = conn_context.find(conn_id); if (conn_ctx_it == conn_context.end()) { @@ -582,7 +582,7 @@ PicoQuicTransport::createDataContext(const TransportConnId conn_id, bool use_reliable_transport, uint8_t priority, bool bidir) { - std::lock_guard lock(_state_mutex); + std::lock_guard _(_state_mutex); if (priority > 127) { throw std::runtime_error("Create stream priority cannot be greater than 127, range is 0 - 127"); @@ -604,6 +604,7 @@ PicoQuicTransport::createDataContext(const TransportConnId conn_id, if (is_new) { // Init context data_ctx_it->second.conn_id = conn_id; + data_ctx_it->second.is_bidir = bidir; data_ctx_it->second.data_ctx_id = conn_it->second.next_data_ctx_id++; // Set and bump next data_ctx_id data_ctx_it->second.priority = priority; @@ -616,7 +617,7 @@ PicoQuicTransport::createDataContext(const TransportConnId conn_id, // Create stream if (use_reliable_transport) { - create_stream(conn_it->second, data_ctx_it->second, bidir); + create_stream(conn_it->second, &data_ctx_it->second); } } @@ -626,7 +627,7 @@ PicoQuicTransport::createDataContext(const TransportConnId conn_id, void PicoQuicTransport::close(const TransportConnId& conn_id) { - std::lock_guard lock(_state_mutex); + std::lock_guard _(_state_mutex); const auto conn_it = conn_context.find(conn_id); @@ -658,7 +659,14 @@ PicoQuicTransport::close(const TransportConnId& conn_id) PicoQuicTransport::DataContext& PicoQuicTransport::getDefaultDataContext(TransportConnId conn_id) { - return conn_context[conn_id].default_data_context; + // Locate the specified transport connection context + auto it = conn_context.find(conn_id); + + if (it == conn_context.end()) { + + } + + return it->second.default_data_context; } PicoQuicTransport::ConnectionContext* PicoQuicTransport::getConnContext(const TransportConnId& conn_id) @@ -710,7 +718,7 @@ PicoQuicTransport::ConnectionContext& PicoQuicTransport::createConnContext(picoq break; } - if (is_new) { + if (is_new || 
!conn_ctx.default_data_context.is_default_context) { logger->info << "Created new connection context for conn_id: " << conn_ctx.conn_id << std::flush; // Setup default data context @@ -773,7 +781,7 @@ PicoQuicTransport::setStatus(TransportStatus status) PicoQuicTransport::DataContext* PicoQuicTransport::createDataContextBiDirRecv(TransportConnId conn_id, uint64_t stream_id) { - std::lock_guard lock(_state_mutex); + std::lock_guard _(_state_mutex); const auto conn_it = conn_context.find(conn_id); if (conn_it == conn_context.end()) { @@ -828,7 +836,7 @@ void PicoQuicTransport::pq_runner() { void PicoQuicTransport::deleteDataContext(const TransportConnId& conn_id, DataContextId data_ctx_id) { - std::lock_guard lock(_state_mutex); + std::lock_guard _(_state_mutex); const auto conn_it = conn_context.find(conn_id); @@ -852,7 +860,9 @@ void PicoQuicTransport::deleteDataContext(const TransportConnId& conn_id, DataCo // Remove pointer references in picoquic for active streams for (const auto& [d_id, d_ctx]: conn_it->second.active_data_contexts) { - picoquic_add_to_stream(conn_it->second.pq_cnx, d_ctx.current_stream_id, NULL, 0, 1); + // FIN picoquic_add_to_stream(conn_it->second.pq_cnx, d_ctx.current_stream_id, NULL, 0, 1); + picoquic_mark_active_stream(conn_it->second.pq_cnx, d_ctx.current_stream_id, 0, NULL); + picoquic_reset_stream(conn_it->second.pq_cnx, d_ctx.current_stream_id, 0); } picoquic_close(conn_it->second.pq_cnx, 0); @@ -866,7 +876,7 @@ void PicoQuicTransport::deleteDataContext(const TransportConnId& conn_id, DataCo return; } - close_stream(conn_it->second, data_ctx_it->second, false); + close_stream(conn_it->second, &data_ctx_it->second, false); conn_it->second.active_data_contexts.erase(data_ctx_it); } @@ -932,7 +942,7 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, logger->info << "Creating unset stream in conn_id: " << data_ctx->conn_id << std::flush; const auto conn_ctx = getConnContext(data_ctx->conn_id); - create_stream(*conn_ctx, *data_ctx, data_ctx->is_bidir); + create_stream(*conn_ctx, data_ctx); } else { switch (data_ctx->stream_action) { @@ -951,16 +961,16 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, const auto conn_ctx = getConnContext(data_ctx->conn_id); - close_stream(*conn_ctx, *data_ctx, true); + close_stream(*conn_ctx, data_ctx, true); data_ctx->reset_tx_object(); - create_stream(*conn_ctx, *data_ctx, data_ctx->is_bidir); + create_stream(*conn_ctx, data_ctx); std::lock_guard _(_state_mutex); data_ctx->stream_action = DataContext::StreamAction::NO_ACTION; - break; + return; } case DataContext::StreamAction::REPLACE_STREAM_USE_FIN: { @@ -969,12 +979,12 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, << " existing_stream: " << data_ctx->current_stream_id << std::flush; const auto conn_ctx = getConnContext(data_ctx->conn_id); - create_stream(*conn_ctx, *data_ctx, data_ctx->is_bidir); + create_stream(*conn_ctx, data_ctx); std::lock_guard _(_state_mutex); data_ctx->stream_action = DataContext::StreamAction::NO_ACTION; } - break; + return; } } } @@ -1073,8 +1083,6 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, void PicoQuicTransport::on_connection_status(const TransportConnId conn_id, const TransportStatus status) { - std::lock_guard _(_state_mutex); - if (status == TransportStatus::Ready) { auto conn_ctx = getConnContext(conn_id); logger->info << "Connection established to server " @@ -1089,8 +1097,6 @@ 
PicoQuicTransport::on_connection_status(const TransportConnId conn_id, const Tra void PicoQuicTransport::on_new_connection(const TransportConnId conn_id) { - std::lock_guard _(_state_mutex); - auto conn_ctx = getConnContext(conn_id); logger->info << "New Connection " << conn_ctx->peer_addr_text << " port: " << conn_ctx->peer_port << " conn_id: " << conn_id @@ -1484,57 +1490,57 @@ void PicoQuicTransport::cbNotifier() logger->Log("Done with transport callback notifier thread"); } -void PicoQuicTransport::create_stream(ConnectionContext& conn_ctx, DataContext &data_ctx, const bool bidir) +void PicoQuicTransport::create_stream(ConnectionContext& conn_ctx, DataContext *data_ctx) { conn_ctx.last_stream_id = ::get_next_stream_id(conn_ctx.last_stream_id , - _is_server_mode, !bidir); + _is_server_mode, !data_ctx->is_bidir); - logger->info << "conn_id: " << conn_ctx.conn_id << " data_ctx_id: " << data_ctx.data_ctx_id + logger->info << "conn_id: " << conn_ctx.conn_id << " data_ctx_id: " << data_ctx->data_ctx_id << " create new stream with stream_id: " << conn_ctx.last_stream_id << std::flush; - if (data_ctx.current_stream_id) { + if (data_ctx->current_stream_id) { close_stream(conn_ctx, data_ctx, false); } - data_ctx.current_stream_id = conn_ctx.last_stream_id; + data_ctx->current_stream_id = conn_ctx.last_stream_id; /* * Low order bit set indicates FIFO handling of same priorities, unset is round-robin */ - picoquic_set_app_stream_ctx(conn_ctx.pq_cnx, data_ctx.current_stream_id , &data_ctx); - picoquic_set_stream_priority(conn_ctx.pq_cnx, data_ctx.current_stream_id, (data_ctx.priority << 1)); - picoquic_mark_active_stream(conn_ctx.pq_cnx, data_ctx.current_stream_id, 1, &data_ctx); + picoquic_set_stream_priority(conn_ctx.pq_cnx, data_ctx->current_stream_id, (data_ctx->priority << 1)); + picoquic_mark_active_stream(conn_ctx.pq_cnx, data_ctx->current_stream_id, 1, data_ctx); } -void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataContext& data_ctx, const bool send_reset) +void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataContext* data_ctx, const bool send_reset) { - if (data_ctx.current_stream_id == 0) { + if (data_ctx->current_stream_id == 0) { return; // stream already closed } - logger->info << "conn_id: " << conn_ctx.conn_id << " data_ctx_id: " << data_ctx.data_ctx_id - << " closing stream stream_id: " << data_ctx.current_stream_id + logger->info << "conn_id: " << conn_ctx.conn_id << " data_ctx_id: " << data_ctx->data_ctx_id + << " closing stream stream_id: " << data_ctx->current_stream_id << std::flush; if (send_reset) { - logger->info << "Reset stream_id: " << data_ctx.current_stream_id << " conn_id: " << conn_ctx.conn_id + logger->info << "Reset stream_id: " << data_ctx->current_stream_id << " conn_id: " << conn_ctx.conn_id << std::flush; - picoquic_reset_stream(conn_ctx.pq_cnx, data_ctx.current_stream_id, 0); + picoquic_reset_stream(conn_ctx.pq_cnx, data_ctx->current_stream_id, 0); + picoquic_set_app_stream_ctx(conn_ctx.pq_cnx, data_ctx->current_stream_id , NULL); - data_ctx.reset_tx_object(); + data_ctx->reset_tx_object(); - if (data_ctx.is_bidir) { - data_ctx.reset_rx_object(); + if (data_ctx->is_bidir) { + data_ctx->reset_rx_object(); } } else { - logger->info << "Sending FIN for stream_id: " << data_ctx.current_stream_id << " conn_id: " << conn_ctx.conn_id + logger->info << "Sending FIN for stream_id: " << data_ctx->current_stream_id << " conn_id: " << conn_ctx.conn_id << std::flush; - picoquic_add_to_stream(conn_ctx.pq_cnx, 
data_ctx.current_stream_id, NULL, 0, 1); + picoquic_add_to_stream(conn_ctx.pq_cnx, data_ctx->current_stream_id, NULL, 0, 1); } - data_ctx.current_stream_id = 0; + data_ctx->current_stream_id = 0; } diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index d39055d..7773004 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -260,7 +260,7 @@ class PicoQuicTransport : public ITransport std::vector&& bytes, const uint8_t priority, const uint32_t ttl_ms, - const EncodeFlags flags) override; + const EnqueueFlags flags) override; std::optional> dequeue( const TransportConnId& conn_id, @@ -337,9 +337,8 @@ class PicoQuicTransport : public ITransport * * @param conn_ctx Connection context to create stream under * @param data_ctx Data context in connection context to create streams - * @param bidir Stream should be created as bi-directional (true) or unidirectional (false) */ - void create_stream(ConnectionContext&conn_ctx, DataContext &data_ctx, const bool bidir=false); + void create_stream(ConnectionContext&conn_ctx, DataContext *data_ctx); /** * @brief App initiated Close stream @@ -350,7 +349,7 @@ class PicoQuicTransport : public ITransport * @param data_ctx Data context for the stream * @param send_reset Indicates if the stream should be closed by RESET, otherwise FIN */ - void close_stream(const ConnectionContext&conn_ctx, DataContext &data_ctx, const bool send_reset); + void close_stream(const ConnectionContext& conn_ctx, DataContext *data_ctx, const bool send_reset); /* diff --git a/src/transport_udp.cpp b/src/transport_udp.cpp index 7e881bf..170ba31 100644 --- a/src/transport_udp.cpp +++ b/src/transport_udp.cpp @@ -356,7 +356,7 @@ UDPTransport::enqueue(const TransportConnId& context_id, std::vector&& bytes, [[maybe_unused]] const uint8_t priority, [[maybe_unused]] const uint32_t ttl_m, - [[maybe_unused]] const EncodeFlags flags) + [[maybe_unused]] const EnqueueFlags flags) { if (bytes.empty()) { return TransportError::None; diff --git a/src/transport_udp.h b/src/transport_udp.h index 0ba8db7..b25a637 100644 --- a/src/transport_udp.h +++ b/src/transport_udp.h @@ -81,7 +81,7 @@ class UDPTransport : public ITransport std::vector&& bytes, const uint8_t priority, const uint32_t ttl_ms, - const EncodeFlags flags) override; + const EnqueueFlags flags) override; std::optional> dequeue( const TransportConnId& context_id, From 0af39f3adbf6005242752ac9ae6b0af3886798f0 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Mon, 4 Dec 2023 09:07:32 -0800 Subject: [PATCH 07/17] Add fallthrough attribute --- src/transport_picoquic.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 3eb7c64..b9791ef 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -132,8 +132,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, case picoquic_callback_stream_fin: is_fin = true; - // fallthrough to picoquic_callback_stream_data - + [[fallthrough]]; case picoquic_callback_stream_data: { if (data_ctx == NULL) { if (!((stream_id & 0x2) == 2) /* not unidir stream */) { From 976b367de6b272820b973b1025cf254f6f94609c Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Tue, 5 Dec 2023 17:20:15 -0800 Subject: [PATCH 08/17] Fix issues with data context being NULL in pq_event callback --- Makefile | 2 +- include/transport/transport.h | 2 +- src/transport_picoquic.cpp | 20 ++++++++++---------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index e4f3c52..2c9687a 100644 --- 
a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ all: ${BUILD_DIR} cmake --build ${BUILD_DIR} --parallel 8 ${BUILD_DIR}: CMakeLists.txt cmd/CMakeLists.txt - cmake -B${BUILD_DIR} -DBUILD_TESTING=TRUE -DQTRANSPORT_BUILD_TESTS=TRUE -DCMAKE_BUILD_TYPE=Debug . + cmake -B${BUILD_DIR} -DBUILD_TESTING=ON -DQTRANSPORT_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Debug . clean: cmake --build ${BUILD_DIR} --target clean diff --git a/include/transport/transport.h b/include/transport/transport.h index a3b63c9..9d5f6cd 100644 --- a/include/transport/transport.h +++ b/include/transport/transport.h @@ -242,7 +242,7 @@ class ITransport /** * @brief Close a transport context */ - virtual void close(const TransportConnId& context_id) = 0; + virtual void close(const TransportConnId& conn_id) = 0; /** * @brief Delete data context diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index b9791ef..ae5698b 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -80,38 +80,37 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, transport->pq_runner(); - if (data_ctx == NULL && stream_id == 0) { - data_ctx = &transport->getDefaultDataContext(conn_id); - } switch (fin_or_event) { case picoquic_callback_prepare_datagram: { // length is the max allowed data length + data_ctx = &transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_prepare_send++; transport->send_next_datagram(data_ctx, bytes, length); break; } case picoquic_callback_datagram_acked: - // Called for each datagram - TODO: Revisit how often acked frames are coming in // bytes carries the original packet data - // log_msg.str(""); - // log_msg << "Got datagram ack send_time: " << stream_id - // << " bytes length: " << length; - // transport->logger.log(LogLevel::info, log_msg.str()); + data_ctx = &transport->getDefaultDataContext(conn_id); +// transport->logger->info << "Got datagram ack send_time: " << stream_id +// << " bytes length: " << length << std::flush; data_ctx->metrics.dgram_ack++; break; case picoquic_callback_datagram_spurious: + data_ctx = &transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_spurious++; break; case picoquic_callback_datagram_lost: + data_ctx = &transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_lost++; break; case picoquic_callback_datagram: { + data_ctx = &transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_received++; transport->on_recv_datagram(data_ctx, bytes, length); break; @@ -184,12 +183,13 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, << std::flush; picoquic_reset_stream_ctx(pq_cnx, stream_id); - data_ctx->current_stream_id = 0; if (data_ctx == NULL) { - break; + data_ctx = &transport->getDefaultDataContext(conn_id); } + data_ctx->current_stream_id = 0; + if (data_ctx->is_default_context) { data_ctx->reset_rx_object(); } From da94704b42e25048f6610bebe10e380ee2aac1d9 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Wed, 6 Dec 2023 09:05:20 -0800 Subject: [PATCH 09/17] Fix closing of default context to call on_connection_status --- src/transport_picoquic.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index ae5698b..566ca60 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -842,16 +842,11 @@ void PicoQuicTransport::deleteDataContext(const TransportConnId& conn_id, DataCo if (conn_it == conn_context.end()) return; - const auto data_ctx_it = conn_it->second.active_data_contexts.find(data_ctx_id); - - if (data_ctx_it == 
conn_it->second.active_data_contexts.end()) - return; - logger->info << "Delete data context " << data_ctx_id << " in conn_id: " << conn_id << std::flush; - if (data_ctx_it->second.is_default_context) { + if (data_ctx_id == 0) { logger->info << "Delete default context for conn_id: " << conn_id << ", closing connection" << std::flush; // Only one datagram context is per connection, if it's deleted, then the connection is to be terminated @@ -875,6 +870,10 @@ void PicoQuicTransport::deleteDataContext(const TransportConnId& conn_id, DataCo return; } + const auto data_ctx_it = conn_it->second.active_data_contexts.find(data_ctx_id); + if (data_ctx_it == conn_it->second.active_data_contexts.end()) + return; + close_stream(conn_it->second, &data_ctx_it->second, false); conn_it->second.active_data_contexts.erase(data_ctx_it); From 50afbeb3940be32fef99a13636eac0dac62bf257 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Wed, 6 Dec 2023 09:59:29 -0800 Subject: [PATCH 10/17] Fix data and conn context being removed and fix close() Close was erasing connection context when it shouldn't have been. Picoquic callback close action will remove the connection context. --- src/transport_picoquic.cpp | 46 ++++++++++++++++++++------------------ src/transport_picoquic.h | 2 +- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 566ca60..cedcf1a 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -85,7 +85,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, case picoquic_callback_prepare_datagram: { // length is the max allowed data length - data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_prepare_send++; transport->send_next_datagram(data_ctx, bytes, length); break; @@ -93,24 +93,24 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, case picoquic_callback_datagram_acked: // bytes carries the original packet data - data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); // transport->logger->info << "Got datagram ack send_time: " << stream_id // << " bytes length: " << length << std::flush; data_ctx->metrics.dgram_ack++; break; case picoquic_callback_datagram_spurious: - data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_spurious++; break; case picoquic_callback_datagram_lost: - data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_lost++; break; case picoquic_callback_datagram: { - data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); data_ctx->metrics.dgram_received++; transport->on_recv_datagram(data_ctx, bytes, length); break; @@ -150,7 +150,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, } } else { - data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); picoquic_set_app_stream_ctx(pq_cnx, stream_id, data_ctx); } } @@ -185,7 +185,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, picoquic_reset_stream_ctx(pq_cnx, stream_id); if (data_ctx == NULL) { - data_ctx = &transport->getDefaultDataContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); } data_ctx->current_stream_id = 0; @@ -229,8 +229,8 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, case picoquic_callback_application_close: case picoquic_callback_close: { - 
transport->logger->info << "Closing connection stream_id: " - << stream_id; + transport->logger->info << "Closing connection conn_id: " << conn_id + << "stream_id: " << stream_id; switch (picoquic_get_local_error(pq_cnx)) { case PICOQUIC_ERROR_IDLE_TIMEOUT: @@ -243,19 +243,25 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, << " app_error: " << picoquic_get_application_error(pq_cnx); } - data_ctx = &transport->getDefaultDataContext(conn_id); + picoquic_set_callback(pq_cnx, NULL, NULL); - if (data_ctx != NULL) { - auto conn_ctx = transport->getConnContext(conn_id); + data_ctx = transport->getDefaultDataContext(conn_id); - transport->logger->info << " " << conn_ctx->peer_addr_text; + if (data_ctx == nullptr) { + transport->logger->info << std::flush; + transport->logger->error << "Close conn_id: " << conn_id + << " is missing connection context" << std::flush; + return 0; + } - transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); + auto conn_ctx = transport->getConnContext(conn_id); + if (conn_ctx != nullptr) { + transport->logger->info << " remote: " << conn_ctx->peer_addr_text; } transport->logger->info << std::flush; - picoquic_set_callback(pq_cnx, NULL, NULL); + transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); if (not transport->_is_server_mode) { transport->setStatus(TransportStatus::Disconnected); @@ -643,8 +649,6 @@ PicoQuicTransport::close(const TransportConnId& conn_id) picoquic_close(conn_it->second.pq_cnx, 0); - conn_context.erase(conn_it); - if (not _is_server_mode) { setStatus(TransportStatus::Shutdown); } @@ -655,23 +659,21 @@ PicoQuicTransport::close(const TransportConnId& conn_id) * ============================================================================ */ -PicoQuicTransport::DataContext& +PicoQuicTransport::DataContext* PicoQuicTransport::getDefaultDataContext(TransportConnId conn_id) { // Locate the specified transport connection context auto it = conn_context.find(conn_id); if (it == conn_context.end()) { - + return nullptr; } - return it->second.default_data_context; + return &it->second.default_data_context; } PicoQuicTransport::ConnectionContext* PicoQuicTransport::getConnContext(const TransportConnId& conn_id) { - ConnectionContext conn_ctx { }; - // Locate the specified transport connection context auto it = conn_context.find(conn_id); diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index 7773004..6af54bd 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -287,7 +287,7 @@ class PicoQuicTransport : public ITransport ConnectionContext& createConnContext(picoquic_cnx_t * pq_cnx); - DataContext& getDefaultDataContext(TransportConnId conn_id); + DataContext* getDefaultDataContext(TransportConnId conn_id); void send_next_datagram(DataContext* data_ctx, uint8_t* bytes_ctx, size_t max_len); void send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, size_t max_len); From 7026ff0658b7a78ffbcf860fd196d21be5bbc980 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Thu, 7 Dec 2023 10:22:21 -0800 Subject: [PATCH 11/17] Resolves #101 and reset stream RX object on RESET --- include/transport/time_queue.h | 2 ++ src/transport_picoquic.cpp | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/transport/time_queue.h b/include/transport/time_queue.h index 0b9050b..6b1a886 100644 --- a/include/transport/time_queue.h +++ b/include/transport/time_queue.h @@ -369,6 +369,8 @@ namespace qtransport { { if (ttl > _duration) { throw std::invalid_argument("TTL is greater than max duration"); + } else if (ttl 
== 0) { + ttl = _duration; } ttl = ttl / _interval; diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index cedcf1a..0973cf8 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -189,11 +189,9 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, } data_ctx->current_stream_id = 0; + data_ctx->reset_rx_object(); - if (data_ctx->is_default_context) { - data_ctx->reset_rx_object(); - } - else { + if (!data_ctx->is_default_context) { transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } From b6ea7c5c5e1d59f8fc98ea128eb6435644075cbe Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Thu, 7 Dec 2023 16:05:37 -0800 Subject: [PATCH 12/17] Fix RX buffer handling to support multiple streams --- src/transport_picoquic.cpp | 78 ++++++++++++++++++++++---------------- src/transport_picoquic.h | 43 ++++++++++++--------- 2 files changed, 70 insertions(+), 51 deletions(-) diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 0973cf8..5d209f0 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -159,7 +159,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, // length is the amount of data received if (length) { - transport->on_recv_stream_bytes(data_ctx, bytes, length); + transport->on_recv_stream_bytes(data_ctx, stream_id, bytes, length); } if (is_fin) { @@ -169,7 +169,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, picoquic_reset_stream_ctx(pq_cnx, data_ctx->current_stream_id); if (data_ctx->is_default_context) { - data_ctx->reset_rx_object(); + data_ctx->reset_rx_object(stream_id); } else { transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } @@ -189,7 +189,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, } data_ctx->current_stream_id = 0; - data_ctx->reset_rx_object(); + data_ctx->reset_rx_object(stream_id); if (!data_ctx->is_default_context) { transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); @@ -1135,7 +1135,7 @@ PicoQuicTransport::on_recv_datagram(DataContext* data_ctx, uint8_t* bytes, size_ } } -void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint8_t* bytes, size_t length) +void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint64_t stream_id, uint8_t* bytes, size_t length) { uint8_t *bytes_p = bytes; @@ -1144,22 +1144,34 @@ void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint8_t* byt return; } + auto rx_buf_it = data_ctx->stream_rx_buffer.find(stream_id); + if (rx_buf_it == data_ctx->stream_rx_buffer.end()) { + logger->debug << "Adding received conn_id: " << data_ctx->conn_id + << " stream_id: " << stream_id + << " into RX buffer" << std::flush; + + auto [it, _] = data_ctx->stream_rx_buffer.try_emplace(stream_id); + rx_buf_it = it; + } + + auto &rx_buf = rx_buf_it->second; + bool object_complete = false; - if (data_ctx->stream_rx_object == nullptr) { + if (rx_buf.object == nullptr) { - if (data_ctx->stream_rx_object_hdr_size < 4) { + if (rx_buf.object_hdr_size < 4) { - uint16_t len_to_copy = length >= 4 ? 4 - data_ctx->stream_rx_object_hdr_size : 4; + uint16_t len_to_copy = length >= 4 ? 4 - rx_buf.object_hdr_size : 4; - std::memcpy(&data_ctx->stream_rx_object_size + data_ctx->stream_rx_object_hdr_size, + std::memcpy(&rx_buf.object_size + rx_buf.object_hdr_size, bytes_p, len_to_copy); - data_ctx->stream_rx_object_hdr_size += len_to_copy; + rx_buf.object_hdr_size += len_to_copy; - if (data_ctx->stream_rx_object_hdr_size < 4) { - logger->debug << "Stream header not complete. 
hdr " << data_ctx->stream_rx_object_hdr_size + if (rx_buf.object_hdr_size < 4) { + logger->debug << "Stream header not complete. hdr " << rx_buf.object_hdr_size << " len_to_copy: " << len_to_copy << " length: " << length << std::flush; @@ -1169,49 +1181,49 @@ void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint8_t* byt bytes_p += len_to_copy; - if (length == 0 || data_ctx->stream_rx_object_hdr_size < 4) { + if (length == 0 || rx_buf.object_hdr_size < 4) { // Either no data left to read or not enough data for the header (length) value return; } } - if (data_ctx->stream_rx_object_size > 40000000L) { // Safety check - logger->warning << "on_recv_stream_bytes sid: " << data_ctx->current_stream_id + if (rx_buf.object_size > 40000000L) { // Safety check + logger->warning << "on_recv_stream_bytes sid: " << stream_id << " data length is too large: " - << std::to_string(data_ctx->stream_rx_object_size) + << std::to_string(rx_buf.object_size) << std::flush; - data_ctx->reset_rx_object(); + data_ctx->reset_rx_object(stream_id); // TODO: Should reset stream in this case return; } - if (data_ctx->stream_rx_object_size <= length) { + if (rx_buf.object_size <= length) { object_complete = true; - std::vector data(bytes_p, bytes_p + data_ctx->stream_rx_object_size); + std::vector data(bytes_p, bytes_p + rx_buf.object_size); data_ctx->rx_data->push(std::move(data)); - bytes_p += data_ctx->stream_rx_object_size; - length -= data_ctx->stream_rx_object_size; + bytes_p += rx_buf.object_size; + length -= rx_buf.object_size; - data_ctx->metrics.stream_bytes_recv += data_ctx->stream_rx_object_size; + data_ctx->metrics.stream_bytes_recv += rx_buf.object_size; - data_ctx->reset_rx_object(); + data_ctx->reset_rx_object(stream_id); length = 0; // no more data left to process } else { // Need to wait for more data, create new object buffer - data_ctx->stream_rx_object = new uint8_t[data_ctx->stream_rx_object_size]; + rx_buf.object = new uint8_t[rx_buf.object_size]; - data_ctx->stream_rx_object_offset = length; - std::memcpy(data_ctx->stream_rx_object, bytes_p, length); + rx_buf.object_offset = length; + std::memcpy(rx_buf.object, bytes_p, length); data_ctx->metrics.stream_bytes_recv += length; length = 0; // no more data left to process } } else { // Existing object, append - size_t remaining_len = data_ctx->stream_rx_object_size - data_ctx->stream_rx_object_offset; + size_t remaining_len = rx_buf.object_size - rx_buf.object_offset; if (remaining_len > length) { remaining_len = length; @@ -1224,18 +1236,18 @@ void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint8_t* byt data_ctx->metrics.stream_bytes_recv += remaining_len; - std::memcpy(data_ctx->stream_rx_object + data_ctx->stream_rx_object_offset, bytes_p, remaining_len); + std::memcpy(rx_buf.object + rx_buf.object_offset, bytes_p, remaining_len); bytes_p += remaining_len; if (object_complete) { - std::vector data(data_ctx->stream_rx_object, - data_ctx->stream_rx_object + data_ctx->stream_rx_object_size); + std::vector data(rx_buf.object, + rx_buf.object + rx_buf.object_size); data_ctx->rx_data->push(std::move(data)); - data_ctx->reset_rx_object(); + data_ctx->reset_rx_object(stream_id); } else { - data_ctx->stream_rx_object_offset += remaining_len; + rx_buf.object_offset += remaining_len; } } @@ -1260,7 +1272,7 @@ void PicoQuicTransport::on_recv_stream_bytes(DataContext* data_ctx, uint8_t* byt if (length > 0) { logger->debug << "on_stream_bytes has remaining bytes: " << length << std::flush; - on_recv_stream_bytes(data_ctx, 
bytes_p, length); + on_recv_stream_bytes(data_ctx, stream_id, bytes_p, length); } } @@ -1530,7 +1542,7 @@ void PicoQuicTransport::close_stream(const ConnectionContext& conn_ctx, DataCont data_ctx->reset_tx_object(); if (data_ctx->is_bidir) { - data_ctx->reset_rx_object(); + data_ctx->reset_rx_object(data_ctx->current_stream_id); } } else { diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index 6af54bd..012feca 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -101,10 +101,20 @@ class PicoQuicTransport : public ITransport size_t stream_tx_object_size {0}; /// Size of the tx object size_t stream_tx_object_offset{0}; /// Pointer offset to next byte to send - uint8_t* stream_rx_object {nullptr}; /// Current object that is being received via byte stream - uint16_t stream_rx_object_hdr_size { 0 }; /// Size of header read in (should be 4 right now) - uint32_t stream_rx_object_size {0}; /// Receive object data size to append up to before sending to app - size_t stream_rx_object_offset{0}; /// Pointer offset to next byte to append + struct StreamRxBuffer + { + uint8_t* object { nullptr }; /// Current object that is being received via byte stream + uint16_t object_hdr_size{ 0 }; /// Size of header read in (should be 4 right now) + uint32_t object_size{ 0 }; /// Receive object data size to append up to before sending to app + size_t object_offset{ 0 }; /// Pointer offset to next byte to append + + ~StreamRxBuffer() { + if (object != nullptr) { + delete[] object; + } + } + }; + std::map stream_rx_buffer; /// Map of stream receive buffers, key is stream_id // The last time TX callback was run std::chrono::time_point last_tx_callback_time { std::chrono::steady_clock::now() }; @@ -125,25 +135,22 @@ class PicoQuicTransport : public ITransport delete[] stream_tx_object; stream_tx_object = nullptr; } - - if (stream_rx_object != nullptr) { - delete[] stream_rx_object; - stream_rx_object = nullptr; - } } /** * Reset the RX object buffer */ - void reset_rx_object() { - if (stream_rx_object != nullptr) { - delete[] stream_rx_object; - } + void reset_rx_object(uint64_t stream_id) { + + auto it = stream_rx_buffer.find(stream_id); + if (it != stream_rx_buffer.end()) { + delete[] it->second.object; - stream_rx_object = nullptr; - stream_rx_object_hdr_size = 0; - stream_rx_object_size = 0; - stream_rx_object_offset = 0; + it->second.object = nullptr; + it->second.object_hdr_size = 0; + it->second.object_size = 0; + it->second.object_offset = 0; + } } /** @@ -298,7 +305,7 @@ class PicoQuicTransport : public ITransport void on_new_connection(const TransportConnId conn_id); void on_recv_datagram(DataContext* data_ctx, uint8_t* bytes, size_t length); - void on_recv_stream_bytes(DataContext* data_ctx, + void on_recv_stream_bytes(DataContext* data_ctx, uint64_t stream_id, uint8_t* bytes, size_t length); void check_conns_for_congestion(); From c7ea48b81f38276eda08281eeeb21b06e084c05f Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 8 Dec 2023 07:19:54 -0800 Subject: [PATCH 13/17] PR comment updates --- cmd/client.cc | 2 +- include/transport/transport.h | 2 ++ src/transport_picoquic.cpp | 13 ++++++++----- src/transport_picoquic.h | 6 +++++- 4 files changed, 16 insertions(+), 7 deletions(-) diff --git a/cmd/client.cc b/cmd/client.cc index 9498b1e..7e36547 100644 --- a/cmd/client.cc +++ b/cmd/client.cc @@ -34,7 +34,7 @@ struct Delegate : public ITransport::TransportDelegate void setClientTransport(std::shared_ptr client) { this->client = client; } - TransportConnId getContextId() { 
return conn_id; } + TransportConnId getContextId() const { return conn_id; } void on_connection_status(const TransportConnId& conn_id, const TransportStatus status) { diff --git a/include/transport/transport.h b/include/transport/transport.h index 9d5f6cd..17a14fb 100644 --- a/include/transport/transport.h +++ b/include/transport/transport.h @@ -80,6 +80,8 @@ struct TransportConfig const uint64_t pacing_decrease_threshold_Bps { 16000 }; /// QUIC pacing rate decrease threshold for notification in Bps const uint64_t pacing_increase_threshold_Bps { 16000 }; /// QUIC pacing rate increase threshold for notification in Bps + + const uint64_t idle_timeout_ms { 30000 }; /// Idle timeout for transport connection(s) in milliseconds }; /** diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 5d209f0..50b2d84 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -282,7 +282,6 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, transport->on_connection_status(conn_id, TransportStatus::Ready); } - picoquic_enable_keep_alive(pq_cnx, 3000000); (void)picoquic_mark_datagram_ready(pq_cnx, 1); break; @@ -435,7 +434,7 @@ PicoQuicTransport::start() picoquic_init_transport_parameters(&local_tp_options, 1); local_tp_options.max_datagram_frame_size = 1280; // local_tp_options.max_packet_size = 1450; - local_tp_options.idle_timeout = 30000; // TODO: Remove when we add reconnnect change back to 10 seconds + local_tp_options.idle_timeout = tconfig.idle_timeout_ms; local_tp_options.max_ack_delay = 100000; local_tp_options.min_ack_delay = 1000; @@ -576,8 +575,6 @@ PicoQuicTransport::dequeue(const TransportConnId& conn_id, const DataContextId& } return std::move(data_ctx_it->second.rx_data->pop()); - - return std::nullopt; } DataContextId @@ -588,6 +585,10 @@ PicoQuicTransport::createDataContext(const TransportConnId conn_id, std::lock_guard _(_state_mutex); if (priority > 127) { + /* + * Picoquic most significant bit of priority indicates to use round-robin. We don't want + * to use round-robin of same priorities right now. 
+ */ throw std::runtime_error("Create stream priority cannot be greater than 127, range is 0 - 127"); } @@ -1107,6 +1108,8 @@ PicoQuicTransport::on_new_connection(const TransportConnId conn_id) .port = conn_ctx->peer_port, .proto = TransportProtocol::QUIC }; + picoquic_enable_keep_alive(conn_ctx->pq_cnx, tconfig.idle_timeout_ms * 500); + cbNotifyQueue.push([=, this]() { delegate.on_new_connection(conn_id, remote); }); } @@ -1408,7 +1411,7 @@ void PicoQuicTransport::client(const TransportConnId conn_id) } else { picoquic_set_callback(conn_ctx->pq_cnx, pq_event_cb, this); - picoquic_enable_keep_alive(conn_ctx->pq_cnx, 3000000); + picoquic_enable_keep_alive(conn_ctx->pq_cnx, tconfig.idle_timeout_ms * 500); ret = picoquic_start_client_cnx(conn_ctx->pq_cnx); if (ret < 0) { diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index 012feca..cd550e2 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -62,7 +62,7 @@ class PicoQuicTransport : public ITransport uint64_t stream_bytes_recv {0}; uint64_t stream_objects_recv {0}; - auto operator<=>(const DataContextMetrics&) const = default; + constexpr auto operator<=>(const DataContextMetrics&) const = default; }; @@ -206,6 +206,10 @@ class PicoQuicTransport : public ITransport ConnectionContext(picoquic_cnx_t *cnx) : ConnectionContext() { pq_cnx = cnx; } + + ConnectionContext(picoquic_cnx_t *cnx) : ConnectionContext() { + pq_cnx = cnx; + } }; /* From 109d15d5c25c80f4632a4a0de9a287b885d21305 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 8 Dec 2023 07:20:38 -0800 Subject: [PATCH 14/17] ... --- src/transport_picoquic.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index cd550e2..5ff953b 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -206,10 +206,6 @@ class PicoQuicTransport : public ITransport ConnectionContext(picoquic_cnx_t *cnx) : ConnectionContext() { pq_cnx = cnx; } - - ConnectionContext(picoquic_cnx_t *cnx) : ConnectionContext() { - pq_cnx = cnx; - } }; /* From 0ecf705563405ab604b8400dc998877dd429a426 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 8 Dec 2023 10:03:08 -0800 Subject: [PATCH 15/17] Add log and metrics for rx buffer reset drops --- src/transport_picoquic.cpp | 16 +++++++++++----- src/transport_picoquic.h | 6 +++++- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index 50b2d84..c1e0552 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -189,8 +189,13 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, } data_ctx->current_stream_id = 0; + data_ctx->reset_rx_object(stream_id); + transport->logger->info << "Received RESET stream; conn_id: " << data_ctx->conn_id + << " stream_id: " << stream_id + << " Rx buf drops: " << data_ctx->metrics.tx_buffer_drops << std::flush; + if (!data_ctx->is_default_context) { transport->deleteDataContext(conn_id, data_ctx->data_ctx_id); } @@ -951,19 +956,20 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, case DataContext::StreamAction::REPLACE_STREAM_USE_RESET: { if (data_ctx->stream_tx_object != nullptr) { - data_ctx->metrics.tx_write_buffer_drops++; + data_ctx->metrics.tx_buffer_drops++; } - logger->info << "Replacing stream using RESET; conn_id: " << data_ctx->conn_id - << " existing_stream: " << data_ctx->current_stream_id - << " write buf drops: " << data_ctx->metrics.tx_write_buffer_drops << std::flush; - const auto conn_ctx = 
getConnContext(data_ctx->conn_id); close_stream(*conn_ctx, data_ctx, true); data_ctx->reset_tx_object(); + logger->info << "Replacing stream using RESET; conn_id: " << data_ctx->conn_id + << " existing_stream: " << data_ctx->current_stream_id + << " write buf drops: " << data_ctx->metrics.tx_buffer_drops << std::flush; + + create_stream(*conn_ctx, data_ctx); std::lock_guard _(_state_mutex); diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h index 5ff953b..7a450aa 100644 --- a/src/transport_picoquic.h +++ b/src/transport_picoquic.h @@ -54,7 +54,8 @@ class PicoQuicTransport : public ITransport uint64_t stream_prepare_send {0}; uint64_t stream_rx_callbacks {0}; - uint64_t tx_write_buffer_drops {0}; /// Count of write buffer drops of data due to RESET request + uint64_t rx_buffer_drops {0}; /// count of receive buffer drops of data due to RESET request + uint64_t tx_buffer_drops{0}; /// Count of write buffer drops of data due to RESET request uint64_t tx_delayed_callback {0}; /// Count of times transmit callbacks were delayed uint64_t prev_tx_delayed_callback {0}; /// Previous transmit delayed callback value, set each interval uint64_t stream_objects_sent {0}; @@ -144,6 +145,8 @@ class PicoQuicTransport : public ITransport auto it = stream_rx_buffer.find(stream_id); if (it != stream_rx_buffer.end()) { + metrics.rx_buffer_drops++; + delete[] it->second.object; it->second.object = nullptr; @@ -158,6 +161,7 @@ class PicoQuicTransport : public ITransport */ void reset_tx_object() { if (stream_tx_object != nullptr) { + metrics.tx_buffer_drops++; delete [] stream_tx_object; } From 815afeab9d4b509a23c684e2fe26217294d4b929 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 8 Dec 2023 12:27:28 -0800 Subject: [PATCH 16/17] Add data_ctx_id to reset logs --- src/transport_picoquic.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index c1e0552..d261d24 100644 --- a/src/transport_picoquic.cpp +++ b/src/transport_picoquic.cpp @@ -193,6 +193,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, data_ctx->reset_rx_object(stream_id); transport->logger->info << "Received RESET stream; conn_id: " << data_ctx->conn_id + << " data_ctx_id: " << data_ctx->data_ctx_id << " stream_id: " << stream_id << " Rx buf drops: " << data_ctx->metrics.tx_buffer_drops << std::flush; @@ -233,7 +234,7 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx, case picoquic_callback_application_close: case picoquic_callback_close: { transport->logger->info << "Closing connection conn_id: " << conn_id - << "stream_id: " << stream_id; + << " stream_id: " << stream_id; switch (picoquic_get_local_error(pq_cnx)) { case PICOQUIC_ERROR_IDLE_TIMEOUT: @@ -966,6 +967,7 @@ PicoQuicTransport::send_stream_bytes(DataContext* data_ctx, uint8_t* bytes_ctx, data_ctx->reset_tx_object(); logger->info << "Replacing stream using RESET; conn_id: " << data_ctx->conn_id + << " data_ctx_id: " << data_ctx->data_ctx_id << " existing_stream: " << data_ctx->current_stream_id << " write buf drops: " << data_ctx->metrics.tx_buffer_drops << std::flush; From d18614a29f6e96f3c9ee61ec7bb01cf57b6e4ce0 Mon Sep 17 00:00:00 2001 From: Tim Evens Date: Fri, 8 Dec 2023 12:54:17 -0800 Subject: [PATCH 17/17] Fix buf drop metrics --- src/transport_picoquic.cpp | 7 ++++++- src/transport_picoquic.h | 3 --- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/transport_picoquic.cpp b/src/transport_picoquic.cpp index d261d24..a85c83d 100644 --- a/src/transport_picoquic.cpp +++ 
b/src/transport_picoquic.cpp
@@ -190,12 +190,17 @@ int pq_event_cb(picoquic_cnx_t* pq_cnx,
 
         data_ctx->current_stream_id = 0;
 
+        const auto rx_buf_it = data_ctx->stream_rx_buffer.find(stream_id);
+        if (rx_buf_it != data_ctx->stream_rx_buffer.end() && rx_buf_it->second.object != nullptr) {
+            data_ctx->metrics.rx_buffer_drops++;
+        }
+
         data_ctx->reset_rx_object(stream_id);
 
         transport->logger->info << "Received RESET stream; conn_id: " << data_ctx->conn_id
                                 << " data_ctx_id: " << data_ctx->data_ctx_id
                                 << " stream_id: " << stream_id
-                                << " Rx buf drops: " << data_ctx->metrics.tx_buffer_drops << std::flush;
+                                << " RX buf drops: " << data_ctx->metrics.rx_buffer_drops << std::flush;
 
         if (!data_ctx->is_default_context) {
             transport->deleteDataContext(conn_id, data_ctx->data_ctx_id);
diff --git a/src/transport_picoquic.h b/src/transport_picoquic.h
index 7a450aa..5f6daff 100644
--- a/src/transport_picoquic.h
+++ b/src/transport_picoquic.h
@@ -145,8 +145,6 @@ class PicoQuicTransport : public ITransport
 
             auto it = stream_rx_buffer.find(stream_id);
             if (it != stream_rx_buffer.end()) {
-                metrics.rx_buffer_drops++;
-
                 delete[] it->second.object;
 
                 it->second.object = nullptr;
@@ -161,7 +159,6 @@ class PicoQuicTransport : public ITransport
          */
         void reset_tx_object() {
            if (stream_tx_object != nullptr) {
-               metrics.tx_buffer_drops++;
                delete [] stream_tx_object;
            }