From 000c0c20ed220ef171b8e2d2276cdf8e214becd8 Mon Sep 17 00:00:00 2001 From: Christian Huitema Date: Wed, 1 May 2024 11:21:06 -0700 Subject: [PATCH 1/7] High priority bypass CC --- picoquic/picoquic.h | 16 ++++++++++++++++ picoquic/picoquic_internal.h | 1 + picoquic/quicctx.c | 5 +++++ picoquic/sender.c | 32 +++++++++++++++++++++++--------- picoquictest/mediatest.c | 6 ++++++ 5 files changed, 51 insertions(+), 9 deletions(-) diff --git a/picoquic/picoquic.h b/picoquic/picoquic.h index 811cd0988..abfbe7b92 100644 --- a/picoquic/picoquic.h +++ b/picoquic/picoquic.h @@ -1486,6 +1486,22 @@ void picoquic_set_default_wifi_shadow_rtt(picoquic_quic_t* quic, uint64_t wifi_s */ void picoquic_set_default_bbr_quantum_ratio(picoquic_quic_t* quic, double quantum_ratio); +/* The experimental API 'picoquic_set_priority_limit_for_bypass' +* instructs the stack to send the high priority streams or datagrams +* immediately, even if congestion control would normally prevent it. +* +* The "priority_limit" parameter indicates the lowest priority that will +* not be bypassed. For example, if the priority limit is set to 3, streams +* or datagrams with priority 0, 1 or 2 will be sent without waiting for +* congestion control credits, but streams with priority 3 or more will +* not. By default, the limit is set to 0, meaning no stream or datagram +* will bypass congestion control. +* +* This experimental feature will not be activated in a multipath +* environment, i.e., if more than 1 path is activated. + */ +void picoquic_set_priority_limit_for_bypass(picoquic_cnx_t* cnx, uint8_t priority_limit); + /* The experimental API `picoquic_set_feedback_loss_notification` allow applications * to turn on the "feedback lost" event notification. These events are * passed to the congestion control algorithm, allowing it to react diff --git a/picoquic/picoquic_internal.h b/picoquic/picoquic_internal.h index dee4165c8..a2f5ee470 100644 --- a/picoquic/picoquic_internal.h +++ b/picoquic/picoquic_internal.h @@ -1448,6 +1448,7 @@ typedef struct st_picoquic_cnx_t { picoquic_stream_head_t * last_output_stream; uint64_t high_priority_stream_id; uint64_t next_stream_id[4]; + uint64_t priority_limit_for_bypass; /* Bypass CC if datagram or stream priority lower than this, 0 means never */ /* Repeat queue contains packets with data frames that should be * sent according to priority when congestion window opens. 
*/ diff --git a/picoquic/quicctx.c b/picoquic/quicctx.c index 43cc27460..a2330f0dc 100644 --- a/picoquic/quicctx.c +++ b/picoquic/quicctx.c @@ -4782,6 +4782,11 @@ void picoquic_set_default_bbr_quantum_ratio(picoquic_quic_t* quic, double quantu quic->bbr_quantum_ratio = quantum_ratio; } +void picoquic_set_priority_limit_for_bypass(picoquic_cnx_t* cnx, uint8_t priority_limit) +{ + cnx->priority_limit_for_bypass = priority_limit; +} + void picoquic_set_feedback_loss_notification(picoquic_cnx_t* cnx, unsigned int should_notify) { cnx->is_lost_feedback_notification_required = should_notify; diff --git a/picoquic/sender.c b/picoquic/sender.c index 46643f1b1..bd4e54a21 100644 --- a/picoquic/sender.c +++ b/picoquic/sender.c @@ -3150,6 +3150,7 @@ static uint8_t* picoquic_prepare_datagram_ready(picoquic_cnx_t* cnx, picoquic_pa */ static uint8_t* picoquic_prepare_stream_and_datagrams(picoquic_cnx_t* cnx, picoquic_path_t* path_x, uint8_t* bytes_next, uint8_t* bytes_max, + uint64_t max_priority_allowed, int* more_data, int* is_pure_ack, int* no_data_to_send, int* ret) { int datagram_sent = 0; @@ -3187,7 +3188,7 @@ static uint8_t* picoquic_prepare_stream_and_datagrams(picoquic_cnx_t* cnx, picoq current_priority = stream_priority; } - if (current_priority == UINT64_MAX) { + if (current_priority == UINT64_MAX || current_priority >= max_priority_allowed) { /* Nothing to send! */ if (is_first_round) { *no_data_to_send = 1; @@ -3392,13 +3393,25 @@ int picoquic_prepare_packet_almost_ready(picoquic_cnx_t* cnx, picoquic_path_t* p length = bytes_next - bytes; if (path_x->cwin < path_x->bytes_in_transit) { - picoquic_per_ack_state_t ack_state = { 0 }; - cnx->cwin_blocked = 1; - path_x->last_cwin_blocked_time = current_time; - if (cnx->congestion_alg != NULL) { - cnx->congestion_alg->alg_notify(cnx, path_x, - picoquic_congestion_notification_cwin_blocked, - &ack_state, current_time); + /* Implementation of experimental API, picoquic_set_priority_limit_for_bypass */ + uint8_t* bytes_next_before_bypass = bytes_next; + if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1) { + bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, + cnx->priority_limit_for_bypass, + &more_data, &is_pure_ack, &no_data_to_send, &ret); + } + if (bytes_next != bytes_next_before_bypass) { + length = bytes_next - bytes; + } + else { + picoquic_per_ack_state_t ack_state = { 0 }; + cnx->cwin_blocked = 1; + path_x->last_cwin_blocked_time = current_time; + if (cnx->congestion_alg != NULL) { + cnx->congestion_alg->alg_notify(cnx, path_x, + picoquic_congestion_notification_cwin_blocked, + &ack_state, current_time); + } } } else { @@ -3440,6 +3453,7 @@ int picoquic_prepare_packet_almost_ready(picoquic_cnx_t* cnx, picoquic_path_t* p } if (ret == 0) { bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, + UINT64_MAX, &more_data, &is_pure_ack, &no_data_to_send, &ret); } /* TODO: replace this by posting of frame when CWIN estimated */ @@ -3774,7 +3788,7 @@ int picoquic_prepare_packet_ready(picoquic_cnx_t* cnx, picoquic_path_t* path_x, bytes_next = picoquic_format_ack_frequency_frame(cnx, bytes_next, bytes_max, &more_data); } if (ret == 0) { - bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, + bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, UINT64_MAX, &more_data, &is_pure_ack, &no_data_to_send, &ret); } diff --git a/picoquictest/mediatest.c b/picoquictest/mediatest.c index 417245da7..964ed2991 100644 
--- a/picoquictest/mediatest.c +++ b/picoquictest/mediatest.c @@ -175,6 +175,7 @@ typedef struct st_mediatest_spec_t { double bandwidth; uint64_t latency_average; uint64_t latency_max; + uint8_t priority_limit_for_bypass; int do_not_check_video2; } mediatest_spec_t; @@ -1149,6 +1150,10 @@ mediatest_ctx_t * mediatest_configure(int media_test_id, mediatest_spec_t * spe if (spec->datagram_data_size > 0 && ret == 0) { mt_ctx->datagram_data_requested = spec->datagram_data_size; } + if (spec->priority_limit_for_bypass > 0) { + picoquic_set_priority_limit_for_bypass(mt_ctx->client_cnx->cnx, spec->priority_limit_for_bypass); + } + for (int i = 0; i < media_test_nb_types; i++) { mt_ctx->media_stats[i].min_delay = UINT64_MAX; @@ -1436,6 +1441,7 @@ int mediatest_wifi_test() spec.data_size = 0; spec.latency_average = 60000; spec.latency_max = 350000; + spec.priority_limit_for_bypass = 5; spec.do_not_check_video2 = 1; ret = mediatest_one(mediatest_wifi, &spec); From fe150ddb84e13e3a69ab1411426cab3622194de8 Mon Sep 17 00:00:00 2001 From: Christian Huitema Date: Wed, 1 May 2024 12:01:28 -0700 Subject: [PATCH 2/7] Fix bypass in ready mode --- picoquic/sender.c | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/picoquic/sender.c b/picoquic/sender.c index bd4e54a21..2dbe8f4ef 100644 --- a/picoquic/sender.c +++ b/picoquic/sender.c @@ -3749,15 +3749,28 @@ int picoquic_prepare_packet_ready(picoquic_cnx_t* cnx, picoquic_path_t* path_x, if ((path_x->cwin < path_x->bytes_in_transit || cnx->quic->cwin_max < path_x->bytes_in_transit) &&!path_x->is_pto_required) { - cnx->cwin_blocked = 1; - path_x->last_cwin_blocked_time = current_time; - if (cnx->congestion_alg != NULL) { - picoquic_per_ack_state_t ack_state = { 0 }; - - cnx->congestion_alg->alg_notify(cnx, path_x, - picoquic_congestion_notification_cwin_blocked, - &ack_state, current_time); - } + /* Implementation of experimental API, picoquic_set_priority_limit_for_bypass */ + uint8_t* bytes_next_before_bypass = bytes_next; + int no_data_to_send = 0; + if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1) { + bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, + cnx->priority_limit_for_bypass, + &more_data, &is_pure_ack, &no_data_to_send, &ret); + } + if (bytes_next != bytes_next_before_bypass) { + length = bytes_next - bytes; + } + else { + cnx->cwin_blocked = 1; + path_x->last_cwin_blocked_time = current_time; + if (cnx->congestion_alg != NULL) { + picoquic_per_ack_state_t ack_state = { 0 }; + + cnx->congestion_alg->alg_notify(cnx, path_x, + picoquic_congestion_notification_cwin_blocked, + &ack_state, current_time); + } + } } else { /* Send here the frames that are subject to both congestion and pacing control. 
@@ -3865,6 +3878,16 @@ int picoquic_prepare_packet_ready(picoquic_cnx_t* cnx, picoquic_path_t* path_x, } } /* end of CC */ } /* End of pacing */ + else if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1) { + /* If congestion bypass is implemented, also consider pacing bypass */ + int no_data_to_send = 0; + + if ((bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, + cnx->priority_limit_for_bypass, + &more_data, &is_pure_ack, &no_data_to_send, &ret)) != NULL) { + length = bytes_next - bytes; + } + } } /* End of challenge verified */ } From 825d603593f75af7ad28d18ba0add6258bff9a20 Mon Sep 17 00:00:00 2001 From: Christian Huitema Date: Wed, 1 May 2024 22:09:17 -0700 Subject: [PATCH 3/7] Isolate pacing code in module --- CMakeLists.txt | 1 + picoquic/bbr.c | 2 +- picoquic/bbr1.c | 4 +- picoquic/cubic.c | 8 +- picoquic/logwriter.c | 2 +- picoquic/newreno.c | 2 +- picoquic/pacing.c | 282 ++++++++++++++++++++++++++++++ picoquic/performance_log.c | 4 +- picoquic/picoquic.vcxproj | 1 + picoquic/picoquic.vcxproj.filters | 3 + picoquic/picoquic_internal.h | 33 ++++ picoquic/prague.c | 2 +- picoquic/quicctx.c | 24 +-- picoquic/sender.c | 23 +-- picoquictest/app_limited.c | 4 +- 15 files changed, 359 insertions(+), 36 deletions(-) create mode 100644 picoquic/pacing.c diff --git a/CMakeLists.txt b/CMakeLists.txt index a716ffc2b..d2a7c349f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -84,6 +84,7 @@ set(PICOQUIC_LIBRARY_FILES picoquic/logwriter.c picoquic/loss_recovery.c picoquic/newreno.c + picoquic/pacing.c picoquic/packet.c picoquic/performance_log.c picoquic/picohash.c diff --git a/picoquic/bbr.c b/picoquic/bbr.c index ffac6e736..2ddcba19a 100644 --- a/picoquic/bbr.c +++ b/picoquic/bbr.c @@ -1922,7 +1922,7 @@ void BBRCheckStartupLongRtt(picoquic_bbr_state_t* bbr_state, picoquic_path_t* pa } if (picoquic_hystart_test(&bbr_state->rtt_filter, rs->rtt_sample, - path_x->pacing_packet_time_microsec, current_time, 0)) { + path_x->pacing.packet_time_microsec, current_time, 0)) { BBRExitStartupLongRtt(bbr_state, path_x, current_time); } else if (rs->ecn_alpha > BBRExcessiveEcnCE) { diff --git a/picoquic/bbr1.c b/picoquic/bbr1.c index 0ee78ab63..8ebde7c62 100644 --- a/picoquic/bbr1.c +++ b/picoquic/bbr1.c @@ -1161,7 +1161,7 @@ static void picoquic_bbr1_notify( if (bbr1_state->state == picoquic_bbr1_alg_startup_long_rtt) { if (picoquic_hystart_test(&bbr1_state->rtt_filter, (cnx->is_time_stamp_enabled) ? ack_state->one_way_delay : ack_state->rtt_measurement, - cnx->path[0]->pacing_packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { + cnx->path[0]->pacing.packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { BBR1ExitStartupLongRtt(bbr1_state, path_x, current_time); } } @@ -1188,7 +1188,7 @@ static void picoquic_bbr1_notify( path_x->cwin = min_win; } else if (path_x->smoothed_rtt > PICOQUIC_TARGET_RENO_RTT) { - path_x->pacing_bandwidth_pause = 1; + path_x->pacing.bandwidth_pause = 1; } picoquic_update_pacing_data(cnx, path_x, 1); diff --git a/picoquic/cubic.c b/picoquic/cubic.c index 47a548128..0de889f5d 100644 --- a/picoquic/cubic.c +++ b/picoquic/cubic.c @@ -266,7 +266,7 @@ static void picoquic_cubic_notify( /* Using RTT increases as signal to get out of initial slow start */ if (cubic_state->ssthresh == UINT64_MAX && picoquic_hystart_test(&cubic_state->rtt_filter, (cnx->is_time_stamp_enabled) ? 
ack_state->one_way_delay : ack_state->rtt_measurement, - cnx->path[0]->pacing_packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { + cnx->path[0]->pacing.packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { /* RTT increased too much, get out of slow start! */ if (cubic_state->rtt_filter.rtt_filtered_min > PICOQUIC_TARGET_RENO_RTT){ double correction; @@ -518,7 +518,7 @@ static void picoquic_dcubic_notify( * for getting out of slow start, but also for ending a cycle * during congestion avoidance */ if (picoquic_hystart_test(&cubic_state->rtt_filter, (cnx->is_time_stamp_enabled) ? ack_state->one_way_delay : ack_state->rtt_measurement, - cnx->path[0]->pacing_packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { + cnx->path[0]->pacing.packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { dcubic_exit_slow_start(cnx, path_x, notification, cubic_state, current_time); } break; @@ -578,7 +578,7 @@ static void picoquic_dcubic_notify( } if (picoquic_hystart_test(&cubic_state->rtt_filter, (cnx->is_time_stamp_enabled) ? ack_state->one_way_delay : ack_state->rtt_measurement, - cnx->path[0]->pacing_packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { + cnx->path[0]->pacing.packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { if (current_time - cubic_state->start_of_epoch > path_x->smoothed_rtt || cubic_state->recovery_sequence <= picoquic_cc_get_ack_number(cnx, path_x)) { /* re-enter recovery if this is a new event */ @@ -643,7 +643,7 @@ static void picoquic_dcubic_notify( break; case picoquic_congestion_notification_rtt_measurement: if (picoquic_hystart_test(&cubic_state->rtt_filter, (cnx->is_time_stamp_enabled) ? ack_state->one_way_delay : ack_state->rtt_measurement, - cnx->path[0]->pacing_packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { + cnx->path[0]->pacing.packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { if (current_time - cubic_state->start_of_epoch > path_x->smoothed_rtt || cubic_state->recovery_sequence <= picoquic_cc_get_ack_number(cnx, path_x)) { /* re-enter recovery */ diff --git a/picoquic/logwriter.c b/picoquic/logwriter.c index 830841161..ba288c779 100644 --- a/picoquic/logwriter.c +++ b/picoquic/logwriter.c @@ -1176,7 +1176,7 @@ void binlog_cc_dump(picoquic_cnx_t* cnx, uint64_t current_time) bytewrite_vint(ps_msg, path->bandwidth_estimate); bytewrite_vint(ps_msg, path->receive_rate_estimate); bytewrite_vint(ps_msg, path->send_mtu); - bytewrite_vint(ps_msg, path->pacing_packet_time_microsec); + bytewrite_vint(ps_msg, path->pacing.packet_time_microsec); if (cnx->is_simple_multipath_enabled || cnx->is_multipath_enabled) { bytewrite_vint(ps_msg, path->nb_losses_found); bytewrite_vint(ps_msg, path->nb_spurious); diff --git a/picoquic/newreno.c b/picoquic/newreno.c index 19af74bf2..37a9b9136 100644 --- a/picoquic/newreno.c +++ b/picoquic/newreno.c @@ -268,7 +268,7 @@ static void picoquic_newreno_notify( } if (picoquic_hystart_test(&nr_state->rtt_filter, (cnx->is_time_stamp_enabled) ? ack_state->one_way_delay : ack_state->rtt_measurement, - cnx->path[0]->pacing_packet_time_microsec, current_time, + cnx->path[0]->pacing.packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { /* RTT increased too much, get out of slow start! 
*/ nr_state->nrss.ssthresh = nr_state->nrss.cwin; diff --git a/picoquic/pacing.c b/picoquic/pacing.c new file mode 100644 index 000000000..ad25527f0 --- /dev/null +++ b/picoquic/pacing.c @@ -0,0 +1,282 @@ +/* +* Author: Christian Huitema +* Copyright (c) 2017, Private Octopus, Inc. +* All rights reserved. +* +* Permission to use, copy, modify, and distribute this software for any +* purpose with or without fee is hereby granted, provided that the above +* copyright notice and this permission notice appear in all copies. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* DISCLAIMED. IN NO EVENT SHALL Private Octopus, Inc. BE LIABLE FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include "picoquic_internal.h" +#include +#include + +/* Compute nanosec per packet */ +static uint64_t picoquic_pacing_time_nanosec(picoquic_pacing_t* pacing, size_t length) +{ + const uint64_t nanosec_per_sec = 1000000000ull; + uint64_t packet_time_nanosec = 1; + if (pacing->rate > 0) { + packet_time_nanosec = (nanosec_per_sec * length + (pacing->rate - 1)) / pacing->rate; + } + + return packet_time_nanosec; +} + +/* Update the leaky bucket used for pacing. +*/ +static void picoquic_update_pacing_bucket(picoquic_pacing_t* pacing, uint64_t current_time) +{ + if (pacing->bucket_nanosec < -pacing->packet_time_nanosec) { + pacing->bucket_nanosec = -pacing->packet_time_nanosec; + } + + if (current_time > pacing->evaluation_time) { + pacing->bucket_nanosec += (current_time - pacing->evaluation_time) * 1000; + pacing->evaluation_time = current_time; + if (pacing->bucket_nanosec > pacing->bucket_max) { + pacing->bucket_nanosec = pacing->bucket_max; + } + } +} + +/* +* Check pacing to see whether the next transmission is authorized. +* If if is not, update the next wait time to reflect pacing. +* +* In packet train mode, the wait will last until the bucket is completely full, or +* if at least N packets are received. 
+*/ +int picoquic_is_authorized_by_pacing(picoquic_pacing_t * pacing, uint64_t current_time, uint64_t * next_time, + unsigned int packet_train_mode, picoquic_quic_t * quic) +{ + int ret = 1; + + picoquic_update_pacing_bucket(pacing, current_time); + + if (pacing->bucket_nanosec < pacing->packet_time_nanosec) { + uint64_t next_pacing_time; + int64_t bucket_required; + + if (packet_train_mode || pacing->bandwidth_pause) { + bucket_required = pacing->bucket_max; + + if (bucket_required > 10 * pacing->packet_time_nanosec) { + bucket_required = 10 * pacing->packet_time_nanosec; + } + + bucket_required -= pacing->bucket_nanosec; + } + else { + bucket_required = pacing->packet_time_nanosec - pacing->bucket_nanosec; + } + + next_pacing_time = current_time + 1 + bucket_required / 1000; + if (next_pacing_time < *next_time) { + pacing->bandwidth_pause = 0; + *next_time = next_pacing_time; + SET_LAST_WAKE(quic, PICOQUIC_SENDER); + } + ret = 0; + } + + return ret; +} + +/* Report pacing updates if required + */ +static void picoquic_report_pacing_update(picoquic_pacing_t* pacing, picoquic_path_t* path_x) +{ + picoquic_cnx_t* cnx = path_x->cnx; + + if (cnx->is_pacing_update_requested && path_x == cnx->path[0] && + cnx->callback_fn != NULL) { + if ((pacing->rate > cnx->pacing_rate_signalled && + (pacing->rate - cnx->pacing_rate_signalled >= cnx->pacing_increase_threshold)) || + (pacing->rate < cnx->pacing_rate_signalled && + (cnx->pacing_rate_signalled - pacing->rate > cnx->pacing_decrease_threshold))){ + (void)cnx->callback_fn(cnx, pacing->rate, NULL, 0, picoquic_callback_pacing_changed, cnx->callback_ctx, NULL); + cnx->pacing_rate_signalled = pacing->rate; + } + } + if (cnx->is_path_quality_update_requested && + cnx->callback_fn != NULL) { + /* TODO: add a function "export path quality" */ + /* TODO: remember previous signalled value for change tests */ + if (path_x->smoothed_rtt < path_x->rtt_threshold_low || + path_x->smoothed_rtt > path_x->rtt_threshold_high || + pacing->rate < path_x->pacing_rate_threshold_low || + pacing->rate > path_x->pacing_rate_threshold_high) { + (void)cnx->callback_fn(cnx, path_x->unique_path_id, NULL, 0, picoquic_callback_path_quality_changed, cnx->callback_ctx, path_x->app_path_ctx); + picoquic_refresh_path_quality_thresholds(path_x); + } + } +} + +/* Reset the pacing data after recomputing the pacing rate +*/ +void picoquic_update_pacing_parameters(picoquic_pacing_t * pacing, double pacing_rate, uint64_t quantum, size_t send_mtu, uint64_t smoothed_rtt, + picoquic_path_t * signalled_path) +{ +#if 0 + const uint64_t nanosec_per_sec = 1000000000ull; + + pacing->rate = (uint64_t)pacing_rate; + + if (quantum > pacing->quantum_max) { + pacing->quantum_max = quantum; + } + if (pacing->rate > pacing->rate_max) { + pacing->rate_max = pacing->rate; + } + + pacing->packet_time_nanosec = picoquic_packet_time_nanosec(pacing, send_mtu); + + pacing->bucket_max = (nanosec_per_sec * quantum) / pacing->rate; + if (pacing->bucket_max <= 0) { + pacing->bucket_max = 16 * pacing->packet_time_nanosec; + } + +#else + double packet_time = (double)send_mtu / pacing_rate; + double quantum_time = (double)quantum / pacing_rate; + uint64_t rtt_nanosec = smoothed_rtt * 1000; + + pacing->rate = (uint64_t)pacing_rate; + + if (quantum > pacing->quantum_max) { + pacing->quantum_max = quantum; + } + if (pacing->rate > pacing->rate_max) { + pacing->rate_max = pacing->rate; + } + + pacing->packet_time_nanosec = (uint64_t)(packet_time * 1000000000.0); + + if (pacing->packet_time_nanosec <= 0) { + 
pacing->packet_time_nanosec = 1; + pacing->packet_time_microsec = 1; + } + else { + if ((uint64_t)pacing->packet_time_nanosec > rtt_nanosec) { + pacing->packet_time_nanosec = rtt_nanosec; + } + pacing->packet_time_microsec = (pacing->packet_time_nanosec + 999ull) / 1000; + } + + pacing->bucket_max = (uint64_t)(quantum_time * 1000000000.0); + if (pacing->bucket_max <= 0) { + pacing->bucket_max = 16 * pacing->packet_time_nanosec; + } +#endif + + if (pacing->bucket_nanosec > pacing->bucket_max) { + pacing->bucket_nanosec = pacing->bucket_max; + } + + if (signalled_path != NULL) { + picoquic_report_pacing_update(pacing, signalled_path); + } +} + +/* +* Reset the pacing data after CWIN is updated. +* The max bucket is set to contain at least 2 packets more than 1/8th of the congestion window. +*/ + +void picoquic_update_pacing_window(picoquic_pacing_t * pacing, int slow_start, uint64_t cwin, size_t send_mtu, uint64_t smoothed_rtt, + picoquic_path_t * signalled_path) +{ + uint64_t rtt_nanosec = smoothed_rtt * 1000; + + if ((cwin < ((uint64_t)send_mtu) * 8) || rtt_nanosec <= 1000) { + /* Small windows, should only relie on ACK clocking */ + pacing->bucket_max = rtt_nanosec; + pacing->packet_time_nanosec = 1; + pacing->packet_time_microsec = 1; + + if (pacing->bucket_nanosec > pacing->bucket_max) { + pacing->bucket_nanosec = pacing->bucket_max; + } + } + else { + double pacing_rate = ((double)cwin / (double)rtt_nanosec) * 1000000000.0; + uint64_t quantum = cwin / 4; + + if (quantum < 2ull * send_mtu) { + quantum = 2ull * send_mtu; + } + else { + if (slow_start && smoothed_rtt > 4*PICOQUIC_MAX_BANDWIDTH_TIME_INTERVAL_MAX) { + const uint64_t quantum_min = 0x8000; + if (quantum < quantum_min){ + quantum = quantum_min; + } + else { + uint64_t quantum2 = (uint64_t)((pacing_rate * PICOQUIC_MAX_BANDWIDTH_TIME_INTERVAL_MAX) / 1000000.0); + if (quantum2 > quantum) { + quantum = quantum2; + } + } + } + else if (quantum > 16ull * send_mtu) { + quantum = 16ull * send_mtu; + } + + } + + if (slow_start) { + pacing_rate *= 1.25; + } + picoquic_update_pacing_parameters(pacing, pacing_rate, quantum, send_mtu, smoothed_rtt, signalled_path); + } +} + +/* +* Update the pacing data after sending a packet. 
+*/ +void picoquic_update_pacing_data_after_send(picoquic_pacing_t * pacing, size_t length, size_t send_mtu, uint64_t current_time) +{ + uint64_t packet_time_nanosec; + + picoquic_update_pacing_bucket(pacing, current_time); + packet_time_nanosec = ((pacing->packet_time_nanosec * (uint64_t)length) + (send_mtu - 1)) / send_mtu; + pacing->bucket_nanosec -= packet_time_nanosec; +} + +/* Interface functions for compatibility with old implementation */ +void picoquic_update_pacing_after_send(picoquic_path_t* path_x, size_t length, uint64_t current_time) +{ + picoquic_update_pacing_data_after_send(&path_x->pacing, length, path_x->send_mtu, current_time); +} + +int picoquic_is_sending_authorized_by_pacing(picoquic_cnx_t* cnx, picoquic_path_t* path_x, uint64_t current_time, uint64_t* next_time) +{ + return picoquic_is_authorized_by_pacing(&path_x->pacing, current_time, next_time, cnx->quic->packet_train_mode, + cnx->quic); +} + +/* Reset pacing data if congestion algorithm computes it directly */ +void picoquic_update_pacing_rate(picoquic_cnx_t* cnx, picoquic_path_t* path_x, double pacing_rate, uint64_t quantum) +{ + picoquic_update_pacing_parameters(&path_x->pacing, pacing_rate, + quantum, path_x->send_mtu, path_x->smoothed_rtt, path_x); +} +/* Reset pacing if expressed as CWIN and RTT */ +void picoquic_update_pacing_data(picoquic_cnx_t* cnx, picoquic_path_t* path_x, int slow_start) +{ + picoquic_update_pacing_window(&path_x->pacing, slow_start, path_x->cwin, path_x->send_mtu, path_x->smoothed_rtt, + path_x); +} diff --git a/picoquic/performance_log.c b/picoquic/performance_log.c index ca1f90c90..de0479011 100644 --- a/picoquic/performance_log.c +++ b/picoquic/performance_log.c @@ -183,8 +183,8 @@ int picoquic_perflog_record(picoquic_cnx_t* cnx, picoquic_performance_log_ctx_t* perflog_item->v[picoquic_perflog_minrtt] = cnx->path[0]->rtt_min; perflog_item->v[picoquic_perflog_cwin] = cnx->path[0]->cwin; perflog_item->v[picoquic_perflog_bwe_max] = cnx->path[0]->bandwidth_estimate_max; - perflog_item->v[picoquic_perflog_pacing_quantum_max] = cnx->path[0]->pacing_quantum_max; - perflog_item->v[picoquic_perflog_pacing_rate] = cnx->path[0]->pacing_rate_max; + perflog_item->v[picoquic_perflog_pacing_quantum_max] = cnx->path[0]->pacing.quantum_max; + perflog_item->v[picoquic_perflog_pacing_rate] = cnx->path[0]->pacing.rate_max; } if (cnx->congestion_alg != NULL) { perflog_item->v[picoquic_perflog_ccalgo] = cnx->congestion_alg->congestion_algorithm_number; diff --git a/picoquic/picoquic.vcxproj b/picoquic/picoquic.vcxproj index 5ac7a40d0..0bc040770 100644 --- a/picoquic/picoquic.vcxproj +++ b/picoquic/picoquic.vcxproj @@ -157,6 +157,7 @@ + diff --git a/picoquic/picoquic.vcxproj.filters b/picoquic/picoquic.vcxproj.filters index 71469e504..cf68438cd 100644 --- a/picoquic/picoquic.vcxproj.filters +++ b/picoquic/picoquic.vcxproj.filters @@ -135,6 +135,9 @@ Source Files + + Source Files + diff --git a/picoquic/picoquic_internal.h b/picoquic/picoquic_internal.h index a2f5ee470..611871842 100644 --- a/picoquic/picoquic_internal.h +++ b/picoquic/picoquic_internal.h @@ -987,6 +987,28 @@ typedef struct st_picoquic_remote_cnxid_stash_t { picoquic_remote_cnxid_t* cnxid_stash_first; } picoquic_remote_cnxid_stash_t; +/* +* Pacing uses a set of per path variables: +* - rate: bytes per second. +* - evaluation_time: last time the path was evaluated. +* - bucket_nanosec: number of nanoseconds of transmission time that are allowed. +* - bucket_max: maximum value (capacity) of the leaky bucket. 
+* - packet_time_nanosec: number of nanoseconds required to send a full size packet. +* - packet_time_microsec: max of (packet_time_nano_sec/1024, 1) microsec. +*/ +typedef struct st_picoquic_pacing_t { + uint64_t rate; + uint64_t evaluation_time; + int64_t bucket_nanosec; + int64_t bucket_max; + int64_t packet_time_nanosec; + uint64_t packet_time_microsec; + uint64_t quantum_max; + uint64_t rate_max; + int bandwidth_pause; +} picoquic_pacing_t; + + /* * Per path context. * Path contexts are created: @@ -1156,6 +1178,9 @@ typedef struct st_picoquic_path_t { uint64_t last_time_acked_data_frame_sent; void* congestion_alg_state; +#if 1 + picoquic_pacing_t pacing; +#else /* * Pacing uses a set of per path variables: * - pacing_rate: bytes per second. @@ -1175,6 +1200,7 @@ typedef struct st_picoquic_path_t { uint64_t pacing_quantum_max; uint64_t pacing_rate_max; int pacing_bandwidth_pause; +#endif /* MTU safety tracking */ uint64_t nb_mtu_losses; @@ -1612,6 +1638,13 @@ picoquic_cnx_t* picoquic_cnx_by_icid(picoquic_quic_t* quic, picoquic_connection_ const struct sockaddr* addr); picoquic_cnx_t* picoquic_cnx_by_secret(picoquic_quic_t* quic, const uint8_t* reset_secret, const struct sockaddr* addr); +/* Pacing implementation */ +int picoquic_is_authorized_by_pacing(picoquic_pacing_t* pacing, uint64_t current_time, uint64_t* next_time, unsigned int packet_train_mode, picoquic_quic_t * quic); +void picoquic_update_pacing_parameters(picoquic_pacing_t* pacing, double pacing_rate, uint64_t quantum, size_t send_mtu, uint64_t smoothed_rtt, + picoquic_path_t* signalled_path); +void picoquic_update_pacing_window(picoquic_pacing_t* pacing, int slow_start, uint64_t cwin, size_t send_mtu, uint64_t smoothed_rtt, picoquic_path_t * signalled_path); +void picoquic_update_pacing_data_after_send(picoquic_pacing_t * pacing, size_t length, size_t send_mtu, uint64_t current_time); + /* Reset the pacing data after CWIN is updated */ void picoquic_update_pacing_data(picoquic_cnx_t* cnx, picoquic_path_t * path_x, int slow_start); void picoquic_update_pacing_after_send(picoquic_path_t* path_x, size_t length, uint64_t current_time); diff --git a/picoquic/prague.c b/picoquic/prague.c index 5cb51764c..c9970dacd 100644 --- a/picoquic/prague.c +++ b/picoquic/prague.c @@ -384,7 +384,7 @@ void picoquic_prague_notify( } if (picoquic_hystart_test(&pr_state->rtt_filter, (cnx->is_time_stamp_enabled) ? ack_state->one_way_delay : ack_state->rtt_measurement, - cnx->path[0]->pacing_packet_time_microsec, current_time, + cnx->path[0]->pacing.packet_time_microsec, current_time, cnx->is_time_stamp_enabled)) { /* RTT increased too much, get out of slow start! */ pr_state->ssthresh = path_x->cwin; diff --git a/picoquic/quicctx.c b/picoquic/quicctx.c index a2330f0dc..110b4277c 100644 --- a/picoquic/quicctx.c +++ b/picoquic/quicctx.c @@ -1535,11 +1535,11 @@ int picoquic_create_path(picoquic_cnx_t* cnx, uint64_t start_time, const struct path_x->congestion_alg_state = NULL; /* Initialize per path pacing state */ - path_x->pacing_evaluation_time = start_time; - path_x->pacing_bucket_nanosec = 16; - path_x->pacing_bucket_max = 16; - path_x->pacing_packet_time_nanosec = 1; - path_x->pacing_packet_time_microsec = 1; + path_x->pacing.evaluation_time = start_time; + path_x->pacing.bucket_nanosec = 16; + path_x->pacing.bucket_max = 16; + path_x->pacing.packet_time_nanosec = 1; + path_x->pacing.packet_time_microsec = 1; /* Initialize the MTU */ path_x->send_mtu = (peer_addr == NULL || peer_addr->sa_family == AF_INET) ? 
PICOQUIC_INITIAL_MTU_IPV4 : PICOQUIC_INITIAL_MTU_IPV6; @@ -2209,13 +2209,13 @@ void picoquic_refresh_path_quality_thresholds(picoquic_path_t* path_x) } if (path_x->pacing_rate_update_delta > 0) { - if (path_x->pacing_rate > path_x->pacing_rate_update_delta) { - path_x->pacing_rate_threshold_low = path_x->pacing_rate - path_x->pacing_rate_update_delta; + if (path_x->pacing.rate > path_x->pacing_rate_update_delta) { + path_x->pacing_rate_threshold_low = path_x->pacing.rate - path_x->pacing_rate_update_delta; } else { path_x->pacing_rate_threshold_low = 0; } - path_x->pacing_rate_threshold_high = path_x->pacing_rate + path_x->pacing_rate_update_delta; + path_x->pacing_rate_threshold_high = path_x->pacing.rate + path_x->pacing_rate_update_delta; if (path_x->receive_rate_estimate > path_x->pacing_rate_update_delta) { path_x->receive_rate_threshold_low = path_x->receive_rate_estimate - path_x->pacing_rate_update_delta; } @@ -2234,8 +2234,8 @@ int picoquic_issue_path_quality_update(picoquic_cnx_t* cnx, picoquic_path_t* pat path_x->smoothed_rtt < path_x->rtt_threshold_low || path_x->smoothed_rtt > path_x->rtt_threshold_high)) || (path_x->pacing_rate_update_delta > 0 && ( - path_x->pacing_rate < path_x->pacing_rate_threshold_low || - path_x->pacing_rate > path_x->pacing_rate_threshold_high || + path_x->pacing.rate < path_x->pacing_rate_threshold_low || + path_x->pacing.rate > path_x->pacing_rate_threshold_high || path_x->receive_rate_estimate < path_x->receive_rate_threshold_low || path_x->receive_rate_estimate > path_x->receive_rate_threshold_high))) { picoquic_refresh_path_quality_thresholds(path_x); @@ -2253,7 +2253,7 @@ static void picoquic_get_path_quality_from_context(picoquic_path_t* path_x, pico quality->rtt_min = path_x->rtt_min; quality->rtt_max = path_x->rtt_max; quality->rtt_variant = path_x->rtt_variant; - quality->pacing_rate = path_x->pacing_rate; + quality->pacing_rate = path_x->pacing.rate; quality->receive_rate_estimate = path_x->receive_rate_estimate; quality->sent = path_x->path_packet_number; quality->lost = path_x->nb_losses_found; @@ -4801,7 +4801,7 @@ void picoquic_subscribe_pacing_rate_updates(picoquic_cnx_t* cnx, uint64_t decrea uint64_t picoquic_get_pacing_rate(picoquic_cnx_t* cnx) { - return cnx->path[0]->pacing_rate; + return cnx->path[0]->pacing.rate; } uint64_t picoquic_get_cwin(picoquic_cnx_t* cnx) diff --git a/picoquic/sender.c b/picoquic/sender.c index 2dbe8f4ef..904d6cbec 100644 --- a/picoquic/sender.c +++ b/picoquic/sender.c @@ -913,7 +913,7 @@ static size_t picoquic_protect_packet(picoquic_cnx_t* cnx, return send_length; } - +#if 0 /* Compute nanosec per packet */ uint64_t picoquic_packet_time_nanosec(picoquic_path_t* path_x, size_t length) { @@ -927,15 +927,15 @@ uint64_t picoquic_packet_time_nanosec(picoquic_path_t* path_x, size_t length) */ static void picoquic_update_pacing_bucket(picoquic_path_t * path_x, uint64_t current_time) { - if (path_x->pacing_bucket_nanosec < -path_x->pacing_packet_time_nanosec) { - path_x->pacing_bucket_nanosec = -path_x->pacing_packet_time_nanosec; + if (path_x->pacing.bucket_nanosec < -path_x->pacing_packet_time_nanosec) { + path_x->pacing.bucket_nanosec = -path_x->pacing_packet_time_nanosec; } if (current_time > path_x->pacing_evaluation_time) { - path_x->pacing_bucket_nanosec += (current_time - path_x->pacing_evaluation_time) * 1000; + path_x->pacing.bucket_nanosec += (current_time - path_x->pacing_evaluation_time) * 1000; path_x->pacing_evaluation_time = current_time; - if (path_x->pacing_bucket_nanosec > 
path_x->pacing_bucket_max) { - path_x->pacing_bucket_nanosec = path_x->pacing_bucket_max; + if (path_x->pacing.bucket_nanosec > path_x->pacing_bucket_max) { + path_x->pacing.bucket_nanosec = path_x->pacing_bucket_max; } } } @@ -953,7 +953,7 @@ int picoquic_is_sending_authorized_by_pacing(picoquic_cnx_t * cnx, picoquic_path picoquic_update_pacing_bucket(path_x, current_time); - if (path_x->pacing_bucket_nanosec < path_x->pacing_packet_time_nanosec) { + if (path_x->pacing.bucket_nanosec < path_x->pacing_packet_time_nanosec) { uint64_t next_pacing_time; int64_t bucket_required; @@ -964,7 +964,7 @@ int picoquic_is_sending_authorized_by_pacing(picoquic_cnx_t * cnx, picoquic_path bucket_required = 10 * path_x->pacing_packet_time_nanosec; } - bucket_required -= path_x->pacing_bucket_nanosec; + bucket_required -= path_x->pacing.bucket_nanosec; } else { bucket_required = path_x->pacing_packet_time_nanosec - path_x->pacing_bucket_nanosec; @@ -982,6 +982,9 @@ int picoquic_is_sending_authorized_by_pacing(picoquic_cnx_t * cnx, picoquic_path return ret; } +/* Handle reporting of parameter updates if path is specified. + */ + /* Reset the pacing data after recomputing the pacing rate */ void picoquic_update_pacing_rate(picoquic_cnx_t * cnx, picoquic_path_t* path_x, double pacing_rate, uint64_t quantum) @@ -1131,7 +1134,7 @@ void picoquic_update_pacing_after_send(picoquic_path_t * path_x, size_t length, packet_time_nanosec = ((path_x->pacing_packet_time_nanosec * (uint64_t)length) + (path_x->send_mtu - 1)) / path_x->send_mtu; path_x->pacing_bucket_nanosec -= packet_time_nanosec; } - +#endif /* * Final steps in packet transmission: queue for retransmission, etc */ @@ -4675,7 +4678,7 @@ int picoquic_prepare_packet_ex(picoquic_cnx_t* cnx, if (cnx->path[path_id]->cwin <= cnx->path[path_id]->bytes_in_transit) { cnx->nb_trains_blocked_cwin++; } - else if (cnx->path[path_id]->pacing_bucket_nanosec < cnx->path[path_id]->pacing_packet_time_nanosec){ + else if (cnx->path[path_id]->pacing.bucket_nanosec < cnx->path[path_id]->pacing.packet_time_nanosec){ cnx->nb_trains_blocked_pacing++; } else { diff --git a/picoquictest/app_limited.c b/picoquictest/app_limited.c index d71b33f0f..4e9a46afd 100644 --- a/picoquictest/app_limited.c +++ b/picoquictest/app_limited.c @@ -423,8 +423,8 @@ void app_limited_monitor(app_limited_ctx_t* al_ctx) if (path_x->cwin > al_ctx->cwin_max) { al_ctx->cwin_max = path_x->cwin; } - if (path_x->pacing_rate > al_ctx->data_rate_max) { - al_ctx->data_rate_max = path_x->pacing_rate; + if (path_x->pacing.rate > al_ctx->data_rate_max) { + al_ctx->data_rate_max = path_x->pacing.rate; } } } From 23e22d558737735e60edfffa68bf75b05e952c21 Mon Sep 17 00:00:00 2001 From: Christian Huitema Date: Thu, 2 May 2024 17:55:52 -0700 Subject: [PATCH 4/7] Add unit tests for pacing. 
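The standalone pacing module isolated in the previous patch can now be exercised without a connection context. The sketch below is an illustration only, not code from this patch: it shows the calling pattern that the new pacing_repeat_test verifies, using only the functions this series declares in picoquic_internal.h and, like the test, passing NULL for the quic context and the signalled path.

/* Illustration only, not part of the patch: pace a stream of full-size
 * packets with the standalone pacing module. */
#include "picoquic_internal.h"

void pacing_module_sketch(void)
{
    picoquic_pacing_t pacing = { 0 };
    uint64_t current_time = 0;

    /* Start from the high speed defaults, then derive rate and quantum
     * from CWIN = 153600 bytes, MTU = 1536 bytes, smoothed RTT = 10 ms. */
    picoquic_pacing_init(&pacing, current_time);
    picoquic_update_pacing_window(&pacing, 0, 153600, 1536, 10000, NULL);

    for (int i = 0; i < 100; i++) {
        uint64_t next_time = UINT64_MAX;

        if (picoquic_is_authorized_by_pacing(&pacing, current_time, &next_time, 0, NULL)) {
            /* Credit available: debit the leaky bucket for one full-size packet. */
            picoquic_update_pacing_data_after_send(&pacing, 1536, 1536, current_time);
        }
        else {
            /* Blocked: jump to the wake-up time suggested by the pacer. */
            current_time = next_time;
        }
    }
}

With these parameters the module settles on one 1536-byte packet every 100 microseconds (15,360,000 bytes per second), which matches the corresponding row of the pacing_events table added below.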
--- CMakeLists.txt | 1 + UnitTest1/unittest1.cpp | 7 + picoquic/pacing.c | 23 +- picoquic/picoquic_internal.h | 36 +-- picoquic/quicctx.c | 6 +- picoquic/sender.c | 224 +------------- picoquic_t/picoquic_t.c | 1 + picoquictest/pacing_test.c | 342 ++++++++++++++++++++++ picoquictest/picoquictest.h | 1 + picoquictest/picoquictest.vcxproj | 1 + picoquictest/picoquictest.vcxproj.filters | 3 + picoquictest/tls_api_test.c | 208 ------------- 12 files changed, 388 insertions(+), 465 deletions(-) create mode 100644 picoquictest/pacing_test.c diff --git a/CMakeLists.txt b/CMakeLists.txt index d2a7c349f..7adbb21bb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -158,6 +158,7 @@ set(PICOQUIC_TEST_LIBRARY_FILES picoquictest/multipath_test.c picoquictest/netperf_test.c picoquictest/openssl_test.c + picoquictest/pacing_test.c picoquictest/parseheadertest.c picoquictest/picoquic_lb_test.c picoquictest/pn2pn64test.c diff --git a/UnitTest1/unittest1.cpp b/UnitTest1/unittest1.cpp index f787f1fce..8dafd191f 100644 --- a/UnitTest1/unittest1.cpp +++ b/UnitTest1/unittest1.cpp @@ -435,6 +435,13 @@ namespace UnitTest1 { int ret = pacing_test(); + Assert::AreEqual(ret, 0); + } + + TEST_METHOD(pacing_repeat) + { + int ret = pacing_repeat_test(); + Assert::AreEqual(ret, 0); } diff --git a/picoquic/pacing.c b/picoquic/pacing.c index ad25527f0..057f193fc 100644 --- a/picoquic/pacing.c +++ b/picoquic/pacing.c @@ -23,6 +23,17 @@ #include #include + +/* Initialize pacing state to high speed default */ +void picoquic_pacing_init(picoquic_pacing_t* pacing, uint64_t current_time) +{ + pacing->evaluation_time = current_time; + pacing->bucket_nanosec = 16; + pacing->bucket_max = 16; + pacing->packet_time_nanosec = 1; + pacing->packet_time_microsec = 1; +} + /* Compute nanosec per packet */ static uint64_t picoquic_pacing_time_nanosec(picoquic_pacing_t* pacing, size_t length) { @@ -52,6 +63,14 @@ static void picoquic_update_pacing_bucket(picoquic_pacing_t* pacing, uint64_t cu } } +/* Check whether pacing authorizes immediate transmission, +* no not send any state + */ +int picoquic_is_pacing_blocked(picoquic_pacing_t* pacing) +{ + return (pacing->bucket_nanosec < pacing->packet_time_nanosec); +} + /* * Check pacing to see whether the next transmission is authorized. * If if is not, update the next wait time to reflect pacing. @@ -87,7 +106,9 @@ int picoquic_is_authorized_by_pacing(picoquic_pacing_t * pacing, uint64_t curren if (next_pacing_time < *next_time) { pacing->bandwidth_pause = 0; *next_time = next_pacing_time; - SET_LAST_WAKE(quic, PICOQUIC_SENDER); + if (quic != NULL) { + SET_LAST_WAKE(quic, PICOQUIC_SENDER); + } } ret = 0; } diff --git a/picoquic/picoquic_internal.h b/picoquic/picoquic_internal.h index 611871842..adbdb0fe5 100644 --- a/picoquic/picoquic_internal.h +++ b/picoquic/picoquic_internal.h @@ -991,24 +991,25 @@ typedef struct st_picoquic_remote_cnxid_stash_t { * Pacing uses a set of per path variables: * - rate: bytes per second. * - evaluation_time: last time the path was evaluated. -* - bucket_nanosec: number of nanoseconds of transmission time that are allowed. * - bucket_max: maximum value (capacity) of the leaky bucket. -* - packet_time_nanosec: number of nanoseconds required to send a full size packet. * - packet_time_microsec: max of (packet_time_nano_sec/1024, 1) microsec. +* Internal variables: +* - bucket_nanosec: number of nanoseconds of transmission time that are allowed. +* - packet_time_nanosec: number of nanoseconds required to send a full size packet. 
*/ typedef struct st_picoquic_pacing_t { uint64_t rate; uint64_t evaluation_time; - int64_t bucket_nanosec; int64_t bucket_max; - int64_t packet_time_nanosec; uint64_t packet_time_microsec; uint64_t quantum_max; uint64_t rate_max; int bandwidth_pause; + /* High precision variables should only be used inside pacing.c */ + int64_t bucket_nanosec; + int64_t packet_time_nanosec; } picoquic_pacing_t; - /* * Per path context. * Path contexts are created: @@ -1177,30 +1178,7 @@ typedef struct st_picoquic_path_t { uint64_t last_cwin_blocked_time; uint64_t last_time_acked_data_frame_sent; void* congestion_alg_state; - -#if 1 picoquic_pacing_t pacing; -#else - /* - * Pacing uses a set of per path variables: - * - pacing_rate: bytes per second. - * - pacing_evaluation_time: last time the path was evaluated. - * - pacing_bucket_nanosec: number of nanoseconds of transmission time that are allowed. - * - pacing_bucket_max: maximum value (capacity) of the leaky bucket. - * - pacing_packet_time_nanosec: number of nanoseconds required to send a full size packet. - * - pacing_packet_time_microsec: max of (packet_time_nano_sec/1024, 1) microsec. - */ - - uint64_t pacing_rate; - uint64_t pacing_evaluation_time; - int64_t pacing_bucket_nanosec; - int64_t pacing_bucket_max; - int64_t pacing_packet_time_nanosec; - uint64_t pacing_packet_time_microsec; - uint64_t pacing_quantum_max; - uint64_t pacing_rate_max; - int pacing_bandwidth_pause; -#endif /* MTU safety tracking */ uint64_t nb_mtu_losses; @@ -1639,6 +1617,8 @@ picoquic_cnx_t* picoquic_cnx_by_icid(picoquic_quic_t* quic, picoquic_connection_ picoquic_cnx_t* picoquic_cnx_by_secret(picoquic_quic_t* quic, const uint8_t* reset_secret, const struct sockaddr* addr); /* Pacing implementation */ +void picoquic_pacing_init(picoquic_pacing_t* pacing, uint64_t current_time); +int picoquic_is_pacing_blocked(picoquic_pacing_t* pacing); int picoquic_is_authorized_by_pacing(picoquic_pacing_t* pacing, uint64_t current_time, uint64_t* next_time, unsigned int packet_train_mode, picoquic_quic_t * quic); void picoquic_update_pacing_parameters(picoquic_pacing_t* pacing, double pacing_rate, uint64_t quantum, size_t send_mtu, uint64_t smoothed_rtt, picoquic_path_t* signalled_path); diff --git a/picoquic/quicctx.c b/picoquic/quicctx.c index 110b4277c..1625c1ec6 100644 --- a/picoquic/quicctx.c +++ b/picoquic/quicctx.c @@ -1535,11 +1535,7 @@ int picoquic_create_path(picoquic_cnx_t* cnx, uint64_t start_time, const struct path_x->congestion_alg_state = NULL; /* Initialize per path pacing state */ - path_x->pacing.evaluation_time = start_time; - path_x->pacing.bucket_nanosec = 16; - path_x->pacing.bucket_max = 16; - path_x->pacing.packet_time_nanosec = 1; - path_x->pacing.packet_time_microsec = 1; + picoquic_pacing_init(&path_x->pacing, start_time); /* Initialize the MTU */ path_x->send_mtu = (peer_addr == NULL || peer_addr->sa_family == AF_INET) ? 
PICOQUIC_INITIAL_MTU_IPV4 : PICOQUIC_INITIAL_MTU_IPV6; diff --git a/picoquic/sender.c b/picoquic/sender.c index 904d6cbec..08126062c 100644 --- a/picoquic/sender.c +++ b/picoquic/sender.c @@ -913,228 +913,6 @@ static size_t picoquic_protect_packet(picoquic_cnx_t* cnx, return send_length; } -#if 0 -/* Compute nanosec per packet */ -uint64_t picoquic_packet_time_nanosec(picoquic_path_t* path_x, size_t length) -{ - const uint64_t nanosec_per_sec = 1000000000ull; - uint64_t packet_time_nanosec = (nanosec_per_sec * length + (path_x->pacing_rate - 1)) / path_x->pacing_rate; - - return packet_time_nanosec; -} - -/* Update the leaky bucket used for pacing. - */ -static void picoquic_update_pacing_bucket(picoquic_path_t * path_x, uint64_t current_time) -{ - if (path_x->pacing.bucket_nanosec < -path_x->pacing_packet_time_nanosec) { - path_x->pacing.bucket_nanosec = -path_x->pacing_packet_time_nanosec; - } - - if (current_time > path_x->pacing_evaluation_time) { - path_x->pacing.bucket_nanosec += (current_time - path_x->pacing_evaluation_time) * 1000; - path_x->pacing_evaluation_time = current_time; - if (path_x->pacing.bucket_nanosec > path_x->pacing_bucket_max) { - path_x->pacing.bucket_nanosec = path_x->pacing_bucket_max; - } - } -} - -/* - * Check pacing to see whether the next transmission is authorized. - * If if is not, update the next wait time to reflect pacing. - * - * In packet train mode, the wait will last until the bucket is completely full, or - * if at least N packets are received. - */ -int picoquic_is_sending_authorized_by_pacing(picoquic_cnx_t * cnx, picoquic_path_t * path_x, uint64_t current_time, uint64_t * next_time) -{ - int ret = 1; - - picoquic_update_pacing_bucket(path_x, current_time); - - if (path_x->pacing.bucket_nanosec < path_x->pacing_packet_time_nanosec) { - uint64_t next_pacing_time; - int64_t bucket_required; - - if (cnx->quic->packet_train_mode || path_x->pacing_bandwidth_pause) { - bucket_required = path_x->pacing_bucket_max; - - if (bucket_required > 10 * path_x->pacing_packet_time_nanosec) { - bucket_required = 10 * path_x->pacing_packet_time_nanosec; - } - - bucket_required -= path_x->pacing.bucket_nanosec; - } - else { - bucket_required = path_x->pacing_packet_time_nanosec - path_x->pacing_bucket_nanosec; - } - - next_pacing_time = current_time + 1 + bucket_required / 1000; - if (next_pacing_time < *next_time) { - path_x->pacing_bandwidth_pause = 0; - *next_time = next_pacing_time; - SET_LAST_WAKE(cnx->quic, PICOQUIC_SENDER); - } - ret = 0; - } - - return ret; -} - -/* Handle reporting of parameter updates if path is specified. 
- */ - -/* Reset the pacing data after recomputing the pacing rate - */ -void picoquic_update_pacing_rate(picoquic_cnx_t * cnx, picoquic_path_t* path_x, double pacing_rate, uint64_t quantum) -{ -#if 0 - const uint64_t nanosec_per_sec = 1000000000ull; - - path_x->pacing_rate = (uint64_t)pacing_rate; - - if (quantum > path_x->pacing_quantum_max) { - path_x->pacing_quantum_max = quantum; - } - if (path_x->pacing_rate > path_x->pacing_rate_max) { - path_x->pacing_rate_max = path_x->pacing_rate; - } - - path_x->pacing_packet_time_nanosec = picoquic_packet_time_nanosec(path_x, path_x->send_mtu); - - path_x->pacing_bucket_max = (nanosec_per_sec * quantum) / path_x->pacing_rate; - if (path_x->pacing_bucket_max <= 0) { - path_x->pacing_bucket_max = 16 * path_x->pacing_packet_time_nanosec; - } - -#else - double packet_time = (double)path_x->send_mtu / pacing_rate; - double quantum_time = (double)quantum / pacing_rate; - uint64_t rtt_nanosec = path_x->smoothed_rtt * 1000; - - path_x->pacing_rate = (uint64_t)pacing_rate; - - if (quantum > path_x->pacing_quantum_max) { - path_x->pacing_quantum_max = quantum; - } - if (path_x->pacing_rate > path_x->pacing_rate_max) { - path_x->pacing_rate_max = path_x->pacing_rate; - } - - path_x->pacing_packet_time_nanosec = (uint64_t)(packet_time * 1000000000.0); - - if (path_x->pacing_packet_time_nanosec <= 0) { - path_x->pacing_packet_time_nanosec = 1; - path_x->pacing_packet_time_microsec = 1; - } - else { - if ((uint64_t)path_x->pacing_packet_time_nanosec > rtt_nanosec) { - path_x->pacing_packet_time_nanosec = rtt_nanosec; - } - path_x->pacing_packet_time_microsec = (path_x->pacing_packet_time_nanosec + 999ull) / 1000; - } - - path_x->pacing_bucket_max = (uint64_t)(quantum_time * 1000000000.0); - if (path_x->pacing_bucket_max <= 0) { - path_x->pacing_bucket_max = 16 * path_x->pacing_packet_time_nanosec; - } -#endif - - if (path_x->pacing_bucket_nanosec > path_x->pacing_bucket_max) { - path_x->pacing_bucket_nanosec = path_x->pacing_bucket_max; - } - - if (cnx->is_pacing_update_requested && path_x == cnx->path[0] && - cnx->callback_fn != NULL) { - if ((path_x->pacing_rate > cnx->pacing_rate_signalled && - (path_x->pacing_rate - cnx->pacing_rate_signalled >= cnx->pacing_increase_threshold)) || - (path_x->pacing_rate < cnx->pacing_rate_signalled && - (cnx->pacing_rate_signalled - path_x->pacing_rate > cnx->pacing_decrease_threshold))){ - (void)cnx->callback_fn(cnx, path_x->pacing_rate, NULL, 0, picoquic_callback_pacing_changed, cnx->callback_ctx, NULL); - cnx->pacing_rate_signalled = path_x->pacing_rate; - } - } - if (cnx->is_path_quality_update_requested && - cnx->callback_fn != NULL) { - /* TODO: add a function "export path quality" */ - /* TODO: remember previous signalled value for change tests */ - if (path_x->smoothed_rtt < path_x->rtt_threshold_low || - path_x->smoothed_rtt > path_x->rtt_threshold_high || - path_x->pacing_rate < path_x->pacing_rate_threshold_low || - path_x->pacing_rate > path_x->pacing_rate_threshold_high) { - (void)cnx->callback_fn(cnx, path_x->unique_path_id, NULL, 0, picoquic_callback_path_quality_changed, cnx->callback_ctx, path_x->app_path_ctx); - picoquic_refresh_path_quality_thresholds(path_x); - } - } -} - -/* - * Reset the pacing data after CWIN is updated. - * The max bucket is set to contain at least 2 packets more than 1/8th of the congestion window. 
- */ - -void picoquic_update_pacing_data(picoquic_cnx_t* cnx, picoquic_path_t * path_x, int slow_start) -{ - uint64_t rtt_nanosec = path_x->smoothed_rtt * 1000; - - if ((path_x->cwin < ((uint64_t)path_x->send_mtu) * 8) || rtt_nanosec <= 1000) { - /* Small windows, should only relie on ACK clocking */ - path_x->pacing_bucket_max = rtt_nanosec; - path_x->pacing_packet_time_nanosec = 1; - path_x->pacing_packet_time_microsec = 1; - - if (path_x->pacing_bucket_nanosec > path_x->pacing_bucket_max) { - path_x->pacing_bucket_nanosec = path_x->pacing_bucket_max; - } - } - else { - double pacing_rate = ((double)path_x->cwin / (double)rtt_nanosec) * 1000000000.0; - uint64_t quantum = path_x->cwin / 4; - - if (quantum < 2ull * path_x->send_mtu) { - quantum = 2ull * path_x->send_mtu; - } - else { - if (slow_start && path_x->smoothed_rtt > 4*PICOQUIC_MAX_BANDWIDTH_TIME_INTERVAL_MAX) { - const uint64_t quantum_min = 0x8000; - if (quantum < quantum_min){ - quantum = quantum_min; - } - else { - uint64_t quantum2 = (uint64_t)((pacing_rate * PICOQUIC_MAX_BANDWIDTH_TIME_INTERVAL_MAX) / 1000000.0); - if (quantum2 > quantum) { - quantum = quantum2; - } - } - } - else if (quantum > 16ull * path_x->send_mtu) { - quantum = 16ull * path_x->send_mtu; - } - - } - - if (slow_start) { - pacing_rate *= 1.25; - } - - picoquic_update_pacing_rate(cnx, path_x, pacing_rate, quantum); - } -} - -/* - * Update the pacing data after sending a packet. - */ -void picoquic_update_pacing_after_send(picoquic_path_t * path_x, size_t length, uint64_t current_time) -{ - uint64_t packet_time_nanosec; - - picoquic_update_pacing_bucket(path_x, current_time); - - packet_time_nanosec = ((path_x->pacing_packet_time_nanosec * (uint64_t)length) + (path_x->send_mtu - 1)) / path_x->send_mtu; - path_x->pacing_bucket_nanosec -= packet_time_nanosec; -} -#endif /* * Final steps in packet transmission: queue for retransmission, etc */ @@ -4678,7 +4456,7 @@ int picoquic_prepare_packet_ex(picoquic_cnx_t* cnx, if (cnx->path[path_id]->cwin <= cnx->path[path_id]->bytes_in_transit) { cnx->nb_trains_blocked_cwin++; } - else if (cnx->path[path_id]->pacing.bucket_nanosec < cnx->path[path_id]->pacing.packet_time_nanosec){ + else if (picoquic_is_pacing_blocked(&cnx->path[path_id]->pacing)) { cnx->nb_trains_blocked_pacing++; } else { diff --git a/picoquic_t/picoquic_t.c b/picoquic_t/picoquic_t.c index f273eee9e..b0448fd5b 100644 --- a/picoquic_t/picoquic_t.c +++ b/picoquic_t/picoquic_t.c @@ -105,6 +105,7 @@ static const picoquic_test_def_t test_table[] = { { "new_cnxid_stash", cnxid_stash_test }, { "new_cnxid", new_cnxid_test }, { "pacing", pacing_test }, + { "pacing_repeat", pacing_repeat_test }, #if 0 /* The TLS API connect test is only useful when debugging issues step by step */ { "tls_api_connect", tls_api_connect_test }, diff --git a/picoquictest/pacing_test.c b/picoquictest/pacing_test.c new file mode 100644 index 000000000..6aa71d64e --- /dev/null +++ b/picoquictest/pacing_test.c @@ -0,0 +1,342 @@ +/* +* Author: Christian Huitema +* Copyright (c) 2024, Private Octopus, Inc. +* All rights reserved. +* +* Permission to use, copy, modify, and distribute this software for any +* purpose with or without fee is hereby granted, provided that the above +* copyright notice and this permission notice appear in all copies. 
+* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* DISCLAIMED. IN NO EVENT SHALL Private Octopus, Inc. BE LIABLE FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include "tls_api.h" +#include "picoquic_internal.h" +#include "picoquictest_internal.h" +#include +#include + +#include "logreader.h" +#include "picoquic_binlog.h" +#include "picoquic_logger.h" +#include "qlog.h" + +/* Test of the pacing functions. +*/ + +int pacing_test() +{ + /* Create a connection so as to instantiate the pacing context */ + int ret = 0; + uint64_t current_time = 0; + picoquic_quic_t* quic = NULL; + picoquic_cnx_t* cnx = NULL; + struct sockaddr_in saddr; + const uint64_t test_byte_per_sec = 1250000; + const uint64_t test_quantum = 0x4000; + int nb_sent = 0; + int nb_round = 0; + const int nb_target = 10000; + + quic = picoquic_create(8, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, current_time, + &current_time, NULL, NULL, 0); + + memset(&saddr, 0, sizeof(struct sockaddr_in)); + saddr.sin_family = AF_INET; + saddr.sin_port = 1000; + + if (quic == NULL) { + DBG_PRINTF("%s", "Cannot create QUIC context\n"); + ret = -1; + } + else { + cnx = picoquic_create_cnx(quic, + picoquic_null_connection_id, picoquic_null_connection_id, (struct sockaddr*) & saddr, + current_time, 0, "test-sni", "test-alpn", 1); + + if (cnx == NULL) { + DBG_PRINTF("%s", "Cannot create connection\n"); + ret = -1; + } + } + + if (ret == 0) { + /* Set pacing parameters to specified value */ + picoquic_update_pacing_rate(cnx, cnx->path[0], (double)test_byte_per_sec, test_quantum); + /* Run a loop of N tests based on next wake time. 
*/ while (ret == 0 && nb_sent < nb_target) { nb_round++; if (nb_round > 4 * nb_target) { DBG_PRINTF("Pacing needs more than %d rounds for %d packets", nb_round, nb_target); ret = -1; } else { uint64_t next_time = current_time + 10000000; if (picoquic_is_sending_authorized_by_pacing(cnx, cnx->path[0], current_time, &next_time)) { nb_sent++; picoquic_update_pacing_after_send(cnx->path[0], cnx->path[0]->send_mtu, current_time); } else { if (current_time < next_time) { current_time = next_time; } else { DBG_PRINTF("Pacing next = %" PRIu64 ", current = %" PRIu64, next_time, current_time); ret = -1; } } } } /* Verify that the total send time matches expectations */ if (ret == 0) { uint64_t volume_sent = ((uint64_t)nb_target) * cnx->path[0]->send_mtu; uint64_t time_max = ((volume_sent * 1000000) / test_byte_per_sec) + 1; uint64_t time_min = (((volume_sent - test_quantum) * 1000000) / test_byte_per_sec) + 1; if (current_time > time_max) { DBG_PRINTF("Pacing used = %" PRIu64 ", expected max = %" PRIu64, current_time, time_max); ret = -1; } else if (current_time < time_min) { DBG_PRINTF("Pacing used = %" PRIu64 ", expected min = %" PRIu64, current_time, time_min); ret = -1; } } } if (quic != NULL) { picoquic_free(quic); } return ret; } /* Test effects of leaky bucket pacer */ static test_api_stream_desc_t test_scenario_pacing[] = { { 4, 0, 257, 1000000 } }; static int pacing_cc_algotest(picoquic_congestion_algorithm_t* cc_algo, uint64_t target_time, uint64_t loss_target) { uint64_t simulated_time = 0; uint64_t loss_mask = 0; const uint64_t latency_target = 7500; const double bucket_increase_per_microsec = 1.25; /* 1.25 bytes per microsec = 10 Mbps */ const uint64_t bucket_max = 16 * PICOQUIC_MAX_PACKET_SIZE; const uint64_t picosec_per_byte = (1000000ull * 8) / 100; /* Underlying rate = 100 Mbps */ uint64_t observed_loss = 0; picoquic_test_tls_api_ctx_t* test_ctx = NULL; picoquic_connection_id_t initial_cid = { {0x9a, 0xc1, 0xcc, 0xa1, 0x90, 6, 7, 8}, 8 }; int ret; initial_cid.id[4] = cc_algo->congestion_algorithm_number; ret = tls_api_init_ctx_ex(&test_ctx, PICOQUIC_INTERNAL_TEST_VERSION_1, PICOQUIC_TEST_SNI, PICOQUIC_TEST_ALPN, &simulated_time, NULL, NULL, 0, 0, 0, &initial_cid); if (ret == 0) { /* Set link */ test_ctx->c_to_s_link->microsec_latency = latency_target; test_ctx->c_to_s_link->picosec_per_byte = picosec_per_byte; test_ctx->s_to_c_link->microsec_latency = latency_target; test_ctx->s_to_c_link->picosec_per_byte = picosec_per_byte; /* Set leaky bucket parameters */ test_ctx->c_to_s_link->bucket_increase_per_microsec = bucket_increase_per_microsec; test_ctx->c_to_s_link->bucket_max = bucket_max; test_ctx->c_to_s_link->bucket_current = (double)bucket_max; test_ctx->c_to_s_link->bucket_arrival_last = simulated_time; test_ctx->s_to_c_link->bucket_increase_per_microsec = bucket_increase_per_microsec; test_ctx->s_to_c_link->bucket_max = bucket_max; test_ctx->s_to_c_link->bucket_current = (double)bucket_max; test_ctx->s_to_c_link->bucket_arrival_last = simulated_time; /* Set the CC algorithm to selected value */ picoquic_set_default_congestion_algorithm(test_ctx->qserver, cc_algo); picoquic_set_binlog(test_ctx->qserver, "."); test_ctx->qserver->use_long_log = 1; } if (ret == 0) { ret = tls_api_connection_loop(test_ctx, &loss_mask, latency_target, &simulated_time); } /* Prepare to send data */ if (ret == 0) 
{ + ret = test_api_init_send_recv_scenario(test_ctx, test_scenario_pacing, sizeof(test_scenario_pacing)); + } + + /* Try to complete the data sending loop */ + if (ret == 0) { + ret = tls_api_data_sending_loop(test_ctx, &loss_mask, &simulated_time, 0); + } + + if (ret == 0) { + observed_loss = (test_ctx->cnx_server == NULL) ? UINT64_MAX : test_ctx->cnx_server->nb_retransmission_total; + } + + /* verify that the transmission was complete */ + if (ret == 0) { + ret = tls_api_one_scenario_body_verify(test_ctx, &simulated_time, target_time); + } + + if (ret == 0 && observed_loss > loss_target) { + DBG_PRINTF("Pacing, for cc=%s, expected %" PRIu64 " losses, got %" PRIu64 "\n", + cc_algo->congestion_algorithm_id, loss_target, observed_loss); + ret = -1; + } + + if (test_ctx != NULL) { + tls_api_delete_ctx(test_ctx); + test_ctx = NULL; + } + + return ret; +} + +int pacing_bbr_test() +{ + /* BBRv3 includes a short term loop that detects losses and tune the + * sending rate accordingly. The packet losses cause startup to + * give up too soon, but this is fixed by probing up "quickly" + * after exiting startup. The packet losses occur during startup + * and during the probing periods. + */ + int ret = pacing_cc_algotest(picoquic_bbr_algorithm, 900000, 150); + return ret; +} + +int pacing_cubic_test() +{ + int ret = pacing_cc_algotest(picoquic_cubic_algorithm, 900000, 210); + return ret; +} + +int pacing_dcubic_test() +{ + int ret = pacing_cc_algotest(picoquic_dcubic_algorithm, 900000, 240); + return ret; +} + +int pacing_fast_test() +{ + int ret = pacing_cc_algotest(picoquic_fastcc_algorithm, 960000, 180); + return ret; +} + +int pacing_newreno_test() +{ + int ret = pacing_cc_algotest(picoquic_newreno_algorithm, 900000, 100); + return ret; +} + + +/* Verify that pacing provides repeatable results + */ + +typedef struct st_pacing_test_t { + uint64_t current_time; + uint64_t length; + size_t send_mtu; + uint64_t cwin; + int slow_start; + uint64_t rtt; + uint64_t rate; + uint64_t quantum; + int expected_ok; + uint64_t expected_packet_nanosec; + int64_t expected_bucket_nanosec; + uint64_t expected_next_time; +} pacing_test_t; + +pacing_test_t pacing_events[] = { + { 0, 0, 1280, 0, 0, 10000, 125000, 8096, 0, 10000000, 64768000, 0 }, + { 0, 0, 1280, 0, 0, 10000, 1250000, 8096, 0, 1024000, 6476800, 0 }, + { 0, 0, 1280, 0, 0, 10000, 12500000, 8096, 0, 102400, 647680, 0 }, + { 0, 0, 1280, 0, 0, 10000, 12500000, 16192, 0, 102400, 1295360, 0 }, + { 0, 0, 1280, 0, 0, 10000, 125000000, 16192, 0, 10240, 129536, 0 }, + { 0, 0, 1280, 0, 0, 10000, 1250000000, 16192, 0, 1024, 12953, 0 }, + { 0, 0, 1280, 0, 0, 10000, 12500000000ull, 16192, 0, 102, 1295, 0 }, + { 0, 0, 1280, 16000, 1, 10000, 2000000, 0, 0, 640000, 2000000, 0 }, + { 0, 0, 1536, 153600, 1, 10000, 19200000, 0, 0, 80000, 1280000, 0 }, + { 0, 0, 1536, 153600, 0, 10000, 15360000, 0, 0, 100000, 1600000, 0 }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 900000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 800000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 700000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 600000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 500000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 400000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 300000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 200000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 100000, UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 0, 
UINT64_MAX }, + {1000, 1536, 1536, 0, 0, 10000, 0, 0, 0, 0, 0, 1101 }, + {1050, 1536, 1536, 0, 0, 10000, 0, 0, 0, 0, 50000, 1101 }, + {1101, 1536, 1536, 0, 0, 10000, 0, 0, 1, 0, 1000, UINT64_MAX } +}; + +size_t nb_pacing_events = sizeof(pacing_events) / sizeof(pacing_test_t); + +int pacing_repeat_test() +{ + int ret = 0; + picoquic_pacing_t pacing = { 0 }; + + /* set either CWIN or data rate to expected value */ + for (size_t i = 0; ret == 0 && i < nb_pacing_events; i++) { + if (pacing_events[i].length == 0) { + /* This is a set up event */ + if (pacing_events[i].cwin == 0) { + /* directly set the quantum and rate */ + picoquic_update_pacing_parameters(&pacing, (double)pacing_events[i].rate, + pacing_events[i].quantum, pacing_events[i].send_mtu, pacing_events[i].rtt, + NULL); + } + else { + /* Set control based on CWIN and RTT */ + picoquic_update_pacing_window(&pacing, pacing_events[i].slow_start, + pacing_events[i].cwin, pacing_events[i].send_mtu, pacing_events[i].rtt, NULL); + } + /* Check that the value are as expected */ + if (pacing.rate != pacing_events[i].rate || + pacing.packet_time_nanosec != pacing_events[i].expected_packet_nanosec || + pacing.bucket_max != pacing_events[i].expected_bucket_nanosec) { + DBG_PRINTF("Event %d, expected rate: " PRIu64 ", Packet_n: " PRIu64 ", Bucket: " PRIu64, + i, pacing.rate, pacing.packet_time_nanosec, pacing.bucket_max); + ret = -1; + } + } + else { + /* Set using CWIN and RTT */ + uint64_t next_time = UINT64_MAX; + int is_ok = picoquic_is_authorized_by_pacing(&pacing, pacing_events[i].current_time, &next_time, 0, NULL); + if (is_ok != pacing_events[i].expected_ok) { + DBG_PRINTF("Event %d, expected OK: %d", i, is_ok); + ret = -1; + } + else { + if (is_ok) { + picoquic_update_pacing_data_after_send(&pacing, pacing_events[i].length, pacing_events[i].send_mtu, pacing_events[i].current_time); + } + } + if (pacing.bucket_nanosec != pacing_events[i].expected_bucket_nanosec || + next_time != pacing_events[i].expected_next_time) { + DBG_PRINTF("Event %d, expected bucket: " PRIu64, + i, pacing.rate, pacing.bucket_nanosec); + ret = -1; + } + } + } + return ret; +} \ No newline at end of file diff --git a/picoquictest/picoquictest.h b/picoquictest/picoquictest.h index ed5099466..f70cde31c 100644 --- a/picoquictest/picoquictest.h +++ b/picoquictest/picoquictest.h @@ -376,6 +376,7 @@ int app_limited_rpr_test(); int cwin_max_test(); int initial_race_test(); int pacing_test(); +int pacing_repeat_test(); int chacha20_test(); int cnx_limit_test(); int cert_verify_bad_cert_test(); diff --git a/picoquictest/picoquictest.vcxproj b/picoquictest/picoquictest.vcxproj index f6ef5d9f6..275f43365 100644 --- a/picoquictest/picoquictest.vcxproj +++ b/picoquictest/picoquictest.vcxproj @@ -173,6 +173,7 @@ + diff --git a/picoquictest/picoquictest.vcxproj.filters b/picoquictest/picoquictest.vcxproj.filters index 8207a9de0..bd2882d8f 100644 --- a/picoquictest/picoquictest.vcxproj.filters +++ b/picoquictest/picoquictest.vcxproj.filters @@ -165,6 +165,9 @@ Source Files + + Source Files + diff --git a/picoquictest/tls_api_test.c b/picoquictest/tls_api_test.c index f4e6f54f3..4d5c5cc1c 100644 --- a/picoquictest/tls_api_test.c +++ b/picoquictest/tls_api_test.c @@ -11015,98 +11015,6 @@ int initial_race_test() return ret; } -/* Test of the pacing functions. 
- */ - -int pacing_test() -{ - /* Create a connection so as to instantiate the pacing context */ - int ret = 0; - uint64_t current_time = 0; - picoquic_quic_t* quic = NULL; - picoquic_cnx_t* cnx = NULL; - struct sockaddr_in saddr; - const uint64_t test_byte_per_sec = 1250000; - const uint64_t test_quantum = 0x4000; - int nb_sent = 0; - int nb_round = 0; - const int nb_target = 10000; - - quic = picoquic_create(8, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, current_time, - ¤t_time, NULL, NULL, 0); - - memset(&saddr, 0, sizeof(struct sockaddr_in)); - saddr.sin_family = AF_INET; - saddr.sin_port = 1000; - - if (quic == NULL) { - DBG_PRINTF("%s", "Cannot create QUIC context\n"); - ret = -1; - } - else { - cnx = picoquic_create_cnx(quic, - picoquic_null_connection_id, picoquic_null_connection_id, (struct sockaddr*) & saddr, - current_time, 0, "test-sni", "test-alpn", 1); - - if (cnx == NULL) { - DBG_PRINTF("%s", "Cannot create connection\n"); - ret = -1; - } - } - - if (ret == 0) { - /* Set pacing parameters to specified value */ - picoquic_update_pacing_rate(cnx, cnx->path[0], (double)test_byte_per_sec, test_quantum); - /* Run a loop of N tests based on next wake time. */ - while (ret == 0 && nb_sent < nb_target) { - nb_round++; - if (nb_round > 4 * nb_target) { - DBG_PRINTF("Pacing needs more that %d rounds for %d packets", nb_round, nb_target); - ret = -1; - } - else { - uint64_t next_time = current_time + 10000000; - if (picoquic_is_sending_authorized_by_pacing(cnx, cnx->path[0], current_time, &next_time)) { - nb_sent++; - picoquic_update_pacing_after_send(cnx->path[0], cnx->path[0]->send_mtu, current_time); - } - else { - if (current_time < next_time) { - current_time = next_time; - } - else { - DBG_PRINTF("Pacing next = %" PRIu64", current = %d" PRIu64, next_time, current_time); - ret = -1; - } - } - } - } - - /* Verify that the total send time matches expectations */ - if (ret == 0) { - uint64_t volume_sent = ((uint64_t)nb_target) * cnx->path[0]->send_mtu; - uint64_t time_max = ((volume_sent * 1000000) / test_byte_per_sec) + 1; - uint64_t time_min = (((volume_sent - test_quantum) * 1000000) / test_byte_per_sec) + 1; - - if (current_time > time_max) { - DBG_PRINTF("Pacing used = %" PRIu64", expected max = %d" PRIu64, current_time, time_max); - ret = -1; - } - else if (current_time < time_min) { - DBG_PRINTF("Pacing used = %" PRIu64", expected min = %d" PRIu64, current_time, time_min); - ret = -1; - } - } - } - - if (quic != NULL) { - picoquic_free(quic); - } - - return ret; -} - /* * Test connection establishment with ChaCha20 */ @@ -11476,122 +11384,6 @@ int multi_segment_test() return ret; } -/* Test effects of leaky bucket pacer - */ - -static int pacing_cc_algotest(picoquic_congestion_algorithm_t* cc_algo, uint64_t target_time, uint64_t loss_target) -{ - uint64_t simulated_time = 0; - uint64_t loss_mask = 0; - const uint64_t latency_target = 7500; - const double bucket_increase_per_microsec = 1.25; /* 1.25 bytes per microsec = 10 Mbps */ - const uint64_t bucket_max = 16 * PICOQUIC_MAX_PACKET_SIZE; - const uint64_t picosec_per_byte = (1000000ull * 8) / 100; /* Underlying rate = 100 Mbps */ - uint64_t observed_loss = 0; - - picoquic_test_tls_api_ctx_t* test_ctx = NULL; - picoquic_connection_id_t initial_cid = { {0x9a, 0xc1, 0xcc, 0xa1, 0x90, 6, 7, 8}, 8 }; - int ret; - - initial_cid.id[4] = cc_algo->congestion_algorithm_number; - - ret = tls_api_init_ctx_ex(&test_ctx, PICOQUIC_INTERNAL_TEST_VERSION_1, - PICOQUIC_TEST_SNI, PICOQUIC_TEST_ALPN, &simulated_time, NULL, NULL, 0, 
0, 0, &initial_cid); - - if (ret == 0) { - /* Set link */ - test_ctx->c_to_s_link->microsec_latency = latency_target; - test_ctx->c_to_s_link->picosec_per_byte = picosec_per_byte; - test_ctx->s_to_c_link->microsec_latency = latency_target; - test_ctx->s_to_c_link->picosec_per_byte = picosec_per_byte; - /* Set leaky bucket parameters */ - test_ctx->c_to_s_link->bucket_increase_per_microsec = bucket_increase_per_microsec; - test_ctx->c_to_s_link->bucket_max = bucket_max; - test_ctx->c_to_s_link->bucket_current = (double)bucket_max; - test_ctx->c_to_s_link->bucket_arrival_last = simulated_time; - test_ctx->s_to_c_link->bucket_increase_per_microsec = bucket_increase_per_microsec; - test_ctx->s_to_c_link->bucket_max = bucket_max; - test_ctx->s_to_c_link->bucket_current = (double)bucket_max; - test_ctx->s_to_c_link->bucket_arrival_last = simulated_time; - /* Set the CC algorithm to selected value */ - picoquic_set_default_congestion_algorithm(test_ctx->qserver, cc_algo); - picoquic_set_binlog(test_ctx->qserver, "."); - test_ctx->qserver->use_long_log = 1; - } - - if (ret == 0) { - ret = tls_api_connection_loop(test_ctx, &loss_mask, latency_target, &simulated_time); - } - - /* Prepare to send data */ - if (ret == 0) { - ret = test_api_init_send_recv_scenario(test_ctx, test_scenario_very_long, sizeof(test_scenario_very_long)); - } - - /* Try to complete the data sending loop */ - if (ret == 0) { - ret = tls_api_data_sending_loop(test_ctx, &loss_mask, &simulated_time, 0); - } - - if (ret == 0) { - observed_loss = (test_ctx->cnx_server == NULL) ? UINT64_MAX : test_ctx->cnx_server->nb_retransmission_total; - } - - /* verify that the transmission was complete */ - if (ret == 0) { - ret = tls_api_one_scenario_body_verify(test_ctx, &simulated_time, target_time); - } - - if (ret == 0 && observed_loss > loss_target) { - DBG_PRINTF("Pacing, for cc=%s, expected %" PRIu64 " losses, got %" PRIu64 "\n", - cc_algo->congestion_algorithm_id, loss_target, observed_loss); - ret = -1; - } - - if (test_ctx != NULL) { - tls_api_delete_ctx(test_ctx); - test_ctx = NULL; - } - - return ret; -} - -int pacing_bbr_test() -{ - /* BBRv3 includes a short term loop that detects losses and tune the - * sending rate accordingly. The packet losses cause startup to - * give up too soon, but this is fixed by probing up "quickly" - * after exiting startup. The packet losses occur during startup - * and during the probing periods. 
- */ - int ret = pacing_cc_algotest(picoquic_bbr_algorithm, 900000, 150); - return ret; -} - -int pacing_cubic_test() -{ - int ret = pacing_cc_algotest(picoquic_cubic_algorithm, 900000, 210); - return ret; -} - -int pacing_dcubic_test() -{ - int ret = pacing_cc_algotest(picoquic_dcubic_algorithm, 900000, 240); - return ret; -} - -int pacing_fast_test() -{ - int ret = pacing_cc_algotest(picoquic_fastcc_algorithm, 960000, 180); - return ret; -} - -int pacing_newreno_test() -{ - int ret = pacing_cc_algotest(picoquic_newreno_algorithm, 900000, 100); - return ret; -} - /* heavy loss test: * Simulate a connection experiencing heavy packet * loss, such as 50% packet loss, for a duration of From 03b8e29dc1eab7d7c698fc90336d4bec75b36747 Mon Sep 17 00:00:00 2001 From: Christian Huitema Date: Thu, 2 May 2024 18:30:05 -0700 Subject: [PATCH 5/7] fix uint64-size_t issue --- picoquictest/pacing_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/picoquictest/pacing_test.c b/picoquictest/pacing_test.c index 6aa71d64e..ef20dfd88 100644 --- a/picoquictest/pacing_test.c +++ b/picoquictest/pacing_test.c @@ -247,7 +247,7 @@ int pacing_newreno_test() typedef struct st_pacing_test_t { uint64_t current_time; - uint64_t length; + size_t length; size_t send_mtu; uint64_t cwin; int slow_start; From fc034ac6516dbaafeec612417f0040cd45d7ec77 Mon Sep 17 00:00:00 2001 From: Christian Huitema Date: Thu, 2 May 2024 19:09:38 -0700 Subject: [PATCH 6/7] Limit pacing rate and quantum for CC bypass --- picoquic/picoquic_internal.h | 4 +++ picoquic/quicctx.c | 6 ++++ picoquic/sender.c | 55 ++++++++++++++++++------------------ 3 files changed, 37 insertions(+), 28 deletions(-) diff --git a/picoquic/picoquic_internal.h b/picoquic/picoquic_internal.h index adbdb0fe5..d8aed209d 100644 --- a/picoquic/picoquic_internal.h +++ b/picoquic/picoquic_internal.h @@ -85,6 +85,9 @@ extern "C" { #define PICOQUIC_CWIN_INITIAL (10 * PICOQUIC_MAX_PACKET_SIZE) #define PICOQUIC_CWIN_MINIMUM (2 * PICOQUIC_MAX_PACKET_SIZE) +#define PICOQUIC_PRIORITY_BYPASS_MAX_RATE 125000 +#define PICOQUIC_PRIORITY_BYPASS_QUANTUM 2560 + #define PICOQUIC_DEFAULT_CRYPTO_EPOCH_LENGTH (1<<22) #define PICOQUIC_DEFAULT_SIMULTANEOUS_LOGS 32 @@ -1453,6 +1456,7 @@ typedef struct st_picoquic_cnx_t { uint64_t high_priority_stream_id; uint64_t next_stream_id[4]; uint64_t priority_limit_for_bypass; /* Bypass CC if dtagram or stream priority lower than this, 0 means never */ + picoquic_pacing_t priority_bypass_pacing; /* Repeat queue contains packets with data frames that should be * sent according to priority when congestion window opens. 
*/ diff --git a/picoquic/quicctx.c b/picoquic/quicctx.c index 1625c1ec6..bc8bd2cf7 100644 --- a/picoquic/quicctx.c +++ b/picoquic/quicctx.c @@ -3639,6 +3639,7 @@ picoquic_cnx_t* picoquic_create_cnx(picoquic_quic_t* quic, for (int i = 0; i < 4; i++) { cnx->next_stream_id[i] = i; } + picoquic_pacing_init(&cnx->priority_bypass_pacing, start_time); picoquic_register_path(cnx, cnx->path[0]); } } @@ -4781,6 +4782,11 @@ void picoquic_set_default_bbr_quantum_ratio(picoquic_quic_t* quic, double quantu void picoquic_set_priority_limit_for_bypass(picoquic_cnx_t* cnx, uint8_t priority_limit) { cnx->priority_limit_for_bypass = priority_limit; + if (priority_limit > 0) { + picoquic_update_pacing_parameters(&cnx->priority_bypass_pacing, + PICOQUIC_PRIORITY_BYPASS_MAX_RATE, PICOQUIC_PRIORITY_BYPASS_QUANTUM, + cnx->path[0]->send_mtu, cnx->path[0]->smoothed_rtt, NULL); + } } void picoquic_set_feedback_loss_notification(picoquic_cnx_t* cnx, unsigned int should_notify) diff --git a/picoquic/sender.c b/picoquic/sender.c index 08126062c..3662cc252 100644 --- a/picoquic/sender.c +++ b/picoquic/sender.c @@ -2931,7 +2931,7 @@ static uint8_t* picoquic_prepare_datagram_ready(picoquic_cnx_t* cnx, picoquic_pa */ static uint8_t* picoquic_prepare_stream_and_datagrams(picoquic_cnx_t* cnx, picoquic_path_t* path_x, uint8_t* bytes_next, uint8_t* bytes_max, - uint64_t max_priority_allowed, + uint64_t max_priority_allowed, uint64_t current_time, int* more_data, int* is_pure_ack, int* no_data_to_send, int* ret) { int datagram_sent = 0; @@ -2950,6 +2950,7 @@ static uint8_t* picoquic_prepare_stream_and_datagrams(picoquic_cnx_t* cnx, picoq picoquic_packet_t* first_repeat = picoquic_first_data_repeat_packet(cnx); uint64_t current_priority = UINT64_MAX; uint64_t stream_priority = UINT64_MAX; + uint8_t* bytes_before_iteration = bytes_next; int something_sent = 0; int conflict_found = 0; @@ -3030,6 +3031,12 @@ static uint8_t* picoquic_prepare_stream_and_datagrams(picoquic_cnx_t* cnx, picoq more_data, is_pure_ack, &datagram_tried_and_failed, &datagram_sent, ret); something_sent = datagram_sent; } + + if (current_priority < cnx->priority_limit_for_bypass && bytes_next > bytes_before_iteration) { + picoquic_update_pacing_data_after_send(&cnx->priority_bypass_pacing, bytes_next - bytes_before_iteration, + cnx->path[0]->send_mtu, current_time); + } + if (is_first_round) { *no_data_to_send = ((first_stream == NULL && first_repeat == NULL) || stream_tried_and_failed) && (!datagram_present || datagram_tried_and_failed); @@ -3174,25 +3181,13 @@ int picoquic_prepare_packet_almost_ready(picoquic_cnx_t* cnx, picoquic_path_t* p length = bytes_next - bytes; if (path_x->cwin < path_x->bytes_in_transit) { - /* Implementation of experimental API, picoquic_set_priority_limit_for_bypass */ - uint8_t* bytes_next_before_bypass = bytes_next; - if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1) { - bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, - cnx->priority_limit_for_bypass, - &more_data, &is_pure_ack, &no_data_to_send, &ret); - } - if (bytes_next != bytes_next_before_bypass) { - length = bytes_next - bytes; - } - else { - picoquic_per_ack_state_t ack_state = { 0 }; - cnx->cwin_blocked = 1; - path_x->last_cwin_blocked_time = current_time; - if (cnx->congestion_alg != NULL) { - cnx->congestion_alg->alg_notify(cnx, path_x, - picoquic_congestion_notification_cwin_blocked, - &ack_state, current_time); - } + picoquic_per_ack_state_t ack_state = { 0 }; + cnx->cwin_blocked = 1; + path_x->last_cwin_blocked_time 
= current_time; + if (cnx->congestion_alg != NULL) { + cnx->congestion_alg->alg_notify(cnx, path_x, + picoquic_congestion_notification_cwin_blocked, + &ack_state, current_time); } } else { @@ -3234,7 +3229,7 @@ int picoquic_prepare_packet_almost_ready(picoquic_cnx_t* cnx, picoquic_path_t* p } if (ret == 0) { bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, - UINT64_MAX, + UINT64_MAX, current_time, &more_data, &is_pure_ack, &no_data_to_send, &ret); } /* TODO: replace this by posting of frame when CWIN estimated */ @@ -3533,9 +3528,11 @@ int picoquic_prepare_packet_ready(picoquic_cnx_t* cnx, picoquic_path_t* path_x, /* Implementation of experimental API, picoquic_set_priority_limit_for_bypass */ uint8_t* bytes_next_before_bypass = bytes_next; int no_data_to_send = 0; - if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1) { + if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1 && + picoquic_is_authorized_by_pacing(&cnx->priority_bypass_pacing, current_time, next_wake_time, + cnx->quic->packet_train_mode, cnx->quic)) { bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, - cnx->priority_limit_for_bypass, + cnx->priority_limit_for_bypass, current_time, &more_data, &is_pure_ack, &no_data_to_send, &ret); } if (bytes_next != bytes_next_before_bypass) { @@ -3582,8 +3579,8 @@ int picoquic_prepare_packet_ready(picoquic_cnx_t* cnx, picoquic_path_t* path_x, bytes_next = picoquic_format_ack_frequency_frame(cnx, bytes_next, bytes_max, &more_data); } if (ret == 0) { - bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, UINT64_MAX, - &more_data, &is_pure_ack, &no_data_to_send, &ret); + bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, + UINT64_MAX, current_time, &more_data, &is_pure_ack, &no_data_to_send, &ret); } /* TODO: replace this by scheduling of BDP frame when window has been estimated */ @@ -3659,12 +3656,14 @@ int picoquic_prepare_packet_ready(picoquic_cnx_t* cnx, picoquic_path_t* path_x, } } /* end of CC */ } /* End of pacing */ - else if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1) { + else if (cnx->priority_limit_for_bypass > 0 && cnx->nb_paths == 1 && + picoquic_is_authorized_by_pacing(&cnx->priority_bypass_pacing, current_time, next_wake_time, + cnx->quic->packet_train_mode, cnx->quic)) { /* If congestion bypass is implemented, also consider pacing bypass */ int no_data_to_send = 0; - + if ((bytes_next = picoquic_prepare_stream_and_datagrams(cnx, path_x, bytes_next, bytes_max, - cnx->priority_limit_for_bypass, + cnx->priority_limit_for_bypass, current_time, &more_data, &is_pure_ack, &no_data_to_send, &ret)) != NULL) { length = bytes_next - bytes; } From c1d1ba03da25bd11ef31b3d3e20ddf4ef12b6d21 Mon Sep 17 00:00:00 2001 From: Christian Huitema Date: Thu, 2 May 2024 22:34:10 -0700 Subject: [PATCH 7/7] Cleanup, and version update for the new APIs. 
--- CMakeLists.txt | 2 +- picoquic/pacing.c | 21 --------------------- picoquic/picoquic.h | 7 ++++++- 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7adbb21bb..874a59a93 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,7 @@ else() endif() project(picoquic - VERSION 1.1.19.12 + VERSION 1.1.20.0 DESCRIPTION "picoquic library" LANGUAGES C CXX) diff --git a/picoquic/pacing.c b/picoquic/pacing.c index 057f193fc..129631076 100644 --- a/picoquic/pacing.c +++ b/picoquic/pacing.c @@ -151,26 +151,6 @@ static void picoquic_report_pacing_update(picoquic_pacing_t* pacing, picoquic_pa void picoquic_update_pacing_parameters(picoquic_pacing_t * pacing, double pacing_rate, uint64_t quantum, size_t send_mtu, uint64_t smoothed_rtt, picoquic_path_t * signalled_path) { -#if 0 - const uint64_t nanosec_per_sec = 1000000000ull; - - pacing->rate = (uint64_t)pacing_rate; - - if (quantum > pacing->quantum_max) { - pacing->quantum_max = quantum; - } - if (pacing->rate > pacing->rate_max) { - pacing->rate_max = pacing->rate; - } - - pacing->packet_time_nanosec = picoquic_packet_time_nanosec(pacing, send_mtu); - - pacing->bucket_max = (nanosec_per_sec * quantum) / pacing->rate; - if (pacing->bucket_max <= 0) { - pacing->bucket_max = 16 * pacing->packet_time_nanosec; - } - -#else double packet_time = (double)send_mtu / pacing_rate; double quantum_time = (double)quantum / pacing_rate; uint64_t rtt_nanosec = smoothed_rtt * 1000; @@ -201,7 +181,6 @@ void picoquic_update_pacing_parameters(picoquic_pacing_t * pacing, double pacing if (pacing->bucket_max <= 0) { pacing->bucket_max = 16 * pacing->packet_time_nanosec; } -#endif if (pacing->bucket_nanosec > pacing->bucket_max) { pacing->bucket_nanosec = pacing->bucket_max; diff --git a/picoquic/picoquic.h b/picoquic/picoquic.h index abfbe7b92..c80374670 100644 --- a/picoquic/picoquic.h +++ b/picoquic/picoquic.h @@ -40,7 +40,7 @@ extern "C" { #endif -#define PICOQUIC_VERSION "1.1.19.12" +#define PICOQUIC_VERSION "1.1.20.0" #define PICOQUIC_ERROR_CLASS 0x400 #define PICOQUIC_ERROR_DUPLICATE (PICOQUIC_ERROR_CLASS + 1) #define PICOQUIC_ERROR_AEAD_CHECK (PICOQUIC_ERROR_CLASS + 3) @@ -1499,6 +1499,11 @@ void picoquic_set_default_bbr_quantum_ratio(picoquic_quic_t* quic, double quantu * * This experimental feature will not be activated in a multipath * environment, i.e., if more that 1 path is activated. +* +* To protect against potential abuse, the code includes a rate limiter, +* ensuring that if congestion control is blocking transmission, +* the "bypass" will not result in more than 1 Mbps of +* traffic. */ void picoquic_set_priority_limit_for_bypass(picoquic_cnx_t* cnx, uint8_t priority_limit);
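
Usage note: the series adds the `picoquic_set_priority_limit_for_bypass` API and its rate limiter, but no calling example. The sketch below is illustrative only and assumes an application that already holds an open `picoquic_cnx_t`; it also uses the pre-existing `picoquic_set_stream_priority()` setter from picoquic.h, which is not part of this patch, and the stream identifier passed in is hypothetical. Per the sender.c changes above, priorities strictly below the configured limit qualify for the bypass, the bypass traffic is paced by the `priority_bypass_pacing` context (PICOQUIC_PRIORITY_BYPASS_MAX_RATE = 125000 bytes/sec, i.e. 1 Mbps), and the bypass branch is only taken while a single path is active (cnx->nb_paths == 1).

#include "picoquic.h"

/* Sketch: let streams or datagrams with priority 0..2 bypass congestion
 * control; priority 3 and above still wait for congestion window credit.
 * Bypass traffic remains limited by the dedicated pacer configured from
 * PICOQUIC_PRIORITY_BYPASS_MAX_RATE and PICOQUIC_PRIORITY_BYPASS_QUANTUM. */
static void enable_priority_bypass(picoquic_cnx_t* cnx, uint64_t urgent_stream_id)
{
    /* Priorities strictly lower than 3 qualify for the bypass. */
    picoquic_set_priority_limit_for_bypass(cnx, 3);

    /* Mark one stream as urgent so it falls under the limit.
     * picoquic_set_stream_priority() is the existing picoquic setter
     * (not added by this series); the stream id is purely illustrative. */
    (void)picoquic_set_stream_priority(cnx, urgent_stream_id, 1);
}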