diff --git a/.clippy.toml b/.clippy.toml new file mode 100644 index 0000000000..e928b4be64 --- /dev/null +++ b/.clippy.toml @@ -0,0 +1,3 @@ +disallowed-methods = [ + { path = "std::slice::from_raw_parts", reason = "see null_safe_slice" } +] diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000000..7ff673d877 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,17 @@ +# neqo has no test coverage for its example client and server +ignore: + - "neqo-bin" + +# Do not notify until at least three results have been uploaded from the CI pipeline. +# (This corresponds to the three main platforms we support: Linux, macOS, and Windows.) +codecov: + notify: + after_n_builds: 3 +comment: + after_n_builds: 3 + +coverage: + status: + project: + default: + threshold: 0.05% diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..324520383b --- /dev/null +++ b/.dockerignore @@ -0,0 +1,10 @@ +# Ignore everything: +* +# Except for the following: +!**/*.toml +!**/*.rs +!**/*.h +!**/*.hpp +!neqo-crypto/min_version.txt +!qns +!Cargo.lock diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bf1ad1efba..9fd92c3c70 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1 @@ -* @KershawChang @martinthomson -/docker/ @martinthomson -/hooks/ @martinthomson -/neqo-crypto/ @martinthomson -/neqo-http3/ @KershawChang -/neqo-qpack/ @KershawChang -/qns/ @martinthomson +* @KershawChang @martinthomson @larseggert diff --git a/.github/actionlint-matcher.json b/.github/actionlint-matcher.json new file mode 100644 index 0000000000..4613e1617b --- /dev/null +++ b/.github/actionlint-matcher.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "owner": "actionlint", + "pattern": [ + { + "regexp": "^(?:\\x1b\\[\\d+m)?(.+?)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*: (?:\\x1b\\[\\d+m)*(.+?)(?:\\x1b\\[\\d+m)* \\[(.+?)\\]$", + "file": 1, + "line": 2, + "column": 3, + "message": 4, + "code": 5 + } 
+ ] + } + ] +} diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml new file mode 100644 index 0000000000..051b54143b --- /dev/null +++ b/.github/actions/nss/action.yml @@ -0,0 +1,101 @@ +name: Fetch and build NSS +description: Fetch and build NSS + +inputs: + type: + description: "Whether to do a debug or release build of NSS" + default: "Release" + +# This step might be removed if the distro included a recent enough +# version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. +# (neqo-crypto/build.rs would also need to query pkg-config to get the +# right build flags rather than building NSS.) +# +# Also see https://github.com/mozilla/neqo/issues/1711 + +runs: + using: composite + steps: + - name: Check system NSS version + shell: bash + run: | + if ! command -v pkg-config &> /dev/null; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + if ! pkg-config --exists nss; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_VERSION="$(pkg-config --modversion nss)" + if [ "$?" -ne 0 ]; then + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + NSS_MAJOR=$(echo "$NSS_VERSION" | cut -d. -f1) + NSS_MINOR=$(echo "$NSS_VERSION" | cut -d. -f2) + REQ_NSS_MAJOR=$(cut -d. -f1 < neqo-crypto/min_version.txt) + REQ_NSS_MINOR=$(cut -d. -f2 < neqo-crypto/min_version.txt) + if [[ "$NSS_MAJOR" -lt "$REQ_NSS_MAJOR" || "$NSS_MAJOR" -eq "$REQ_NSS_MAJOR" && "$NSS_MINOR" -lt "$REQ_NSS_MINOR" ]]; then + echo "System NSS is too old: $NSS_VERSION" + echo "BUILD_NSS=1" >> "$GITHUB_ENV" + exit 0 + fi + echo "System NSS is suitable: $NSS_VERSION" + echo "BUILD_NSS=0" >> "$GITHUB_ENV" + + # Ideally, we'd use this. But things are sufficiently flaky that we're better off + # trying both hg and git. Leaving this here in case we want to re-try in the future. 
+ # + # - name: Checkout NSPR + # if: env.BUILD_NSS == '1' + # uses: actions/checkout@v4 + # with: + # repository: "nss-dev/nspr" + # path: ${{ github.workspace }}/nspr + + # - name: Checkout NSS + # if: env.BUILD_NSS == '1' + # uses: actions/checkout@v4 + # with: + # repository: "nss-dev/nss" + # path: ${{ github.workspace }}/nss + + - name: Checkout NSPR + shell: bash + if: env.BUILD_NSS == '1' + run: | + hg clone https://hg.mozilla.org/projects/nspr "${{ github.workspace }}/nspr" || \ + git clone --depth=1 https://github.com/nss-dev/nspr "${{ github.workspace }}/nspr" + + - name: Checkout NSS + shell: bash + if: env.BUILD_NSS == '1' + run: | + hg clone https://hg.mozilla.org/projects/nss "${{ github.workspace }}/nss" || \ + git clone --depth=1 https://github.com/nss-dev/nss "${{ github.workspace }}/nss" + + - name: Build + shell: bash + if: env.BUILD_NSS == '1' + run: | + if [ "${{ inputs.type }}" != "Debug" ]; then + # We want to do an optimized build for accurate CPU profiling, but + # we also want debug symbols and frame pointers for that, which the normal optimized NSS + # build process doesn't provide. 
+ OPT="-o" + NSS_TARGET=Release + [ "${{ runner.os }}" != "Windows" ] && export CFLAGS="-ggdb3 -fno-omit-frame-pointer" + else + NSS_TARGET=Debug + fi + $NSS_DIR/build.sh -g -Ddisable_tests=1 $OPT --static + echo "NSS_TARGET=$NSS_TARGET" >> "$GITHUB_ENV" + NSS_OUT="$NSS_DIR/../dist/$NSS_TARGET" + echo "LD_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" + echo "DYLD_FALLBACK_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" + echo "$NSS_OUT/lib" >> "$GITHUB_PATH" + echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" + env: + NSS_DIR: ${{ github.workspace }}/nss + NSPR_DIR: ${{ github.workspace }}/nspr diff --git a/.github/actions/pr-comment-data-export/action.yml b/.github/actions/pr-comment-data-export/action.yml new file mode 100644 index 0000000000..8a8cc50232 --- /dev/null +++ b/.github/actions/pr-comment-data-export/action.yml @@ -0,0 +1,37 @@ +name: 'Export data for PR comment' +description: 'Exports the neccessary data to post a PR comment securely.' + +# This action might be running off of a fork and would thus not have write +# permissions on the origin repository. In order to allow a separate +# priviledged action to post a comment on a pull request, upload the +# necessary metadata. + +inputs: + name: + description: 'A unique name for the artifact used for exporting.' + required: true + contents: + description: 'A filename with a comment (in Markdown) to be added to the PR.' + required: true + log-url: + description: 'A URL to a log to be linked from the PR comment.' 
+ required: false + +runs: + using: composite + steps: + - if: github.event_name == 'pull_request' + shell: bash + run: | + mkdir comment-data + cp "${{ inputs.contents }}" comment-data/contents + echo "${{ inputs.name }}" > comment-data/name + echo "${{ inputs.log-url }}" > comment-data/log-url + echo "${{ github.event.number }}" > comment-data/pr-number + + - if: github.event_name == 'pull_request' + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.name }} + path: comment-data + retention-days: 1 diff --git a/.github/actions/pr-comment/action.yml b/.github/actions/pr-comment/action.yml new file mode 100644 index 0000000000..1e84aa5bb4 --- /dev/null +++ b/.github/actions/pr-comment/action.yml @@ -0,0 +1,37 @@ +name: 'Comment on PR' +description: 'Post a PR comment securely.' + +inputs: + name: + description: 'Artifact name to import comment data from.' + required: true + mode: + description: 'Mode of operation (upsert/recreate/delete).' + default: 'upsert' + token: + description: 'A Github PAT' + required: true + +runs: + using: composite + steps: + - uses: actions/download-artifact@v4 + with: + run-id: ${{ github.event.workflow_run.id }} + name: ${{ inputs.name }} + github-token: ${{ inputs.token }} + + - id: pr-number + shell: bash + run: echo "number=$(cat pr-number)" >> "$GITHUB_OUTPUT" + + - shell: bash + run: | + [ -s log-url ] && echo "" >> contents && echo "[:arrow_down: Download logs]($(cat log-url))" >> contents + + - uses: thollander/actions-comment-pull-request@v2 + with: + filePath: contents + mode: ${{ inputs.mode }} + pr_number: ${{ steps.pr-number.outputs.number }} + comment_tag: ${{ inputs.name }}-comment diff --git a/.github/actions/quic-interop-runner/action.yml b/.github/actions/quic-interop-runner/action.yml new file mode 100644 index 0000000000..cdc617d275 --- /dev/null +++ b/.github/actions/quic-interop-runner/action.yml @@ -0,0 +1,111 @@ +name: 'QUIC Interop Runner Action' +description: 'Run the QUIC Interop Runner tests.' 
+author: 'mxinden' + +inputs: + name: + description: 'Name of the QUIC implementation' + required: true + image: + description: 'Docker image to be tested. Needs to reside either locally, or on some registry.' + required: true + url: + description: 'URL of the QUIC implementation' + required: true + role: + description: 'client/server/both' + required: false + default: 'both' + client: + description: 'client implementations (comma-separated)' + required: false + default: '' + server: + description: 'server implementations (comma-separated)' + required: false + default: '' + test: + description: 'test cases (comma-separatated)' + required: false + default: '' + +runs: + using: "composite" + steps: + - name: Checkout quic-interop/quic-interop-runner repository + uses: actions/checkout@v4 + with: + repository: 'quic-interop/quic-interop-runner' + path: 'quic-interop-runner' + + - name: Enable IPv6 support + run: sudo modprobe ip6table_filter + shell: bash + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:wireshark-dev/stable + sudo apt-get update + sudo apt-get install -y wireshark tshark jq + shell: bash + + - uses: actions/setup-python@v5 + with: + python-version: 3.8 + cache: 'pip' + cache-dependency-path: 'quic-interop-runner/requirements.txt' + + - name: Install Python packages + run: | + cd quic-interop-runner + pip install -U pip + pip install -r requirements.txt + shell: bash + + - name: Run tests + id: test-run + run: | + cd quic-interop-runner + jq --arg key "${{ inputs.name }}" --argjson newEntry '{"image": "${{ inputs.image }}", "url": "${{ inputs.url }}", "role": "${{ inputs.role }}"}' '.[$key] = $newEntry' implementations.json > temp.$$ && mv temp.$$ implementations.json + cat implementations.json + ARGS="--log-dir logs --markdown --must-include ${{ inputs.name }}" + if [ -n "${{ inputs.client }}" ]; then + ARGS="$ARGS --client ${{ inputs.client }}" + fi + if [ -n "${{ inputs.server }}" ]; then + ARGS="$ARGS --server ${{ 
inputs.server }}" + fi + if [ -n "${{ inputs.test }}" ]; then + ARGS="$ARGS --test ${{ inputs.test }}" + fi + python run.py $ARGS 2>&1 | tee summary + shell: bash + + - uses: actions/upload-artifact@v4 + id: artifact-upload-step + if: always() + with: + name: logs + path: quic-interop-runner/logs + + - name: Format GitHub comment + if: always() + run: | + echo '[**QUIC Interop Runner**](https://github.com/quic-interop/quic-interop-runner)' >> comment + echo '' >> comment + # Ignore all, but table, which starts with "|". Also reformat it to GitHub Markdown. + grep -E '^\|' quic-interop-runner/summary |\ + awk '(!/^\| *:-/ || (d++ && d < 3))' |\ + sed -E -e 's/✓/:white_check_mark:/gi' -e 's/✕/:x:/gi' -e 's/\?/:grey_question:/gi' \ + >> comment + echo '' >> comment + echo "EXPORT_COMMENT=1" >> "$GITHUB_ENV" + shell: bash + + - name: Export PR comment data + if: always() + uses: ./.github/actions/pr-comment-data-export + with: + name: qns + contents: comment + log-url: ${{ steps.artifact-upload-step.outputs.artifact-url }} diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml new file mode 100644 index 0000000000..4b03b37b8d --- /dev/null +++ b/.github/actions/rust/action.yml @@ -0,0 +1,58 @@ +name: Install Rust +description: Install Rust and tools + +inputs: + version: + description: 'Rust toolchain version to install' + default: 'stable' + components: + description: 'Rust components to install' + default: '' + +runs: + using: composite + steps: + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ inputs.version }} + components: ${{ inputs.components }} + + - name: Install cargo-binstall (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: Set-ExecutionPolicy Unrestricted -Scope Process; iex (iwr "https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.ps1").Content + + - name: Install cargo-binstall (Linux & MacOS) + if: runner.os != 'Windows' + shell: bash + run: 
curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + + - name: Install Rust tools + shell: bash + run: cargo +${{ inputs.version }} binstall --no-confirm cargo-llvm-cov cargo-nextest flamegraph cargo-hack cargo-mutants hyperfine + + # sccache slows CI down, so we leave it disabled. + # Leaving the steps below commented out, so we can re-evaluate enabling it later. + # - name: Use sccache + # uses: mozilla-actions/sccache-action@v0.0.4 + + # - name: Enable sscache + # shell: bash + # run: | + # if [ "${{ runner.os }}" = "Windows" ]; then + # echo "CC=sccache cl" >> "$GITHUB_ENV" + # echo "CXX=sccache cl" >> "$GITHUB_ENV" + # else + # echo "CC=sccache cc" >> "$GITHUB_ENV" + # echo "CXX=sccache c++" >> "$GITHUB_ENV" + # fi + # echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + # echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + # echo "CARGO_INCREMENTAL=0" >> "$GITHUB_ENV" + + # Ditto for rust-cache. 
+ # - name: Use Rust cache + # uses: Swatinem/rust-cache@v2 + # with: + # cache-all-crates: "true" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..dbbb80c154 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,17 @@ +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "weekly" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + - package-ecosystem: "docker" + directory: "/qns" + schedule: + interval: "weekly" diff --git a/.github/semantic.yml b/.github/semantic.yml new file mode 100644 index 0000000000..be3439f6b9 --- /dev/null +++ b/.github/semantic.yml @@ -0,0 +1,3 @@ +enabled: true +titleOnly: true +targetUrl: "https://www.conventionalcommits.org/en/v1.0.0/#summary" diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml new file mode 100644 index 0000000000..c0e6de01c0 --- /dev/null +++ b/.github/workflows/actionlint.yml @@ -0,0 +1,30 @@ +name: Lint GitHub Actions workflows +on: + push: + branches: ["main"] + paths: [".github/**"] + pull_request: + branches: ["main"] + paths: [".github/**"] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +jobs: + actionlint: + runs-on: ubuntu-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v4 + - name: Download actionlint + id: get_actionlint + run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) + - name: Check workflow files + run: | + echo "::add-matcher::.github/actionlint-matcher.json" + ${{ steps.get_actionlint.outputs.executable }} -color + diff --git a/.github/workflows/bench-comment.yml b/.github/workflows/bench-comment.yml new file mode 100644 
index 0000000000..f89d223059 --- /dev/null +++ b/.github/workflows/bench-comment.yml @@ -0,0 +1,28 @@ +# Post test results as pull request comment. +# +# This is done as a separate workflow as it requires write permissions. The +# tests itself might run off of a fork, i.e., an untrusted environment and should +# thus not be granted write permissions. + +name: Benchmark Comment + +on: + workflow_run: + workflows: ["CI"] + types: + - completed + +jobs: + comment: + permissions: + pull-requests: write + runs-on: ubuntu-latest + if: | + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' + steps: + - uses: actions/checkout@v4 + - uses: ./.github/actions/pr-comment + with: + name: bench + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml new file mode 100644 index 0000000000..5df8bcfd91 --- /dev/null +++ b/.github/workflows/bench.yml @@ -0,0 +1,247 @@ +name: Bench +on: + workflow_call: + workflow_dispatch: + schedule: + # Run at 1 AM each day, so there is a `main`-branch baseline in the cache. 
+ - cron: '0 1 * * *' +env: + CARGO_PROFILE_BENCH_BUILD_OVERRIDE_DEBUG: true + CARGO_PROFILE_RELEASE_DEBUG: true + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + TOOLCHAIN: nightly + RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment, -C force-frame-pointers=yes + PERF_OPT: record -F997 --call-graph fp -g + +jobs: + bench: + name: Benchmark + runs-on: self-hosted + defaults: + run: + shell: bash + + steps: + - name: Checkout neqo + uses: actions/checkout@v4 + + - name: Checkout msquic + uses: actions/checkout@v4 + with: + repository: microsoft/msquic + ref: main + path: msquic + submodules: true + + - name: Set PATH + run: echo "/home/bench/.cargo/bin" >> "${GITHUB_PATH}" + + - name: Install Rust + uses: ./.github/actions/rust + with: + version: $TOOLCHAIN + components: rustfmt + + - name: Fetch and build NSS and NSPR + uses: ./.github/actions/nss + + - name: Build neqo + run: | + cargo "+$TOOLCHAIN" bench --features bench --no-run + cargo "+$TOOLCHAIN" build --release + + - name: Build msquic + run: | + mkdir -p msquic/build + cd msquic/build + cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DQUIC_BUILD_TOOLS=1 -DQUIC_BUILD_PERF=1 .. + cmake --build . + + - name: Download cached main-branch results + id: criterion-cache + uses: actions/cache/restore@v4 + with: + path: ./target/criterion + key: criterion-${{ runner.name }}-${{ github.sha }} + restore-keys: criterion-${{ runner.name }}- + + # Disable turboboost, hyperthreading and use performance governor. + - name: Prepare machine + run: sudo /root/bin/prep.sh + + - name: Run cargo bench + run: | + # Pin all but neqo-bin benchmarks to CPU 0. neqo-bin benchmarks run + # both a client and a server, thus benefiting from multiple CPU cores. + # + # Run all benchmarks at elevated priority. 
+ taskset -c 0 nice -n -20 cargo "+$TOOLCHAIN" bench --workspace --exclude neqo-bin --features bench -- --noplot | tee results.txt + nice -n -20 cargo "+$TOOLCHAIN" bench --package neqo-bin --features bench -- --noplot | tee -a results.txt + + + # Compare various configurations of neqo against msquic, and gather perf data + # during the hyperfine runs. + - name: Compare neqo and msquic + env: + HOST: 127.0.0.1 + PORT: 4433 + SIZE: 134217728 # 128 MB + run: | + TMP=$(mktemp -d) + # Make a cert and key for msquic. + openssl req -nodes -new -x509 -keyout "$TMP/key" -out "$TMP/cert" -subj "/CN=DOMAIN" 2>/dev/null + # Make a test file for msquic to serve. + truncate -s "$SIZE" "$TMP/$SIZE" + # Define the commands to run for each client and server. + declare -A client_cmd=( + ["neqo"]="target/release/neqo-client _cc _pacing --output-dir . -o -a hq-interop -Q 1 https://$HOST:$PORT/$SIZE" + ["msquic"]="msquic/build/bin/Release/quicinterop -test:D -custom:$HOST -port:$PORT -urls:https://$HOST:$PORT/$SIZE" + ) + declare -A server_cmd=( + ["neqo"]="target/release/neqo-server _cc _pacing -o -a hq-interop -Q 1 $HOST:$PORT 2> /dev/null" + ["msquic"]="msquic/build/bin/Release/quicinteropserver -root:$TMP -listen:$HOST -port:$PORT -file:$TMP/cert -key:$TMP/key -noexit > /dev/null || true" + ) + + # Replace various placeholders in the commands with the actual values. + # Also generate an extension to append to the file name. + function transmogrify { + CMD=$1 + local cc=$2 + local pacing=$3 + if [ "$cc" != "" ]; then + CMD=${CMD//_cc/--cc $cc} + EXT="-$cc" + fi + if [ "$pacing" == "on" ]; then + CMD=${CMD//_pacing/} + EXT="$EXT-pacing" + else + CMD=${CMD//_pacing/--no-pacing} + EXT="$EXT-nopacing" + fi + } + + for server in msquic neqo; do + for client in msquic neqo; do + # msquic doesn't let us configure the congestion control or pacing. 
+ if [ "$client" == "msquic" ] && [ "$server" == "msquic" ]; then + cc_opt=("") + pacing_opt=("") + else + cc_opt=("reno" "cubic") + pacing_opt=("on" "") + fi + for cc in "${cc_opt[@]}"; do + for pacing in "${pacing_opt[@]}"; do + # Make a tag string for this test, for the results. + TAG="$client,$server,$cc,$pacing" + echo "Running benchmarks for $TAG" | tee -a comparison.txt + transmogrify "${server_cmd[$server]}" "$cc" "$pacing" + # shellcheck disable=SC2086 + taskset -c 0 nice -n -20 \ + perf $PERF_OPT -o "$client-$server$EXT.server.perf" $CMD & + PID=$! + transmogrify "${client_cmd[$client]}" "$cc" "$pacing" + # shellcheck disable=SC2086 + taskset -c 1 nice -n -20 \ + perf $PERF_OPT -o "$client-$server$EXT.client.perf" \ + hyperfine -N --output null -w 1 -s "sleep 1" -n "$TAG" -u millisecond --export-markdown step.md "$CMD" | + tee -a comparison.txt + echo >> comparison.txt + kill $PID + cat step.md >> steps.md + # Sanity check the size of the last retrieved file. + [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 + done + done + done + done + # Merge the results tables generated by hyperfine into a single table. + echo "Transfer of $SIZE bytes over loopback." > comparison.md + awk '(!/^\| Command/ || !c++) && (!/^\|:/ || !d++)' < steps.md |\ + sed -E 's/`//g; s/^\|:/\|:---\|:---\|:---\|:/g; s/,/ \| /g; s/^\| Command/\| Client \| Server \| CC \| Pacing/g' >> comparison.md + rm -r "$TMP" + + # Re-enable turboboost, hyperthreading and use powersave governor. 
+ - name: Restore machine + run: sudo /root/bin/unprep.sh + if: success() || failure() || cancelled() + + - name: Post-process perf data + run: | + for f in *.perf; do + # Convert for profiler.firefox.com + perf script -i "$f" -F +pid > "$f.fx" & + # Generate perf reports + perf report -i "$f" --no-children --stdio > "$f.txt" & + # Generate flamegraphs + flamegraph --perfdata "$f" --palette rust -o "${f//.perf/.svg}" & + done + wait + rm neqo.svg + + - name: Format results as Markdown + id: results + run: | + { + echo "### Benchmark results" + echo + } > results.md + SHA=$(cat target/criterion/baseline-sha.txt || true) + if [ -n "$SHA" ]; then + { + echo "Performance differences relative to $SHA." + echo + } >> results.md + fi + grep -Ev 'ignored|running \d+ tests|%\)' results.txt |\ + sed -E -e 's/(Performance has regressed.)/:broken_heart: **\1**/gi' \ + -e 's/(Performance has improved.)/:green_heart: **\1**/gi' \ + -e 's/^ +/ /gi' \ + -e 's/^([a-z0-9].*)$/* **\1**/gi' \ + -e 's/(change:[^%]*% )([^%]*%)(.*)/\1**\2**\3/gi' \ + >> results.md + { + echo "### Client/server transfer results" + cat comparison.md + } >> results.md + cat results.md > "$GITHUB_STEP_SUMMARY" + + - name: Remember main-branch push URL + if: github.ref == 'refs/heads/main' + run: echo "${{ github.sha }}" > target/criterion/baseline-sha.txt + + - name: Store history + if: github.ref == 'refs/heads/main' + run: | + mkdir -p target/criterion-history + cp -r target/criterion "target/criterion-history/$(date +%s)-${{ github.sha }}" + + - name: Cache main-branch results + if: github.ref == 'refs/heads/main' + uses: actions/cache/save@v4 + with: + path: ./target/criterion + key: criterion-${{ runner.name }}-${{ github.sha }} + + - name: Export perf data + id: export + uses: actions/upload-artifact@v4 + with: + name: ${{ github.event.repository.name }}-${{ github.sha }} + path: | + *.svg + *.perf + *.perf.fx + *.txt + results.* + target/criterion* + compression-level: 9 + + - name: Export PR comment 
data + uses: ./.github/actions/pr-comment-data-export + with: + name: bench + contents: results.md + log-url: ${{ steps.export.outputs.artifact-url }} diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index bc545a6a91..4e47961d8e 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -1,105 +1,159 @@ name: CI -on: [push, pull_request] +on: + push: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: env: CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true jobs: check: - name: Neqo Build and Test - runs-on: ubuntu-22.04 - env: - RUSTFLAGS: -C link-arg=-fuse-ld=lld + name: Build & test strategy: + fail-fast: false matrix: - rust-toolchain: [stable, 1.65.0] + os: [ubuntu-latest, macos-14, windows-latest] + # Don't increase beyond what Firefox is currently using: + # https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule + rust-toolchain: [1.74.0, stable, nightly] + type: [debug] + include: + - os: ubuntu-latest + rust-toolchain: stable + type: release + env: + BUILD_TYPE: ${{ matrix.type == 'release' && '--release' || '' }} + runs-on: ${{ matrix.os }} + defaults: + run: + shell: bash steps: - - name: Install Packages + - name: Checkout + uses: actions/checkout@v4 + + - name: Install dependencies (Linux) + if: runner.os == 'Linux' env: DEBIAN_FRONTEND: noninteractive - llvm_version: 14 run: | - sudo apt-get update - sudo apt-get install -y \ - build-essential \ - clang \ - coreutils \ - curl \ - git \ - gyp \ - libclang-dev \ - libssl-dev \ - lld \ - llvm-"$llvm_version" \ - llvm-"$llvm_version"-runtime \ - make \ - mercurial \ - ninja-build \ - pkg-config \ - ssh \ - zlib1g-dev \ - --no-install-recommends - [ -d "/usr/lib/llvm-$llvm_version/lib" ] - echo 
"LIBCLANG_DIR=/usr/lib/llvm-$llvm_version/lib" >> "$GITHUB_ENV" - - # Rust installation cribbed from Dockerfiles at https://github.com/rust-lang/docker-rust - - name: Install Rust + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + + - name: Install dependencies (MacOS) + if: runner.os == 'MacOS' run: | - export RUSTUP_HOME=~/.rustup - export CARGO_HOME=~/.cargo - rustup_dir=`mktemp -d /tmp/rustup-XXXXX` - mkdir -p "$rustup_dir" - rustup_init="$rustup_dir/rustup-init" - rustup_url="https://static.rust-lang.org/rustup/archive/$rustup_version/$rustup_host/rustup-init" - curl -SsLf "$rustup_url" -o "$rustup_init" - echo "${rustup_hash} $rustup_init" | sha256sum -c - - chmod +x "$rustup_init" - "$rustup_init" -y -q --no-modify-path --profile minimal \ - --default-toolchain ${{ matrix.rust-toolchain }} \ - --default-host "$rustup_host" \ - --component clippy --component rustfmt - rm -rf "$rustup_dir" - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" - "$CARGO_HOME/bin/rustup" default ${{ matrix.rust-toolchain }} - echo "RUSTUP_HOME=$RUSTUP_HOME" >> "$GITHUB_ENV" - echo "CARGO_HOME=$CARGO_HOME" >> "$GITHUB_ENV" - echo "$CARGO_HOME/bin" >> "$GITHUB_PATH" - env: - rustup_version: 1.26.0 - rustup_host: x86_64-unknown-linux-gnu - rustup_hash: 0b2f6c8f85a3d02fde2efc0ced4657869d73fccfce59defb4e8d29233116e6db + brew update + brew install llvm nss + echo "/opt/homebrew/opt/llvm/bin" >> "$GITHUB_PATH" + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - - name: Checkout - uses: actions/checkout@v3 + - name: Install dependencies (Windows) + if: runner.os == 'Windows' + run: | + # shellcheck disable=SC2028 + { + echo C:/msys64/usr/bin + echo C:/msys64/mingw64/bin + } >> "$GITHUB_PATH" + /c/msys64/usr/bin/pacman -S --noconfirm nsinstall + python3 -m pip install git+https://github.com/nodejs/gyp-next + echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" - # This step might be 
removed if the distro included a recent enough - # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. - # (neqo-crypto/build.rs would also need to query pkg-config to get the - # right build flags rather than building NSS.) - - name: Fetch NSS and NSPR + - name: Set up MSVC build environment (Windows) + if: runner.os == 'Windows' + uses: ilammy/msvc-dev-cmd@v1 + + - name: Set up NSS/NSPR build environment (Windows) + if: runner.os == 'Windows' run: | - hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" - git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" - echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" - echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" - env: - NSS_DIR: ${{ github.workspace }}/nss - NSPR_DIR: ${{ github.workspace }}/nspr + { + echo "GYP_MSVS_OVERRIDE_PATH=$VSINSTALLDIR" + echo "GYP_MSVS_VERSION=2022" + echo "BASH=$SHELL" + } >> "$GITHUB_ENV" + # See https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash + rm /usr/bin/link.exe + + - name: Install Rust + uses: ./.github/actions/rust + with: + version: ${{ matrix.rust-toolchain }} + components: rustfmt, clippy, llvm-tools-preview + + - name: Fetch and build NSS and NSPR + uses: ./.github/actions/nss - name: Build - run: cargo +${{ matrix.rust-toolchain }} build -v --tests + run: | + # shellcheck disable=SC2086 + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci - - name: Run Tests - run: cargo +${{ matrix.rust-toolchain }} test -v + - name: Run tests and determine coverage + run: | + # shellcheck disable=SC2086 + cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --features ci --no-fail-fast --lcov --output-path lcov.info + cargo +${{ matrix.rust-toolchain }} bench --features bench --no-run + + - name: Run client/server transfer + run: | + # shellcheck disable=SC2086 + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server + "target/$BUILD_DIR/neqo-server" "$HOST:4433" & + PID=$! 
+ # Give the server time to start. + sleep 1 + "target/$BUILD_DIR/neqo-client" --output-dir . "https://$HOST:4433/$SIZE" + kill $PID + [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: - LD_LIBRARY_PATH: ${{ github.workspace }}/dist/Debug/lib - RUST_BACKTRACE: 1 - RUST_LOG: neqo=debug + HOST: localhost + SIZE: 54321 + RUST_LOG: warn + BUILD_DIR: ${{ matrix.type == 'release' && 'release' || 'debug' }} - name: Check formatting - run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check - if: ${{ success() || failure() }} + run: | + if [ "${{ matrix.rust-toolchain }}" != "nightly" ]; then + CONFIG_PATH="--config-path=$(mktemp)" + fi + # shellcheck disable=SC2086 + cargo +${{ matrix.rust-toolchain }} fmt --all -- --check $CONFIG_PATH + if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy -v --tests -- -D warnings - if: ${{ success() || failure() }} + run: | + # Use cargo-hack to run clippy on each crate individually with its + # respective default features only. Can reveal warnings otherwise + # hidden given that a plain cargo clippy combines all features of the + # workspace. See e.g. https://github.com/mozilla/neqo/pull/1695. 
+ cargo +${{ matrix.rust-toolchain }} hack clippy --all-targets --feature-powerset --exclude-features gecko -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} + if: success() || failure() + + - name: Check rustdoc links + run: cargo +${{ matrix.rust-toolchain }} doc --workspace --no-deps --document-private-items + env: + RUSTDOCFLAGS: "--deny rustdoc::broken_intra_doc_links --deny warnings" + if: success() || failure() + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 + with: + file: lcov.info + fail_ci_if_error: false + token: ${{ secrets.CODECOV_TOKEN }} + if: matrix.type == 'debug' && matrix.rust-toolchain == 'stable' + + bench: + name: "Benchmark" + needs: [check] + uses: ./.github/workflows/bench.yml diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml new file mode 100644 index 0000000000..4db2e4b925 --- /dev/null +++ b/.github/workflows/mutants.yml @@ -0,0 +1,68 @@ +name: Find mutants +on: + schedule: + - cron: '42 3 * * 2,5' # Runs at 03:42 UTC (m and h chosen arbitrarily) twice a week. + workflow_dispatch: + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +jobs: + mutants: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive + run: | + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" + + - name: Fetch and build NSS and NSPR + uses: ./.github/actions/nss + + - name: Install Rust + uses: ./.github/actions/rust + with: + version: stable + + - name: Find incremental mutants + if: github.event_name == 'pull_request' + run: | + git diff origin/${{ github.base_ref }}.. 
> pr.diff + set -o pipefail + cargo mutants --test-tool=nextest --no-shuffle -j 2 -vV --in-diff pr.diff | tee results.txt || true + echo 'TITLE=Incremental Mutants' >> "$GITHUB_ENV" + + - name: Find mutants + if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' + run: | + set -o pipefail + cargo mutants --test-tool=nextest -vV --in-place | tee results.txt || true + echo 'TITLE=All Mutants' >> "$GITHUB_ENV" + + - name: Post step summary + if: always() + run: | + { + echo "### $TITLE" + echo "See https://mutants.rs/using-results.html for more information." + echo '```' + sed 's/\x1b\[[0-9;]*[mGKHF]//g' results.txt + echo '```' + } > "$GITHUB_STEP_SUMMARY" + + - name: Archive mutants.out + uses: actions/upload-artifact@v4 + if: always() + with: + name: mutants.out + path: mutants.out diff --git a/.github/workflows/qns-comment.yml b/.github/workflows/qns-comment.yml new file mode 100644 index 0000000000..db9f74f7bf --- /dev/null +++ b/.github/workflows/qns-comment.yml @@ -0,0 +1,28 @@ +# Post test results as pull request comment. +# +# This is done as a separate workflow as it requires write permissions. The +# tests itself might run off of a fork, i.e., an untrusted environment and should +# thus not be granted write permissions. 
+ +name: QUIC Network Simulator Comment + +on: + workflow_run: + workflows: ["QUIC Network Simulator"] + types: + - completed + +jobs: + comment: + permissions: + pull-requests: write + runs-on: ubuntu-latest + if: | + github.event.workflow_run.event == 'pull_request' + steps: + - uses: actions/checkout@v4 + - uses: ./.github/actions/pr-comment + with: + name: qns + mode: ${{ github.event.workflow_run.conclusion == 'success' && 'delete' || 'upsert' }} + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml new file mode 100644 index 0000000000..17cd584a26 --- /dev/null +++ b/.github/workflows/qns.yml @@ -0,0 +1,76 @@ +name: QUIC Network Simulator + +on: + schedule: + - cron: '42 3 * * 2,5' # Runs at 03:42 UTC (m and h chosen arbitrarily) twice a week. + workflow_dispatch: + pull_request: + branches: ["main"] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +jobs: + quic-network-simulator: + runs-on: ubuntu-latest + permissions: + packages: write + steps: + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository }}-qns + tags: | + # default + type=schedule + type=ref,event=branch + type=ref,event=tag + type=ref,event=pr + # set latest tag for default branch + type=raw,value=latest,enable={{is_default_branch}} + + - name: Build and push + id: docker_build_and_push + uses: docker/build-push-action@v5 + with: + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + file: qns/Dockerfile + build-args: | + RUST_VERSION=stable + cache-from: type=gha + cache-to: 
type=gha,mode=max + # On pull requests only build amd64 for the sake of CI time. + platforms: ${{ github.event_name == 'pull_request' && 'linux/amd64' || 'linux/amd64, linux/arm64' }} + load: ${{ github.event_name == 'pull_request' }} + + - name: Checkout + uses: actions/checkout@v4 + + - name: Run QUIC Interop tests + if: ${{ github.event_name == 'pull_request' }} + # TODO: Replace once https://github.com/quic-interop/quic-interop-runner/pull/356 is merged. + uses: ./.github/actions/quic-interop-runner + with: + name: 'neqo-latest' + image: ${{ steps.docker_build_and_push.outputs.imageID }} + url: https://github.com/mozilla/neqo + test: handshake,keyupdate + client: neqo-latest,quic-go,ngtcp2,neqo,msquic + server: neqo-latest,quic-go,ngtcp2,neqo,msquic diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 0000000000..651a30be01 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,73 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + workflow_dispatch: +# # For Branch-Protection check. Only the default branch is supported. See +# # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection +# branch_protection_rule: +# # To guarantee Maintained check is occasionally updated. See +# # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained +# schedule: +# - cron: '26 8 * * 6' +# push: +# branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). 
+ id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: false + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. 
+ - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 + with: + sarif_file: results.sarif diff --git a/.gitignore b/.gitignore index 41867fa4e8..542ed5205d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,7 @@ -/target/ -**/*.rs.bk -Cargo.lock -/db +.DS_Store +*.qlog *~ /.vscode/ -.idea -*.qlog -*.swp -/qns/.last-update-* +/lcov.info +/target/ +Cargo.lock diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000000..60a3e6946a --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1,14 @@ +# Keep in sync with `Cargo.toml` `edition`. +# +# `rustfmt` envoked not through `cargo fmt` but directly does not pick up Rust +# edition in `Cargo.toml`. Thus duplicate here. See +# https://github.com/mozilla/neqo/pull/1722 for details. +edition = "2021" + +comment_width=100 +wrap_comments=true + +imports_granularity="Crate" +group_imports="StdExternalCrate" + +format_code_in_doc_comments=true diff --git a/Cargo.toml b/Cargo.toml index b3449d4a5e..cddc19c190 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,12 +1,34 @@ [workspace] members = [ - "neqo-client", + "neqo-bin", "neqo-common", "neqo-crypto", "neqo-http3", - "neqo-server", "neqo-qpack", "neqo-transport", - "neqo-interop", "test-fixture", ] +resolver = "2" + +[workspace.package] +homepage = "https://github.com/mozilla/neqo/" +repository = "https://github.com/mozilla/neqo/" +authors = ["The Neqo Authors "] +version = "0.7.5" +# Keep in sync with `.rustfmt.toml` `edition`. 
+edition = "2021" +license = "MIT OR Apache-2.0" +# Don't increase beyond what Firefox is currently using: +# https://firefox-source-docs.mozilla.org/writing-rust-code/update-policy.html#schedule +rust-version = "1.74.0" + +[workspace.lints.clippy] +pedantic = { level = "warn", priority = -1 } + +[profile.release] +lto = "fat" + +[profile.bench] +# Inherits from the "release" profile, so just provide overrides here: +# https://doc.rust-lang.org/cargo/reference/profiles.html#release +debug = true diff --git a/README.md b/README.md index 86beb7e230..beadf22ecf 100644 --- a/README.md +++ b/README.md @@ -1,78 +1,102 @@ -# Neqo, an Implementation of QUIC written in Rust +# Neqo, an Implementation of QUIC in Rust ![neqo logo](https://github.com/mozilla/neqo/raw/main/neqo.png "neqo logo") -To run test HTTP/3 programs (neqo-client and neqo-server): +To build Neqo: -* `cargo build` -* `./target/debug/neqo-server [::]:12345 --db ./test-fixture/db` -* `./target/debug/neqo-client http://127.0.0.1:12345/` - -If a "Failure to load dynamic library" error happens at runtime, do +```shell +cargo build ``` -export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" + +This will use a system-installed [NSS][NSS] library if it is new enough. (See "Build with Separate NSS/NSPR" below if NSS is not installed or it is deemed too old.) + +To run test HTTP/3 programs (`neqo-client` and `neqo-server`): + +```shell +./target/debug/neqo-server '[::]:12345' +./target/debug/neqo-client 'https://[::]:12345/' ``` -On a mac, use `DYLD_LIBRARY_PATH` instead. -## Faster Builds with Separate NSS/NSPR +## Build with separate NSS/NSPR -You can clone NSS (https://hg.mozilla.org/projects/nss) and NSPR -(https://hg.mozilla.org/projects/nspr) into the same directory and export an +You can clone [NSS][NSS] and [NSPR][NSPR] into the same directory and export an environment variable called `NSS_DIR` pointing to NSS. This causes the build to use the existing NSS checkout. 
However, in order to run anything that depends -on NSS, you need to set `$\[DY]LD\_LIBRARY\_PATH` to point to -`$NSS_DIR/../dist/Debug/lib`. +on NSS, you need to set an environment as follows: + +### Linux + +```shell +export LD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.so -print | head -1)")" +``` + +### macOS + +```shell +export DYLD_LIBRARY_PATH="$(dirname "$(find . -name libssl3.dylib -print | head -1)")" +``` -Note: If you did not compile NSS separately, you need to have mercurial (hg), installed. -NSS builds require gyp, and ninja (or ninja-build) to be present also. +Note: If you did not already compile NSS separately, you need to have +[Mercurial (hg)][HG], installed. NSS builds require [GYP][GYP] and +[Ninja][NINJA] to be installed. ## Debugging Neqo -### QUIC Logging +### QUIC logging -Enable [QLOG](https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/) with: +Enable generation of [QLOG][QLOG] logs with: +```shell +target/debug/neqo-server '[::]:12345' --qlog-dir . +target/debug/neqo-client 'https://[::]:12345/' --qlog-dir . ``` -$ mkdir "$logdir" -$ ./target/debug/neqo-server '[::]:12345' --db ./test-fixture/db --qlog-dir "$logdir" -$ ./target/debug/neqo-client 'https://[::]:12345/' --qlog-dir "$logdir" -``` - -You may use https://qvis.quictools.info/ by uploading the QLOG files and visualize the flows. -### Using SSLKEYLOGFILE to decrypt Wireshark logs +You can of course specify a different directory for the QLOG files. +You can upload QLOG files to [qvis][QVIS] to visualize the flows. -[Info here](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format) +### Using `SSLKEYLOGFILE` to decrypt Wireshark logs -TODO: What is the minimum Wireshark version needed? -TODO: Above link may be incorrect, protocol now called TLS instead of SSL? 
+You can export TLS keys by setting the `SSLKEYLOGFILE` environment variable +to a filename to instruct NSS to dump keys in the +[standard format](https://datatracker.ietf.org/doc/draft-ietf-tls-keylogfile/) +to enable decryption by [Wireshark](https://wiki.wireshark.org/TLS) and other tools. ### Using RUST_LOG effectively As documented in the [env_logger documentation](https://docs.rs/env_logger/), the `RUST_LOG` environment variable can be used to selectively enable log messages -from Rust code. This works for Neqo's cmdline tools, as well as for when Neqo is +from Rust code. This works for Neqo's command line tools, as well as for when Neqo is incorporated into Gecko, although [Gecko needs to be built in debug mode](https://developer.mozilla.org/en-US/docs/Mozilla/Developer_guide/Build_Instructions/Configuring_Build_Options). Some examples: -1. `RUST_LOG=neqo_transport::dump ./mach run` lists sent and received QUIC - packets and their frames' contents only. -1. `RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run` sets a - 'debug' log level for transport, 'trace' level for http3, and 'info' log + +1. ```shell + RUST_LOG=neqo_transport::dump ./mach run + ``` + + lists sent and received QUIC packets and their frames' contents only. + +1. ```shell + RUST_LOG=neqo_transport=debug,neqo_http3=trace,info ./mach run + ``` + + sets a `debug` log level for `transport`, `trace` level for `http3`, and `info` log level for all other Rust crates, both Neqo and others used by Gecko. -1. `RUST_LOG=neqo=trace,error ./mach run` sets `trace` level for all modules - starting with "neqo", and sets `error` as minimum log level for other - unrelated Rust log messages. +1. ```shell + RUST_LOG=neqo=trace,error ./mach run + ``` -### Trying In-development Neqo code in Gecko + sets `trace` level for all modules starting with `neqo`, and sets `error` as minimum log level for other unrelated Rust log messages. 
+ +### Trying in-development Neqo code in Gecko In a checked-out copy of Gecko source, set `[patches.*]` values for the four Neqo crates to local versions in the root `Cargo.toml`. For example, if Neqo was checked out to `/home/alice/git/neqo`, add the following lines to the root `Cargo.toml`. -``` +```toml [patch."https://github.com/mozilla/neqo"] neqo-http3 = { path = "/home/alice/git/neqo/neqo-http3" } neqo-transport = { path = "/home/alice/git/neqo/neqo-transport" } @@ -83,11 +107,23 @@ neqo-crypto = { path = "/home/alice/git/neqo/neqo-crypto" } Then run the following: -``` +```shell ./mach vendor rust ``` -Compile Gecko as usual with `./mach build`. +Compile Gecko as usual with + +```shell +./mach build +``` Note: Using newer Neqo code with Gecko may also require changes (likely to `neqo_glue`) if something has changed. + +[NSS]: https://hg.mozilla.org/projects/nss +[NSPR]: https://hg.mozilla.org/projects/nspr +[GYP]: https://github.com/nodejs/gyp-next +[HG]: https://www.mercurial-scm.org/ +[NINJA]: https://ninja-build.org/ +[QLOG]: https://datatracker.ietf.org/doc/draft-ietf-quic-qlog-main-schema/ +[QVIS]: https://qvis.quictools.info/ diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..5b70d7ba3b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,31 @@ +# Security Policy + +This document describes how security vulnerabilities in this project should be reported. + +## Supported Versions + +Support for neqo is based on the Firefox version in which it has landed. +Versions of neqo in [current versions of Firefox](https://whattrainisitnow.com/calendar/) are actively supported. 
+ +The version of neqo that is active can be found in the Firefox repositories: + +- [release](https://hg.mozilla.org/mozilla-unified/file/release/third_party/rust/neqo-transport/Cargo.toml), +- [beta](https://hg.mozilla.org/mozilla-unified/file/beta/third_party/rust/neqo-transport/Cargo.toml), and +- [trunk/central](https://hg.mozilla.org/mozilla-unified/file/central/third_party/rust/neqo-transport/Cargo.toml), +- [ESR 115](https://hg.mozilla.org/mozilla-unified/file/esr115/third_party/rust/neqo-transport/Cargo.toml). + +The listed version in these files corresponds to [tags](https://github.com/mozilla/neqo/tags) on this repository. +Releases do not always correspond to a branch. + +We welcome reports of security vulnerabilities in any of these released versions or the latest code on the `main` branch. + +## Reporting a Vulnerability + +To report a security problem with neqo, create a bug in Mozilla's Bugzilla instance in the [Core :: Networking](https://bugzilla.mozilla.org/enter_bug.cgi?product=Core&component=Networking) component. + +**IMPORTANT: For security issues, please make sure that you check the box labelled "Many users could be harmed by this security problem".** +We advise that you check this option for anything that involves anything security-relevant, including memory safety, crashes, race conditions, and handling of confidential information. + +Review Mozilla's [guides on bug reporting](https://bugzilla.mozilla.org/page.cgi?id=bug-writing.html) before you open a bug. + +Mozilla operates a [bug bounty program](https://www.mozilla.org/en-US/security/bug-bounty/), for which this project is eligible. diff --git a/clippy.toml b/clippy.toml deleted file mode 100644 index e034672c76..0000000000 --- a/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -msrv = "1.65.0" diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index e3a7dfbadf..0000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,64 +0,0 @@ -# This image is used for running CI tests. 
-# The image is not built unless the `docker` branch is updated. -# Push to `docker` to trigger a build: -# $ git push origin main:docker - -FROM ubuntu:20.04 -LABEL maintainer="Martin Thomson " - -RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates \ - coreutils \ - curl \ - git \ - make \ - mercurial \ - ssh \ - build-essential \ - clang \ - llvm \ - libclang-dev \ - lld \ - gyp \ - ninja-build \ - pkg-config \ - python-is-python3 \ - python3 \ - python3-pip \ - sudo \ - zlib1g-dev \ - && apt-get autoremove -y && apt-get clean -y \ - && rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH \ - RUST_VERSION=1.51.0 - -RUN set -eux; \ - curl -sSLf "https://static.rust-lang.org/rustup/archive/1.20.2/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ - echo 'e68f193542c68ce83c449809d2cad262cc2bbb99640eb47c58fc1dc58cc30add *rustup-init' | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y -q --no-modify-path --profile minimal --component rustfmt --component clippy --default-toolchain "$RUST_VERSION"; \ - rm -f rustup-init; \ - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" - -ENV USER neqo -ENV LOGNAME $USER -ENV HOSTNAME $USER -ENV HOME /home/$USER -ENV SHELL /bin/bash - -RUN useradd -d "$HOME" -s "$SHELL" -m "$USER" -RUN echo "$USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -WORKDIR $HOME -USER $USER - -ENV NSS_DIR=$HOME/nss \ - NSPR_DIR=$HOME/nspr \ - LD_LIBRARY_PATH=$HOME/dist/Debug/lib \ - RUSTFLAGS="-C link-arg=-fuse-ld=lld" - -RUN set -eux; \ - hg clone -u c7a1c91cd9be https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ - hg clone -u NSPR_4_25_RTM https://hg.mozilla.org/projects/nspr "$NSPR_DIR" diff --git a/hooks/pre-commit b/hooks/pre-commit index 2a6022b3d4..9166f739b3 100755 --- a/hooks/pre-commit +++ b/hooks/pre-commit @@ -32,12 +32,20 @@ if [[ ./neqo-crypto/bindings/bindings.toml -nt ./neqo-crypto/src/lib.rs ]]; then exit 1 fi 
+toolchain=nightly +fmtconfig="$root/.rustfmt.toml" +if ! cargo "+$toolchain" version >/dev/null; then + echo "warning: A rust $toolchain toolchain is recommended to check formatting." + toolchain=stable + fmtconfig=/dev/null +fi + # Check formatting. trap 'git stash pop -q' EXIT git stash push -k -u -q -m "pre-commit stash" -if ! errors=($(cargo fmt -- --check -l)); then +if ! errors=($(cargo "+$toolchain" fmt -- --check -l --config-path="$fmtconfig")); then echo "Formatting errors found." - echo "Run \`cargo fmt\` to fix the following files:" + echo "Run \`cargo fmt +$toolchain\` to fix the following files:" for err in "${errors[@]}"; do echo " $err" done diff --git a/neqo-bin/Cargo.toml b/neqo-bin/Cargo.toml new file mode 100644 index 0000000000..a165a4ac32 --- /dev/null +++ b/neqo-bin/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "neqo-bin" +description = "A basic QUIC HTTP/0.9 and HTTP/3 client and server." +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true + +[[bin]] +name = "neqo-client" +path = "src/bin/client.rs" +bench = false + +[[bin]] +name = "neqo-server" +path = "src/bin/server.rs" +bench = false + +[lints] +workspace = true + +[dependencies] +# neqo-bin is not used in Firefox, so we can be liberal with dependency versions +clap = { version = "4.4", default-features = false, features = ["std", "color", "help", "usage", "error-context", "suggestions", "derive"] } +clap-verbosity-flag = { version = "2.2", default-features = false } +futures = { version = "0.3", default-features = false, features = ["alloc"] } +hex = { version = "0.4", default-features = false, features = ["std"] } +log = { version = "0.4", default-features = false } +neqo-common = { path = "./../neqo-common" } +neqo-crypto = { path = "./../neqo-crypto" } +neqo-http3 = { path = "./../neqo-http3" } +neqo-qpack = { path = "./../neqo-qpack" } 
+neqo-transport = { path = "./../neqo-transport" } +qlog = { version = "0.12", default-features = false } +quinn-udp = { git = "https://github.com/quinn-rs/quinn/", rev = "a947962131aba8a6521253d03cc948b20098a2d6" } +regex = { version = "1.9", default-features = false, features = ["unicode-perl"] } +tokio = { version = "1", default-features = false, features = ["net", "time", "macros", "rt", "rt-multi-thread"] } +url = { version = "2.5", default-features = false } + +[dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports", "async_tokio"] } +tokio = { version = "1", default-features = false, features = ["sync"] } + +[features] +bench = [] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false + +[[bench]] +name = "main" +harness = false +required-features = ["bench"] diff --git a/neqo-bin/benches/main.rs b/neqo-bin/benches/main.rs new file mode 100644 index 0000000000..59927ebe0c --- /dev/null +++ b/neqo-bin/benches/main.rs @@ -0,0 +1,86 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{path::PathBuf, str::FromStr}; + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use neqo_bin::{client, server}; +use tokio::runtime::Runtime; + +struct Benchmark { + name: String, + requests: Vec, + sample_size: Option, +} + +fn transfer(c: &mut Criterion) { + neqo_common::log::init(Some(log::LevelFilter::Off)); + neqo_crypto::init_db(PathBuf::from_str("../test-fixture/db").unwrap()).unwrap(); + + let done_sender = spawn_server(); + + for Benchmark { + name, + requests, + sample_size, + } in [ + Benchmark { + name: "1-conn/1-100mb-resp (aka. 
Download)".to_string(), + requests: vec![100 * 1024 * 1024], + sample_size: Some(10), + }, + Benchmark { + name: "1-conn/10_000-parallel-1b-resp (aka. RPS)".to_string(), + requests: vec![1; 10_000], + sample_size: None, + }, + Benchmark { + name: "1-conn/1-1b-resp (aka. HPS)".to_string(), + requests: vec![1; 1], + sample_size: None, + }, + ] { + let mut group = c.benchmark_group(name); + group.throughput(if requests[0] > 1 { + assert_eq!(requests.len(), 1); + Throughput::Bytes(requests[0]) + } else { + Throughput::Elements(requests.len() as u64) + }); + if let Some(size) = sample_size { + group.sample_size(size); + } + group.bench_function("client", |b| { + b.to_async(Runtime::new().unwrap()).iter_batched( + || client::client(client::Args::new(&requests)), + |client| async move { + client.await.unwrap(); + }, + BatchSize::PerIteration, + ); + }); + group.finish(); + } + + done_sender.send(()).unwrap(); +} + +fn spawn_server() -> tokio::sync::oneshot::Sender<()> { + let (done_sender, mut done_receiver) = tokio::sync::oneshot::channel(); + std::thread::spawn(move || { + Runtime::new().unwrap().block_on(async { + let mut server = Box::pin(server::server(server::Args::default())); + tokio::select! { + _ = &mut done_receiver => {} + _ = &mut server => {} + } + }); + }); + done_sender +} + +criterion_group!(benches, transfer); +criterion_main!(benches); diff --git a/neqo-bin/src/bin/client.rs b/neqo-bin/src/bin/client.rs new file mode 100644 index 0000000000..25c0e8753f --- /dev/null +++ b/neqo-bin/src/bin/client.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), neqo_bin::client::Error> { + let args = neqo_bin::client::Args::parse(); + + neqo_bin::client::client(args).await +} diff --git a/neqo-bin/src/bin/server.rs b/neqo-bin/src/bin/server.rs new file mode 100644 index 0000000000..e9b30261e4 --- /dev/null +++ b/neqo-bin/src/bin/server.rs @@ -0,0 +1,14 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use clap::Parser; + +#[tokio::main] +async fn main() -> Result<(), neqo_bin::server::Error> { + let args = neqo_bin::server::Args::parse(); + + neqo_bin::server::server(args).await +} diff --git a/neqo-bin/src/client/http09.rs b/neqo-bin/src/client/http09.rs new file mode 100644 index 0000000000..a9ed12b157 --- /dev/null +++ b/neqo-bin/src/client/http09.rs @@ -0,0 +1,291 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An [HTTP 0.9](https://www.w3.org/Protocols/HTTP/AsImplemented.html) client implementation. 
+ +use std::{ + cell::RefCell, + collections::{HashMap, VecDeque}, + fs::File, + io::{BufWriter, Write}, + net::SocketAddr, + path::PathBuf, + rc::Rc, + time::Instant, +}; + +use neqo_common::{event::Provider, qdebug, qinfo, qwarn, Datagram}; +use neqo_crypto::{AuthenticationStatus, ResumptionToken}; +use neqo_transport::{ + Connection, ConnectionError, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, + StreamId, StreamType, +}; +use url::Url; + +use super::{get_output_file, qlog_new, Args, Res}; + +pub struct Handler<'a> { + streams: HashMap>>, + url_queue: VecDeque, + all_paths: Vec, + args: &'a Args, + token: Option, + needs_key_update: bool, +} + +impl<'a> super::Handler for Handler<'a> { + type Client = Connection; + + fn handle(&mut self, client: &mut Self::Client) -> Res { + while let Some(event) = client.next_event() { + if self.needs_key_update { + match client.initiate_key_update() { + Ok(()) => { + qdebug!("Keys updated"); + self.needs_key_update = false; + self.download_urls(client); + } + Err(neqo_transport::Error::KeyUpdateBlocked) => (), + Err(e) => return Err(e.into()), + } + } + + match event { + ConnectionEvent::AuthenticationNeeded => { + client.authenticated(AuthenticationStatus::Ok, Instant::now()); + } + ConnectionEvent::RecvStreamReadable { stream_id } => { + self.read(client, stream_id)?; + } + ConnectionEvent::SendStreamWritable { stream_id } => { + qdebug!("stream {stream_id} writable"); + } + ConnectionEvent::SendStreamComplete { stream_id } => { + qdebug!("stream {stream_id} complete"); + } + ConnectionEvent::SendStreamCreatable { stream_type } => { + qdebug!("stream {stream_type:?} creatable"); + if stream_type == StreamType::BiDi { + self.download_urls(client); + } + } + ConnectionEvent::StateChange( + State::WaitInitial | State::Handshaking | State::Connected, + ) => { + qdebug!("{event:?}"); + self.download_urls(client); + } + ConnectionEvent::ResumptionToken(token) => { + self.token = Some(token); + } + _ => { + 
qwarn!("Unhandled event {event:?}"); + } + } + } + + if self.streams.is_empty() && self.url_queue.is_empty() { + // Handler is done. + return Ok(true); + } + + Ok(false) + } + + fn take_token(&mut self) -> Option { + self.token.take() + } + + fn has_token(&self) -> bool { + self.token.is_some() + } +} + +pub(crate) fn create_client( + args: &Args, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + resumption_token: Option, +) -> Res { + let alpn = match args.shared.alpn.as_str() { + "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.shared.alpn.as_str(), + _ => "hq-interop", + }; + + let mut client = Connection::new_client( + hostname, + &[alpn], + Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), + local_addr, + remote_addr, + args.shared.quic_parameters.get(alpn), + Instant::now(), + )?; + + if let Some(tok) = resumption_token { + client.enable_resumption(Instant::now(), tok)?; + } + + let ciphers = args.get_ciphers(); + if !ciphers.is_empty() { + client.set_ciphers(&ciphers)?; + } + + client.set_qlog(qlog_new(args, hostname, client.odcid().unwrap())?); + + Ok(client) +} + +impl super::Client for Connection { + fn process_output(&mut self, now: Instant) -> Output { + self.process_output(now) + } + + fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + { + self.process_multiple_input(dgrams, now); + } + + fn close(&mut self, now: Instant, app_error: neqo_transport::AppError, msg: S) + where + S: AsRef + std::fmt::Display, + { + self.close(now, app_error, msg); + } + + fn is_closed(&self) -> Option { + if let State::Closed(err) = self.state() { + return Some(err.clone()); + } + None + } + + fn stats(&self) -> neqo_transport::Stats { + self.stats() + } +} + +impl<'b> Handler<'b> { + pub fn new(url_queue: VecDeque, args: &'b Args) -> Self { + Self { + streams: HashMap::new(), + url_queue, + all_paths: Vec::new(), + args, + token: None, + needs_key_update: args.key_update, + } + } + + fn 
download_urls(&mut self, client: &mut Connection) { + loop { + if self.url_queue.is_empty() { + break; + } + if self.streams.len() >= self.args.concurrency { + break; + } + if !self.download_next(client) { + break; + } + } + } + + fn download_next(&mut self, client: &mut Connection) -> bool { + if self.needs_key_update { + qdebug!("Deferring requests until after first key update"); + return false; + } + let url = self + .url_queue + .pop_front() + .expect("download_next called with empty queue"); + match client.stream_create(StreamType::BiDi) { + Ok(client_stream_id) => { + qinfo!("Created stream {client_stream_id} for {url}"); + let req = format!("GET {}\r\n", url.path()); + _ = client + .stream_send(client_stream_id, req.as_bytes()) + .unwrap(); + client.stream_close_send(client_stream_id).unwrap(); + let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths); + self.streams.insert(client_stream_id, out_file); + true + } + Err(e @ (Error::StreamLimitError | Error::ConnectionState)) => { + qwarn!("Cannot create stream {e:?}"); + self.url_queue.push_front(url); + false + } + Err(e) => { + panic!("Error creating stream {e:?}"); + } + } + } + + /// Read and maybe print received data from a stream. + // Returns bool: was fin received? 
+ fn read_from_stream( + client: &mut Connection, + stream_id: StreamId, + output_read_data: bool, + maybe_out_file: &mut Option>, + ) -> Res { + let mut data = vec![0; 4096]; + loop { + let (sz, fin) = client.stream_recv(stream_id, &mut data)?; + if sz == 0 { + return Ok(fin); + } + + if let Some(out_file) = maybe_out_file { + out_file.write_all(&data[..sz])?; + } else if !output_read_data { + qdebug!("READ[{stream_id}]: {sz} bytes"); + } else { + qdebug!( + "READ[{}]: {}", + stream_id, + String::from_utf8(data.clone()).unwrap() + ); + } + if fin { + return Ok(true); + } + } + } + + fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res<()> { + match self.streams.get_mut(&stream_id) { + None => { + qwarn!("Data on unexpected stream: {stream_id}"); + return Ok(()); + } + Some(maybe_out_file) => { + let fin_recvd = Self::read_from_stream( + client, + stream_id, + self.args.output_read_data, + maybe_out_file, + )?; + + if fin_recvd { + if let Some(mut out_file) = maybe_out_file.take() { + out_file.flush()?; + } else { + qinfo!(""); + } + self.streams.remove(&stream_id); + self.download_urls(client); + } + } + } + Ok(()) + } +} diff --git a/neqo-bin/src/client/http3.rs b/neqo-bin/src/client/http3.rs new file mode 100644 index 0000000000..b3f577127e --- /dev/null +++ b/neqo-bin/src/client/http3.rs @@ -0,0 +1,463 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An HTTP 3 client implementation. 
+ +use std::{ + cell::RefCell, + collections::{HashMap, VecDeque}, + fmt::Display, + fs::File, + io::{BufWriter, Write}, + net::SocketAddr, + path::PathBuf, + rc::Rc, + time::Instant, +}; + +use neqo_common::{event::Provider, hex, qdebug, qinfo, qwarn, Datagram, Header}; +use neqo_crypto::{AuthenticationStatus, ResumptionToken}; +use neqo_http3::{Error, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; +use neqo_transport::{ + AppError, Connection, ConnectionError, EmptyConnectionIdGenerator, Error as TransportError, + Output, StreamId, +}; +use url::Url; + +use super::{get_output_file, qlog_new, Args, Res}; + +pub(crate) struct Handler<'a> { + #[allow( + unknown_lints, + clippy::struct_field_names, + clippy::redundant_field_names + )] + url_handler: UrlHandler<'a>, + token: Option, + output_read_data: bool, +} + +impl<'a> Handler<'a> { + pub(crate) fn new(url_queue: VecDeque, args: &'a Args) -> Self { + let url_handler = UrlHandler { + url_queue, + stream_handlers: HashMap::new(), + all_paths: Vec::new(), + handler_type: if args.test.is_some() { + StreamHandlerType::Upload + } else { + StreamHandlerType::Download + }, + args, + }; + + Self { + url_handler, + token: None, + output_read_data: args.output_read_data, + } + } +} + +pub(crate) fn create_client( + args: &Args, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + resumption_token: Option, +) -> Res { + let mut transport = Connection::new_client( + hostname, + &[&args.shared.alpn], + Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), + local_addr, + remote_addr, + args.shared.quic_parameters.get(args.shared.alpn.as_str()), + Instant::now(), + )?; + let ciphers = args.get_ciphers(); + if !ciphers.is_empty() { + transport.set_ciphers(&ciphers)?; + } + let mut client = Http3Client::new_with_conn( + transport, + Http3Parameters::default() + .max_table_size_encoder(args.shared.max_table_size_encoder) + 
.max_table_size_decoder(args.shared.max_table_size_decoder) + .max_blocked_streams(args.shared.max_blocked_streams) + .max_concurrent_push_streams(args.max_concurrent_push_streams), + ); + + let qlog = qlog_new(args, hostname, client.connection_id())?; + client.set_qlog(qlog); + if let Some(ech) = &args.ech { + client.enable_ech(ech).expect("enable ECH"); + } + if let Some(token) = resumption_token { + client + .enable_resumption(Instant::now(), token) + .expect("enable resumption"); + } + + Ok(client) +} + +impl super::Client for Http3Client { + fn is_closed(&self) -> Option { + if let Http3State::Closed(err) = self.state() { + return Some(err); + } + None + } + + fn process_output(&mut self, now: Instant) -> Output { + self.process_output(now) + } + + fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + { + self.process_multiple_input(dgrams, now); + } + + fn close(&mut self, now: Instant, app_error: AppError, msg: S) + where + S: AsRef + Display, + { + self.close(now, app_error, msg); + } + + fn stats(&self) -> neqo_transport::Stats { + self.transport_stats() + } +} + +impl<'a> super::Handler for Handler<'a> { + type Client = Http3Client; + + fn handle(&mut self, client: &mut Http3Client) -> Res { + while let Some(event) = client.next_event() { + match event { + Http3ClientEvent::AuthenticationNeeded => { + client.authenticated(AuthenticationStatus::Ok, Instant::now()); + } + Http3ClientEvent::HeaderReady { + stream_id, + headers, + fin, + .. 
+ } => { + if let Some(handler) = self.url_handler.stream_handler(stream_id) { + handler.process_header_ready(stream_id, fin, headers); + } else { + qwarn!("Data on unexpected stream: {stream_id}"); + } + if fin { + self.url_handler.on_stream_fin(client, stream_id); + } + } + Http3ClientEvent::DataReadable { stream_id } => { + let mut stream_done = false; + match self.url_handler.stream_handler(stream_id) { + None => { + qwarn!("Data on unexpected stream: {stream_id}"); + } + Some(handler) => loop { + let mut data = vec![0; 4096]; + let (sz, fin) = client + .read_data(Instant::now(), stream_id, &mut data) + .expect("Read should succeed"); + + handler.process_data_readable( + stream_id, + fin, + data, + sz, + self.output_read_data, + )?; + + if fin { + stream_done = true; + break; + } + + if sz == 0 { + break; + } + }, + } + + if stream_done { + self.url_handler.on_stream_fin(client, stream_id); + } + } + Http3ClientEvent::DataWritable { stream_id } => { + match self.url_handler.stream_handler(stream_id) { + None => { + qwarn!("Data on unexpected stream: {stream_id}"); + } + Some(handler) => { + handler.process_data_writable(client, stream_id); + } + } + } + Http3ClientEvent::StateChange(Http3State::Connected) + | Http3ClientEvent::RequestsCreatable => { + self.url_handler.process_urls(client); + } + Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), + _ => { + qwarn!("Unhandled event {event:?}"); + } + } + } + + Ok(self.url_handler.done()) + } + + fn take_token(&mut self) -> Option { + self.token.take() + } + + fn has_token(&self) -> bool { + self.token.is_some() + } +} + +trait StreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
); + fn process_data_readable( + &mut self, + stream_id: StreamId, + fin: bool, + data: Vec, + sz: usize, + output_read_data: bool, + ) -> Res; + fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId); +} + +enum StreamHandlerType { + Download, + Upload, +} + +impl StreamHandlerType { + fn make_handler( + handler_type: &Self, + url: &Url, + args: &Args, + all_paths: &mut Vec, + client: &mut Http3Client, + client_stream_id: StreamId, + ) -> Box { + match handler_type { + Self::Download => { + let out_file = get_output_file(url, &args.output_dir, all_paths); + client.stream_close_send(client_stream_id).unwrap(); + Box::new(DownloadStreamHandler { out_file }) + } + Self::Upload => Box::new(UploadStreamHandler { + data: vec![42; args.upload_size], + offset: 0, + chunk_size: 32768, + start: Instant::now(), + }), + } + } +} + +struct DownloadStreamHandler { + out_file: Option>, +} + +impl StreamHandler for DownloadStreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { + if self.out_file.is_none() { + qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + } + } + + fn process_data_readable( + &mut self, + stream_id: StreamId, + fin: bool, + data: Vec, + sz: usize, + output_read_data: bool, + ) -> Res { + if let Some(out_file) = &mut self.out_file { + if sz > 0 { + out_file.write_all(&data[..sz])?; + } + return Ok(true); + } else if !output_read_data { + qdebug!("READ[{stream_id}]: {sz} bytes"); + } else if let Ok(txt) = String::from_utf8(data.clone()) { + qdebug!("READ[{stream_id}]: {txt}"); + } else { + qdebug!("READ[{}]: 0x{}", stream_id, hex(&data)); + } + + if fin { + if let Some(mut out_file) = self.out_file.take() { + out_file.flush()?; + } else { + qdebug!(""); + } + } + + Ok(true) + } + + fn process_data_writable(&mut self, _client: &mut Http3Client, _stream_id: StreamId) {} +} + +struct UploadStreamHandler { + data: Vec, + offset: usize, + chunk_size: usize, + start: Instant, +} + +impl StreamHandler for UploadStreamHandler { + fn process_header_ready(&mut self, stream_id: StreamId, fin: bool, headers: Vec
) { + qdebug!("READ HEADERS[{stream_id}]: fin={fin} {headers:?}"); + } + + fn process_data_readable( + &mut self, + stream_id: StreamId, + _fin: bool, + data: Vec, + _sz: usize, + _output_read_data: bool, + ) -> Res { + if let Ok(txt) = String::from_utf8(data.clone()) { + let trimmed_txt = txt.trim_end_matches(char::from(0)); + let parsed: usize = trimmed_txt.parse().unwrap(); + if parsed == self.data.len() { + let upload_time = Instant::now().duration_since(self.start); + qinfo!("Stream ID: {stream_id:?}, Upload time: {upload_time:?}"); + } + } else { + panic!("Unexpected data [{}]: 0x{}", stream_id, hex(&data)); + } + Ok(true) + } + + fn process_data_writable(&mut self, client: &mut Http3Client, stream_id: StreamId) { + while self.offset < self.data.len() { + let end = self.offset + self.chunk_size.min(self.data.len() - self.offset); + let chunk = &self.data[self.offset..end]; + match client.send_data(stream_id, chunk) { + Ok(amount) => { + if amount == 0 { + break; + } + self.offset += amount; + if self.offset == self.data.len() { + client.stream_close_send(stream_id).unwrap(); + } + } + Err(_) => break, + }; + } + } +} + +struct UrlHandler<'a> { + url_queue: VecDeque, + stream_handlers: HashMap>, + all_paths: Vec, + handler_type: StreamHandlerType, + args: &'a Args, +} + +impl<'a> UrlHandler<'a> { + fn stream_handler(&mut self, stream_id: StreamId) -> Option<&mut Box> { + self.stream_handlers.get_mut(&stream_id) + } + + fn process_urls(&mut self, client: &mut Http3Client) { + loop { + if self.url_queue.is_empty() { + break; + } + if self.stream_handlers.len() >= self.args.concurrency { + break; + } + if !self.next_url(client) { + break; + } + } + } + + fn next_url(&mut self, client: &mut Http3Client) -> bool { + let url = self + .url_queue + .pop_front() + .expect("download_next called with empty queue"); + match client.fetch( + Instant::now(), + &self.args.method, + &url, + &to_headers(&self.args.header), + Priority::default(), + ) { + Ok(client_stream_id) => 
{ + qdebug!("Successfully created stream id {client_stream_id} for {url}"); + + let handler: Box = StreamHandlerType::make_handler( + &self.handler_type, + &url, + self.args, + &mut self.all_paths, + client, + client_stream_id, + ); + self.stream_handlers.insert(client_stream_id, handler); + true + } + Err( + Error::TransportError(TransportError::StreamLimitError) + | Error::StreamLimitError + | Error::Unavailable, + ) => { + self.url_queue.push_front(url); + false + } + Err(e) => { + panic!("Can't create stream {e}"); + } + } + } + + fn done(&mut self) -> bool { + self.stream_handlers.is_empty() && self.url_queue.is_empty() + } + + fn on_stream_fin(&mut self, client: &mut Http3Client, stream_id: StreamId) { + self.stream_handlers.remove(&stream_id); + self.process_urls(client); + } +} + +fn to_headers(values: &[impl AsRef]) -> Vec
{ + values + .iter() + .scan(None, |state, value| { + if let Some(name) = state.take() { + *state = None; + Some(Header::new(name, value.as_ref())) + } else { + *state = Some(value.as_ref().to_string()); + None + } + }) + .collect() +} diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs new file mode 100644 index 0000000000..61e43c00d1 --- /dev/null +++ b/neqo-bin/src/client/mod.rs @@ -0,0 +1,589 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::{ + collections::{HashMap, VecDeque}, + fmt::{self, Display}, + fs::{create_dir_all, File, OpenOptions}, + io::{self, BufWriter}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}, + path::PathBuf, + pin::Pin, + process::exit, + time::Instant, +}; + +use clap::Parser; +use futures::{ + future::{select, Either}, + FutureExt, TryFutureExt, +}; +use neqo_common::{self as common, qdebug, qerror, qinfo, qlog::NeqoQlog, qwarn, Datagram, Role}; +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, + init, Cipher, ResumptionToken, +}; +use neqo_http3::Output; +use neqo_transport::{AppError, ConnectionError, ConnectionId, Error as TransportError, Version}; +use qlog::{events::EventImportance, streamer::QlogStreamer}; +use tokio::time::Sleep; +use url::{Origin, Url}; + +use crate::{udp, SharedArgs}; + +mod http09; +mod http3; + +const BUFWRITER_BUFFER_SIZE: usize = 64 * 1024; + +#[derive(Debug)] +pub enum Error { + ArgumentError(&'static str), + Http3Error(neqo_http3::Error), + IoError(io::Error), + QlogError, + TransportError(neqo_transport::Error), + ApplicationError(neqo_transport::AppError), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } +} + +impl From for Error { + fn from(err: io::Error) -> Self { + 
Self::IoError(err) + } +} + +impl From for Error { + fn from(err: neqo_http3::Error) -> Self { + Self::Http3Error(err) + } +} + +impl From for Error { + fn from(_err: qlog::Error) -> Self { + Self::QlogError + } +} + +impl From for Error { + fn from(err: neqo_transport::Error) -> Self { + Self::TransportError(err) + } +} + +impl From for Error { + fn from(err: neqo_transport::ConnectionError) -> Self { + match err { + ConnectionError::Transport(e) => Self::TransportError(e), + ConnectionError::Application(e) => Self::ApplicationError(e), + } + } +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Error: {self:?}")?; + Ok(()) + } +} + +impl std::error::Error for Error {} + +type Res = Result; + +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] +#[allow(clippy::struct_excessive_bools)] // Not a good use of that lint. +pub struct Args { + #[command(flatten)] + verbose: clap_verbosity_flag::Verbosity, + + #[command(flatten)] + shared: SharedArgs, + + urls: Vec, + + #[arg(short = 'm', default_value = "GET")] + method: String, + + #[arg(short = 'H', long, number_of_values = 2)] + header: Vec, + + #[arg(name = "max-push", short = 'p', long, default_value = "10")] + max_concurrent_push_streams: u64, + + #[arg(name = "download-in-series", long)] + /// Download resources in series using separate connections. + download_in_series: bool, + + #[arg(name = "concurrency", long, default_value = "100")] + /// The maximum number of requests to have outstanding at one time. + concurrency: usize, + + #[arg(name = "output-read-data", long)] + /// Output received data to stdout + output_read_data: bool, + + #[arg(name = "output-dir", long)] + /// Save contents of fetched URLs to a directory + output_dir: Option, + + #[arg(short = 'r', long)] + /// Client attempts to resume by making multiple connections to servers. + /// Requires that 2 or more URLs are listed for each server. 
+ /// Use this for 0-RTT: the stack always attempts 0-RTT on resumption. + resume: bool, + + #[arg(name = "key-update", long, hide = true)] + /// Attempt to initiate a key update immediately after confirming the connection. + key_update: bool, + + #[arg(name = "ech", long, value_parser = |s: &str| hex::decode(s))] + /// Enable encrypted client hello (ECH). + /// This takes an encoded ECH configuration in hexadecimal format. + ech: Option>, + + #[arg(name = "ipv4-only", short = '4', long)] + /// Connect only over IPv4 + ipv4_only: bool, + + #[arg(name = "ipv6-only", short = '6', long)] + /// Connect only over IPv6 + ipv6_only: bool, + + /// The test that this client will run. Currently, we only support "upload". + #[arg(name = "test", long)] + test: Option, + + /// The request size that will be used for upload test. + #[arg(name = "upload-size", long, default_value = "100")] + upload_size: usize, + + /// Print connection stats after close. + #[arg(name = "stats", long)] + stats: bool, +} + +impl Args { + #[must_use] + #[cfg(feature = "bench")] + #[allow(clippy::missing_panics_doc)] + pub fn new(requests: &[u64]) -> Self { + use std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + urls: requests + .iter() + .map(|r| Url::from_str(&format!("http://[::1]:12345/{r}")).unwrap()) + .collect(), + method: "GET".into(), + header: vec![], + max_concurrent_push_streams: 10, + download_in_series: false, + concurrency: 100, + output_read_data: false, + output_dir: Some("/dev/null".into()), + resume: false, + key_update: false, + ech: None, + ipv4_only: false, + ipv6_only: false, + test: None, + upload_size: 100, + stats: false, + } + } + + fn get_ciphers(&self) -> Vec { + self.shared + .ciphers + .iter() + .filter_map(|c| match c.as_str() { + "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), + "TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384), + "TLS_CHACHA20_POLY1305_SHA256" => 
Some(TLS_CHACHA20_POLY1305_SHA256), + _ => None, + }) + .collect::>() + } + + fn update_for_tests(&mut self) { + let Some(testcase) = self.shared.qns_test.as_ref() else { + return; + }; + + if self.key_update { + qerror!("internal option key_update set by user"); + exit(127) + } + + // Only use v1 for most QNS tests. + self.shared.quic_parameters.quic_version = vec![Version::Version1]; + match testcase.as_str() { + // TODO: Add "ecn" when that is ready. + "http3" => { + if let Some(testcase) = &self.test { + if testcase.as_str() != "upload" { + qerror!("Unsupported test case: {testcase}"); + exit(127) + } + + self.method = String::from("POST"); + } + } + "handshake" | "transfer" | "retry" => { + self.shared.use_old_http = true; + } + "zerortt" | "resumption" => { + if self.urls.len() < 2 { + qerror!("Warning: resumption tests won't work without >1 URL"); + exit(127); + } + self.shared.use_old_http = true; + self.resume = true; + } + "multiconnect" => { + self.shared.use_old_http = true; + self.download_in_series = true; + } + "chacha20" => { + self.shared.use_old_http = true; + self.shared.ciphers.clear(); + self.shared + .ciphers + .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); + } + "keyupdate" => { + self.shared.use_old_http = true; + self.key_update = true; + } + "v2" => { + self.shared.use_old_http = true; + // Use default version set for this test (which allows compatible vneg.) + self.shared.quic_parameters.quic_version.clear(); + } + _ => exit(127), + } + } +} + +fn get_output_file( + url: &Url, + output_dir: &Option, + all_paths: &mut Vec, +) -> Option> { + if let Some(ref dir) = output_dir { + let mut out_path = dir.clone(); + + let url_path = if url.path() == "/" { + // If no path is given... call it "root"? + "root" + } else { + // Omit leading slash + &url.path()[1..] 
+ }; + out_path.push(url_path); + + if all_paths.contains(&out_path) { + qerror!("duplicate path {}", out_path.display()); + return None; + } + + qinfo!("Saving {url} to {out_path:?}"); + + if let Some(parent) = out_path.parent() { + create_dir_all(parent).ok()?; + } + + let f = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&out_path) + .ok()?; + + all_paths.push(out_path); + Some(BufWriter::with_capacity(BUFWRITER_BUFFER_SIZE, f)) + } else { + None + } +} + +enum Ready { + Socket, + Timeout, +} + +// Wait for the socket to be readable or the timeout to fire. +async fn ready( + socket: &udp::Socket, + mut timeout: Option<&mut Pin>>, +) -> Result { + let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket); + let timeout_ready = timeout + .as_mut() + .map_or(Either::Right(futures::future::pending()), Either::Left) + .map(|()| Ok(Ready::Timeout)); + select(socket_ready, timeout_ready).await.factor_first().0 +} + +/// Handles a given task on the provided [`Client`]. +trait Handler { + type Client: Client; + + fn handle(&mut self, client: &mut Self::Client) -> Res; + fn take_token(&mut self) -> Option; + fn has_token(&self) -> bool; +} + +/// Network client, e.g. [`neqo_transport::Connection`] or [`neqo_http3::Http3Client`]. +trait Client { + fn process_output(&mut self, now: Instant) -> Output; + fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator; + fn close(&mut self, now: Instant, app_error: AppError, msg: S) + where + S: AsRef + Display; + /// Returns [`Some(_)`] if the connection is closed. + /// + /// Note that connection was closed without error on + /// [`Some(ConnectionError::Transport(TransportError::NoError))`]. 
+ fn is_closed(&self) -> Option; + fn stats(&self) -> neqo_transport::Stats; +} + +struct Runner<'a, H: Handler> { + local_addr: SocketAddr, + socket: &'a mut udp::Socket, + client: H::Client, + handler: H, + timeout: Option>>, + args: &'a Args, +} + +impl<'a, H: Handler> Runner<'a, H> { + async fn run(mut self) -> Res> { + loop { + let handler_done = self.handler.handle(&mut self.client)?; + + match (handler_done, self.args.resume, self.handler.has_token()) { + // Handler isn't done. Continue. + (false, _, _) => {}, + // Handler done. Resumption token needed but not present. Continue. + (true, true, false) => { + qdebug!("Handler done. Waiting for resumption token."); + } + // Handler is done, no resumption token needed. Close. + (true, false, _) | + // Handler is done, resumption token needed and present. Close. + (true, true, true) => { + self.client.close(Instant::now(), 0, "kthxbye!"); + } + } + + self.process_output().await?; + + if let Some(reason) = self.client.is_closed() { + if self.args.stats { + qinfo!("{:?}", self.client.stats()); + } + return match reason { + ConnectionError::Transport(TransportError::NoError) + | ConnectionError::Application(0) => Ok(self.handler.take_token()), + _ => Err(reason.into()), + }; + } + + match ready(self.socket, self.timeout.as_mut()).await? 
{ + Ready::Socket => self.process_multiple_input().await?, + Ready::Timeout => { + self.timeout = None; + } + } + } + } + + async fn process_output(&mut self) -> Result<(), io::Error> { + loop { + match self.client.process_output(Instant::now()) { + Output::Datagram(dgram) => { + self.socket.writable().await?; + self.socket.send(dgram)?; + } + Output::Callback(new_timeout) => { + qdebug!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; + } + Output::None => { + qdebug!("Output::None"); + break; + } + } + } + + Ok(()) + } + + async fn process_multiple_input(&mut self) -> Res<()> { + loop { + let dgrams = self.socket.recv(&self.local_addr)?; + if dgrams.is_empty() { + break; + } + self.client + .process_multiple_input(dgrams.iter(), Instant::now()); + self.process_output().await?; + } + + Ok(()) + } +} + +fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { + if let Some(qlog_dir) = &args.shared.qlog_dir { + let mut qlog_path = qlog_dir.clone(); + let filename = format!("{hostname}-{cid}.sqlog"); + qlog_path.push(filename); + + let f = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&qlog_path)?; + + let streamer = QlogStreamer::new( + qlog::QLOG_VERSION.to_string(), + Some("Example qlog".to_string()), + Some("Example qlog description".to_string()), + None, + std::time::Instant::now(), + common::qlog::new_trace(Role::Client), + EventImportance::Base, + Box::new(f), + ); + + Ok(NeqoQlog::enabled(streamer, qlog_path)?) 
+ } else { + Ok(NeqoQlog::disabled()) + } +} + +pub async fn client(mut args: Args) -> Res<()> { + neqo_common::log::init(Some(args.verbose.log_level_filter())); + init()?; + + args.update_for_tests(); + + init()?; + + let urls_by_origin = args + .urls + .clone() + .into_iter() + .fold(HashMap::>::new(), |mut urls, url| { + urls.entry(url.origin()).or_default().push_back(url); + urls + }) + .into_iter() + .filter_map(|(origin, urls)| match origin { + Origin::Tuple(_scheme, h, p) => Some(((h, p), urls)), + Origin::Opaque(x) => { + qwarn!("Opaque origin {x:?}"); + None + } + }); + + for ((host, port), mut urls) in urls_by_origin { + if args.resume && urls.len() < 2 { + qerror!("Resumption to {host} cannot work without at least 2 URLs."); + exit(127); + } + + let remote_addr = format!("{host}:{port}").to_socket_addrs()?.find(|addr| { + !matches!( + (addr, args.ipv4_only, args.ipv6_only), + (SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false) + ) + }); + let Some(remote_addr) = remote_addr else { + qerror!("No compatible address found for: {host}"); + exit(1); + }; + + let local_addr = match remote_addr { + SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0), + SocketAddr::V6(..) 
=> SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), + }; + + let mut socket = udp::Socket::bind(local_addr)?; + let real_local = socket.local_addr().unwrap(); + qinfo!( + "{} Client connecting: {:?} -> {:?}", + if args.shared.use_old_http { "H9" } else { "H3" }, + real_local, + remote_addr, + ); + + let hostname = format!("{host}"); + let mut token: Option = None; + let mut first = true; + while !urls.is_empty() { + let to_request = if (args.resume && first) || args.download_in_series { + urls.pop_front().into_iter().collect() + } else { + std::mem::take(&mut urls) + }; + + first = false; + + token = if args.shared.use_old_http { + let client = + http09::create_client(&args, real_local, remote_addr, &hostname, token) + .expect("failed to create client"); + + let handler = http09::Handler::new(to_request, &args); + + Runner { + args: &args, + client, + handler, + local_addr: real_local, + socket: &mut socket, + timeout: None, + } + .run() + .await? + } else { + let client = http3::create_client(&args, real_local, remote_addr, &hostname, token) + .expect("failed to create client"); + + let handler = http3::Handler::new(to_request, &args); + + Runner { + args: &args, + client, + handler, + local_addr: real_local, + socket: &mut socket, + timeout: None, + } + .run() + .await? + }; + } + } + + Ok(()) +} diff --git a/neqo-bin/src/lib.rs b/neqo-bin/src/lib.rs new file mode 100644 index 0000000000..380c56ddce --- /dev/null +++ b/neqo-bin/src/lib.rs @@ -0,0 +1,244 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(clippy::missing_panics_doc)] +#![allow(clippy::missing_errors_doc)] + +use std::{ + fmt::{self, Display}, + net::{SocketAddr, ToSocketAddrs}, + path::PathBuf, + time::Duration, +}; + +use clap::Parser; +use neqo_transport::{ + tparams::PreferredAddress, CongestionControlAlgorithm, ConnectionParameters, StreamType, + Version, +}; + +pub mod client; +pub mod server; +mod udp; + +#[derive(Debug, Parser)] +pub struct SharedArgs { + #[arg(short = 'a', long, default_value = "h3")] + /// ALPN labels to negotiate. + /// + /// This client still only does HTTP/3 no matter what the ALPN says. + pub alpn: String, + + #[arg(name = "qlog-dir", long, value_parser=clap::value_parser!(PathBuf))] + /// Enable QLOG logging and QLOG traces to this directory + pub qlog_dir: Option, + + #[arg(name = "encoder-table-size", long, default_value = "16384")] + pub max_table_size_encoder: u64, + + #[arg(name = "decoder-table-size", long, default_value = "16384")] + pub max_table_size_decoder: u64, + + #[arg(name = "max-blocked-streams", short = 'b', long, default_value = "10")] + pub max_blocked_streams: u16, + + #[arg(short = 'c', long, number_of_values = 1)] + /// The set of TLS cipher suites to enable. + /// From: `TLS_AES_128_GCM_SHA256`, `TLS_AES_256_GCM_SHA384`, `TLS_CHACHA20_POLY1305_SHA256`. 
+ pub ciphers: Vec, + + #[arg(name = "qns-test", long)] + /// Enable special behavior for use with QUIC Network Simulator + pub qns_test: Option, + + #[arg(name = "use-old-http", short = 'o', long)] + /// Use http 0.9 instead of HTTP/3 + pub use_old_http: bool, + + #[command(flatten)] + pub quic_parameters: QuicParameters, +} + +#[cfg(feature = "bench")] +impl Default for SharedArgs { + fn default() -> Self { + Self { + alpn: "h3".into(), + qlog_dir: None, + max_table_size_encoder: 16384, + max_table_size_decoder: 16384, + max_blocked_streams: 10, + ciphers: vec![], + qns_test: None, + use_old_http: false, + quic_parameters: QuicParameters::default(), + } + } +} + +#[derive(Debug, Parser)] +pub struct QuicParameters { + #[arg( + short = 'Q', + long, + num_args = 1.., + value_delimiter = ' ', + number_of_values = 1, + value_parser = from_str)] + /// A list of versions to support, in hex. + /// The first is the version to attempt. + /// Adding multiple values adds versions in order of preference. + /// If the first listed version appears in the list twice, the position + /// of the second entry determines the preference order of that version. + pub quic_version: Vec, + + #[arg(long, default_value = "16")] + /// Set the `MAX_STREAMS_BIDI` limit. + pub max_streams_bidi: u64, + + #[arg(long, default_value = "16")] + /// Set the `MAX_STREAMS_UNI` limit. + pub max_streams_uni: u64, + + #[arg(long = "idle", default_value = "30")] + /// The idle timeout for connections, in seconds. + pub idle_timeout: u64, + + #[arg(long = "cc", default_value = "newreno")] + /// The congestion controller to use. + pub congestion_control: CongestionControlAlgorithm, + + #[arg(long = "no-pacing")] + /// Whether to disable pacing. + pub no_pacing: bool, + + #[arg(name = "preferred-address-v4", long)] + /// An IPv4 address for the server preferred address. + pub preferred_address_v4: Option, + + #[arg(name = "preferred-address-v6", long)] + /// An IPv6 address for the server preferred address. 
+ pub preferred_address_v6: Option, +} + +#[cfg(feature = "bench")] +impl Default for QuicParameters { + fn default() -> Self { + Self { + quic_version: vec![], + max_streams_bidi: 16, + max_streams_uni: 16, + idle_timeout: 30, + congestion_control: CongestionControlAlgorithm::NewReno, + no_pacing: false, + preferred_address_v4: None, + preferred_address_v6: None, + } + } +} + +impl QuicParameters { + fn get_sock_addr(opt: &Option, v: &str, f: F) -> Option + where + F: FnMut(&SocketAddr) -> bool, + { + let addr = opt + .iter() + .filter_map(|spa| spa.to_socket_addrs().ok()) + .flatten() + .find(f); + assert_eq!( + opt.is_some(), + addr.is_some(), + "unable to resolve '{}' to an {} address", + opt.as_ref().unwrap(), + v, + ); + addr + } + + #[must_use] + pub fn preferred_address_v4(&self) -> Option { + Self::get_sock_addr(&self.preferred_address_v4, "IPv4", SocketAddr::is_ipv4) + } + + #[must_use] + pub fn preferred_address_v6(&self) -> Option { + Self::get_sock_addr(&self.preferred_address_v6, "IPv6", SocketAddr::is_ipv6) + } + + #[must_use] + pub fn preferred_address(&self) -> Option { + let v4 = self.preferred_address_v4(); + let v6 = self.preferred_address_v6(); + if v4.is_none() && v6.is_none() { + None + } else { + let v4 = v4.map(|v4| { + let SocketAddr::V4(v4) = v4 else { + unreachable!(); + }; + v4 + }); + let v6 = v6.map(|v6| { + let SocketAddr::V6(v6) = v6 else { + unreachable!(); + }; + v6 + }); + Some(PreferredAddress::new(v4, v6)) + } + } + + #[must_use] + pub fn get(&self, alpn: &str) -> ConnectionParameters { + let params = ConnectionParameters::default() + .max_streams(StreamType::BiDi, self.max_streams_bidi) + .max_streams(StreamType::UniDi, self.max_streams_uni) + .idle_timeout(Duration::from_secs(self.idle_timeout)) + .cc_algorithm(self.congestion_control) + .pacing(!self.no_pacing); + + if let Some(&first) = self.quic_version.first() { + let all = if self.quic_version[1..].contains(&first) { + &self.quic_version[1..] 
+ } else { + &self.quic_version + }; + params.versions(first, all.to_vec()) + } else { + let version = match alpn { + "h3" | "hq-interop" => Version::Version1, + "h3-29" | "hq-29" => Version::Draft29, + "h3-30" | "hq-30" => Version::Draft30, + "h3-31" | "hq-31" => Version::Draft31, + "h3-32" | "hq-32" => Version::Draft32, + _ => Version::default(), + }; + params.versions(version, Version::all()) + } + } +} + +fn from_str(s: &str) -> Result { + let v = u32::from_str_radix(s, 16) + .map_err(|_| Error::Argument("versions need to be specified in hex"))?; + Version::try_from(v).map_err(|_| Error::Argument("unknown version")) +} + +#[derive(Debug)] +pub enum Error { + Argument(&'static str), +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Error: {self:?}")?; + Ok(()) + } +} + +impl std::error::Error for Error {} diff --git a/neqo-bin/src/server/mod.rs b/neqo-bin/src/server/mod.rs new file mode 100644 index 0000000000..3490b3e9b3 --- /dev/null +++ b/neqo-bin/src/server/mod.rs @@ -0,0 +1,628 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::{ + borrow::Cow, + cell::RefCell, + cmp::min, + collections::HashMap, + fmt::{self, Display}, + fs, io, + net::{SocketAddr, ToSocketAddrs}, + path::PathBuf, + pin::Pin, + process::exit, + rc::Rc, + time::{Duration, Instant}, +}; + +use clap::Parser; +use futures::{ + future::{select, select_all, Either}, + FutureExt, +}; +use neqo_common::{hex, qdebug, qerror, qinfo, qwarn, Datagram, Header}; +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, + generate_ech_keys, init_db, random, AntiReplay, Cipher, +}; +use neqo_http3::{ + Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, +}; +use neqo_transport::{ + server::ValidateAddress, ConnectionIdGenerator, Output, RandomConnectionIdGenerator, Version, +}; +use old_https::Http09Server; +use tokio::time::Sleep; + +use crate::{udp, SharedArgs}; + +const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10); + +mod old_https; + +#[derive(Debug)] +pub enum Error { + ArgumentError(&'static str), + Http3Error(neqo_http3::Error), + IoError(io::Error), + QlogError, + TransportError(neqo_transport::Error), + CryptoError(neqo_crypto::Error), +} + +impl From for Error { + fn from(err: neqo_crypto::Error) -> Self { + Self::CryptoError(err) + } +} + +impl From for Error { + fn from(err: io::Error) -> Self { + Self::IoError(err) + } +} + +impl From for Error { + fn from(err: neqo_http3::Error) -> Self { + Self::Http3Error(err) + } +} + +impl From for Error { + fn from(_err: qlog::Error) -> Self { + Self::QlogError + } +} + +impl From for Error { + fn from(err: neqo_transport::Error) -> Self { + Self::TransportError(err) + } +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Error: {self:?}")?; + Ok(()) + } +} + +impl std::error::Error for Error {} + +type Res = Result; + +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] +pub struct Args { + 
#[command(flatten)] + verbose: clap_verbosity_flag::Verbosity, + + #[command(flatten)] + shared: SharedArgs, + + /// List of IP:port to listen on + #[arg(default_value = "[::]:4433")] + hosts: Vec, + + #[arg(short = 'd', long, default_value = "./test-fixture/db")] + /// NSS database directory. + db: PathBuf, + + #[arg(short = 'k', long, default_value = "key")] + /// Name of key from NSS database. + key: String, + + #[arg(name = "retry", long)] + /// Force a retry + retry: bool, + + #[arg(name = "ech", long)] + /// Enable encrypted client hello (ECH). + /// This generates a new set of ECH keys when it is invoked. + /// The resulting configuration is printed to stdout in hexadecimal format. + ech: bool, +} + +#[cfg(feature = "bench")] +impl Default for Args { + fn default() -> Self { + use std::str::FromStr; + Self { + verbose: clap_verbosity_flag::Verbosity::::default(), + shared: crate::SharedArgs::default(), + hosts: vec!["[::]:12345".to_string()], + db: PathBuf::from_str("../test-fixture/db").unwrap(), + key: "key".to_string(), + retry: false, + ech: false, + } + } +} + +impl Args { + fn get_ciphers(&self) -> Vec { + self.shared + .ciphers + .iter() + .filter_map(|c| match c.as_str() { + "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), + "TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384), + "TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256), + _ => None, + }) + .collect::>() + } + + fn listen_addresses(&self) -> Vec { + self.hosts + .iter() + .filter_map(|host| host.to_socket_addrs().ok()) + .flatten() + .chain(self.shared.quic_parameters.preferred_address_v4()) + .chain(self.shared.quic_parameters.preferred_address_v6()) + .collect() + } + + fn now(&self) -> Instant { + if self.shared.qns_test.is_some() { + // When NSS starts its anti-replay it blocks any acceptance of 0-RTT for a + // single period. 
This ensures that an attacker that is able to force a + // server to reboot is unable to use that to flush the anti-replay buffers + // and have something replayed. + // + // However, this is a massive inconvenience for us when we are testing. + // As we can't initialize `AntiReplay` in the past (see `neqo_common::time` + // for why), fast forward time here so that the connections get times from + // in the future. + // + // This is NOT SAFE. Don't do this. + Instant::now() + ANTI_REPLAY_WINDOW + } else { + Instant::now() + } + } +} + +fn qns_read_response(filename: &str) -> Result, io::Error> { + let path: PathBuf = ["/www", filename.trim_matches(|p| p == '/')] + .iter() + .collect(); + fs::read(path) +} + +trait HttpServer: Display { + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output; + fn process_events(&mut self, args: &Args, now: Instant); + fn set_qlog_dir(&mut self, dir: Option); + fn set_ciphers(&mut self, ciphers: &[Cipher]); + fn validate_address(&mut self, when: ValidateAddress); + fn enable_ech(&mut self) -> &[u8]; +} + +struct ResponseData { + data: Cow<'static, [u8]>, + offset: usize, + remaining: usize, +} + +impl From<&[u8]> for ResponseData { + fn from(data: &[u8]) -> Self { + Self::from(data.to_vec()) + } +} + +impl From> for ResponseData { + fn from(data: Vec) -> Self { + let remaining = data.len(); + Self { + data: Cow::Owned(data), + offset: 0, + remaining, + } + } +} + +impl ResponseData { + fn repeat(buf: &'static [u8], total: usize) -> Self { + Self { + data: Cow::Borrowed(buf), + offset: 0, + remaining: total, + } + } + + fn send(&mut self, stream: &mut Http3OrWebTransportStream) { + while self.remaining > 0 { + let end = min(self.data.len(), self.offset + self.remaining); + let slice = &self.data[self.offset..end]; + match stream.send_data(slice) { + Ok(0) => { + return; + } + Ok(sent) => { + self.remaining -= sent; + self.offset = (self.offset + sent) % self.data.len(); + } + Err(e) => { + qwarn!("Error writing to 
stream {}: {:?}", stream, e); + return; + } + } + } + } + + fn done(&self) -> bool { + self.remaining == 0 + } +} + +struct SimpleServer { + server: Http3Server, + /// Progress writing to each stream. + remaining_data: HashMap, + posts: HashMap, +} + +impl SimpleServer { + const MESSAGE: &'static [u8] = &[0; 4096]; + + pub fn new( + args: &Args, + anti_replay: AntiReplay, + cid_mgr: Rc>, + ) -> Self { + let server = Http3Server::new( + args.now(), + &[args.key.clone()], + &[args.shared.alpn.clone()], + anti_replay, + cid_mgr, + Http3Parameters::default() + .connection_parameters(args.shared.quic_parameters.get(&args.shared.alpn)) + .max_table_size_encoder(args.shared.max_table_size_encoder) + .max_table_size_decoder(args.shared.max_table_size_decoder) + .max_blocked_streams(args.shared.max_blocked_streams), + None, + ) + .expect("We cannot make a server!"); + Self { + server, + remaining_data: HashMap::new(), + posts: HashMap::new(), + } + } +} + +impl Display for SimpleServer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.server.fmt(f) + } +} + +impl HttpServer for SimpleServer { + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { + self.server.process(dgram, now) + } + + fn process_events(&mut self, args: &Args, _now: Instant) { + while let Some(event) = self.server.next_event() { + match event { + Http3ServerEvent::Headers { + mut stream, + headers, + fin, + } => { + qdebug!("Headers (request={stream} fin={fin}): {headers:?}"); + + if headers + .iter() + .any(|h| h.name() == ":method" && h.value() == "POST") + { + self.posts.insert(stream, 0); + continue; + } + + let Some(path) = headers.iter().find(|&h| h.name() == ":path") else { + stream + .cancel_fetch(neqo_http3::Error::HttpRequestIncomplete.code()) + .unwrap(); + continue; + }; + + let mut response = if args.shared.qns_test.is_some() { + match qns_read_response(path.value()) { + Ok(data) => ResponseData::from(data), + Err(e) => { + qerror!("Failed to read {}: 
{e}", path.value()); + stream + .send_headers(&[Header::new(":status", "404")]) + .unwrap(); + stream.stream_close_send().unwrap(); + continue; + } + } + } else if let Ok(count) = + path.value().trim_matches(|p| p == '/').parse::() + { + ResponseData::repeat(Self::MESSAGE, count) + } else { + ResponseData::from(Self::MESSAGE) + }; + + stream + .send_headers(&[ + Header::new(":status", "200"), + Header::new("content-length", response.remaining.to_string()), + ]) + .unwrap(); + response.send(&mut stream); + if response.done() { + stream.stream_close_send().unwrap(); + } else { + self.remaining_data.insert(stream.stream_id(), response); + } + } + Http3ServerEvent::DataWritable { mut stream } => { + if self.posts.get_mut(&stream).is_none() { + if let Some(remaining) = self.remaining_data.get_mut(&stream.stream_id()) { + remaining.send(&mut stream); + if remaining.done() { + self.remaining_data.remove(&stream.stream_id()); + stream.stream_close_send().unwrap(); + } + } + } + } + + Http3ServerEvent::Data { + mut stream, + data, + fin, + } => { + if let Some(received) = self.posts.get_mut(&stream) { + *received += data.len(); + } + if fin { + if let Some(received) = self.posts.remove(&stream) { + let msg = received.to_string().as_bytes().to_vec(); + stream + .send_headers(&[Header::new(":status", "200")]) + .unwrap(); + stream.send_data(&msg).unwrap(); + stream.stream_close_send().unwrap(); + } + } + } + _ => {} + } + } + } + + fn set_qlog_dir(&mut self, dir: Option) { + self.server.set_qlog_dir(dir); + } + + fn validate_address(&mut self, v: ValidateAddress) { + self.server.set_validation(v); + } + + fn set_ciphers(&mut self, ciphers: &[Cipher]) { + self.server.set_ciphers(ciphers); + } + + fn enable_ech(&mut self) -> &[u8] { + let (sk, pk) = generate_ech_keys().expect("should create ECH keys"); + self.server + .enable_ech(random::<1>()[0], "public.example", &sk, &pk) + .unwrap(); + self.server.ech_config() + } +} + +struct ServersRunner { + args: Args, + server: Box, + 
timeout: Option>>, + sockets: Vec<(SocketAddr, udp::Socket)>, +} + +impl ServersRunner { + pub fn new(args: Args) -> Result { + let hosts = args.listen_addresses(); + if hosts.is_empty() { + qerror!("No valid hosts defined"); + return Err(io::Error::new(io::ErrorKind::InvalidInput, "No hosts")); + } + let sockets = hosts + .into_iter() + .map(|host| { + let socket = udp::Socket::bind(host)?; + let local_addr = socket.local_addr()?; + qinfo!("Server waiting for connection on: {local_addr:?}"); + + Ok((host, socket)) + }) + .collect::>()?; + let server = Self::create_server(&args); + + Ok(Self { + args, + server, + timeout: None, + sockets, + }) + } + + fn create_server(args: &Args) -> Box { + // Note: this is the exception to the case where we use `Args::now`. + let anti_replay = AntiReplay::new(Instant::now(), ANTI_REPLAY_WINDOW, 7, 14) + .expect("unable to setup anti-replay"); + let cid_mgr = Rc::new(RefCell::new(RandomConnectionIdGenerator::new(10))); + + let mut svr: Box = if args.shared.use_old_http { + Box::new( + Http09Server::new( + args.now(), + &[args.key.clone()], + &[args.shared.alpn.clone()], + anti_replay, + cid_mgr, + args.shared.quic_parameters.get(&args.shared.alpn), + ) + .expect("We cannot make a server!"), + ) + } else { + Box::new(SimpleServer::new(args, anti_replay, cid_mgr)) + }; + svr.set_ciphers(&args.get_ciphers()); + svr.set_qlog_dir(args.shared.qlog_dir.clone()); + if args.retry { + svr.validate_address(ValidateAddress::Always); + } + if args.ech { + let cfg = svr.enable_ech(); + qinfo!("ECHConfigList: {}", hex(cfg)); + } + svr + } + + /// Tries to find a socket, but then just falls back to sending from the first. 
+ fn find_socket(&mut self, addr: SocketAddr) -> &mut udp::Socket { + let ((_host, first_socket), rest) = self.sockets.split_first_mut().unwrap(); + rest.iter_mut() + .map(|(_host, socket)| socket) + .find(|socket| { + socket + .local_addr() + .ok() + .map_or(false, |socket_addr| socket_addr == addr) + }) + .unwrap_or(first_socket) + } + + async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { + loop { + match self.server.process(dgram.take(), self.args.now()) { + Output::Datagram(dgram) => { + let socket = self.find_socket(dgram.source()); + socket.writable().await?; + socket.send(dgram)?; + } + Output::Callback(new_timeout) => { + qdebug!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; + } + Output::None => { + break; + } + } + } + Ok(()) + } + + // Wait for any of the sockets to be readable or the timeout to fire. + async fn ready(&mut self) -> Result { + let sockets_ready = select_all( + self.sockets + .iter() + .map(|(_host, socket)| Box::pin(socket.readable())), + ) + .map(|(res, inx, _)| match res { + Ok(()) => Ok(Ready::Socket(inx)), + Err(e) => Err(e), + }); + let timeout_ready = self + .timeout + .as_mut() + .map_or(Either::Right(futures::future::pending()), Either::Left) + .map(|()| Ok(Ready::Timeout)); + select(sockets_ready, timeout_ready).await.factor_first().0 + } + + async fn run(&mut self) -> Res<()> { + loop { + match self.ready().await? 
{ + Ready::Socket(inx) => loop { + let (host, socket) = self.sockets.get_mut(inx).unwrap(); + let dgrams = socket.recv(host)?; + if dgrams.is_empty() { + break; + } + for dgram in dgrams { + self.process(Some(&dgram)).await?; + } + }, + Ready::Timeout => { + self.timeout = None; + self.process(None).await?; + } + } + + self.server.process_events(&self.args, self.args.now()); + self.process(None).await?; + } + } +} + +enum Ready { + Socket(usize), + Timeout, +} + +pub async fn server(mut args: Args) -> Res<()> { + const HQ_INTEROP: &str = "hq-interop"; + + neqo_common::log::init(Some(args.verbose.log_level_filter())); + assert!(!args.key.is_empty(), "Need at least one key"); + + init_db(args.db.clone())?; + + if let Some(testcase) = args.shared.qns_test.as_ref() { + if args.shared.quic_parameters.quic_version.is_empty() { + // Quic Interop Runner expects the server to support `Version1` + // only. Exceptions are testcases `versionnegotiation` (not yet + // implemented) and `v2`. + if testcase != "v2" { + args.shared.quic_parameters.quic_version = vec![Version::Version1]; + } + } else { + qwarn!("Both -V and --qns-test were set. Ignoring testcase specific versions."); + } + + // TODO: More options to deduplicate with client? 
+ match testcase.as_str() { + "http3" => (), + "zerortt" => { + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); + args.shared.quic_parameters.max_streams_bidi = 100; + } + "handshake" | "transfer" | "resumption" | "multiconnect" | "v2" => { + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); + } + "chacha20" => { + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); + args.shared.ciphers.clear(); + args.shared + .ciphers + .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); + } + "retry" => { + args.shared.use_old_http = true; + args.shared.alpn = String::from(HQ_INTEROP); + args.retry = true; + } + _ => exit(127), + } + } + + let mut servers_runner = ServersRunner::new(args)?; + servers_runner.run().await +} diff --git a/neqo-server/src/old_https.rs b/neqo-bin/src/server/old_https.rs similarity index 83% rename from neqo-server/src/old_https.rs rename to neqo-bin/src/server/old_https.rs index 1cb1d59fc7..38f3fdc3a7 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-bin/src/server/old_https.rs @@ -4,25 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] - -use std::cell::RefCell; -use std::collections::HashMap; -use std::fmt::Display; -use std::path::PathBuf; -use std::rc::Rc; -use std::time::Instant; - -use regex::Regex; +use std::{ + cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, +}; -use neqo_common::{event::Provider, hex, qdebug, Datagram}; +use neqo_common::{event::Provider, hex, qdebug, qerror, qinfo, qwarn, Datagram}; use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay, Cipher}; use neqo_http3::Error; use neqo_transport::{ server::{ActiveConnectionRef, Server, ValidateAddress}, ConnectionEvent, ConnectionIdGenerator, ConnectionParameters, Output, State, StreamId, }; +use regex::Regex; use super::{qns_read_response, Args, HttpServer}; @@ -138,33 +131,36 @@ impl Http09Server { data }; - let msg = if let Ok(s) = std::str::from_utf8(&buf[..]) { - s - } else { + let Ok(msg) = std::str::from_utf8(&buf[..]) else { self.save_partial(stream_id, buf, conn); return; }; - let re = if args.qns_test.is_some() { + let re = if args.shared.qns_test.is_some() { Regex::new(r"GET +/(\S+)(?:\r)?\n").unwrap() } else { Regex::new(r"GET +/(\d+)(?:\r)?\n").unwrap() }; let m = re.captures(msg); - let resp = match m.and_then(|m| m.get(1)) { - None => { - self.save_partial(stream_id, buf, conn); - return; - } - Some(path) => { - let path = path.as_str(); - eprintln!("Path = '{}'", path); - if args.qns_test.is_some() { - qns_read_response(path) - } else { - let count = path.parse().unwrap(); - Some(vec![b'a'; count]) + let Some(path) = m.and_then(|m| m.get(1)) else { + self.save_partial(stream_id, buf, conn); + return; + }; + + let resp = { + let path = path.as_str(); + qdebug!("Path = '{path}'"); + if args.shared.qns_test.is_some() { + match qns_read_response(path) { + Ok(data) => Some(data), + Err(e) => { + qerror!("Failed to read {path}: {e}"); + Some(b"404".to_vec()) + } } + } else { + let 
count = path.parse().unwrap(); + Some(vec![b'a'; count]) } }; self.write(stream_id, resp, conn); @@ -173,7 +169,7 @@ impl Http09Server { fn stream_writable(&mut self, stream_id: StreamId, conn: &mut ActiveConnectionRef) { match self.write_state.get_mut(&stream_id) { None => { - eprintln!("Unknown stream {}, ignoring event", stream_id); + qwarn!("Unknown stream {stream_id}, ignoring event"); } Some(stream_state) => { stream_state.writable = true; @@ -184,9 +180,9 @@ impl Http09Server { .unwrap(); qdebug!("Wrote {}", sent); *offset += sent; - self.server.add_to_waiting(conn.clone()); + self.server.add_to_waiting(conn); if *offset == data.len() { - eprintln!("Sent {} on {}, closing", sent, stream_id); + qinfo!("Sent {sent} on {stream_id}, closing"); conn.borrow_mut().stream_close_send(stream_id).unwrap(); self.write_state.remove(&stream_id); } else { @@ -199,7 +195,7 @@ impl Http09Server { } impl HttpServer for Http09Server { - fn process(&mut self, dgram: Option, now: Instant) -> Output { + fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { self.server.process(dgram, now) } @@ -211,7 +207,6 @@ impl HttpServer for Http09Server { None => break, Some(e) => e, }; - eprintln!("Event {:?}", event); match event { ConnectionEvent::NewStream { stream_id } => { self.write_state @@ -230,15 +225,16 @@ impl HttpServer for Http09Server { .unwrap(); } ConnectionEvent::StateChange(_) + | ConnectionEvent::SendStreamCreatable { .. } | ConnectionEvent::SendStreamComplete { .. 
} => (), - e => eprintln!("unhandled event {:?}", e), + e => qwarn!("unhandled event {e:?}"), } } } } fn set_qlog_dir(&mut self, dir: Option) { - self.server.set_qlog_dir(dir) + self.server.set_qlog_dir(dir); } fn validate_address(&mut self, v: ValidateAddress) { @@ -252,7 +248,7 @@ impl HttpServer for Http09Server { fn enable_ech(&mut self) -> &[u8] { let (sk, pk) = generate_ech_keys().expect("generate ECH keys"); self.server - .enable_ech(random(1)[0], "public.example", &sk, &pk) + .enable_ech(random::<1>()[0], "public.example", &sk, &pk) .expect("enable ECH"); self.server.ech_config() } diff --git a/neqo-bin/src/udp.rs b/neqo-bin/src/udp.rs new file mode 100644 index 0000000000..7ccfa1f36f --- /dev/null +++ b/neqo-bin/src/udp.rs @@ -0,0 +1,224 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(clippy::missing_errors_doc)] // Functions simply delegate to tokio and quinn-udp. +#![allow(clippy::missing_panics_doc)] // Functions simply delegate to tokio and quinn-udp. + +use std::{ + io::{self, IoSliceMut}, + net::{SocketAddr, ToSocketAddrs}, + slice, +}; + +use neqo_common::{Datagram, IpTos}; +use quinn_udp::{EcnCodepoint, RecvMeta, Transmit, UdpSocketState}; +use tokio::io::Interest; + +/// Socket receive buffer size. +/// +/// Allows reading multiple datagrams in a single [`Socket::recv`] call. +const RECV_BUF_SIZE: usize = u16::MAX as usize; + +pub struct Socket { + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] + socket: tokio::net::UdpSocket, + state: UdpSocketState, + recv_buf: Vec, +} + +impl Socket { + /// Calls [`std::net::UdpSocket::bind`] and instantiates [`quinn_udp::UdpSocketState`]. 
+ pub fn bind(addr: A) -> Result { + let socket = std::net::UdpSocket::bind(addr)?; + + Ok(Self { + state: quinn_udp::UdpSocketState::new((&socket).into())?, + socket: tokio::net::UdpSocket::from_std(socket)?, + recv_buf: vec![0; RECV_BUF_SIZE], + }) + } + + /// See [`tokio::net::UdpSocket::local_addr`]. + pub fn local_addr(&self) -> io::Result { + self.socket.local_addr() + } + + /// See [`tokio::net::UdpSocket::writable`]. + pub async fn writable(&self) -> Result<(), io::Error> { + self.socket.writable().await + } + + /// See [`tokio::net::UdpSocket::readable`]. + pub async fn readable(&self) -> Result<(), io::Error> { + self.socket.readable().await + } + + /// Send the UDP datagram on the specified socket. + pub fn send(&self, d: Datagram) -> io::Result<()> { + let transmit = Transmit { + destination: d.destination(), + ecn: EcnCodepoint::from_bits(Into::::into(d.tos())), + contents: Vec::from(d).into(), + segment_size: None, + src_ip: None, + }; + + let n = self.socket.try_io(Interest::WRITABLE, || { + self.state + .send((&self.socket).into(), slice::from_ref(&transmit)) + })?; + + assert_eq!(n, 1, "only passed one slice"); + + Ok(()) + } + + /// Receive a UDP datagram on the specified socket. 
+ pub fn recv(&mut self, local_address: &SocketAddr) -> Result, io::Error> { + let mut meta = RecvMeta::default(); + + match self.socket.try_io(Interest::READABLE, || { + self.state.recv( + (&self.socket).into(), + &mut [IoSliceMut::new(&mut self.recv_buf)], + slice::from_mut(&mut meta), + ) + }) { + Ok(n) => { + assert_eq!(n, 1, "only passed one slice"); + } + Err(ref err) + if err.kind() == io::ErrorKind::WouldBlock + || err.kind() == io::ErrorKind::Interrupted => + { + return Ok(vec![]) + } + Err(err) => { + return Err(err); + } + }; + + if meta.len == 0 { + eprintln!("zero length datagram received?"); + return Ok(vec![]); + } + if meta.len == self.recv_buf.len() { + eprintln!( + "Might have received more than {} bytes", + self.recv_buf.len() + ); + } + + Ok(self.recv_buf[0..meta.len] + .chunks(meta.stride.min(self.recv_buf.len())) + .map(|d| { + Datagram::new( + meta.addr, + *local_address, + meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(), + None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749 + d, + ) + }) + .collect()) + } +} + +#[cfg(test)] +mod tests { + use neqo_common::{IpTosDscp, IpTosEcn}; + + use super::*; + + #[tokio::test] + async fn datagram_tos() -> Result<(), io::Error> { + let sender = Socket::bind("127.0.0.1:0")?; + let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); + let mut receiver = Socket::bind(receiver_addr)?; + + let datagram = Datagram::new( + sender.local_addr()?, + receiver.local_addr()?, + IpTos::from((IpTosDscp::Le, IpTosEcn::Ect1)), + None, + "Hello, world!".as_bytes().to_vec(), + ); + + sender.writable().await?; + sender.send(datagram.clone())?; + + receiver.readable().await?; + let received_datagram = receiver + .recv(&receiver_addr) + .expect("receive to succeed") + .into_iter() + .next() + .expect("receive to yield datagram"); + + // Assert that the ECN is correct. 
+ assert_eq!( + IpTosEcn::from(datagram.tos()), + IpTosEcn::from(received_datagram.tos()) + ); + + Ok(()) + } + + /// Expect [`Socket::recv`] to handle multiple [`Datagram`]s on GRO read. + #[tokio::test] + #[cfg_attr(not(any(target_os = "linux", target_os = "windows")), ignore)] + async fn many_datagrams_through_gro() -> Result<(), io::Error> { + const SEGMENT_SIZE: usize = 128; + + let sender = Socket::bind("127.0.0.1:0")?; + let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap(); + let mut receiver = Socket::bind(receiver_addr)?; + + // `neqo_common::udp::Socket::send` does not yet + // (https://github.com/mozilla/neqo/issues/1693) support GSO. Use + // `quinn_udp` directly. + let max_gso_segments = sender.state.max_gso_segments(); + let msg = vec![0xAB; SEGMENT_SIZE * max_gso_segments]; + let transmit = Transmit { + destination: receiver.local_addr()?, + ecn: EcnCodepoint::from_bits(Into::::into(IpTos::from(( + IpTosDscp::Le, + IpTosEcn::Ect1, + )))), + contents: msg.clone().into(), + segment_size: Some(SEGMENT_SIZE), + src_ip: None, + }; + sender.writable().await?; + let n = sender.socket.try_io(Interest::WRITABLE, || { + sender + .state + .send((&sender.socket).into(), slice::from_ref(&transmit)) + })?; + assert_eq!(n, 1, "only passed one slice"); + + // Allow for one GSO sendmmsg to result in multiple GRO recvmmsg. + let mut num_received = 0; + while num_received < max_gso_segments { + receiver.readable().await?; + receiver + .recv(&receiver_addr) + .expect("receive to succeed") + .into_iter() + .for_each(|d| { + assert_eq!( + SEGMENT_SIZE, + d.len(), + "Expect received datagrams to have same length as sent datagrams." 
+ ); + num_received += 1; + }); + } + + Ok(()) + } +} diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml deleted file mode 100644 index bc679c3e10..0000000000 --- a/neqo-client/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "neqo-client" -version = "0.6.4" -authors = ["Martin Thomson ", - "Dragana Damjanovic ", - "Andy Grover "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" - -[dependencies] -neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } -neqo-common = { path="./../neqo-common" } -neqo-http3 = { path = "./../neqo-http3" } -neqo-qpack = { path = "./../neqo-qpack" } -structopt = "0.3.7" -url = "2.0" -qlog = "0.4.0" - -[features] -default = ["deny-warnings"] -deny-warnings = [] diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs deleted file mode 100644 index e68000c2f8..0000000000 --- a/neqo-client/src/main.rs +++ /dev/null @@ -1,1194 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] - -use qlog::QlogStreamer; - -use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; -use neqo_crypto::{ - constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, - init, AuthenticationStatus, Cipher, ResumptionToken, -}; -use neqo_http3::{ - self, Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, - Priority, -}; -use neqo_transport::{ - CongestionControlAlgorithm, Connection, ConnectionId, ConnectionParameters, - EmptyConnectionIdGenerator, Error as TransportError, StreamId, StreamType, Version, -}; - -use std::{ - cell::RefCell, - collections::{HashMap, VecDeque}, - convert::TryFrom, - fmt::{self, Display}, - fs::{create_dir_all, File, OpenOptions}, - io::{self, ErrorKind, Write}, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs, UdpSocket}, - path::PathBuf, - process::exit, - rc::Rc, - str::FromStr, - time::{Duration, Instant}, -}; - -use structopt::StructOpt; -use url::{Origin, Url}; - -#[derive(Debug)] -pub enum ClientError { - ArgumentError(&'static str), - Http3Error(neqo_http3::Error), - IoError(io::Error), - QlogError, - TransportError(neqo_transport::Error), -} - -impl From for ClientError { - fn from(err: io::Error) -> Self { - Self::IoError(err) - } -} - -impl From for ClientError { - fn from(err: neqo_http3::Error) -> Self { - Self::Http3Error(err) - } -} - -impl From for ClientError { - fn from(_err: qlog::Error) -> Self { - Self::QlogError - } -} - -impl From for ClientError { - fn from(err: neqo_transport::Error) -> Self { - Self::TransportError(err) - } -} - -impl Display for ClientError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Error: {:?}", self)?; - Ok(()) - } -} - -type Res = Result; - -/// Track whether a key update is needed. 
-#[derive(Debug, PartialEq, Eq)] -struct KeyUpdateState(bool); - -impl KeyUpdateState { - pub fn maybe_update(&mut self, update_fn: F) -> Res<()> - where - F: FnOnce() -> Result<(), E>, - E: Into, - { - if self.0 { - if let Err(e) = update_fn() { - let e = e.into(); - match e { - ClientError::TransportError(TransportError::KeyUpdateBlocked) - | ClientError::Http3Error(Error::TransportError( - TransportError::KeyUpdateBlocked, - )) => (), - _ => return Err(e), - } - } else { - println!("Keys updated"); - self.0 = false; - } - } - Ok(()) - } - - fn needed(&self) -> bool { - self.0 - } -} - -#[derive(Debug)] -struct HexArg(Vec); -impl FromStr for HexArg { - type Err = ClientError; - - fn from_str(s: &str) -> Res { - fn v(c: u8) -> Res { - match c { - b'A'..=b'F' => Ok(c - b'A' + 10), - b'a'..=b'f' => Ok(c - b'a' + 10), - b'0'..=b'9' => Ok(c - b'0'), - _ => Err(ClientError::ArgumentError("non-hex character")), - } - } - let s: &[u8] = s.as_ref(); - if s.len() % 2 != 0 { - return Err(ClientError::ArgumentError("invalid length")); - } - let mut buf = vec![0; s.len() / 2]; - for i in 0..buf.len() { - buf[i] = (v(s[i * 2])? << 4) | v(s[i * 2 + 1])?; - } - Ok(Self(buf)) - } -} - -impl AsRef<[u8]> for HexArg { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -#[derive(Debug, StructOpt)] -#[structopt( - name = "neqo-client", - about = "A basic QUIC HTTP/0.9 and HTTP/3 client." -)] -pub struct Args { - #[structopt(short = "a", long, default_value = "h3")] - /// ALPN labels to negotiate. - /// - /// This client still only does HTTP/3 no matter what the ALPN says. 
- alpn: String, - - urls: Vec, - - #[structopt(short = "m", default_value = "GET")] - method: String, - - #[structopt(short = "h", long, number_of_values = 2)] - header: Vec, - - #[structopt(name = "encoder-table-size", long, default_value = "16384")] - max_table_size_encoder: u64, - - #[structopt(name = "decoder-table-size", long, default_value = "16384")] - max_table_size_decoder: u64, - - #[structopt(name = "max-blocked-streams", short = "b", long, default_value = "10")] - max_blocked_streams: u16, - - #[structopt(name = "max-push", short = "p", long, default_value = "10")] - max_concurrent_push_streams: u64, - - #[structopt(name = "use-old-http", short = "o", long)] - /// Use http 0.9 instead of HTTP/3 - use_old_http: bool, - - #[structopt(name = "download-in-series", long)] - /// Download resources in series using separate connections. - download_in_series: bool, - - #[structopt(name = "concurrency", long, default_value = "100")] - /// The maximum number of requests to have outstanding at one time. - concurrency: usize, - - #[structopt(name = "output-read-data", long)] - /// Output received data to stdout - output_read_data: bool, - - #[structopt(name = "qlog-dir", long)] - /// Enable QLOG logging and QLOG traces to this directory - qlog_dir: Option, - - #[structopt(name = "output-dir", long)] - /// Save contents of fetched URLs to a directory - output_dir: Option, - - #[structopt(name = "qns-test", long)] - /// Enable special behavior for use with QUIC Network Simulator - qns_test: Option, - - #[structopt(short = "r", long)] - /// Client attempts to resume by making multiple connections to servers. - /// Requires that 2 or more URLs are listed for each server. - /// Use this for 0-RTT: the stack always attempts 0-RTT on resumption. - resume: bool, - - #[structopt(name = "key-update", long)] - /// Attempt to initiate a key update immediately after confirming the connection. 
- key_update: bool, - - #[structopt(short = "c", long, number_of_values = 1)] - /// The set of TLS cipher suites to enable. - /// From: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256. - ciphers: Vec, - - #[structopt(name = "ech", long)] - /// Enable encrypted client hello (ECH). - /// This takes an encoded ECH configuration in hexadecimal format. - ech: Option, - - #[structopt(flatten)] - quic_parameters: QuicParameters, - - #[structopt(name = "ipv4-only", short = "4", long)] - /// Connect only over IPv4 - ipv4_only: bool, - - #[structopt(name = "ipv6-only", short = "6", long)] - /// Connect only over IPv6 - ipv6_only: bool, -} - -impl Args { - fn get_ciphers(&self) -> Vec { - self.ciphers - .iter() - .filter_map(|c| match c.as_str() { - "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), - "TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384), - "TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256), - _ => None, - }) - .collect::>() - } -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -struct VersionArg(Version); -impl FromStr for VersionArg { - type Err = ClientError; - - fn from_str(s: &str) -> Res { - let v = u32::from_str_radix(s, 16) - .map_err(|_| ClientError::ArgumentError("versions need to be specified in hex"))?; - Ok(Self(Version::try_from(v).map_err(|_| { - ClientError::ArgumentError("unknown version") - })?)) - } -} - -#[derive(Debug, StructOpt)] -struct QuicParameters { - #[structopt( - short = "V", - long, - multiple = true, - use_delimiter = true, - number_of_values = 1 - )] - /// A list of versions to support, in hex. - /// The first is the version to attempt. - /// Adding multiple values adds versions in order of preference. - /// If the first listed version appears in the list twice, the position - /// of the second entry determines the preference order of that version. - quic_version: Vec, - - #[structopt(long, default_value = "16")] - /// Set the MAX_STREAMS_BIDI limit. 
- max_streams_bidi: u64, - - #[structopt(long, default_value = "16")] - /// Set the MAX_STREAMS_UNI limit. - max_streams_uni: u64, - - #[structopt(long = "idle", default_value = "30")] - /// The idle timeout for connections, in seconds. - idle_timeout: u64, - - #[structopt(long = "cc", default_value = "newreno")] - /// The congestion controller to use. - congestion_control: CongestionControlAlgorithm, -} - -impl QuicParameters { - fn get(&self, alpn: &str) -> ConnectionParameters { - let params = ConnectionParameters::default() - .max_streams(StreamType::BiDi, self.max_streams_bidi) - .max_streams(StreamType::UniDi, self.max_streams_uni) - .idle_timeout(Duration::from_secs(self.idle_timeout)) - .cc_algorithm(self.congestion_control); - - if let Some(&first) = self.quic_version.first() { - let all = if self.quic_version[1..].contains(&first) { - &self.quic_version[1..] - } else { - &self.quic_version - }; - params.versions(first.0, all.iter().map(|&x| x.0).collect()) - } else { - let version = match alpn { - "h3" | "hq-interop" => Version::default(), - "h3-29" | "hq-29" => Version::Draft29, - "h3-30" | "hq-30" => Version::Draft30, - "h3-31" | "hq-31" => Version::Draft31, - "h3-32" | "hq-32" => Version::Draft32, - _ => Version::default(), - }; - params.versions(version, Version::all()) - } - } -} - -fn emit_datagram(socket: &UdpSocket, d: Datagram) -> io::Result<()> { - let sent = socket.send_to(&d[..], d.destination())?; - if sent != d.len() { - eprintln!("Unable to send all {} bytes of datagram", d.len()); - } - Ok(()) -} - -fn get_output_file( - url: &Url, - output_dir: &Option, - all_paths: &mut Vec, -) -> Option { - if let Some(ref dir) = output_dir { - let mut out_path = dir.clone(); - - let url_path = if url.path() == "/" { - // If no path is given... call it "root"? - "root" - } else { - // Omit leading slash - &url.path()[1..] 
- }; - out_path.push(url_path); - - if all_paths.contains(&out_path) { - eprintln!("duplicate path {}", out_path.display()); - return None; - } - - eprintln!("Saving {} to {:?}", url, out_path); - - if let Some(parent) = out_path.parent() { - create_dir_all(parent).ok()?; - } - - let f = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&out_path) - .ok()?; - - all_paths.push(out_path); - Some(f) - } else { - None - } -} - -fn process_loop( - local_addr: &SocketAddr, - socket: &UdpSocket, - client: &mut Http3Client, - handler: &mut Handler, -) -> Res { - let buf = &mut [0u8; 2048]; - loop { - if let Http3State::Closed(..) = client.state() { - return Ok(client.state()); - } - - let mut exiting = !handler.handle(client)?; - - loop { - match client.process_output(Instant::now()) { - Output::Datagram(dgram) => { - if let Err(e) = emit_datagram(socket, dgram) { - eprintln!("UDP write error: {}", e); - client.close(Instant::now(), 0, e.to_string()); - exiting = true; - break; - } - } - Output::Callback(duration) => { - socket.set_read_timeout(Some(duration)).unwrap(); - break; - } - Output::None => { - // Not strictly necessary, since we're about to exit - socket.set_read_timeout(None).unwrap(); - exiting = true; - break; - } - } - } - - if exiting { - return Ok(client.state()); - } - - match socket.recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == ErrorKind::WouldBlock || err.kind() == ErrorKind::Interrupted => {} - Err(err) => { - eprintln!("UDP error: {}", err); - exit(1) - } - Ok((sz, remote)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let d = Datagram::new(remote, *local_addr, &buf[..sz]); - client.process_input(d, Instant::now()); - handler.maybe_key_update(client)?; - } - } - }; - } -} - -struct Handler<'a> { - streams: HashMap>, - url_queue: VecDeque, - all_paths: Vec, - args: &'a Args, - key_update: KeyUpdateState, - token: Option, -} - -impl<'a> 
Handler<'a> { - fn download_urls(&mut self, client: &mut Http3Client) { - loop { - if self.url_queue.is_empty() { - break; - } - if self.streams.len() >= self.args.concurrency { - break; - } - if !self.download_next(client) { - break; - } - } - } - - fn download_next(&mut self, client: &mut Http3Client) -> bool { - if self.key_update.needed() { - println!("Deferring requests until first key update"); - return false; - } - let url = self - .url_queue - .pop_front() - .expect("download_next called with empty queue"); - match client.fetch( - Instant::now(), - &self.args.method, - &url, - &to_headers(&self.args.header), - Priority::default(), - ) { - Ok(client_stream_id) => { - println!( - "Successfully created stream id {} for {}", - client_stream_id, url - ); - client - .stream_close_send(client_stream_id) - .expect("failed to close send stream"); - - let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths); - - self.streams.insert(client_stream_id, out_file); - true - } - Err(Error::TransportError(TransportError::StreamLimitError)) - | Err(Error::StreamLimitError) - | Err(Error::Unavailable) => { - self.url_queue.push_front(url); - false - } - Err(e) => { - panic!("Can't create stream {}", e); - } - } - } - - fn maybe_key_update(&mut self, c: &mut Http3Client) -> Res<()> { - self.key_update.maybe_update(|| c.initiate_key_update())?; - self.download_urls(c); - Ok(()) - } - - fn done(&mut self) -> bool { - self.streams.is_empty() && self.url_queue.is_empty() - } - - fn on_stream_fin(&mut self, client: &mut Http3Client, stream_id: StreamId) -> bool { - self.streams.remove(&stream_id); - self.download_urls(client); - if self.done() { - client.close(Instant::now(), 0, "kthxbye!"); - return false; - } - true - } - - fn handle(&mut self, client: &mut Http3Client) -> Res { - while let Some(event) = client.next_event() { - match event { - Http3ClientEvent::AuthenticationNeeded => { - client.authenticated(AuthenticationStatus::Ok, Instant::now()); - } 
- Http3ClientEvent::HeaderReady { - stream_id, - headers, - fin, - .. - } => { - match self.streams.get(&stream_id) { - Some(out_file) => { - if out_file.is_none() { - println!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); - } - } - None => { - println!("Data on unexpected stream: {}", stream_id); - return Ok(false); - } - } - if fin { - return Ok(self.on_stream_fin(client, stream_id)); - } - } - Http3ClientEvent::DataReadable { stream_id } => { - let mut stream_done = false; - match self.streams.get_mut(&stream_id) { - None => { - println!("Data on unexpected stream: {}", stream_id); - return Ok(false); - } - Some(out_file) => loop { - let mut data = vec![0; 4096]; - let (sz, fin) = client - .read_data(Instant::now(), stream_id, &mut data) - .expect("Read should succeed"); - - if let Some(out_file) = out_file { - if sz > 0 { - out_file.write_all(&data[..sz])?; - } - } else if !self.args.output_read_data { - println!("READ[{}]: {} bytes", stream_id, sz); - } else if let Ok(txt) = String::from_utf8(data.clone()) { - println!("READ[{}]: {}", stream_id, txt); - } else { - println!("READ[{}]: 0x{}", stream_id, hex(&data)); - } - - if fin { - if out_file.is_none() { - println!("", stream_id); - } - stream_done = true; - break; - } - - if sz == 0 { - break; - } - }, - } - - if stream_done { - return Ok(self.on_stream_fin(client, stream_id)); - } - } - Http3ClientEvent::StateChange(Http3State::Connected) - | Http3ClientEvent::RequestsCreatable => { - self.download_urls(client); - } - Http3ClientEvent::ResumptionToken(t) => self.token = Some(t), - _ => { - println!("Unhandled event {:?}", event); - } - } - } - - Ok(true) - } -} - -fn to_headers(values: &[impl AsRef]) -> Vec
{ - values - .iter() - .scan(None, |state, value| { - if let Some(name) = state.take() { - *state = None; - Some(Header::new(name, value.as_ref())) - } else { - *state = Some(value.as_ref().to_string()); - None - } - }) - .collect() -} - -fn client( - args: &Args, - socket: &UdpSocket, - local_addr: SocketAddr, - remote_addr: SocketAddr, - hostname: &str, - urls: &[Url], - resumption_token: Option, -) -> Res> { - let mut transport = Connection::new_client( - hostname, - &[&args.alpn], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - local_addr, - remote_addr, - args.quic_parameters.get(args.alpn.as_str()), - Instant::now(), - )?; - let ciphers = args.get_ciphers(); - if !ciphers.is_empty() { - transport.set_ciphers(&ciphers)?; - } - let mut client = Http3Client::new_with_conn( - transport, - Http3Parameters::default() - .max_table_size_encoder(args.max_table_size_encoder) - .max_table_size_decoder(args.max_table_size_decoder) - .max_blocked_streams(args.max_blocked_streams) - .max_concurrent_push_streams(args.max_concurrent_push_streams), - ); - - let qlog = qlog_new(args, hostname, client.connection_id())?; - client.set_qlog(qlog); - if let Some(ech) = &args.ech { - client.enable_ech(ech).expect("enable ECH"); - } - if let Some(token) = resumption_token { - client - .enable_resumption(Instant::now(), token) - .expect("enable resumption"); - } - - let key_update = KeyUpdateState(args.key_update); - let mut h = Handler { - streams: HashMap::new(), - url_queue: VecDeque::from(urls.to_vec()), - all_paths: Vec::new(), - args, - key_update, - token: None, - }; - - process_loop(&local_addr, socket, &mut client, &mut h)?; - - let token = if args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. 
- h.token - .or_else(|| client.take_resumption_token(Instant::now())) - } else { - None - }; - Ok(token) -} - -fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { - if let Some(qlog_dir) = &args.qlog_dir { - let mut qlog_path = qlog_dir.to_path_buf(); - let filename = format!("{}-{}.qlog", hostname, cid); - qlog_path.push(filename); - - let f = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&qlog_path)?; - - let streamer = QlogStreamer::new( - qlog::QLOG_VERSION.to_string(), - Some("Example qlog".to_string()), - Some("Example qlog description".to_string()), - None, - std::time::Instant::now(), - common::qlog::new_trace(Role::Client), - Box::new(f), - ); - - Ok(NeqoQlog::enabled(streamer, qlog_path)?) - } else { - Ok(NeqoQlog::disabled()) - } -} - -fn main() -> Res<()> { - init(); - - let mut args = Args::from_args(); - - if let Some(testcase) = args.qns_test.as_ref() { - match testcase.as_str() { - "http3" => {} - "handshake" | "transfer" | "retry" => { - args.use_old_http = true; - } - "zerortt" | "resumption" => { - if args.urls.len() < 2 { - eprintln!("Warning: resumption tests won't work without >1 URL"); - exit(127); - } - args.use_old_http = true; - args.resume = true; - } - "multiconnect" => { - args.use_old_http = true; - args.download_in_series = true; - } - "chacha20" => { - args.use_old_http = true; - args.ciphers.clear(); - args.ciphers - .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); - } - "keyupdate" => { - args.use_old_http = true; - args.key_update = true; - } - _ => exit(127), - } - } - - let mut urls_by_origin: HashMap> = HashMap::new(); - for url in &args.urls { - let entry = urls_by_origin.entry(url.origin()).or_default(); - entry.push(url.clone()); - } - - for ((_scheme, host, port), urls) in urls_by_origin.into_iter().filter_map(|(k, v)| match k { - Origin::Tuple(s, h, p) => Some(((s, h, p), v)), - Origin::Opaque(x) => { - eprintln!("Opaque origin {:?}", x); - None - } - }) { 
- let remote_addr = format!("{}:{}", host, port) - .to_socket_addrs()? - .find(|addr| { - !matches!( - (addr, args.ipv4_only, args.ipv6_only), - (SocketAddr::V4(..), false, true) | (SocketAddr::V6(..), true, false) - ) - }); - let remote_addr = match remote_addr { - Some(a) => a, - None => { - eprintln!("No compatible address found for: {}", host); - exit(1); - } - }; - - let local_addr = match remote_addr { - SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0), - SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), - }; - - let socket = match UdpSocket::bind(local_addr) { - Err(e) => { - eprintln!("Unable to bind UDP socket: {}", e); - exit(1) - } - Ok(s) => s, - }; - - let real_local = socket.local_addr().unwrap(); - println!( - "{} Client connecting: {:?} -> {:?}", - if args.use_old_http { "H9" } else { "H3" }, - real_local, - remote_addr, - ); - - let hostname = format!("{}", host); - let mut token: Option = None; - let mut remaining = &urls[..]; - let mut first = true; - loop { - let to_request; - if (args.resume && first) || args.download_in_series { - to_request = &remaining[..1]; - remaining = &remaining[1..]; - if args.resume && first && remaining.is_empty() { - println!( - "Error: resumption to {} cannot work without at least 2 URLs.", - hostname - ); - exit(127); - } - } else { - to_request = remaining; - remaining = &[][..]; - } - if to_request.is_empty() { - break; - } - - first = false; - token = if args.use_old_http { - old::old_client( - &args, - &socket, - real_local, - remote_addr, - &hostname, - to_request, - token, - )? - } else { - client( - &args, - &socket, - real_local, - remote_addr, - &hostname, - to_request, - token, - )? 
- }; - } - } - - Ok(()) -} - -mod old { - use std::{ - cell::RefCell, - collections::{HashMap, VecDeque}, - fs::File, - io::{ErrorKind, Write}, - net::{SocketAddr, UdpSocket}, - path::PathBuf, - process::exit, - rc::Rc, - time::Instant, - }; - - use url::Url; - - use super::{qlog_new, KeyUpdateState, Res}; - - use neqo_common::{event::Provider, Datagram}; - use neqo_crypto::{AuthenticationStatus, ResumptionToken}; - use neqo_transport::{ - Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, - StreamType, - }; - - use super::{emit_datagram, get_output_file, Args}; - - struct HandlerOld<'b> { - streams: HashMap>, - url_queue: VecDeque, - all_paths: Vec, - args: &'b Args, - token: Option, - key_update: KeyUpdateState, - } - - impl<'b> HandlerOld<'b> { - fn download_urls(&mut self, client: &mut Connection) { - loop { - if self.url_queue.is_empty() { - break; - } - if self.streams.len() >= self.args.concurrency { - break; - } - if !self.download_next(client) { - break; - } - } - } - - fn download_next(&mut self, client: &mut Connection) -> bool { - if self.key_update.needed() { - println!("Deferring requests until after first key update"); - return false; - } - let url = self - .url_queue - .pop_front() - .expect("download_next called with empty queue"); - match client.stream_create(StreamType::BiDi) { - Ok(client_stream_id) => { - println!("Created stream {} for {}", client_stream_id, url); - let req = format!("GET {}\r\n", url.path()); - _ = client - .stream_send(client_stream_id, req.as_bytes()) - .unwrap(); - client.stream_close_send(client_stream_id).unwrap(); - let out_file = - get_output_file(&url, &self.args.output_dir, &mut self.all_paths); - self.streams.insert(client_stream_id, out_file); - true - } - Err(e @ Error::StreamLimitError) | Err(e @ Error::ConnectionState) => { - println!("Cannot create stream {:?}", e); - self.url_queue.push_front(url); - false - } - Err(e) => { - panic!("Error creating stream {:?}", e); - } - 
} - } - - /// Read and maybe print received data from a stream. - // Returns bool: was fin received? - fn read_from_stream( - client: &mut Connection, - stream_id: StreamId, - output_read_data: bool, - maybe_out_file: &mut Option, - ) -> Res { - let mut data = vec![0; 4096]; - loop { - let (sz, fin) = client.stream_recv(stream_id, &mut data)?; - if sz == 0 { - return Ok(fin); - } - - if let Some(out_file) = maybe_out_file { - out_file.write_all(&data[..sz])?; - } else if !output_read_data { - println!("READ[{}]: {} bytes", stream_id, sz); - } else { - println!( - "READ[{}]: {}", - stream_id, - String::from_utf8(data.clone()).unwrap() - ) - } - if fin { - return Ok(true); - } - } - } - - fn maybe_key_update(&mut self, c: &mut Connection) -> Res<()> { - self.key_update.maybe_update(|| c.initiate_key_update())?; - self.download_urls(c); - Ok(()) - } - - fn read(&mut self, client: &mut Connection, stream_id: StreamId) -> Res { - let mut maybe_maybe_out_file = self.streams.get_mut(&stream_id); - match &mut maybe_maybe_out_file { - None => { - println!("Data on unexpected stream: {}", stream_id); - return Ok(false); - } - Some(maybe_out_file) => { - let fin_recvd = Self::read_from_stream( - client, - stream_id, - self.args.output_read_data, - maybe_out_file, - )?; - - if fin_recvd { - if maybe_out_file.is_none() { - println!("", stream_id); - } - self.streams.remove(&stream_id); - self.download_urls(client); - if self.streams.is_empty() && self.url_queue.is_empty() { - return Ok(false); - } - } - } - } - Ok(true) - } - - /// Just in case we didn't get a resumption token event, this - /// iterates through events until one is found. 
- fn get_token(&mut self, client: &mut Connection) { - for event in client.events() { - if let ConnectionEvent::ResumptionToken(token) = event { - self.token = Some(token); - } - } - } - - fn handle(&mut self, client: &mut Connection) -> Res { - while let Some(event) = client.next_event() { - match event { - ConnectionEvent::AuthenticationNeeded => { - client.authenticated(AuthenticationStatus::Ok, Instant::now()); - } - ConnectionEvent::RecvStreamReadable { stream_id } => { - if !self.read(client, stream_id)? { - self.get_token(client); - client.close(Instant::now(), 0, "kthxbye!"); - return Ok(false); - }; - } - ConnectionEvent::SendStreamWritable { stream_id } => { - println!("stream {} writable", stream_id) - } - ConnectionEvent::SendStreamComplete { stream_id } => { - println!("stream {} complete", stream_id); - } - ConnectionEvent::SendStreamCreatable { stream_type } => { - println!("stream {:?} creatable", stream_type); - if stream_type == StreamType::BiDi { - self.download_urls(client); - } - } - ConnectionEvent::StateChange(State::WaitInitial) - | ConnectionEvent::StateChange(State::Handshaking) - | ConnectionEvent::StateChange(State::Connected) => { - println!("{:?}", event); - self.download_urls(client); - } - ConnectionEvent::StateChange(State::Confirmed) => { - self.maybe_key_update(client)?; - } - ConnectionEvent::ResumptionToken(token) => { - self.token = Some(token); - } - _ => { - println!("Unhandled event {:?}", event); - } - } - } - - Ok(true) - } - } - - fn process_loop_old( - local_addr: &SocketAddr, - socket: &UdpSocket, - client: &mut Connection, - handler: &mut HandlerOld, - ) -> Res { - let buf = &mut [0u8; 2048]; - loop { - if let State::Closed(..) 
= client.state() { - return Ok(client.state().clone()); - } - - let mut exiting = !handler.handle(client)?; - - loop { - match client.process_output(Instant::now()) { - Output::Datagram(dgram) => { - if let Err(e) = emit_datagram(socket, dgram) { - eprintln!("UDP write error: {}", e); - client.close(Instant::now(), 0, e.to_string()); - exiting = true; - break; - } - } - Output::Callback(duration) => { - socket.set_read_timeout(Some(duration)).unwrap(); - break; - } - Output::None => { - // Not strictly necessary, since we're about to exit - socket.set_read_timeout(None).unwrap(); - exiting = true; - break; - } - } - } - - if exiting { - return Ok(client.state().clone()); - } - - match socket.recv_from(&mut buf[..]) { - Err(err) => { - if err.kind() != ErrorKind::WouldBlock && err.kind() != ErrorKind::Interrupted { - eprintln!("UDP error: {}", err); - exit(1); - } - } - Ok((sz, addr)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let d = Datagram::new(addr, *local_addr, &buf[..sz]); - client.process_input(d, Instant::now()); - handler.maybe_key_update(client)?; - } - } - } - } - } - - pub fn old_client( - args: &Args, - socket: &UdpSocket, - local_addr: SocketAddr, - remote_addr: SocketAddr, - origin: &str, - urls: &[Url], - token: Option, - ) -> Res> { - let alpn = match args.alpn.as_str() { - "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.alpn.as_str(), - _ => "hq-interop", - }; - - let mut client = Connection::new_client( - origin, - &[alpn], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - local_addr, - remote_addr, - args.quic_parameters.get(alpn), - Instant::now(), - )?; - - if let Some(tok) = token { - client.enable_resumption(Instant::now(), tok)?; - } - - let ciphers = args.get_ciphers(); - if !ciphers.is_empty() { - client.set_ciphers(&ciphers)?; - } - - client.set_qlog(qlog_new(args, origin, client.odcid().unwrap())?); - - let key_update = KeyUpdateState(args.key_update); 
- let mut h = HandlerOld { - streams: HashMap::new(), - url_queue: VecDeque::from(urls.to_vec()), - all_paths: Vec::new(), - args, - token: None, - key_update, - }; - - process_loop_old(&local_addr, socket, &mut client, &mut h)?; - - let token = if args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. - h.token - .or_else(|| client.take_resumption_token(Instant::now())) - } else { - None - }; - Ok(token) - } -} diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 04571ff8ea..069d67b834 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,23 +1,40 @@ [package] name = "neqo-common" -version = "0.6.4" -authors = ["Bobby Holley "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" build = "build.rs" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true + +[lints] +workspace = true [dependencies] -log = {version = "0.4.0", default-features = false} -env_logger = {version = "0.10", default-features = false} -lazy_static = "1.3.0" -qlog = "0.4.0" -time = {version = "0.3", features = ["formatting"]} +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +enum-map = { version = "2.7", default-features = false } +env_logger = { version = "0.10", default-features = false } +log = { version = "0.4", default-features = false } +qlog = { version = "0.12", default-features = false } +time = { version = "0.3", default-features = false, features = ["formatting"] } + +[dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports"] } +test-fixture = { path = "../test-fixture" } [features] -default = ["deny-warnings"] -deny-warnings = [] +ci = [] [target."cfg(windows)".dependencies.winapi] 
version = "0.3" features = ["timeapi"] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false + +[[bench]] +name = "timer" +harness = false diff --git a/neqo-common/benches/timer.rs b/neqo-common/benches/timer.rs new file mode 100644 index 0000000000..5ac8019db4 --- /dev/null +++ b/neqo-common/benches/timer.rs @@ -0,0 +1,39 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::time::{Duration, Instant}; + +use criterion::{criterion_group, criterion_main, Criterion}; +use neqo_common::timer::Timer; +use test_fixture::now; + +fn benchmark_timer(c: &mut Criterion) { + c.bench_function("drain a timer quickly", |b| { + b.iter_batched_ref( + make_timer, + |(_now, timer)| { + while let Some(t) = timer.next_time() { + assert!(timer.take_next(t).is_some()); + } + }, + criterion::BatchSize::SmallInput, + ); + }); +} + +fn make_timer() -> (Instant, Timer<()>) { + const TIMES: &[u64] = &[1, 2, 3, 5, 8, 13, 21, 34]; + + let now = now(); + let mut timer = Timer::new(now, Duration::from_millis(777), 100); + for &t in TIMES { + timer.add(now + Duration::from_secs(t), ()); + } + (now, timer) +} + +criterion_group!(benches, benchmark_timer); +criterion_main!(benches); diff --git a/neqo-common/build.rs b/neqo-common/build.rs index 0af1a1dbbd..9047b1f5d0 100644 --- a/neqo-common/build.rs +++ b/neqo-common/build.rs @@ -1,3 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ use std::env; fn main() { diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 6c8f3bd5a3..7fea2f71ab 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -4,7 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::{convert::TryFrom, fmt::Debug}; +use std::fmt::Debug; use crate::hex_with_len; @@ -34,16 +34,18 @@ impl<'a> Decoder<'a> { } /// Skip n bytes. + /// /// # Panics + /// /// If the remaining quantity is less than `n`. pub fn skip(&mut self, n: usize) { - assert!(self.remaining() >= n); + assert!(self.remaining() >= n, "insufficient data"); self.offset += n; } /// Skip helper that panics if `n` is `None` or not able to fit in `usize`. fn skip_inner(&mut self, n: Option) { - self.skip(usize::try_from(n.unwrap()).unwrap()); + self.skip(usize::try_from(n.expect("invalid length")).unwrap()); } /// Skip a vector. Panics if there isn't enough space. @@ -90,7 +92,9 @@ impl<'a> Decoder<'a> { } /// Decodes an unsigned integer of length 1..=8. + /// /// # Panics + /// /// This panics if `n` is not in the range `1..=8`. pub fn decode_uint(&mut self, n: usize) -> Option { assert!(n > 0 && n <= 8); @@ -107,9 +111,8 @@ impl<'a> Decoder<'a> { } /// Decodes a QUIC varint. - #[allow(clippy::missing_panics_doc)] // See https://github.com/rust-lang/rust-clippy/issues/6699 pub fn decode_varint(&mut self) -> Option { - let Some(b1) = self.decode_byte() else { return None }; + let b1 = self.decode_byte()?; match b1 >> 6 { 0 => Some(u64::from(b1 & 0x3f)), 1 => Some((u64::from(b1 & 0x3f) << 8) | self.decode_uint(1)?), @@ -127,8 +130,7 @@ impl<'a> Decoder<'a> { } fn decode_checked(&mut self, n: Option) -> Option<&'a [u8]> { - let Some(len) = n else { return None }; - if let Ok(l) = usize::try_from(len) { + if let Ok(l) = usize::try_from(n?) 
{ self.decode(l) } else { // sizeof(usize) < sizeof(u64) and the value is greater than @@ -198,21 +200,25 @@ pub struct Encoder { impl Encoder { /// Static helper function for previewing the results of encoding without doing it. + /// /// # Panics + /// /// When `v` is too large. #[must_use] pub const fn varint_len(v: u64) -> usize { match () { - _ if v < (1 << 6) => 1, - _ if v < (1 << 14) => 2, - _ if v < (1 << 30) => 4, - _ if v < (1 << 62) => 8, - _ => panic!("Varint value too large"), + () if v < (1 << 6) => 1, + () if v < (1 << 14) => 2, + () if v < (1 << 30) => 4, + () if v < (1 << 62) => 8, + () => panic!("Varint value too large"), } } /// Static helper to determine how long a varint-prefixed array encodes to. + /// /// # Panics + /// /// When `len` doesn't fit in a `u64`. #[must_use] pub fn vvec_len(len: usize) -> usize { @@ -261,7 +267,9 @@ impl Encoder { } /// Don't use this except in testing. + /// /// # Panics + /// /// When `s` contains non-hex values or an odd number of values. #[must_use] pub fn from_hex(s: impl AsRef) -> Self { @@ -291,7 +299,9 @@ impl Encoder { } /// Encode an integer of any size up to u64. + /// /// # Panics + /// /// When `n` is outside the range `1..=8`. #[allow(clippy::cast_possible_truncation)] pub fn encode_uint>(&mut self, n: usize, v: T) -> &mut Self { @@ -304,22 +314,26 @@ impl Encoder { } /// Encode a QUIC varint. + /// /// # Panics + /// /// When `v >= 1<<62`. 
pub fn encode_varint>(&mut self, v: T) -> &mut Self { let v = v.into(); match () { - _ if v < (1 << 6) => self.encode_uint(1, v), - _ if v < (1 << 14) => self.encode_uint(2, v | (1 << 14)), - _ if v < (1 << 30) => self.encode_uint(4, v | (2 << 30)), - _ if v < (1 << 62) => self.encode_uint(8, v | (3 << 62)), - _ => panic!("Varint value too large"), + () if v < (1 << 6) => self.encode_uint(1, v), + () if v < (1 << 14) => self.encode_uint(2, v | (1 << 14)), + () if v < (1 << 30) => self.encode_uint(4, v | (2 << 30)), + () if v < (1 << 62) => self.encode_uint(8, v | (3 << 62)), + () => panic!("Varint value too large"), }; self } /// Encode a vector in TLS style. + /// /// # Panics + /// /// When `v` is longer than 2^64. pub fn encode_vec(&mut self, n: usize, v: &[u8]) -> &mut Self { self.encode_uint(n, u64::try_from(v.as_ref().len()).unwrap()) @@ -327,7 +341,9 @@ impl Encoder { } /// Encode a vector in TLS style using a closure for the contents. + /// /// # Panics + /// /// When `f()` returns a length larger than `2^8n`. #[allow(clippy::cast_possible_truncation)] pub fn encode_vec_with(&mut self, n: usize, f: F) -> &mut Self { @@ -343,7 +359,9 @@ impl Encoder { } /// Encode a vector with a varint length. + /// /// # Panics + /// /// When `v` is longer than 2^64. pub fn encode_vvec(&mut self, v: &[u8]) -> &mut Self { self.encode_varint(u64::try_from(v.as_ref().len()).unwrap()) @@ -351,7 +369,9 @@ impl Encoder { } /// Encode a vector with a varint length using a closure. + /// /// # Panics + /// /// When `f()` writes more than 2^62 bytes. #[allow(clippy::cast_possible_truncation)] pub fn encode_vvec_with(&mut self, f: F) -> &mut Self { @@ -378,11 +398,11 @@ impl Encoder { self.buf[start] = (v & 0xff) as u8; let (count, bits) = match () { // Great. The byte we have is enough. 
- _ if v < (1 << 6) => return self, - _ if v < (1 << 14) => (1, 1 << 6), - _ if v < (1 << 30) => (3, 2 << 22), - _ if v < (1 << 62) => (7, 3 << 54), - _ => panic!("Varint value too large"), + () if v < (1 << 6) => return self, + () if v < (1 << 14) => (1, 1 << 6), + () if v < (1 << 30) => (3, 2 << 22), + () if v < (1 << 62) => (7, 3 << 54), + () => panic!("Varint value too large"), }; // Now, we need to encode the high bits after the main block, ... self.encode_uint(count, (v >> 8) | bits); @@ -545,7 +565,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "insufficient data")] fn skip_too_much() { let enc = Encoder::from_hex("ff"); let mut dec = enc.as_decoder(); @@ -561,7 +581,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "insufficient data")] fn skip_vec_too_much() { let enc = Encoder::from_hex("ff1234"); let mut dec = enc.as_decoder(); @@ -569,7 +589,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "invalid length")] fn skip_vec_short_length() { let enc = Encoder::from_hex("ff"); let mut dec = enc.as_decoder(); @@ -584,7 +604,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "insufficient data")] fn skip_vvec_too_much() { let enc = Encoder::from_hex("0f1234"); let mut dec = enc.as_decoder(); @@ -592,7 +612,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "invalid length")] fn skip_vvec_short_length() { let enc = Encoder::from_hex("ff"); let mut dec = enc.as_decoder(); @@ -611,7 +631,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "Varint value too large")] fn encoded_length_oob() { _ = Encoder::varint_len(1 << 62); } @@ -628,7 +648,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "Varint value too large")] fn encoded_vvec_length_oob() { _ = Encoder::vvec_len(1 << 62); } @@ -752,7 +772,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic(expected = "assertion failed")] fn 
encode_vec_with_overflow() { let mut enc = Encoder::default(); enc.encode_vec_with(1, |enc_inner| { diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index 0316dd2309..cc2cb7d113 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -4,23 +4,32 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::net::SocketAddr; -use std::ops::Deref; +use std::{net::SocketAddr, ops::Deref}; -use crate::hex_with_len; +use crate::{hex_with_len, IpTos}; -#[derive(PartialEq, Eq, Clone)] +#[derive(Clone, PartialEq, Eq)] pub struct Datagram { src: SocketAddr, dst: SocketAddr, + tos: IpTos, + ttl: Option, d: Vec, } impl Datagram { - pub fn new>>(src: SocketAddr, dst: SocketAddr, d: V) -> Self { + pub fn new>>( + src: SocketAddr, + dst: SocketAddr, + tos: IpTos, + ttl: Option, + d: V, + ) -> Self { Self { src, dst, + tos, + ttl, d: d.into(), } } @@ -34,6 +43,20 @@ impl Datagram { pub fn destination(&self) -> SocketAddr { self.dst } + + #[must_use] + pub fn tos(&self) -> IpTos { + self.tos + } + + #[must_use] + pub fn ttl(&self) -> Option { + self.ttl + } + + pub fn set_tos(&mut self, tos: IpTos) { + self.tos = tos; + } } impl Deref for Datagram { @@ -48,10 +71,30 @@ impl std::fmt::Debug for Datagram { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, - "Datagram {:?}->{:?}: {}", + "Datagram {:?} TTL {:?} {:?}->{:?}: {}", + self.tos, + self.ttl, self.src, self.dst, hex_with_len(&self.d) ) } } + +impl From for Vec { + fn from(datagram: Datagram) -> Self { + datagram.d + } +} + +#[cfg(test)] +use test_fixture::datagram; + +#[test] +fn fmt_datagram() { + let d = datagram([0; 1].to_vec()); + assert_eq!( + &format!("{d:?}"), + "Datagram IpTos(Cs0, NotEct) TTL Some(128) [fe80::1]:443->[fe80::1]:443: [1]: 00" + ); +} diff --git a/neqo-common/src/event.rs b/neqo-common/src/event.rs index 8598383e76..ea8d491822 100644 --- a/neqo-common/src/event.rs +++ 
b/neqo-common/src/event.rs @@ -4,7 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::iter::Iterator; use std::marker::PhantomData; /// An event provider is able to generate a stream of events. diff --git a/neqo-common/src/header.rs b/neqo-common/src/header.rs index 101c8ef91c..112fcf0057 100644 --- a/neqo-common/src/header.rs +++ b/neqo-common/src/header.rs @@ -11,11 +11,14 @@ pub struct Header { } impl Header { - #[allow(clippy::needless_pass_by_value)] - pub fn new(name: impl ToString, value: impl ToString) -> Self { + pub fn new(name: N, value: V) -> Self + where + N: Into + ?Sized, + V: Into + ?Sized, + { Self { - name: name.to_string(), - value: value.to_string(), + name: name.into(), + value: value.into(), } } diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index 2ac0a08cdd..e70b5f0ffb 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -6,7 +6,6 @@ use std::{ cell::RefCell, - convert::TryFrom, rc::{Rc, Weak}, time::Duration, }; @@ -27,12 +26,12 @@ impl Period { const MIN: Period = Period(1); #[cfg(windows)] - fn as_uint(&self) -> UINT { + fn as_uint(self) -> UINT { UINT::from(self.0) } #[cfg(target_os = "macos")] - fn scaled(&self, scale: f64) -> f64 { + fn scaled(self, scale: f64) -> f64 { scale * f64::from(self.0) } } @@ -126,6 +125,7 @@ mod mac { } const THREAD_TIME_CONSTRAINT_POLICY: thread_policy_flavor_t = 2; + #[allow(clippy::cast_possible_truncation)] const THREAD_TIME_CONSTRAINT_POLICY_COUNT: mach_msg_type_number_t = (size_of::() / size_of::()) as mach_msg_type_number_t; @@ -163,7 +163,7 @@ mod mac { thread_policy_set( pthread_mach_thread_np(pthread_self()), THREAD_TIME_CONSTRAINT_POLICY, - addr_of_mut!(policy) as _, // horror! + addr_of_mut!(policy).cast(), // horror! THREAD_TIME_CONSTRAINT_POLICY_COUNT, ) }; @@ -180,6 +180,7 @@ mod mac { /// Create a realtime policy and set it. 
pub fn set_realtime(base: f64) { + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] let policy = thread_time_constraint_policy { period: base as u32, // Base interval computation: (base * 0.5) as u32, @@ -198,7 +199,7 @@ mod mac { thread_policy_get( pthread_mach_thread_np(pthread_self()), THREAD_TIME_CONSTRAINT_POLICY, - addr_of_mut!(policy) as _, // horror! + addr_of_mut!(policy).cast(), // horror! &mut count, &mut get_default, ) @@ -292,14 +293,14 @@ impl Time { if let Some(p) = self.active { mac::set_realtime(p.scaled(self.scale)); } else { - mac::set_thread_policy(self.deflt.clone()); + mac::set_thread_policy(self.deflt); } } #[cfg(windows)] { if let Some(p) = self.active { - assert_eq!(0, unsafe { timeBeginPeriod(p.as_uint()) }); + _ = unsafe { timeBeginPeriod(p.as_uint()) }; } } } @@ -309,7 +310,7 @@ impl Time { #[cfg(windows)] { if let Some(p) = self.active { - assert_eq!(0, unsafe { timeEndPeriod(p.as_uint()) }); + _ = unsafe { timeEndPeriod(p.as_uint()) }; } } } @@ -338,9 +339,7 @@ impl Time { /// The handle can also be used to update the resolution. #[must_use] pub fn get(period: Duration) -> Handle { - thread_local! { - static HR_TIME: RefCell>> = RefCell::default(); - } + thread_local!(static HR_TIME: RefCell>> = RefCell::default()); HR_TIME.with(|r| { let mut b = r.borrow_mut(); @@ -370,14 +369,20 @@ impl Drop for Time { } } -#[cfg(test)] +// Only run these tests in CI on platforms other than MacOS and Windows, where the timer +// inaccuracies are too high to pass the tests. +#[cfg(all( + test, + not(all(any(target_os = "macos", target_os = "windows"), feature = "ci")) +))] mod test { - use super::Time; use std::{ thread::{sleep, spawn}, time::{Duration, Instant}, }; + use super::Time; + const ONE: Duration = Duration::from_millis(1); const ONE_AND_A_BIT: Duration = Duration::from_micros(1500); /// A limit for when high resolution timers are disabled. 
diff --git a/neqo-common/src/incrdecoder.rs b/neqo-common/src/incrdecoder.rs index 351de240f0..8468102cb6 100644 --- a/neqo-common/src/incrdecoder.rs +++ b/neqo-common/src/incrdecoder.rs @@ -21,7 +21,10 @@ impl IncrementalDecoderUint { } /// Consume some data. - #[allow(clippy::missing_panics_doc)] // See https://github.com/rust-lang/rust-clippy/issues/6699 + /// + /// # Panics + /// + /// Never, but this is not something the compiler can tell. pub fn consume(&mut self, dv: &mut Decoder) -> Option { if let Some(r) = &mut self.remaining { let amount = min(*r, dv.remaining()); @@ -86,7 +89,9 @@ impl IncrementalDecoderBuffer { } /// Consume some bytes from the decoder. + /// /// # Panics + /// /// Never; but rust doesn't know that. pub fn consume(&mut self, dv: &mut Decoder) -> Option> { let amount = min(self.remaining, dv.remaining()); @@ -108,7 +113,9 @@ pub struct IncrementalDecoderIgnore { impl IncrementalDecoderIgnore { /// Make a new ignoring decoder. + /// /// # Panics + /// /// If the amount to ignore is zero. #[must_use] pub fn new(n: usize) -> Self { diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index 3fb0fd27ec..e988c6071d 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. 
mod codec; mod datagram; @@ -16,16 +15,20 @@ mod incrdecoder; pub mod log; pub mod qlog; pub mod timer; - -pub use self::codec::{Decoder, Encoder}; -pub use self::datagram::Datagram; -pub use self::header::Header; -pub use self::incrdecoder::{ - IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint, -}; +pub mod tos; use std::fmt::Write; +use enum_map::Enum; + +pub use self::{ + codec::{Decoder, Encoder}, + datagram::Datagram, + header::Header, + incrdecoder::{IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint}, + tos::{IpTos, IpTosDscp, IpTosEcn}, +}; + #[must_use] pub fn hex(buf: impl AsRef<[u8]>) -> String { let mut ret = String::with_capacity(buf.as_ref().len() * 2); @@ -75,7 +78,7 @@ pub const fn const_min(a: usize, b: usize) -> usize { [a, b][(a >= b) as usize] } -#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Enum)] /// Client or Server. pub enum Role { Client, diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index 6cd22b4901..04028a26bd 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -6,17 +6,19 @@ #![allow(clippy::module_name_repetitions)] +use std::{ + io::Write, + sync::{Once, OnceLock}, + time::{Duration, Instant}, +}; + use env_logger::Builder; -use lazy_static::lazy_static; -use std::io::Write; -use std::sync::Once; -use std::time::Instant; #[macro_export] macro_rules! do_log { (target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ let lvl = $lvl; - if lvl <= ::log::max_level() { + if lvl <= ::log::STATIC_MAX_LEVEL && lvl <= ::log::max_level() { ::log::logger().log( &::log::Record::builder() .args(format_args!($($arg)+)) @@ -29,7 +31,7 @@ macro_rules! do_log { ); } }); - ($lvl:expr, $($arg:tt)+) => ($crate::do_log!(target: ::log::__log_module_path!(), $lvl, $($arg)+)) + ($lvl:expr, $($arg:tt)+) => ($crate::do_log!(target: module_path!(), $lvl, $($arg)+)) } #[macro_export] @@ -43,17 +45,25 @@ macro_rules! 
log_subject { }}; } -static INIT_ONCE: Once = Once::new(); - -lazy_static! { - static ref START_TIME: Instant = Instant::now(); +fn since_start() -> Duration { + static START_TIME: OnceLock = OnceLock::new(); + START_TIME.get_or_init(Instant::now).elapsed() } -pub fn init() { +pub fn init(level_filter: Option) { + static INIT_ONCE: Once = Once::new(); + + if ::log::STATIC_MAX_LEVEL == ::log::LevelFilter::Off { + return; + } + INIT_ONCE.call_once(|| { let mut builder = Builder::from_env("RUST_LOG"); + if let Some(filter) = level_filter { + builder.filter_level(filter); + } builder.format(|buf, record| { - let elapsed = START_TIME.elapsed(); + let elapsed = since_start(); writeln!( buf, "{}s{:3}ms {} {}", @@ -64,9 +74,9 @@ pub fn init() { ) }); if let Err(e) = builder.try_init() { - do_log!(::log::Level::Info, "Logging initialization error {:?}", e); + do_log!(::log::Level::Warn, "Logging initialization error {:?}", e); } else { - do_log!(::log::Level::Info, "Logging initialized"); + do_log!(::log::Level::Debug, "Logging initialized"); } }); } @@ -74,32 +84,32 @@ pub fn init() { #[macro_export] macro_rules! log_invoke { ($lvl:expr, $ctx:expr, $($arg:tt)*) => ( { - ::neqo_common::log::init(); + ::neqo_common::log::init(None); ::neqo_common::do_log!($lvl, "[{}] {}", $ctx, format!($($arg)*)); } ) } #[macro_export] macro_rules! qerror { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Error, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } ); } #[macro_export] macro_rules! 
qwarn { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Warn, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } ); } #[macro_export] macro_rules! qinfo { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Info, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } ); } #[macro_export] macro_rules! qdebug { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Debug, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } ); } #[macro_export] macro_rules! qtrace { ([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Trace, $ctx, $($arg)*);); - ($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } ); + ($($arg:tt)*) => ( { ::neqo_common::log::init(None); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } ); } diff --git a/neqo-common/src/qlog.rs b/neqo-common/src/qlog.rs index 29445cce9d..c67ce62afe 100644 --- a/neqo-common/src/qlog.rs +++ b/neqo-common/src/qlog.rs @@ -4,14 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cell::RefCell; -use std::fmt; -use std::path::{Path, PathBuf}; -use std::rc::Rc; +use std::{ + cell::RefCell, + fmt, + path::{Path, PathBuf}, + rc::Rc, +}; use qlog::{ - self, CommonFields, Configuration, QlogStreamer, TimeUnits, Trace, VantagePoint, - VantagePointType, + streamer::QlogStreamer, CommonFields, Configuration, TraceSeq, VantagePoint, VantagePointType, }; use crate::Role; @@ -29,6 +30,7 @@ pub struct NeqoQlogShared { impl NeqoQlog { /// Create an enabled `NeqoQlog` configuration. + /// /// # Errors /// /// Will return `qlog::Error` if cannot write to the new log. @@ -46,6 +48,11 @@ impl NeqoQlog { }) } + #[must_use] + pub fn inner(&self) -> Rc>> { + Rc::clone(&self.inner) + } + /// Create a disabled `NeqoQlog` configuration. #[must_use] pub fn disabled() -> Self { @@ -55,7 +62,7 @@ impl NeqoQlog { /// If logging enabled, closure may generate an event to be logged. pub fn add_event(&mut self, f: F) where - F: FnOnce() -> Option, + F: FnOnce() -> Option, { self.add_event_with_stream(|s| { if let Some(evt) = f() { @@ -65,6 +72,19 @@ impl NeqoQlog { }); } + /// If logging enabled, closure may generate an event to be logged. + pub fn add_event_data(&mut self, f: F) + where + F: FnOnce() -> Option, + { + self.add_event_with_stream(|s| { + if let Some(ev_data) = f() { + s.add_event_data_now(ev_data)?; + } + Ok(()) + }); + } + /// If logging enabled, closure is given the Qlog stream to write events and /// frames to. 
pub fn add_event_with_stream(&mut self, f: F) @@ -99,8 +119,8 @@ impl Drop for NeqoQlogShared { } #[must_use] -pub fn new_trace(role: Role) -> qlog::Trace { - Trace { +pub fn new_trace(role: Role) -> qlog::TraceSeq { + TraceSeq { vantage_point: VantagePoint { name: Some(format!("neqo-{role}")), ty: match role { @@ -112,26 +132,56 @@ pub fn new_trace(role: Role) -> qlog::Trace { title: Some(format!("neqo-{role} trace")), description: Some("Example qlog trace description".to_string()), configuration: Some(Configuration { - time_offset: Some("0".into()), - time_units: Some(TimeUnits::Us), + time_offset: Some(0.0), original_uris: None, }), common_fields: Some(CommonFields { group_id: None, protocol_type: None, reference_time: { - let datetime = time::OffsetDateTime::now_utc(); - datetime - .format(&time::format_description::well_known::Rfc3339) - .ok() // This is expected to never fail. + // It is better to allow this than deal with a conversion from i64 to f64. + // We can't do the obvious two-step conversion with f64::from(i32::try_from(...)), + // because that overflows earlier than is ideal. This should be fine for a while. 
+ #[allow(clippy::cast_precision_loss)] + Some(time::OffsetDateTime::now_utc().unix_timestamp() as f64) }, + time_format: Some("relative".to_string()), }), - event_fields: vec![ - "relative_time".to_string(), - "category".to_string(), - "event".to_string(), - "data".to_string(), - ], - events: Vec::new(), + } +} + +#[cfg(test)] +mod test { + use qlog::events::Event; + use test_fixture::EXPECTED_LOG_HEADER; + + const EV_DATA: qlog::events::EventData = + qlog::events::EventData::SpinBitUpdated(qlog::events::connectivity::SpinBitUpdated { + state: true, + }); + + const EXPECTED_LOG_EVENT: &str = concat!( + "\u{1e}", + r#"{"time":0.0,"name":"connectivity:spin_bit_updated","data":{"state":true}}"#, + "\n" + ); + + #[test] + fn new_neqo_qlog() { + let (_log, contents) = test_fixture::new_neqo_qlog(); + assert_eq!(contents.to_string(), EXPECTED_LOG_HEADER); + } + + #[test] + fn add_event() { + let (mut log, contents) = test_fixture::new_neqo_qlog(); + log.add_event(|| Some(Event::with_time(1.1, EV_DATA))); + assert_eq!( + contents.to_string(), + format!( + "{EXPECTED_LOG_HEADER}{e}", + e = EXPECTED_LOG_EVENT.replace("\"time\":0.0,", "\"time\":1.1,") + ) + ); } } diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index 83836ad773..3feddb2226 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -4,9 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::convert::TryFrom; -use std::mem; -use std::time::{Duration, Instant}; +use std::{ + collections::VecDeque, + mem, + time::{Duration, Instant}, +}; /// Internal structure for a timer item. struct TimerItem { @@ -21,12 +23,12 @@ impl TimerItem { } /// A timer queue. -/// This uses a classic timer wheel arrangement, with some characteristics that might be considered peculiar. -/// Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut points). 
-/// Time is relative, the wheel has an origin time and it is unable to represent times that are more than -/// `granularity * capacity` past that time. +/// This uses a classic timer wheel arrangement, with some characteristics that might be considered +/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut +/// points). Time is relative, the wheel has an origin time and it is unable to represent times that +/// are more than `granularity * capacity` past that time. pub struct Timer { - items: Vec>>, + items: Vec>>, now: Instant, granularity: Duration, cursor: usize, @@ -34,7 +36,9 @@ pub struct Timer { impl Timer { /// Construct a new wheel at the given granularity, starting at the given time. + /// /// # Panics + /// /// When `capacity` is too large to fit in `u32` or `granularity` is zero. pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self { assert!(u32::try_from(capacity).is_ok()); @@ -52,9 +56,14 @@ impl Timer { /// Return a reference to the time of the next entry. #[must_use] pub fn next_time(&self) -> Option { - for i in 0..self.items.len() { - let idx = self.bucket(i); - if let Some(t) = self.items[idx].first() { + let idx = self.bucket(0); + for i in idx..self.items.len() { + if let Some(t) = self.items[i].front() { + return Some(t.time); + } + } + for i in 0..idx { + if let Some(t) = self.items[i].front() { return Some(t.time); } } @@ -75,7 +84,7 @@ impl Timer { #[inline] #[allow(clippy::cast_possible_truncation)] // guarded by assertion fn delta(&self, time: Instant) -> usize { - // This really should use Instant::div_duration(), but it can't yet. + // This really should use Duration::div_duration_f??(), but it can't yet. ((time - self.now).as_nanos() / self.granularity.as_nanos()) as usize } @@ -109,7 +118,9 @@ impl Timer { } /// Asserts if the time given is in the past or too far in the future. + /// /// # Panics + /// /// When `time` is in the past relative to previous calls. 
pub fn add(&mut self, time: Instant, item: T) { assert!(time >= self.now); @@ -140,6 +151,9 @@ impl Timer { /// Given knowledge of the time an item was added, remove it. /// This requires use of a predicate that identifies matching items. + /// + /// # Panics + /// Impossible, I think. pub fn remove(&mut self, time: Instant, mut selector: F) -> Option where F: FnMut(&T) -> bool, @@ -151,7 +165,10 @@ impl Timer { return None; } let bucket = self.time_bucket(time); - let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time) else { return None }; + let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time) + else { + return None; + }; // start_index is just one of potentially many items with the same time. // Search backwards for a match, ... for i in (0..=start_index).rev() { @@ -159,7 +176,7 @@ impl Timer { break; } if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).item); + return Some(self.items[bucket].remove(i).unwrap().item); } } // ... then forwards. @@ -168,7 +185,7 @@ impl Timer { break; } if selector(&self.items[bucket][i].item) { - return Some(self.items[bucket].remove(i).item); + return Some(self.items[bucket].remove(i).unwrap().item); } } None @@ -177,10 +194,25 @@ impl Timer { /// Take the next item, unless there are no items with /// a timeout in the past relative to `until`. 
pub fn take_next(&mut self, until: Instant) -> Option { - for i in 0..self.items.len() { - let idx = self.bucket(i); - if !self.items[idx].is_empty() && self.items[idx][0].time <= until { - return Some(self.items[idx].remove(0).item); + fn maybe_take(v: &mut VecDeque>, until: Instant) -> Option { + if !v.is_empty() && v[0].time <= until { + Some(v.pop_front().unwrap().item) + } else { + None + } + } + + let idx = self.bucket(0); + for i in idx..self.items.len() { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; + } + } + for i in 0..idx { + let res = maybe_take(&mut self.items[i], until); + if res.is_some() { + return res; } } None @@ -193,7 +225,7 @@ impl Timer { if until >= self.now + self.span() { // Drain everything, so a clean sweep. let mut empty_items = Vec::with_capacity(self.items.len()); - empty_items.resize_with(self.items.len(), Vec::default); + empty_items.resize_with(self.items.len(), VecDeque::default); let mut items = mem::replace(&mut self.items, empty_items); self.now = until; self.cursor = 0; @@ -238,48 +270,50 @@ impl Timer { #[cfg(test)] mod test { + use std::sync::OnceLock; + use super::{Duration, Instant, Timer}; - use lazy_static::lazy_static; - lazy_static! 
{ - static ref NOW: Instant = Instant::now(); + fn now() -> Instant { + static NOW: OnceLock = OnceLock::new(); + *NOW.get_or_init(Instant::now) } const GRANULARITY: Duration = Duration::from_millis(10); const CAPACITY: usize = 10; #[test] fn create() { - let t: Timer<()> = Timer::new(*NOW, GRANULARITY, CAPACITY); + let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY); assert_eq!(t.span(), Duration::from_millis(100)); assert_eq!(None, t.next_time()); } #[test] fn immediate_entry() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - t.add(*NOW, 12); - assert_eq!(*NOW, t.next_time().expect("should have an entry")); - let values: Vec<_> = t.take_until(*NOW).collect(); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + t.add(now(), 12); + assert_eq!(now(), t.next_time().expect("should have an entry")); + let values: Vec<_> = t.take_until(now()).collect(); assert_eq!(vec![12], values); } #[test] fn same_time() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); let v1 = 12; let v2 = 13; - t.add(*NOW, v1); - t.add(*NOW, v2); - assert_eq!(*NOW, t.next_time().expect("should have an entry")); - let values: Vec<_> = t.take_until(*NOW).collect(); + t.add(now(), v1); + t.add(now(), v2); + assert_eq!(now(), t.next_time().expect("should have an entry")); + let values: Vec<_> = t.take_until(now()).collect(); assert!(values.contains(&v1)); assert!(values.contains(&v2)); } #[test] fn add() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let near_future = *NOW + Duration::from_millis(17); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let near_future = now() + Duration::from_millis(17); let v = 9; t.add(near_future, v); assert_eq!(near_future, t.next_time().expect("should return a value")); @@ -295,8 +329,8 @@ mod test { #[test] fn add_future() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let future = *NOW + Duration::from_millis(117); + let mut t = Timer::new(now(), 
GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); let v = 9; t.add(future, v); assert_eq!(future, t.next_time().expect("should return a value")); @@ -305,8 +339,8 @@ mod test { #[test] fn add_far_future() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let far_future = *NOW + Duration::from_millis(892); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let far_future = now() + Duration::from_millis(892); let v = 9; t.add(far_future, v); assert_eq!(far_future, t.next_time().expect("should return a value")); @@ -323,12 +357,12 @@ mod test { ]; fn with_times() -> Timer { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); for (i, time) in TIMES.iter().enumerate() { - t.add(*NOW + *time, i); + t.add(now() + *time, i); } assert_eq!( - *NOW + *TIMES.iter().min().unwrap(), + now() + *TIMES.iter().min().unwrap(), t.next_time().expect("should have a time") ); t @@ -338,7 +372,7 @@ mod test { #[allow(clippy::needless_collect)] // false positive fn multiple_values() { let mut t = with_times(); - let values: Vec<_> = t.take_until(*NOW + *TIMES.iter().max().unwrap()).collect(); + let values: Vec<_> = t.take_until(now() + *TIMES.iter().max().unwrap()).collect(); for i in 0..TIMES.len() { assert!(values.contains(&i)); } @@ -348,7 +382,7 @@ mod test { #[allow(clippy::needless_collect)] // false positive fn take_far_future() { let mut t = with_times(); - let values: Vec<_> = t.take_until(*NOW + Duration::from_secs(100)).collect(); + let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect(); for i in 0..TIMES.len() { assert!(values.contains(&i)); } @@ -358,15 +392,15 @@ mod test { fn remove_each() { let mut t = with_times(); for (i, time) in TIMES.iter().enumerate() { - assert_eq!(Some(i), t.remove(*NOW + *time, |&x| x == i)); + assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i)); } assert_eq!(None, t.next_time()); } #[test] fn remove_future() { - let mut 
t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let future = *NOW + Duration::from_millis(117); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); let v = 9; t.add(future, v); @@ -375,9 +409,9 @@ mod test { #[test] fn remove_too_far_future() { - let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY); - let future = *NOW + Duration::from_millis(117); - let too_far_future = *NOW + t.span() + Duration::from_millis(117); + let mut t = Timer::new(now(), GRANULARITY, CAPACITY); + let future = now() + Duration::from_millis(117); + let too_far_future = now() + t.span() + Duration::from_millis(117); let v = 9; t.add(future, v); diff --git a/neqo-common/src/tos.rs b/neqo-common/src/tos.rs new file mode 100644 index 0000000000..533c5447e2 --- /dev/null +++ b/neqo-common/src/tos.rs @@ -0,0 +1,349 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt::Debug; + +use enum_map::Enum; + +/// ECN (Explicit Congestion Notification) codepoints mapped to the +/// lower 2 bits of the TOS field. 
+/// +#[derive(Copy, Clone, PartialEq, Eq, Enum, Default, Debug)] +#[repr(u8)] +pub enum IpTosEcn { + #[default] + /// Not-ECT, Not ECN-Capable Transport, RFC3168 + NotEct = 0b00, + + /// ECT(1), ECN-Capable Transport(1), RFC8311 and RFC9331 + Ect1 = 0b01, + + /// ECT(0), ECN-Capable Transport(0), RFC3168 + Ect0 = 0b10, + + /// CE, Congestion Experienced, RFC3168 + Ce = 0b11, +} + +impl From for u8 { + fn from(v: IpTosEcn) -> Self { + v as u8 + } +} + +impl From for IpTosEcn { + fn from(v: u8) -> Self { + match v & 0b0000_0011 { + 0b00 => IpTosEcn::NotEct, + 0b01 => IpTosEcn::Ect1, + 0b10 => IpTosEcn::Ect0, + 0b11 => IpTosEcn::Ce, + _ => unreachable!(), + } + } +} + +impl From for IpTosEcn { + fn from(v: IpTos) -> Self { + IpTosEcn::from(u8::from(v)) + } +} + +/// Diffserv Codepoints, mapped to the upper six bits of the TOS field. +/// +#[derive(Copy, Clone, PartialEq, Eq, Enum, Default, Debug)] +#[repr(u8)] +pub enum IpTosDscp { + #[default] + /// Class Selector 0, RFC2474 + Cs0 = 0b0000_0000, + + /// Class Selector 1, RFC2474 + Cs1 = 0b0010_0000, + + /// Class Selector 2, RFC2474 + Cs2 = 0b0100_0000, + + /// Class Selector 3, RFC2474 + Cs3 = 0b0110_0000, + + /// Class Selector 4, RFC2474 + Cs4 = 0b1000_0000, + + /// Class Selector 5, RFC2474 + Cs5 = 0b1010_0000, + + /// Class Selector 6, RFC2474 + Cs6 = 0b1100_0000, + + /// Class Selector 7, RFC2474 + Cs7 = 0b1110_0000, + + /// Assured Forwarding 11, RFC2597 + Af11 = 0b0010_1000, + + /// Assured Forwarding 12, RFC2597 + Af12 = 0b0011_0000, + + /// Assured Forwarding 13, RFC2597 + Af13 = 0b0011_1000, + + /// Assured Forwarding 21, RFC2597 + Af21 = 0b0100_1000, + + /// Assured Forwarding 22, RFC2597 + Af22 = 0b0101_0000, + + /// Assured Forwarding 23, RFC2597 + Af23 = 0b0101_1000, + + /// Assured Forwarding 31, RFC2597 + Af31 = 0b0110_1000, + + /// Assured Forwarding 32, RFC2597 + Af32 = 0b0111_0000, + + /// Assured Forwarding 33, RFC2597 + Af33 = 0b0111_1000, + + /// Assured Forwarding 41, RFC2597 + Af41 = 
0b1000_1000, + + /// Assured Forwarding 42, RFC2597 + Af42 = 0b1001_0000, + + /// Assured Forwarding 43, RFC2597 + Af43 = 0b1001_1000, + + /// Expedited Forwarding, RFC3246 + Ef = 0b1011_1000, + + /// Capacity-Admitted Traffic, RFC5865 + VoiceAdmit = 0b1011_0000, + + /// Lower-Effort, RFC8622 + Le = 0b0000_0100, +} + +impl From for u8 { + fn from(v: IpTosDscp) -> Self { + v as u8 + } +} + +impl From for IpTosDscp { + fn from(v: u8) -> Self { + match v & 0b1111_1100 { + 0b0000_0000 => IpTosDscp::Cs0, + 0b0010_0000 => IpTosDscp::Cs1, + 0b0100_0000 => IpTosDscp::Cs2, + 0b0110_0000 => IpTosDscp::Cs3, + 0b1000_0000 => IpTosDscp::Cs4, + 0b1010_0000 => IpTosDscp::Cs5, + 0b1100_0000 => IpTosDscp::Cs6, + 0b1110_0000 => IpTosDscp::Cs7, + 0b0010_1000 => IpTosDscp::Af11, + 0b0011_0000 => IpTosDscp::Af12, + 0b0011_1000 => IpTosDscp::Af13, + 0b0100_1000 => IpTosDscp::Af21, + 0b0101_0000 => IpTosDscp::Af22, + 0b0101_1000 => IpTosDscp::Af23, + 0b0110_1000 => IpTosDscp::Af31, + 0b0111_0000 => IpTosDscp::Af32, + 0b0111_1000 => IpTosDscp::Af33, + 0b1000_1000 => IpTosDscp::Af41, + 0b1001_0000 => IpTosDscp::Af42, + 0b1001_1000 => IpTosDscp::Af43, + 0b1011_1000 => IpTosDscp::Ef, + 0b1011_0000 => IpTosDscp::VoiceAdmit, + 0b0000_0100 => IpTosDscp::Le, + _ => unreachable!(), + } + } +} + +impl From for IpTosDscp { + fn from(v: IpTos) -> Self { + IpTosDscp::from(u8::from(v)) + } +} + +/// The type-of-service field in an IP packet. 
+#[derive(Copy, Clone, PartialEq, Eq, Default)] +pub struct IpTos(u8); + +impl From for IpTos { + fn from(v: IpTosEcn) -> Self { + Self(u8::from(v)) + } +} + +impl From for IpTos { + fn from(v: IpTosDscp) -> Self { + Self(u8::from(v)) + } +} + +impl From<(IpTosDscp, IpTosEcn)> for IpTos { + fn from(v: (IpTosDscp, IpTosEcn)) -> Self { + Self(u8::from(v.0) | u8::from(v.1)) + } +} + +impl From<(IpTosEcn, IpTosDscp)> for IpTos { + fn from(v: (IpTosEcn, IpTosDscp)) -> Self { + Self(u8::from(v.0) | u8::from(v.1)) + } +} + +impl From for u8 { + fn from(v: IpTos) -> Self { + v.0 + } +} + +impl From for IpTos { + fn from(v: u8) -> Self { + Self(v) + } +} + +impl Debug for IpTos { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("IpTos") + .field(&IpTosDscp::from(*self)) + .field(&IpTosEcn::from(*self)) + .finish() + } +} + +impl IpTos { + pub fn set_ecn(&mut self, ecn: IpTosEcn) { + self.0 = u8::from(IpTosDscp::from(*self)) | u8::from(ecn); + } + + pub fn set_dscp(&mut self, dscp: IpTosDscp) { + self.0 = u8::from(IpTosEcn::from(*self)) | u8::from(dscp); + } +} + +#[cfg(test)] +mod tests { + use crate::{IpTos, IpTosDscp, IpTosEcn}; + + #[test] + fn iptosecn_into_u8() { + assert_eq!(u8::from(IpTosEcn::NotEct), 0b00); + assert_eq!(u8::from(IpTosEcn::Ect1), 0b01); + assert_eq!(u8::from(IpTosEcn::Ect0), 0b10); + assert_eq!(u8::from(IpTosEcn::Ce), 0b11); + } + + #[test] + fn u8_into_iptosecn() { + assert_eq!(IpTosEcn::from(0b00), IpTosEcn::NotEct); + assert_eq!(IpTosEcn::from(0b01), IpTosEcn::Ect1); + assert_eq!(IpTosEcn::from(0b10), IpTosEcn::Ect0); + assert_eq!(IpTosEcn::from(0b11), IpTosEcn::Ce); + } + + #[test] + fn iptosdscp_into_u8() { + assert_eq!(u8::from(IpTosDscp::Cs0), 0b0000_0000); + assert_eq!(u8::from(IpTosDscp::Cs1), 0b0010_0000); + assert_eq!(u8::from(IpTosDscp::Cs2), 0b0100_0000); + assert_eq!(u8::from(IpTosDscp::Cs3), 0b0110_0000); + assert_eq!(u8::from(IpTosDscp::Cs4), 0b1000_0000); + assert_eq!(u8::from(IpTosDscp::Cs5), 
0b1010_0000); + assert_eq!(u8::from(IpTosDscp::Cs6), 0b1100_0000); + assert_eq!(u8::from(IpTosDscp::Cs7), 0b1110_0000); + assert_eq!(u8::from(IpTosDscp::Af11), 0b0010_1000); + assert_eq!(u8::from(IpTosDscp::Af12), 0b0011_0000); + assert_eq!(u8::from(IpTosDscp::Af13), 0b0011_1000); + assert_eq!(u8::from(IpTosDscp::Af21), 0b0100_1000); + assert_eq!(u8::from(IpTosDscp::Af22), 0b0101_0000); + assert_eq!(u8::from(IpTosDscp::Af23), 0b0101_1000); + assert_eq!(u8::from(IpTosDscp::Af31), 0b0110_1000); + assert_eq!(u8::from(IpTosDscp::Af32), 0b0111_0000); + assert_eq!(u8::from(IpTosDscp::Af33), 0b0111_1000); + assert_eq!(u8::from(IpTosDscp::Af41), 0b1000_1000); + assert_eq!(u8::from(IpTosDscp::Af42), 0b1001_0000); + assert_eq!(u8::from(IpTosDscp::Af43), 0b1001_1000); + assert_eq!(u8::from(IpTosDscp::Ef), 0b1011_1000); + assert_eq!(u8::from(IpTosDscp::VoiceAdmit), 0b1011_0000); + assert_eq!(u8::from(IpTosDscp::Le), 0b0000_0100); + } + + #[test] + fn u8_into_iptosdscp() { + assert_eq!(IpTosDscp::from(0b0000_0000), IpTosDscp::Cs0); + assert_eq!(IpTosDscp::from(0b0010_0000), IpTosDscp::Cs1); + assert_eq!(IpTosDscp::from(0b0100_0000), IpTosDscp::Cs2); + assert_eq!(IpTosDscp::from(0b0110_0000), IpTosDscp::Cs3); + assert_eq!(IpTosDscp::from(0b1000_0000), IpTosDscp::Cs4); + assert_eq!(IpTosDscp::from(0b1010_0000), IpTosDscp::Cs5); + assert_eq!(IpTosDscp::from(0b1100_0000), IpTosDscp::Cs6); + assert_eq!(IpTosDscp::from(0b1110_0000), IpTosDscp::Cs7); + assert_eq!(IpTosDscp::from(0b0010_1000), IpTosDscp::Af11); + assert_eq!(IpTosDscp::from(0b0011_0000), IpTosDscp::Af12); + assert_eq!(IpTosDscp::from(0b0011_1000), IpTosDscp::Af13); + assert_eq!(IpTosDscp::from(0b0100_1000), IpTosDscp::Af21); + assert_eq!(IpTosDscp::from(0b0101_0000), IpTosDscp::Af22); + assert_eq!(IpTosDscp::from(0b0101_1000), IpTosDscp::Af23); + assert_eq!(IpTosDscp::from(0b0110_1000), IpTosDscp::Af31); + assert_eq!(IpTosDscp::from(0b0111_0000), IpTosDscp::Af32); + assert_eq!(IpTosDscp::from(0b0111_1000), 
IpTosDscp::Af33); + assert_eq!(IpTosDscp::from(0b1000_1000), IpTosDscp::Af41); + assert_eq!(IpTosDscp::from(0b1001_0000), IpTosDscp::Af42); + assert_eq!(IpTosDscp::from(0b1001_1000), IpTosDscp::Af43); + assert_eq!(IpTosDscp::from(0b1011_1000), IpTosDscp::Ef); + assert_eq!(IpTosDscp::from(0b1011_0000), IpTosDscp::VoiceAdmit); + assert_eq!(IpTosDscp::from(0b0000_0100), IpTosDscp::Le); + } + + #[test] + fn iptosecn_into_iptos() { + let ecn = IpTosEcn::default(); + let iptos_ecn: IpTos = ecn.into(); + assert_eq!(u8::from(iptos_ecn), ecn as u8); + } + + #[test] + fn iptosdscp_into_iptos() { + let dscp = IpTosDscp::default(); + let iptos_dscp: IpTos = dscp.into(); + assert_eq!(u8::from(iptos_dscp), dscp as u8); + } + + #[test] + fn u8_to_iptos() { + let tos = 0x8b; + let iptos: IpTos = (IpTosEcn::Ce, IpTosDscp::Af41).into(); + assert_eq!(tos, u8::from(iptos)); + assert_eq!(IpTos::from(tos), iptos); + } + + #[test] + fn iptos_to_iptosdscp() { + let tos = IpTos::from((IpTosDscp::Af41, IpTosEcn::NotEct)); + let dscp = IpTosDscp::from(tos); + assert_eq!(dscp, IpTosDscp::Af41); + } + + #[test] + fn tos_modify_ecn() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::NotEct).into(); + iptos.set_ecn(IpTosEcn::Ce); + assert_eq!(u8::from(iptos), 0b1000_1011); + } + + #[test] + fn tos_modify_dscp() { + let mut iptos: IpTos = (IpTosDscp::Af41, IpTosEcn::Ect1).into(); + iptos.set_dscp(IpTosDscp::Le); + assert_eq!(u8::from(iptos), 0b0000_0101); + } +} diff --git a/neqo-common/tests/log.rs b/neqo-common/tests/log.rs index 33b42d1411..135a667146 100644 --- a/neqo-common/tests/log.rs +++ b/neqo-common/tests/log.rs @@ -4,9 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] - use neqo_common::{qdebug, qerror, qinfo, qtrace, qwarn}; #[test] diff --git a/neqo-crypto/.gitignore b/neqo-crypto/.gitignore deleted file mode 100644 index 0136220822..0000000000 --- a/neqo-crypto/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -Cargo.lock -/target -**/*.rs.bk -/nss -/nspr -/dist diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index 5903a227d9..47337d99c0 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,28 +1,38 @@ [package] name = "neqo-crypto" -version = "0.6.4" -authors = ["Martin Thomson "] -edition = "2018" -rust-version = "1.65.0" build = "build.rs" -license = "MIT/Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true + +[lints] +workspace = true [dependencies] +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } -log = {version = "0.4.0", default-features = false} [build-dependencies] -bindgen = {version = "0.64", default-features = false, features= ["runtime"]} -serde = "1.0" -serde_derive = "1.0" -toml = "0.5" -mozbuild = {version = "0.1", optional = true} +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +bindgen = { version = "0.69", default-features = false, features = ["runtime"] } +mozbuild = { version = "0.1", default-features = false, optional = true } +semver = { version = "1.0", default-features = false } +serde = { version = "1.0", default-features = false } +serde_derive = { version = "1.0", default-features = false } +toml = { version = "0.5", default-features = false } [dev-dependencies] test-fixture = { path = "../test-fixture" } [features] -default = ["deny-warnings"] -deny-warnings = [] gecko = ["mozbuild"] 
-fuzzing = [] +disable-encryption = [] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-crypto/TODO b/neqo-crypto/TODO deleted file mode 100644 index b0552ea10f..0000000000 --- a/neqo-crypto/TODO +++ /dev/null @@ -1,4 +0,0 @@ -early data - API in place for inspection, but depends on resumption -handle panics more gracefully for extension handlers -client certificates -read/write - probably never \ No newline at end of file diff --git a/neqo-crypto/bindings/bindings.toml b/neqo-crypto/bindings/bindings.toml index 7c35a0a224..72c6d524d5 100644 --- a/neqo-crypto/bindings/bindings.toml +++ b/neqo-crypto/bindings/bindings.toml @@ -49,6 +49,7 @@ functions = [ "SSL_PeerSignedCertTimestamps", "SSL_PeerStapledOCSPResponses", "SSL_ResetHandshake", + "SSL_SendAdditionalKeyShares", "SSL_SetNextProtoNego", "SSL_SetURL", "SSL_VersionRangeSet", @@ -264,8 +265,3 @@ enums = [ [nspr_time] types = ["PRTime"] functions = ["PR_Now"] - -[mozpkix] -cplusplus = true -types = ["mozilla::pkix::ErrorCode"] -enums = ["mozilla::pkix::ErrorCode"] diff --git a/neqo-crypto/bindings/mozpkix.hpp b/neqo-crypto/bindings/mozpkix.hpp deleted file mode 100644 index d0a6cb5861..0000000000 --- a/neqo-crypto/bindings/mozpkix.hpp +++ /dev/null @@ -1 +0,0 @@ -#include "mozpkix/pkixnss.h" \ No newline at end of file diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index e19f197710..2dd4543797 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -4,16 +4,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] +use std::{ + collections::HashMap, + env, fs, + path::{Path, PathBuf}, + process::Command, +}; use bindgen::Builder; +use semver::{Version, VersionReq}; use serde_derive::Deserialize; -use std::collections::HashMap; -use std::env; -use std::fs; -use std::path::{Path, PathBuf}; -use std::process::Command; + +#[path = "src/min_version.rs"] +mod min_version; +use min_version::MINIMUM_NSS_VERSION; const BINDINGS_DIR: &str = "bindings"; const BINDINGS_CONFIG: &str = "bindings.toml"; @@ -35,7 +39,7 @@ struct Bindings { opaque: Vec, /// enumerations that are turned into a module (without this, the enum is /// mapped using the default, which means that the individual values are - /// formed with an underscore as _). + /// formed with an underscore as <`enum_type`>_<`enum_value_name`>). #[serde(default)] enums: Vec, @@ -51,16 +55,18 @@ struct Bindings { } fn is_debug() -> bool { - env::var("DEBUG") - .map(|d| d.parse::().unwrap_or(false)) - .unwrap_or(false) + // Check the build profile and not whether debug symbols are enabled (i.e., + // `env::var("DEBUG")`), because we enable those for benchmarking/profiling and still want + // to build NSS in release mode. + env::var("PROFILE").unwrap_or_default() == "debug" } // bindgen needs access to libclang. // On windows, this doesn't just work, you have to set LIBCLANG_PATH. // Rather than download the 400Mb+ files, like gecko does, let's just reuse their work. fn setup_clang() { - if env::consts::OS != "windows" { + // If this isn't Windows, or we're in CI, then we don't need to do anything. 
+ if env::consts::OS != "windows" || env::var("GITHUB_WORKFLOW").unwrap() == "CI" { return; } println!("rerun-if-env-changed=LIBCLANG_PATH"); @@ -89,47 +95,12 @@ fn setup_clang() { } } -fn nss_dir() -> PathBuf { - let dir = if let Ok(dir) = env::var("NSS_DIR") { - let path = PathBuf::from(dir.trim()); - assert!( - !path.is_relative(), - "The NSS_DIR environment variable is expected to be an absolute path." - ); - path - } else { - let out_dir = env::var("OUT_DIR").unwrap(); - let dir = Path::new(&out_dir).join("nss"); - if !dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nss", - dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nss"); - } - let nspr_dir = Path::new(&out_dir).join("nspr"); - if !nspr_dir.exists() { - Command::new("hg") - .args([ - "clone", - "https://hg.mozilla.org/projects/nspr", - nspr_dir.to_str().unwrap(), - ]) - .status() - .expect("can't clone nspr"); - } - dir - }; - assert!(dir.is_dir(), "NSS_DIR {:?} doesn't exist", dir); - // Note that this returns a relative path because UNC - // paths on windows cause certain tools to explode. - dir -} - fn get_bash() -> PathBuf { + // If BASH is set, use that. + if let Ok(bash) = env::var("BASH") { + return PathBuf::from(bash); + } + // When running under MOZILLABUILD, we need to make sure not to invoke // another instance of bash that might be sitting around (like WSL). match env::var("MOZILLABUILD") { @@ -142,10 +113,10 @@ fn build_nss(dir: PathBuf) { let mut build_nss = vec![ String::from("./build.sh"), String::from("-Ddisable_tests=1"), + // Generate static libraries in addition to shared libraries. 
+ String::from("--static"), ]; - if is_debug() { - build_nss.push(String::from("--static")); - } else { + if !is_debug() { build_nss.push(String::from("-o")); } if let Ok(d) = env::var("NSS_JOBS") { @@ -257,7 +228,7 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool builder = builder.clang_arg("-DANDROID"); } if bindings.cplusplus { - builder = builder.clang_args(&["-x", "c++", "-std=c++11"]); + builder = builder.clang_args(&["-x", "c++", "-std=c++14"]); } } @@ -289,11 +260,63 @@ fn build_bindings(base: &str, bindings: &Bindings, flags: &[String], gecko: bool .expect("couldn't write bindings"); } -fn setup_standalone() -> Vec { +fn pkg_config() -> Vec { + let modversion = Command::new("pkg-config") + .args(["--modversion", "nss"]) + .output() + .expect("pkg-config reports NSS as absent") + .stdout; + let modversion = String::from_utf8(modversion).expect("non-UTF8 from pkg-config"); + let modversion = modversion.trim(); + // The NSS version number does not follow semver numbering, because it omits the patch version + // when that's 0. Deal with that. 
+ let modversion_for_cmp = if modversion.chars().filter(|c| *c == '.').count() == 1 { + modversion.to_owned() + ".0" + } else { + modversion.to_owned() + }; + let modversion_for_cmp = + Version::parse(&modversion_for_cmp).expect("NSS version not in semver format"); + let version_req = VersionReq::parse(&format!(">={}", MINIMUM_NSS_VERSION.trim())).unwrap(); + assert!( + version_req.matches(&modversion_for_cmp), + "neqo has NSS version requirement {version_req}, found {modversion}" + ); + + let cfg = Command::new("pkg-config") + .args(["--cflags", "--libs", "nss"]) + .output() + .expect("NSS flags not returned by pkg-config") + .stdout; + let cfg_str = String::from_utf8(cfg).expect("non-UTF8 from pkg-config"); + + let mut flags: Vec = Vec::new(); + for f in cfg_str.split(' ') { + if let Some(include) = f.strip_prefix("-I") { + flags.push(String::from(f)); + println!("cargo:include={include}"); + } else if let Some(path) = f.strip_prefix("-L") { + println!("cargo:rustc-link-search=native={path}"); + } else if let Some(lib) = f.strip_prefix("-l") { + println!("cargo:rustc-link-lib=dylib={lib}"); + } else { + println!("Warning: Unknown flag from pkg-config: {f}"); + } + } + + flags +} + +fn setup_standalone(nss: &str) -> Vec { setup_clang(); println!("cargo:rerun-if-env-changed=NSS_DIR"); - let nss = nss_dir(); + let nss = PathBuf::from(nss); + assert!( + !nss.is_relative(), + "The NSS_DIR environment variable is expected to be an absolute path." 
+ ); + build_nss(nss.clone()); // $NSS_DIR/../dist/ @@ -310,7 +333,7 @@ fn setup_standalone() -> Vec { "cargo:rustc-link-search=native={}", nsslibdir.to_str().unwrap() ); - if is_debug() { + if is_debug() || env::consts::OS == "windows" { static_link(); } else { dynamic_link(); @@ -400,8 +423,10 @@ fn setup_for_gecko() -> Vec { fn main() { let flags = if cfg!(feature = "gecko") { setup_for_gecko() + } else if let Ok(nss_dir) = env::var("NSS_DIR") { + setup_standalone(nss_dir.trim()) } else { - setup_standalone() + pkg_config() }; let config_file = PathBuf::from(BINDINGS_DIR).join(BINDINGS_CONFIG); diff --git a/neqo-crypto/min_version.txt b/neqo-crypto/min_version.txt new file mode 100644 index 0000000000..422c9c7093 --- /dev/null +++ b/neqo-crypto/min_version.txt @@ -0,0 +1 @@ +3.98 diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index 8bb84c856d..21027d55b2 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -4,6 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + fmt, + ops::{Deref, DerefMut}, + os::raw::{c_char, c_uint}, + ptr::null_mut, +}; + use crate::{ constants::{Cipher, Version}, err::Res, @@ -13,14 +20,6 @@ use crate::{ ssl::{self, PRUint16, PRUint64, PRUint8, SSLAeadContext}, }; -use std::{ - convert::{TryFrom, TryInto}, - fmt, - ops::{Deref, DerefMut}, - os::raw::{c_char, c_uint}, - ptr::null_mut, -}; - experimental_api!(SSL_MakeAead( version: PRUint16, cipher: PRUint16, @@ -62,14 +61,9 @@ impl RealAead { /// Create a new AEAD based on the indicated TLS version and cipher suite. /// /// # Errors + /// /// Returns `Error` when the supporting NSS functions fail. 
- pub fn new( - _fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { + pub fn new(version: Version, cipher: Cipher, secret: &SymKey, prefix: &str) -> Res { let s: *mut PK11SymKey = **secret; unsafe { Self::from_raw(version, cipher, s, prefix) } } @@ -107,6 +101,7 @@ impl RealAead { /// the value provided in `Aead::expansion`. /// /// # Errors + /// /// If the input can't be protected or any input is too large for NSS. pub fn encrypt<'a>( &self, @@ -118,7 +113,7 @@ impl RealAead { let mut l: c_uint = 0; unsafe { SSL_AeadEncrypt( - *self.ctx.deref(), + *self.ctx, count, aad.as_ptr(), c_uint::try_from(aad.len())?, @@ -139,6 +134,7 @@ impl RealAead { /// the final result will be shorter. /// /// # Errors + /// /// If the input isn't authenticated or any input is too large for NSS. pub fn decrypt<'a>( &self, @@ -150,7 +146,7 @@ impl RealAead { let mut l: c_uint = 0; unsafe { SSL_AeadDecrypt( - *self.ctx.deref(), + *self.ctx, count, aad.as_ptr(), c_uint::try_from(aad.len())?, diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_fuzzing.rs deleted file mode 100644 index 4293d2bc70..0000000000 --- a/neqo-crypto/src/aead_fuzzing.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use crate::constants::{Cipher, Version}; -use crate::err::{sec::SEC_ERROR_BAD_DATA, Error, Res}; -use crate::p11::SymKey; -use crate::RealAead; -use std::fmt; - -pub const FIXED_TAG_FUZZING: &[u8] = &[0x0a; 16]; - -pub struct FuzzingAead { - real: Option, -} - -impl FuzzingAead { - pub fn new( - fuzzing: bool, - version: Version, - cipher: Cipher, - secret: &SymKey, - prefix: &str, - ) -> Res { - let real = if fuzzing { - None - } else { - Some(RealAead::new(false, version, cipher, secret, prefix)?) 
- }; - Ok(Self { real }) - } - - #[must_use] - pub fn expansion(&self) -> usize { - if let Some(aead) = &self.real { - aead.expansion() - } else { - FIXED_TAG_FUZZING.len() - } - } - - pub fn encrypt<'a>( - &self, - count: u64, - aad: &[u8], - input: &[u8], - output: &'a mut [u8], - ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.encrypt(count, aad, input, output); - } - - let l = input.len(); - output[..l].copy_from_slice(input); - output[l..l + 16].copy_from_slice(FIXED_TAG_FUZZING); - Ok(&output[..l + 16]) - } - - pub fn decrypt<'a>( - &self, - count: u64, - aad: &[u8], - input: &[u8], - output: &'a mut [u8], - ) -> Res<&'a [u8]> { - if let Some(aead) = &self.real { - return aead.decrypt(count, aad, input, output); - } - - if input.len() < FIXED_TAG_FUZZING.len() { - return Err(Error::from(SEC_ERROR_BAD_DATA)); - } - - let len_encrypted = input.len() - FIXED_TAG_FUZZING.len(); - // Check that: - // 1) expansion is all zeros and - // 2) if the encrypted data is also supplied that at least some values - // are no zero (otherwise padding will be interpreted as a valid packet) - if &input[len_encrypted..] == FIXED_TAG_FUZZING - && (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0)) - { - output[..len_encrypted].copy_from_slice(&input[..len_encrypted]); - Ok(&output[..len_encrypted]) - } else { - Err(Error::from(SEC_ERROR_BAD_DATA)) - } - } -} - -impl fmt::Debug for FuzzingAead { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if let Some(a) = &self.real { - a.fmt(f) - } else { - write!(f, "[FUZZING AEAD]") - } - } -} diff --git a/neqo-crypto/src/aead_null.rs b/neqo-crypto/src/aead_null.rs new file mode 100644 index 0000000000..2d5656de73 --- /dev/null +++ b/neqo-crypto/src/aead_null.rs @@ -0,0 +1,78 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![cfg(feature = "disable-encryption")] + +use std::fmt; + +use crate::{ + constants::{Cipher, Version}, + err::{sec::SEC_ERROR_BAD_DATA, Error, Res}, + p11::SymKey, +}; + +pub const AEAD_NULL_TAG: &[u8] = &[0x0a; 16]; + +pub struct AeadNull {} + +impl AeadNull { + #[allow(clippy::missing_errors_doc)] + pub fn new(_version: Version, _cipher: Cipher, _secret: &SymKey, _prefix: &str) -> Res { + Ok(Self {}) + } + + #[must_use] + pub fn expansion(&self) -> usize { + AEAD_NULL_TAG.len() + } + + #[allow(clippy::missing_errors_doc)] + pub fn encrypt<'a>( + &self, + _count: u64, + _aad: &[u8], + input: &[u8], + output: &'a mut [u8], + ) -> Res<&'a [u8]> { + let l = input.len(); + output[..l].copy_from_slice(input); + output[l..l + 16].copy_from_slice(AEAD_NULL_TAG); + Ok(&output[..l + 16]) + } + + #[allow(clippy::missing_errors_doc)] + pub fn decrypt<'a>( + &self, + _count: u64, + _aad: &[u8], + input: &[u8], + output: &'a mut [u8], + ) -> Res<&'a [u8]> { + if input.len() < AEAD_NULL_TAG.len() { + return Err(Error::from(SEC_ERROR_BAD_DATA)); + } + + let len_encrypted = input.len() - AEAD_NULL_TAG.len(); + // Check that: + // 1) expansion is all zeros and + // 2) if the encrypted data is also supplied that at least some values are no zero + // (otherwise padding will be interpreted as a valid packet) + if &input[len_encrypted..] == AEAD_NULL_TAG + && (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0)) + { + output[..len_encrypted].copy_from_slice(&input[..len_encrypted]); + Ok(&output[..len_encrypted]) + } else { + Err(Error::from(SEC_ERROR_BAD_DATA)) + } + } +} + +impl fmt::Debug for AeadNull { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "[NULL AEAD]") + } +} diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index e02788fbdb..3d5a8b9f35 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -4,6 +4,20 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cell::RefCell, + ffi::{CStr, CString}, + mem::{self, MaybeUninit}, + ops::{Deref, DerefMut}, + os::raw::{c_uint, c_void}, + pin::Pin, + ptr::{null, null_mut}, + rc::Rc, + time::Instant, +}; + +use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qtrace, qwarn}; + pub use crate::{ agentio::{as_c_void, Record, RecordList}, cert::CertificateInfo, @@ -18,6 +32,7 @@ use crate::{ ech, err::{is_blocked, secstatus_to_res, Error, PRErrorCode, Res}, ext::{ExtensionHandler, ExtensionTracker}, + null_safe_slice, p11::{self, PrivateKey, PublicKey}, prio, replay::AntiReplay, @@ -25,19 +40,6 @@ use crate::{ ssl::{self, PRBool}, time::{Time, TimeHolder}, }; -use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; -use std::{ - cell::RefCell, - convert::TryFrom, - ffi::{CStr, CString}, - mem::{self, MaybeUninit}, - ops::{Deref, DerefMut}, - os::raw::{c_uint, c_void}, - pin::Pin, - ptr::{null, null_mut}, - rc::Rc, - time::Instant, -}; /// The maximum number of tickets to remember for a given connection. const MAX_TICKETS: usize = 4; @@ -157,6 +159,7 @@ impl SecretAgentPreInfo { } /// # Panics + /// /// If `usize` is less than 32 bits and the value is too large. #[must_use] pub fn max_early_data(&self) -> usize { @@ -183,6 +186,7 @@ impl SecretAgentPreInfo { /// which contains a valid ECH configuration. /// /// # Errors + /// /// When the public name is not valid UTF-8. (Note: names should be ASCII.) pub fn ech_public_name(&self) -> Res> { if self.info.valuesSet & ssl::ssl_preinfo_ech == 0 || self.info.echPublicName.is_null() { @@ -395,22 +399,21 @@ impl SecretAgent { /// Default configuration. /// /// # Errors + /// /// If `set_version_range` fails. 
fn configure(&mut self, grease: bool) -> Res<()> { self.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?; self.set_option(ssl::Opt::Locking, false)?; self.set_option(ssl::Opt::Tickets, false)?; self.set_option(ssl::Opt::OcspStapling, true)?; - if let Err(e) = self.set_option(ssl::Opt::Grease, grease) { - // Until NSS supports greasing, it's OK to fail here. - qinfo!([self], "Failed to enable greasing {:?}", e); - } + self.set_option(ssl::Opt::Grease, grease)?; Ok(()) } /// Set the versions that are supported. /// /// # Errors + /// /// If the range of versions isn't supported. pub fn set_version_range(&mut self, min: Version, max: Version) -> Res<()> { let range = ssl::SSLVersionRange { min, max }; @@ -420,6 +423,7 @@ impl SecretAgent { /// Enable a set of ciphers. Note that the order of these is not respected. /// /// # Errors + /// /// If NSS can't enable or disable ciphers. pub fn set_ciphers(&mut self, ciphers: &[Cipher]) -> Res<()> { if self.state != HandshakeState::New { @@ -447,6 +451,7 @@ impl SecretAgent { /// Set key exchange groups. /// /// # Errors + /// /// If the underlying API fails (which shouldn't happen). pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> { // SSLNamedGroup is a different size to Group, so copy one by one. @@ -461,9 +466,21 @@ impl SecretAgent { }) } + /// Set the number of additional key shares that will be sent in the client hello + /// + /// # Errors + /// + /// If the underlying API fails (which shouldn't happen). + pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { + secstatus_to_res(unsafe { + ssl::SSL_SendAdditionalKeyShares(self.fd, c_uint::try_from(count)?) + }) + } + /// Set TLS options. /// /// # Errors + /// /// Returns an error if the option or option value is invalid; i.e., never. pub fn set_option(&mut self, opt: ssl::Opt, value: bool) -> Res<()> { opt.set(self.fd, value) @@ -472,6 +489,7 @@ impl SecretAgent { /// Enable 0-RTT. /// /// # Errors + /// /// See `set_option`. 
pub fn enable_0rtt(&mut self) -> Res<()> { self.set_option(ssl::Opt::EarlyData, true) @@ -480,6 +498,7 @@ impl SecretAgent { /// Disable the `EndOfEarlyData` message. /// /// # Errors + /// /// See `set_option`. pub fn disable_end_of_early_data(&mut self) -> Res<()> { self.set_option(ssl::Opt::SuppressEndOfEarlyData, true) @@ -493,8 +512,11 @@ impl SecretAgent { /// 255 octets in length. /// /// # Errors + /// /// This should always panic rather than return an error. + /// /// # Panics + /// /// If any of the provided `protocols` are more than 255 bytes long. /// /// [RFC7301]: https://datatracker.ietf.org/doc/html/rfc7301 @@ -539,11 +561,12 @@ impl SecretAgent { /// Install an extension handler. /// - /// This can be called multiple times with different values for `ext`. The handler is provided as - /// Rc> so that the caller is able to hold a reference to the handler and later access any - /// state that it accumulates. + /// This can be called multiple times with different values for `ext`. The handler is provided + /// as `Rc>` so that the caller is able to hold a reference to the handler + /// and later access any state that it accumulates. /// /// # Errors + /// /// When the extension handler can't be successfully installed. pub fn extension_handler( &mut self, @@ -587,6 +610,7 @@ impl SecretAgent { /// Calling this function collects all the relevant information. /// /// # Errors + /// /// When the underlying socket functions fail. pub fn preinfo(&self) -> Res { SecretAgentPreInfo::new(self.fd) @@ -605,7 +629,9 @@ impl SecretAgent { } /// Call this function to mark the peer as authenticated. + /// /// # Panics + /// /// If the handshake doesn't need to be authenticated. 
pub fn authenticated(&mut self, status: AuthenticationStatus) { assert!(self.state.authentication_needed()); @@ -641,7 +667,7 @@ impl SecretAgent { let info = self.capture_error(SecretAgentInfo::new(self.fd))?; HandshakeState::Complete(info) }; - qinfo!([self], "state -> {:?}", self.state); + qdebug!([self], "state -> {:?}", self.state); Ok(()) } @@ -654,6 +680,7 @@ impl SecretAgent { /// function if you want to proceed, because this will mark the certificate as OK. /// /// # Errors + /// /// When the handshake fails this returns an error. pub fn handshake(&mut self, now: Instant, input: &[u8]) -> Res> { self.now.set(now)?; @@ -690,6 +717,7 @@ impl SecretAgent { /// If you send data from multiple epochs, you might end up being sad. /// /// # Errors + /// /// When the handshake fails this returns an error. pub fn handshake_raw(&mut self, now: Instant, input: Option) -> Res { self.now.set(now)?; @@ -716,6 +744,9 @@ impl SecretAgent { Ok(*Pin::into_inner(records)) } + /// # Panics + /// + /// If setup fails. #[allow(unknown_lints, clippy::branches_sharing_code)] pub fn close(&mut self) { // It should be safe to close multiple times. @@ -820,6 +851,7 @@ impl Client { /// Create a new client agent. /// /// # Errors + /// /// Errors returned if the socket can't be created or configured. pub fn new(server_name: impl Into, grease: bool) -> Res { let server_name = server_name.into(); @@ -862,8 +894,8 @@ impl Client { let resumption = arg.cast::>().as_mut().unwrap(); let len = usize::try_from(len).unwrap(); let mut v = Vec::with_capacity(len); - v.extend_from_slice(std::slice::from_raw_parts(token, len)); - qinfo!( + v.extend_from_slice(null_safe_slice(token, len)); + qdebug!( [format!("{fd:p}")], "Got resumption token {}", hex_snip_middle(&v) @@ -909,6 +941,7 @@ impl Client { /// Enable resumption, using a token previously provided. /// /// # Errors + /// /// Error returned when the resumption token is invalid or /// the socket is not able to use the value. 
pub fn enable_resumption(&mut self, token: impl AsRef<[u8]>) -> Res<()> { @@ -932,6 +965,7 @@ impl Client { /// ECH greasing. When that is done, there is no need to look for `EchRetry` /// /// # Errors + /// /// Error returned when the configuration is invalid. pub fn enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { let config = ech_config_list.as_ref(); @@ -978,13 +1012,14 @@ pub enum ZeroRttCheckResult { Accept, /// Reject 0-RTT, but continue the handshake normally. Reject, - /// Send HelloRetryRequest (probably not needed for QUIC). + /// Send `HelloRetryRequest` (probably not needed for QUIC). HelloRetryRequest(Vec), /// Fail the handshake. Fail, } -/// A `ZeroRttChecker` is used by the agent to validate the application token (as provided by `send_ticket`) +/// A `ZeroRttChecker` is used by the agent to validate the application token (as provided by +/// `send_ticket`) pub trait ZeroRttChecker: std::fmt::Debug + std::marker::Unpin { fn check(&self, token: &[u8]) -> ZeroRttCheckResult; } @@ -1025,6 +1060,7 @@ impl Server { /// Create a new server agent. /// /// # Errors + /// /// Errors returned when NSS fails. 
pub fn new(certificates: &[impl AsRef]) -> Res { let mut agent = SecretAgent::new()?; @@ -1035,12 +1071,12 @@ impl Server { let Ok(cert) = p11::Certificate::from_ptr(cert_ptr) else { return Err(Error::CertificateLoading); }; - let key_ptr = unsafe { p11::PK11_FindKeyByAnyCert(*cert.deref(), null_mut()) }; + let key_ptr = unsafe { p11::PK11_FindKeyByAnyCert(*cert, null_mut()) }; let Ok(key) = p11::PrivateKey::from_ptr(key_ptr) else { return Err(Error::CertificateLoading); }; secstatus_to_res(unsafe { - ssl::SSL_ConfigServerCert(agent.fd, *cert.deref(), *key.deref(), null(), 0) + ssl::SSL_ConfigServerCert(agent.fd, *cert, *key, null(), 0) })?; } @@ -1066,11 +1102,7 @@ impl Server { } let check_state = arg.cast::().as_mut().unwrap(); - let token = if client_token.is_null() { - &[] - } else { - std::slice::from_raw_parts(client_token, usize::try_from(client_token_len).unwrap()) - }; + let token = null_safe_slice(client_token, usize::try_from(client_token_len).unwrap()); match check_state.checker.check(token) { ZeroRttCheckResult::Accept => ssl::SSLHelloRetryRequestAction::ssl_hello_retry_accept, ZeroRttCheckResult::Fail => ssl::SSLHelloRetryRequestAction::ssl_hello_retry_fail, @@ -1078,7 +1110,8 @@ impl Server { ssl::SSLHelloRetryRequestAction::ssl_hello_retry_reject_0rtt } ZeroRttCheckResult::HelloRetryRequest(tok) => { - // Don't bother propagating errors from this, because it should be caught in testing. + // Don't bother propagating errors from this, because it should be caught in + // testing. assert!(tok.len() <= usize::try_from(retry_token_max).unwrap()); let slc = std::slice::from_raw_parts_mut(retry_token, tok.len()); slc.copy_from_slice(&tok); @@ -1092,6 +1125,7 @@ impl Server { /// via the Deref implementation on Server. /// /// # Errors + /// /// Returns an error if the underlying NSS functions fail. pub fn enable_0rtt( &mut self, @@ -1119,6 +1153,7 @@ impl Server { /// The records that are sent are captured and returned. 
/// /// # Errors + /// /// If NSS is unable to send a ticket, or if this agent is incorrectly configured. pub fn send_ticket(&mut self, now: Instant, extra: &[u8]) -> Res { self.agent.now.set(now)?; @@ -1134,6 +1169,7 @@ impl Server { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Fails when NSS cannot create a key pair. pub fn enable_ech( &mut self, diff --git a/neqo-crypto/src/agentio.rs b/neqo-crypto/src/agentio.rs index 1d39b2398a..7c57a0ef45 100644 --- a/neqo-crypto/src/agentio.rs +++ b/neqo-crypto/src/agentio.rs @@ -4,21 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::constants::{ContentType, Epoch}; -use crate::err::{nspr, Error, PR_SetError, Res}; -use crate::prio; -use crate::ssl; +use std::{ + cmp::min, + fmt, mem, + ops::Deref, + os::raw::{c_uint, c_void}, + pin::Pin, + ptr::{null, null_mut}, +}; use neqo_common::{hex, hex_with_len, qtrace}; -use std::cmp::min; -use std::convert::{TryFrom, TryInto}; -use std::fmt; -use std::mem; -use std::ops::Deref; -use std::os::raw::{c_uint, c_void}; -use std::pin::Pin; -use std::ptr::{null, null_mut}; -use std::vec::Vec; + +use crate::{ + constants::{ContentType, Epoch}, + err::{nspr, Error, PR_SetError, Res}, + null_safe_slice, prio, ssl, +}; // Alias common types. type PrFd = *mut prio::PRFileDesc; @@ -97,7 +98,7 @@ impl RecordList { ) -> ssl::SECStatus { let records = arg.cast::().as_mut().unwrap(); - let slice = std::slice::from_raw_parts(data, len as usize); + let slice = null_safe_slice(data, len); records.append(epoch, ContentType::try_from(ct).unwrap(), slice); ssl::SECSuccess } @@ -175,6 +176,7 @@ impl AgentIoInput { return Err(Error::NoDataAvailable); } + #[allow(clippy::disallowed_methods)] // We just checked if this was empty. 
let src = unsafe { std::slice::from_raw_parts(self.input, amount) }; qtrace!([self], "read {}", hex(src)); let dst = unsafe { std::slice::from_raw_parts_mut(buf, amount) }; @@ -229,7 +231,7 @@ impl AgentIo { // Stage output from TLS into the output buffer. fn save_output(&mut self, buf: *const u8, count: usize) { - let slice = unsafe { std::slice::from_raw_parts(buf, count) }; + let slice = unsafe { null_safe_slice(buf, count) }; qtrace!([self], "save output {}", hex(slice)); self.output.extend_from_slice(slice); } diff --git a/neqo-crypto/src/cert.rs b/neqo-crypto/src/cert.rs index 14d91843d3..2836b5237c 100644 --- a/neqo-crypto/src/cert.rs +++ b/neqo-crypto/src/cert.rs @@ -4,23 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::err::secstatus_to_res; -use crate::p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray}; -use crate::ssl::{ - PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps, - SSL_PeerStapledOCSPResponses, -}; -use neqo_common::qerror; - -use std::convert::TryFrom; use std::ptr::{addr_of, NonNull}; -use std::slice; +use neqo_common::qerror; + +use crate::{ + err::secstatus_to_res, + null_safe_slice, + p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray}, + ssl::{ + PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps, + SSL_PeerStapledOCSPResponses, + }, +}; pub struct CertificateInfo { certs: CertList, cursor: *const CERTCertListNode, - /// stapled_ocsp_responses and signed_cert_timestamp are properties + /// `stapled_ocsp_responses` and `signed_cert_timestamp` are properties /// associated with each of the certificates. Right now, NSS only /// reports the value for the end-entity certificate (the first). 
stapled_ocsp_responses: Option>>, @@ -48,7 +49,7 @@ fn stapled_ocsp_responses(fd: *mut PRFileDesc) -> Option>> { }; for idx in 0..len { let itemp: *const SECItem = unsafe { ocsp_ptr.as_ref().items.offset(idx).cast() }; - let item = unsafe { slice::from_raw_parts((*itemp).data, (*itemp).len as usize) }; + let item = unsafe { null_safe_slice((*itemp).data, (*itemp).len) }; ocsp_helper.push(item.to_owned()); } Some(ocsp_helper) @@ -64,9 +65,8 @@ fn signed_cert_timestamp(fd: *mut PRFileDesc) -> Option> { if unsafe { sct_ptr.as_ref().len == 0 || sct_ptr.as_ref().data.is_null() } { Some(Vec::new()) } else { - let sct_slice = unsafe { - slice::from_raw_parts(sct_ptr.as_ref().data, sct_ptr.as_ref().len as usize) - }; + let sct_slice = + unsafe { null_safe_slice(sct_ptr.as_ref().data, sct_ptr.as_ref().len) }; Some(sct_slice.to_owned()) } } @@ -101,7 +101,7 @@ impl<'a> Iterator for &'a mut CertificateInfo { let cert = unsafe { *self.cursor }.cert; secstatus_to_res(unsafe { CERT_GetCertificateDer(cert, &mut item) }) .expect("getting DER from certificate should work"); - Some(unsafe { std::slice::from_raw_parts(item.data, item.len as usize) }) + Some(unsafe { null_safe_slice(item.data, item.len) }) } } diff --git a/neqo-crypto/src/constants.rs b/neqo-crypto/src/constants.rs index 21e1a5aceb..76db972290 100644 --- a/neqo-crypto/src/constants.rs +++ b/neqo-crypto/src/constants.rs @@ -62,6 +62,7 @@ remap_enum! { TLS_GRP_EC_SECP384R1 = ssl_grp_ec_secp384r1, TLS_GRP_EC_SECP521R1 = ssl_grp_ec_secp521r1, TLS_GRP_EC_X25519 = ssl_grp_ec_curve25519, + TLS_GRP_KEM_XYBER768D00 = ssl_grp_kem_xyber768d00, } } diff --git a/neqo-crypto/src/ech.rs b/neqo-crypto/src/ech.rs index 5425e1a64c..4ff2cda7e8 100644 --- a/neqo-crypto/src/ech.rs +++ b/neqo-crypto/src/ech.rs @@ -4,23 +4,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + ffi::CString, + os::raw::{c_char, c_uint}, + ptr::{addr_of_mut, null_mut}, +}; + +use neqo_common::qtrace; + use crate::{ err::{ssl::SSL_ERROR_ECH_RETRY_WITH_ECH, Error, Res}, - experimental_api, + experimental_api, null_safe_slice, p11::{ self, Item, PrivateKey, PublicKey, SECITEM_FreeItem, SECItem, SECKEYPrivateKey, SECKEYPublicKey, Slot, }, ssl::{PRBool, PRFileDesc}, }; -use neqo_common::qtrace; -use std::{ - convert::TryFrom, - ffi::CString, - os::raw::{c_char, c_uint}, - ptr::{addr_of_mut, null_mut}, -}; - pub use crate::{ p11::{HpkeAeadId as AeadId, HpkeKdfId as KdfId, HpkeKemId as KemId}, ssl::HpkeSymmetricSuite as SymmetricSuite, @@ -75,7 +75,7 @@ pub fn convert_ech_error(fd: *mut PRFileDesc, err: Error) -> Error { return Error::InternalError; } let buf = unsafe { - let slc = std::slice::from_raw_parts(item.data, usize::try_from(item.len).unwrap()); + let slc = null_safe_slice(item.data, item.len); let buf = Vec::from(slc); SECITEM_FreeItem(&mut item, PRBool::from(false)); buf @@ -89,16 +89,18 @@ pub fn convert_ech_error(fd: *mut PRFileDesc, err: Error) -> Error { /// Generate a key pair for encrypted client hello (ECH). /// /// # Errors +/// /// When NSS fails to generate a key pair or when the KEM is not supported. +/// /// # Panics +/// /// When underlying types aren't large enough to hold keys. So never. 
pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { let slot = Slot::internal()?; let oid_data = unsafe { p11::SECOID_FindOIDByTag(p11::SECOidTag::SEC_OID_CURVE25519) }; let oid = unsafe { oid_data.as_ref() }.ok_or(Error::InternalError)?; - let oid_slc = - unsafe { std::slice::from_raw_parts(oid.oid.data, usize::try_from(oid.oid.len).unwrap()) }; + let oid_slc = unsafe { null_safe_slice(oid.oid.data, oid.oid.len) }; let mut params: Vec = Vec::with_capacity(oid_slc.len() + 2); params.push(u8::try_from(p11::SEC_ASN1_OBJECT_ID).unwrap()); params.push(u8::try_from(oid.oid.len).unwrap()); @@ -151,6 +153,7 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { /// Encode a configuration for encrypted client hello (ECH). /// /// # Errors +/// /// When NSS fails to generate a valid configuration encoding (i.e., unlikely). pub fn encode_config(config: u8, public_name: &str, pk: &PublicKey) -> Res> { // A sensible fixed value for the maximum length of a name. diff --git a/neqo-crypto/src/err.rs b/neqo-crypto/src/err.rs index fae81f9cb9..8d4f239a0b 100644 --- a/neqo-crypto/src/err.rs +++ b/neqo-crypto/src/err.rs @@ -7,8 +7,7 @@ #![allow(dead_code)] #![allow(clippy::upper_case_acronyms)] -use std::os::raw::c_char; -use std::str::Utf8Error; +use std::{os::raw::c_char, str::Utf8Error}; use crate::ssl::{SECStatus, SECSuccess}; @@ -17,15 +16,39 @@ mod codes { #![allow(non_snake_case)] include!(concat!(env!("OUT_DIR"), "/nss_secerr.rs")); include!(concat!(env!("OUT_DIR"), "/nss_sslerr.rs")); - include!(concat!(env!("OUT_DIR"), "/mozpkix.rs")); } -pub use codes::mozilla_pkix_ErrorCode as mozpkix; -pub use codes::SECErrorCodes as sec; -pub use codes::SSLErrorCodes as ssl; +pub use codes::{SECErrorCodes as sec, SSLErrorCodes as ssl}; pub mod nspr { include!(concat!(env!("OUT_DIR"), "/nspr_err.rs")); } +pub mod mozpkix { + // These are manually extracted from the many bindings generated + // by bindgen when provided with the simple header: + // #include "mozpkix/pkixnss.h" + 
+ #[allow(non_camel_case_types)] + pub type mozilla_pkix_ErrorCode = ::std::os::raw::c_int; + pub const MOZILLA_PKIX_ERROR_KEY_PINNING_FAILURE: mozilla_pkix_ErrorCode = -16384; + pub const MOZILLA_PKIX_ERROR_CA_CERT_USED_AS_END_ENTITY: mozilla_pkix_ErrorCode = -16383; + pub const MOZILLA_PKIX_ERROR_INADEQUATE_KEY_SIZE: mozilla_pkix_ErrorCode = -16382; + pub const MOZILLA_PKIX_ERROR_V1_CERT_USED_AS_CA: mozilla_pkix_ErrorCode = -16381; + pub const MOZILLA_PKIX_ERROR_NO_RFC822NAME_MATCH: mozilla_pkix_ErrorCode = -16380; + pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_CERTIFICATE: mozilla_pkix_ErrorCode = -16379; + pub const MOZILLA_PKIX_ERROR_NOT_YET_VALID_ISSUER_CERTIFICATE: mozilla_pkix_ErrorCode = -16378; + pub const MOZILLA_PKIX_ERROR_SIGNATURE_ALGORITHM_MISMATCH: mozilla_pkix_ErrorCode = -16377; + pub const MOZILLA_PKIX_ERROR_OCSP_RESPONSE_FOR_CERT_MISSING: mozilla_pkix_ErrorCode = -16376; + pub const MOZILLA_PKIX_ERROR_VALIDITY_TOO_LONG: mozilla_pkix_ErrorCode = -16375; + pub const MOZILLA_PKIX_ERROR_REQUIRED_TLS_FEATURE_MISSING: mozilla_pkix_ErrorCode = -16374; + pub const MOZILLA_PKIX_ERROR_INVALID_INTEGER_ENCODING: mozilla_pkix_ErrorCode = -16373; + pub const MOZILLA_PKIX_ERROR_EMPTY_ISSUER_NAME: mozilla_pkix_ErrorCode = -16372; + pub const MOZILLA_PKIX_ERROR_ADDITIONAL_POLICY_CONSTRAINT_FAILED: mozilla_pkix_ErrorCode = + -16371; + pub const MOZILLA_PKIX_ERROR_SELF_SIGNED_CERT: mozilla_pkix_ErrorCode = -16370; + pub const MOZILLA_PKIX_ERROR_MITM_DETECTED: mozilla_pkix_ErrorCode = -16369; + pub const END_OF_LIST: mozilla_pkix_ErrorCode = -16368; +} + pub type Res = Result; #[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)] @@ -137,10 +160,13 @@ pub fn is_blocked(result: &Res<()>) -> bool { #[cfg(test)] mod tests { - use crate::err::{self, is_blocked, secstatus_to_res, Error, PRErrorCode, PR_SetError}; - use crate::ssl::{SECFailure, SECSuccess}; use test_fixture::fixture_init; + use crate::{ + err::{self, is_blocked, secstatus_to_res, Error, PRErrorCode, 
PR_SetError}, + ssl::{SECFailure, SECSuccess}, + }; + fn set_error_code(code: PRErrorCode) { // This code doesn't work without initializing NSS first. fixture_init(); diff --git a/neqo-crypto/src/ext.rs b/neqo-crypto/src/ext.rs index aa89677b98..02ee6340c1 100644 --- a/neqo-crypto/src/ext.rs +++ b/neqo-crypto/src/ext.rs @@ -4,22 +4,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cell::RefCell, + os::raw::{c_uint, c_void}, + pin::Pin, + rc::Rc, +}; + use crate::{ agentio::as_c_void, constants::{Extension, HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}, err::Res, + null_safe_slice, ssl::{ PRBool, PRFileDesc, SECFailure, SECStatus, SECSuccess, SSLAlertDescription, SSLExtensionHandler, SSLExtensionWriter, SSLHandshakeType, }, }; -use std::{ - cell::RefCell, - convert::TryFrom, - os::raw::{c_uint, c_void}, - pin::Pin, - rc::Rc, -}; experimental_api!(SSL_InstallExtensionHooks( fd: *mut PRFileDesc, @@ -74,7 +75,7 @@ impl ExtensionTracker { f(&mut *rc.borrow_mut()) } - #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] unsafe extern "C" fn extension_writer( _fd: *mut PRFileDesc, message: SSLHandshakeType::Type, @@ -104,8 +105,8 @@ impl ExtensionTracker { alert: *mut SSLAlertDescription, arg: *mut c_void, ) -> SECStatus { - let d = std::slice::from_raw_parts(data, len as usize); - #[allow(clippy::cast_possible_truncation)] + let d = null_safe_slice(data, len); + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] Self::wrap_handler_call(arg, |handler| { // Cast is safe here because the message type is always part of the enum match handler.handle(message as HandshakeMessage, d) { @@ -121,11 +122,13 @@ impl ExtensionTracker { /// Use the provided handler to manage an extension. This is quite unsafe. 
/// /// # Safety + /// /// The holder of this `ExtensionTracker` needs to ensure that it lives at /// least as long as the file descriptor, as NSS provides no way to remove /// an extension handler once it is configured. /// /// # Errors + /// /// If the underlying NSS API fails to register a handler. pub unsafe fn new( fd: *mut PRFileDesc, diff --git a/neqo-crypto/src/hkdf.rs b/neqo-crypto/src/hkdf.rs index 3745d646d5..3706be6c3b 100644 --- a/neqo-crypto/src/hkdf.rs +++ b/neqo-crypto/src/hkdf.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + os::raw::{c_char, c_uint}, + ptr::null_mut, +}; + use crate::{ constants::{ Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, @@ -11,15 +16,10 @@ use crate::{ }, err::{Error, Res}, p11::{ - random, Item, PK11Origin, PK11SymKey, PK11_ImportDataKey, Slot, SymKey, CKA_DERIVE, + Item, PK11Origin, PK11SymKey, PK11_ImportDataKey, Slot, SymKey, CKA_DERIVE, CKM_HKDF_DERIVE, CK_ATTRIBUTE_TYPE, CK_MECHANISM_TYPE, }, -}; - -use std::{ - convert::TryFrom, - os::raw::{c_char, c_uint}, - ptr::null_mut, + random, }; experimental_api!(SSL_HkdfExtract( @@ -40,28 +40,38 @@ experimental_api!(SSL_HkdfExpandLabel( secret: *mut *mut PK11SymKey, )); -fn key_size(version: Version, cipher: Cipher) -> Res { +const MAX_KEY_SIZE: usize = 48; +const fn key_size(version: Version, cipher: Cipher) -> Res { if version != TLS_VERSION_1_3 { return Err(Error::UnsupportedVersion); } - Ok(match cipher { + let size = match cipher { TLS_AES_128_GCM_SHA256 | TLS_CHACHA20_POLY1305_SHA256 => 32, TLS_AES_256_GCM_SHA384 => 48, _ => return Err(Error::UnsupportedCipher), - }) + }; + debug_assert!(size <= MAX_KEY_SIZE); + Ok(size) } /// Generate a random key of the right size for the given suite. /// /// # Errors -/// Only if NSS fails. +/// +/// If the ciphersuite or protocol version is not supported. 
pub fn generate_key(version: Version, cipher: Cipher) -> Res { - import_key(version, &random(key_size(version, cipher)?)) + // With generic_const_expr, this becomes: + // import_key(version, &random::<{ key_size(version, cipher) }>()) + import_key( + version, + &random::()[0..key_size(version, cipher)?], + ) } /// Import a symmetric key for use with HKDF. /// /// # Errors +/// /// Errors returned if the key buffer is an incompatible size or the NSS functions fail. pub fn import_key(version: Version, buf: &[u8]) -> Res { if version != TLS_VERSION_1_3 { @@ -84,6 +94,7 @@ pub fn import_key(version: Version, buf: &[u8]) -> Res { /// Extract a PRK from the given salt and IKM using the algorithm defined in RFC 5869. /// /// # Errors +/// /// Errors returned if inputs are too large or the NSS functions fail. pub fn extract( version: Version, @@ -103,6 +114,7 @@ pub fn extract( /// Expand a PRK using the HKDF-Expand-Label function defined in RFC 8446. /// /// # Errors +/// /// Errors returned if inputs are too large or the NSS functions fail. pub fn expand_label( version: Version, diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index ee2f77cea0..1eba6a9cb5 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -4,6 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cell::RefCell, + fmt::{self, Debug}, + os::raw::{c_char, c_int, c_uint}, + ptr::{addr_of_mut, null, null_mut}, + rc::Rc, +}; + use crate::{ constants::{ Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, @@ -16,14 +24,6 @@ use crate::{ CK_CHACHA20_PARAMS, CK_MECHANISM_TYPE, }, }; -use std::{ - cell::RefCell, - convert::TryFrom, - fmt::{self, Debug}, - os::raw::{c_char, c_int, c_uint}, - ptr::{addr_of_mut, null, null_mut}, - rc::Rc, -}; experimental_api!(SSL_HkdfExpandLabelWithMech( version: Version, @@ -45,7 +45,7 @@ pub enum HpKey { /// track references using `Rc`. 
`PK11Context` can't be used with `PK11_CloneContext` /// as that is not supported for these contexts. Aes(Rc>), - /// The ChaCha20 mask has to invoke a new PK11_Encrypt every time as it needs to + /// The `ChaCha20` mask has to invoke a new `PK11_Encrypt` every time as it needs to /// change the counter and nonce on each invocation. Chacha(SymKey), } @@ -62,8 +62,11 @@ impl HpKey { /// QUIC-specific API for extracting a header-protection key. /// /// # Errors + /// /// Errors if HKDF fails or if the label is too long to fit in a `c_uint`. + /// /// # Panics + /// /// When `cipher` is not known to this code. #[allow(clippy::cast_sign_loss)] // Cast for PK11_GetBlockSize is safe. pub fn extract(version: Version, cipher: Cipher, prk: &SymKey, label: &str) -> Res { @@ -138,9 +141,12 @@ impl HpKey { /// Generate a header protection mask for QUIC. /// /// # Errors + /// /// An error is returned if the NSS functions fail; a sample of the /// wrong size is the obvious cause. + /// /// # Panics + /// /// When the mechanism for our key is not supported. pub fn mask(&self, sample: &[u8]) -> Res> { let mut output = vec![0_u8; self.block_size()]; @@ -164,9 +170,9 @@ impl HpKey { Self::Chacha(key) => { let params: CK_CHACHA20_PARAMS = CK_CHACHA20_PARAMS { - pBlockCounter: sample.as_ptr() as *mut u8, + pBlockCounter: sample.as_ptr().cast_mut(), blockCounterBits: 32, - pNonce: sample[4..Self::SAMPLE_SIZE].as_ptr() as *mut _, + pNonce: sample[4..Self::SAMPLE_SIZE].as_ptr().cast_mut(), ulNonceBits: 96, }; let mut output_len: c_uint = 0; diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 4a5673107e..2db985e8ee 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -4,17 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] -// Bindgen auto generated code -// won't adhere to the clippy rules below -#![allow(clippy::module_name_repetitions)] -#![allow(clippy::unseparated_literal_suffix)] -#![allow(clippy::used_underscore_binding)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. +#![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code. mod aead; -#[cfg(feature = "fuzzing")] -mod aead_fuzzing; +#[cfg(feature = "disable-encryption")] +pub mod aead_null; pub mod agent; mod agentio; mod auth; @@ -27,7 +22,6 @@ mod exp; pub mod ext; pub mod hkdf; pub mod hp; -mod once; #[macro_use] mod p11; mod prio; @@ -37,15 +31,14 @@ pub mod selfencrypt; mod ssl; mod time; -#[cfg(not(feature = "fuzzing"))] -pub use self::aead::RealAead as Aead; - -#[cfg(feature = "fuzzing")] -pub use self::aead_fuzzing::FuzzingAead as Aead; +use std::{ffi::CString, path::PathBuf, ptr::null, sync::OnceLock}; -#[cfg(feature = "fuzzing")] +#[cfg(not(feature = "disable-encryption"))] +pub use self::aead::RealAead as Aead; +#[cfg(feature = "disable-encryption")] pub use self::aead::RealAead; - +#[cfg(feature = "disable-encryption")] +pub use self::aead_null::AeadNull as Aead; pub use self::{ agent::{ Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken, @@ -60,21 +53,14 @@ pub use self::{ }, err::{Error, PRErrorCode, Res}, ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}, - p11::{random, PrivateKey, PublicKey, SymKey}, + p11::{random, randomize, PrivateKey, PublicKey, SymKey}, replay::AntiReplay, secrets::SecretDirection, ssl::Opt, }; -use self::once::OnceResult; - -use std::{ - ffi::CString, - path::{Path, PathBuf}, - ptr::null, -}; - -const MINIMUM_NSS_VERSION: &str = "3.74"; +mod min_version; +use min_version::MINIMUM_NSS_VERSION; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] 
#[allow(clippy::upper_case_acronyms)] @@ -91,7 +77,7 @@ fn secstatus_to_res(code: nss::SECStatus) -> Res<()> { enum NssLoaded { External, NoDb, - Db(Box), + Db, } impl Drop for NssLoaded { @@ -104,7 +90,7 @@ impl Drop for NssLoaded { } } -static mut INITIALIZED: OnceResult = OnceResult::new(); +static INITIALIZED: OnceLock> = OnceLock::new(); fn already_initialized() -> bool { unsafe { nss::NSS_IsInitialized() != 0 } @@ -119,23 +105,27 @@ fn version_check() { ); } -/// Initialize NSS. This only executes the initialization routines once, so if there is any chance that -pub fn init() { +/// Initialize NSS. This only executes the initialization routines once, so if there is any chance +/// that +/// +/// # Errors +/// +/// When NSS initialization fails. +pub fn init() -> Res<()> { // Set time zero. time::init(); - unsafe { - INITIALIZED.call_once(|| { - version_check(); - if already_initialized() { - return NssLoaded::External; - } + let res = INITIALIZED.get_or_init(|| { + version_check(); + if already_initialized() { + return Ok(NssLoaded::External); + } - secstatus_to_res(nss::NSS_NoDB_Init(null())).expect("NSS_NoDB_Init failed"); - secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed"); + secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) })?; + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; - NssLoaded::NoDb - }); - } + Ok(NssLoaded::NoDb) + }); + res.as_ref().map(|_| ()).map_err(Clone::clone) } /// This enables SSLTRACE by calling a simple, harmless function to trigger its @@ -143,62 +133,84 @@ pub fn init() { /// global options are accessed. Reading an option is the least impact approach. /// This allows us to use SSLTRACE in all of our unit tests and programs. 
#[cfg(debug_assertions)] -fn enable_ssl_trace() { +fn enable_ssl_trace() -> Res<()> { let opt = ssl::Opt::Locking.as_int(); let mut v: ::std::os::raw::c_int = 0; secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut v) }) - .expect("SSL_OptionGetDefault failed"); } /// Initialize with a database. -/// # Panics +/// +/// # Errors +/// /// If NSS cannot be initialized. -pub fn init_db>(dir: P) { +pub fn init_db>(dir: P) -> Res<()> { time::init(); - unsafe { - INITIALIZED.call_once(|| { - version_check(); - if already_initialized() { - return NssLoaded::External; - } + let res = INITIALIZED.get_or_init(|| { + version_check(); + if already_initialized() { + return Ok(NssLoaded::External); + } - let path = dir.into(); - assert!(path.is_dir()); - let pathstr = path.to_str().expect("path converts to string").to_string(); - let dircstr = CString::new(pathstr).unwrap(); - let empty = CString::new("").unwrap(); - secstatus_to_res(nss::NSS_Initialize( + let path = dir.into(); + if !path.is_dir() { + return Err(Error::InternalError); + } + let pathstr = path.to_str().ok_or(Error::InternalError)?; + let dircstr = CString::new(pathstr)?; + let empty = CString::new("")?; + secstatus_to_res(unsafe { + nss::NSS_Initialize( dircstr.as_ptr(), empty.as_ptr(), empty.as_ptr(), nss::SECMOD_DB.as_ptr().cast(), nss::NSS_INIT_READONLY, - )) - .expect("NSS_Initialize failed"); - - secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed"); - secstatus_to_res(ssl::SSL_ConfigServerSessionIDCache( - 1024, - 0, - 0, - dircstr.as_ptr(), - )) - .expect("SSL_ConfigServerSessionIDCache failed"); + ) + })?; - #[cfg(debug_assertions)] - enable_ssl_trace(); + secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })?; + secstatus_to_res(unsafe { + ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr()) + })?; - NssLoaded::Db(path.into_boxed_path()) - }); - } + #[cfg(debug_assertions)] + enable_ssl_trace()?; + + Ok(NssLoaded::Db) + }); + res.as_ref().map(|_| 
()).map_err(Clone::clone) } /// # Panics +/// /// If NSS isn't initialized. pub fn assert_initialized() { - unsafe { - INITIALIZED.call_once(|| { - panic!("NSS not initialized with init or init_db"); - }); + INITIALIZED + .get() + .expect("NSS not initialized with init or init_db"); +} + +/// NSS tends to return empty "slices" with a null pointer, which will cause +/// `std::slice::from_raw_parts` to panic if passed directly. This wrapper avoids +/// that issue. It also performs conversion for lengths, as a convenience. +/// +/// # Panics +/// If the provided length doesn't fit into a `usize`. +/// +/// # Safety +/// The caller must adhere to the safety constraints of `std::slice::from_raw_parts`, +/// except that this will accept a null value for `data`. +unsafe fn null_safe_slice<'a, T>(data: *const u8, len: T) -> &'a [u8] +where + usize: TryFrom, +{ + if data.is_null() { + &[] + } else if let Ok(len) = usize::try_from(len) { + #[allow(clippy::disallowed_methods)] + std::slice::from_raw_parts(data, len) + } else { + panic!("null_safe_slice: size overflow"); } } diff --git a/neqo-crypto/src/min_version.rs b/neqo-crypto/src/min_version.rs new file mode 100644 index 0000000000..4386371b1b --- /dev/null +++ b/neqo-crypto/src/min_version.rs @@ -0,0 +1,9 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// The minimum version of NSS that is required by this version of neqo. +/// Note that the string may contain whitespace at the beginning and/or end. +pub(crate) const MINIMUM_NSS_VERSION: &str = include_str!("../min_version.txt"); diff --git a/neqo-crypto/src/once.rs b/neqo-crypto/src/once.rs deleted file mode 100644 index 80657cfe26..0000000000 --- a/neqo-crypto/src/once.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use std::sync::Once; - -#[allow(clippy::module_name_repetitions)] -pub struct OnceResult { - once: Once, - v: Option, -} - -impl OnceResult { - #[must_use] - pub const fn new() -> Self { - Self { - once: Once::new(), - v: None, - } - } - - pub fn call_once T>(&mut self, f: F) -> &T { - let v = &mut self.v; - self.once.call_once(|| { - *v = Some(f()); - }); - self.v.as_ref().unwrap() - } -} - -#[cfg(test)] -mod test { - use super::OnceResult; - - static mut STATIC_ONCE_RESULT: OnceResult = OnceResult::new(); - - #[test] - fn static_update() { - assert_eq!(*unsafe { STATIC_ONCE_RESULT.call_once(|| 23) }, 23); - assert_eq!(*unsafe { STATIC_ONCE_RESULT.call_once(|| 24) }, 23); - } -} diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index 3f60577369..5552882e2e 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -9,16 +9,21 @@ #![allow(non_camel_case_types)] #![allow(non_snake_case)] -use crate::err::{secstatus_to_res, Error, Res}; -use neqo_common::hex_with_len; use std::{ - convert::TryFrom, + cell::RefCell, mem, ops::{Deref, DerefMut}, os::raw::{c_int, c_uint}, ptr::null_mut, }; +use neqo_common::hex_with_len; + +use crate::{ + err::{secstatus_to_res, Error, Res}, + null_safe_slice, +}; + #[allow(clippy::upper_case_acronyms)] #[allow(clippy::unreadable_literal)] #[allow(unknown_lints, clippy::borrow_as_ptr)] @@ -39,6 +44,7 @@ macro_rules! scoped_ptr { /// Create a new instance of `$scoped` from a pointer. /// /// # Errors + /// /// When passed a null pointer generates an error. pub fn from_ptr(ptr: *mut $target) -> Result { if ptr.is_null() { @@ -80,8 +86,11 @@ impl PublicKey { /// Get the HPKE serialization of the public key. /// /// # Errors + /// /// When the key cannot be exported, which can be because the type is not supported. + /// /// # Panics + /// /// When keys are too large to fit in `c_uint/usize`. So only on programming error. 
pub fn key_data(&self) -> Res> { let mut buf = vec![0; 100]; @@ -124,9 +133,12 @@ impl PrivateKey { /// Get the bits of the private key. /// /// # Errors + /// /// When the key cannot be exported, which can be because the type is not supported /// or because the key data cannot be extracted from the PKCS#11 module. + /// /// # Panics + /// /// When the values are too large to fit. So never. pub fn key_data(&self) -> Res> { let mut key_item = Item::make_empty(); @@ -138,9 +150,7 @@ impl PrivateKey { &mut key_item, ) })?; - let slc = unsafe { - std::slice::from_raw_parts(key_item.data, usize::try_from(key_item.len).unwrap()) - }; + let slc = unsafe { null_safe_slice(key_item.data, key_item.len) }; let key = Vec::from(slc); // The data that `key_item` refers to needs to be freed, but we can't // use the scoped `Item` implementation. This is OK as long as nothing @@ -187,6 +197,7 @@ impl SymKey { /// You really don't want to use this. /// /// # Errors + /// /// Internal errors in case of failures in NSS. pub fn as_bytes(&self) -> Res<&[u8]> { secstatus_to_res(unsafe { PK11_ExtractKeyValue(self.ptr) })?; @@ -195,7 +206,7 @@ impl SymKey { // This is accessing a value attached to the key, so we can treat this as a borrow. match unsafe { key_item.as_mut() } { None => Err(Error::InternalError), - Some(key) => Ok(unsafe { std::slice::from_raw_parts(key.data, key.len as usize) }), + Some(key) => Ok(unsafe { null_safe_slice(key.data, key.len) }), } } } @@ -237,7 +248,7 @@ impl Item { pub fn wrap(buf: &[u8]) -> SECItem { SECItem { type_: SECItemType::siBuffer, - data: buf.as_ptr() as *mut u8, + data: buf.as_ptr().cast_mut(), len: c_uint::try_from(buf.len()).unwrap(), } } @@ -247,9 +258,10 @@ impl Item { /// Minimally, it can only be passed as a `const SECItem*` argument to functions, /// or those that treat their argument as `const`. 
pub fn wrap_struct(v: &T) -> SECItem { + let data: *const T = v; SECItem { type_: SECItemType::siBuffer, - data: (v as *const T as *mut T).cast(), + data: data.cast_mut().cast(), len: c_uint::try_from(mem::size_of::()).unwrap(), } } @@ -267,38 +279,118 @@ impl Item { /// content that is referenced there. /// /// # Safety + /// /// This dereferences two pointers. It doesn't get much less safe. pub unsafe fn into_vec(self) -> Vec { let b = self.ptr.as_ref().unwrap(); // Sanity check the type, as some types don't count bytes in `Item::len`. assert_eq!(b.type_, SECItemType::siBuffer); - let slc = std::slice::from_raw_parts(b.data, usize::try_from(b.len).unwrap()); + let slc = null_safe_slice(b.data, b.len); Vec::from(slc) } } -/// Generate a randomized buffer. +/// Fill a buffer with randomness. +/// /// # Panics +/// /// When `size` is too large or NSS fails. -#[must_use] -pub fn random(size: usize) -> Vec { - let mut buf = vec![0; size]; - secstatus_to_res(unsafe { - PK11_GenerateRandom(buf.as_mut_ptr(), c_int::try_from(buf.len()).unwrap()) - }) - .unwrap(); +pub fn randomize>(mut buf: B) -> B { + let m_buf = buf.as_mut(); + let len = c_int::try_from(m_buf.len()).unwrap(); + secstatus_to_res(unsafe { PK11_GenerateRandom(m_buf.as_mut_ptr(), len) }).unwrap(); buf } +struct RandomCache { + cache: [u8; Self::SIZE], + used: usize, +} + +impl RandomCache { + const SIZE: usize = 256; + const CUTOFF: usize = 32; + + fn new() -> Self { + RandomCache { + cache: [0; Self::SIZE], + used: Self::SIZE, + } + } + + fn randomize>(&mut self, mut buf: B) -> B { + let m_buf = buf.as_mut(); + debug_assert!(m_buf.len() <= Self::CUTOFF); + let avail = Self::SIZE - self.used; + if m_buf.len() <= avail { + m_buf.copy_from_slice(&self.cache[self.used..self.used + m_buf.len()]); + self.used += m_buf.len(); + } else { + if avail > 0 { + m_buf[..avail].copy_from_slice(&self.cache[self.used..]); + } + randomize(&mut self.cache[..]); + self.used = m_buf.len() - avail; + 
m_buf[avail..].copy_from_slice(&self.cache[..self.used]); + } + buf + } +} + +/// Generate a randomized array. +/// +/// # Panics +/// +/// When `size` is too large or NSS fails. +#[must_use] +pub fn random() -> [u8; N] { + thread_local!(static CACHE: RefCell = RefCell::new(RandomCache::new())); + + let buf = [0; N]; + if N <= RandomCache::CUTOFF { + CACHE.with_borrow_mut(|c| c.randomize(buf)) + } else { + randomize(buf) + } +} + #[cfg(test)] mod test { - use super::random; use test_fixture::fixture_init; + use super::RandomCache; + use crate::{random, randomize}; + #[test] fn randomness() { fixture_init(); - // If this ever fails, there is either a bug, or it's time to buy a lottery ticket. - assert_ne!(random(16), random(16)); + // If any of these ever fail, there is either a bug, or it's time to buy a lottery ticket. + assert_ne!(random::<16>(), randomize([0; 16])); + assert_ne!([0; 16], random::<16>()); + assert_ne!([0; 64], random::<64>()); + } + + #[test] + fn cache_random_lengths() { + const ZERO: [u8; 256] = [0; 256]; + + fixture_init(); + let mut cache = RandomCache::new(); + let mut buf = [0; 256]; + let bits = usize::BITS - (RandomCache::CUTOFF - 1).leading_zeros(); + let mask = 0xff >> (u8::BITS - bits); + + for _ in 0..100 { + let len = loop { + let len = usize::from(random::<1>()[0] & mask) + 1; + if len <= RandomCache::CUTOFF { + break len; + } + }; + buf.fill(0); + if len >= 16 { + assert_ne!(&cache.randomize(&mut buf[..len])[..len], &ZERO[..len]); + } + } } } diff --git a/neqo-crypto/src/replay.rs b/neqo-crypto/src/replay.rs index 8f35ed6401..5fd6fd1250 100644 --- a/neqo-crypto/src/replay.rs +++ b/neqo-crypto/src/replay.rs @@ -4,19 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{ - err::Res, - ssl::PRFileDesc, - time::{Interval, PRTime, Time}, -}; use std::{ - convert::{TryFrom, TryInto}, ops::{Deref, DerefMut}, os::raw::c_uint, ptr::null_mut, time::{Duration, Instant}, }; +use crate::{ + err::Res, + ssl::PRFileDesc, + time::{Interval, PRTime, Time}, +}; + // This is an opaque struct in NSS. #[allow(clippy::upper_case_acronyms)] #[allow(clippy::empty_enum)] @@ -55,6 +55,7 @@ impl AntiReplay { /// See the documentation in NSS for advice on how to set these values. /// /// # Errors + /// /// Returns an error if `now` is in the past relative to our baseline or /// NSS is unable to generate an anti-replay context. pub fn new(now: Instant, window: Duration, k: usize, bits: usize) -> Res { diff --git a/neqo-crypto/src/secrets.rs b/neqo-crypto/src/secrets.rs index 7fff5d4f68..75677636b6 100644 --- a/neqo-crypto/src/secrets.rs +++ b/neqo-crypto/src/secrets.rs @@ -4,6 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{os::raw::c_void, pin::Pin}; + +use neqo_common::qdebug; + use crate::{ agentio::as_c_void, constants::Epoch, @@ -11,8 +15,6 @@ use crate::{ p11::{PK11SymKey, PK11_ReferenceSymKey, SymKey}, ssl::{PRFileDesc, SSLSecretCallback, SSLSecretDirection}, }; -use neqo_common::qdebug; -use std::{os::raw::c_void, pin::Pin}; experimental_api!(SSL_SecretCallback( fd: *mut PRFileDesc, diff --git a/neqo-crypto/src/selfencrypt.rs b/neqo-crypto/src/selfencrypt.rs index 62d7057435..d0a85830b0 100644 --- a/neqo-crypto/src/selfencrypt.rs +++ b/neqo-crypto/src/selfencrypt.rs @@ -4,14 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::constants::{Cipher, Version}; -use crate::err::{Error, Res}; -use crate::p11::{random, SymKey}; -use crate::{hkdf, Aead}; +use std::mem; use neqo_common::{hex, qinfo, qtrace, Encoder}; -use std::mem; +use crate::{ + constants::{Cipher, Version}, + err::{Error, Res}, + hkdf, + p11::{random, SymKey}, + Aead, +}; #[derive(Debug)] pub struct SelfEncrypt { @@ -27,6 +30,7 @@ impl SelfEncrypt { const SALT_LENGTH: usize = 16; /// # Errors + /// /// Failure to generate a new HKDF key using NSS results in an error. pub fn new(version: Version, cipher: Cipher) -> Res { let key = hkdf::generate_key(version, cipher)?; @@ -43,12 +47,14 @@ impl SelfEncrypt { debug_assert_eq!(salt.len(), Self::SALT_LENGTH); let salt = hkdf::import_key(self.version, salt)?; let secret = hkdf::extract(self.version, self.cipher, Some(&salt), k)?; - Aead::new(false, self.version, self.cipher, &secret, "neqo self") + Aead::new(self.version, self.cipher, &secret, "neqo self") } - /// Rotate keys. This causes any previous key that is being held to be replaced by the current key. + /// Rotate keys. This causes any previous key that is being held to be replaced by the current + /// key. /// /// # Errors + /// /// Failure to generate a new HKDF key using NSS results in an error. pub fn rotate(&mut self) -> Res<()> { let new_key = hkdf::generate_key(self.version, self.cipher)?; @@ -65,6 +71,7 @@ impl SelfEncrypt { /// caller is responsible for carrying the AAD as appropriate. /// /// # Errors + /// /// Failure to protect using NSS AEAD APIs produces an error. pub fn seal(&self, aad: &[u8], plaintext: &[u8]) -> Res> { // Format is: @@ -75,7 +82,7 @@ impl SelfEncrypt { // opaque aead_encrypted(plaintext)[length as expanded]; // }; // AAD covers the entire header, plus the value of the AAD parameter that is provided. 
- let salt = random(Self::SALT_LENGTH); + let salt = random::<{ Self::SALT_LENGTH }>(); let cipher = self.make_aead(&self.key, &salt)?; let encoded_len = 2 + salt.len() + plaintext.len() + cipher.expansion(); @@ -117,6 +124,7 @@ /// Open the protected `ciphertext`. /// /// # Errors + /// /// Returns an error when the self-encrypted object is invalid; /// when the keys have been rotated; or when NSS fails. #[allow(clippy::similar_names)] // aad is similar to aead diff --git a/neqo-crypto/src/ssl.rs b/neqo-crypto/src/ssl.rs index 08776f34ba..8aaacffae6 100644 --- a/neqo-crypto/src/ssl.rs +++ b/neqo-crypto/src/ssl.rs @@ -15,11 +15,13 @@ clippy::borrow_as_ptr )] -use crate::constants::Epoch; -use crate::err::{secstatus_to_res, Res}; - use std::os::raw::{c_uint, c_void}; +use crate::{ + constants::Epoch, + err::{secstatus_to_res, Res}, +}; + include!(concat!(env!("OUT_DIR"), "/nss_ssl.rs")); mod SSLOption { include!(concat!(env!("OUT_DIR"), "/nss_sslopt.rs")); } diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index 981ac6f420..359436a854 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -6,22 +6,20 @@ #![allow(clippy::upper_case_acronyms)] -use crate::{ - agentio::as_c_void, - err::{Error, Res}, - once::OnceResult, - ssl::{PRFileDesc, SSLTimeFunc}, -}; - use std::{ - boxed::Box, - convert::{TryFrom, TryInto}, ops::Deref, os::raw::c_void, pin::Pin, + sync::OnceLock, time::{Duration, Instant}, }; +use crate::{ + agentio::as_c_void, + err::{Error, Res}, + ssl::{PRFileDesc, SSLTimeFunc}, +}; + include!(concat!(env!("OUT_DIR"), "/nspr_time.rs")); experimental_api!(SSL_SetTimeFunc( @@ -67,14 +65,13 @@ impl TimeZero { } } -static mut BASE_TIME: OnceResult<TimeZero> = OnceResult::new(); +static BASE_TIME: OnceLock<TimeZero> = OnceLock::new(); fn get_base() -> &'static TimeZero { - let f = || TimeZero { + BASE_TIME.get_or_init(|| TimeZero { instant: Instant::now(), prtime: unsafe { PR_Now() }, - }; - unsafe { BASE_TIME.call_once(f) } + }) }
pub(crate) fn init() { @@ -97,9 +94,8 @@ impl Deref for Time { impl From<Instant> for Time { /// Convert from an Instant into a Time. fn from(t: Instant) -> Self { - // Call `TimeZero::baseline(t)` so that time zero can be set. - let f = || TimeZero::baseline(t); - _ = unsafe { BASE_TIME.call_once(f) }; + // Initialize `BASE_TIME` using `TimeZero::baseline(t)`. + BASE_TIME.get_or_init(|| TimeZero::baseline(t)); Self { t } } } @@ -108,14 +104,17 @@ impl TryFrom<PRTime> for Time { type Error = Error; fn try_from(prtime: PRTime) -> Res<Self> { let base = get_base(); - if let Some(delta) = prtime.checked_sub(base.prtime) { - let d = Duration::from_micros(delta.try_into()?); - base.instant - .checked_add(d) - .map_or(Err(Error::TimeTravelError), |t| Ok(Self { t })) + let delta = prtime + .checked_sub(base.prtime) + .ok_or(Error::TimeTravelError)?; + let d = Duration::from_micros(u64::try_from(delta.abs())?); + let t = if delta >= 0 { + base.instant.checked_add(d) } else { - Err(Error::TimeTravelError) - } + base.instant.checked_sub(d) + }; + let t = t.ok_or(Error::TimeTravelError)?; + Ok(Self { t }) } } @@ -123,14 +122,21 @@ impl TryInto<PRTime> for Time { type Error = Error; fn try_into(self) -> Res<PRTime> { let base = get_base(); - let delta = self - .t - .checked_duration_since(base.instant) - .ok_or(Error::TimeTravelError)?; - if let Ok(d) = PRTime::try_from(delta.as_micros()) { - d.checked_add(base.prtime).ok_or(Error::TimeTravelError) + + if let Some(delta) = self.t.checked_duration_since(base.instant) { + if let Ok(d) = PRTime::try_from(delta.as_micros()) { + d.checked_add(base.prtime).ok_or(Error::TimeTravelError) + } else { + Err(Error::TimeTravelError) + } } else { - Err(Error::TimeTravelError) + // Try to go backwards from the base time.
+ let backwards = base.instant - self.t; // infallible + if let Ok(d) = PRTime::try_from(backwards.as_micros()) { + base.prtime.checked_sub(d).ok_or(Error::TimeTravelError) + } else { + Err(Error::TimeTravelError) + } } } } @@ -207,12 +213,10 @@ impl Default for TimeHolder { #[cfg(test)] mod test { + use std::time::{Duration, Instant}; + use super::{get_base, init, Interval, PRTime, Time}; use crate::err::Res; - use std::{ - convert::{TryFrom, TryInto}, - time::{Duration, Instant}, - }; #[test] fn convert_stable() { @@ -227,16 +231,23 @@ mod test { } #[test] - fn past_time() { + fn past_prtime() { + const DELTA: Duration = Duration::from_secs(1); init(); let base = get_base(); - assert!(Time::try_from(base.prtime - 1).is_err()); + let delta_micros = PRTime::try_from(DELTA.as_micros()).unwrap(); + println!("{} - {}", base.prtime, delta_micros); + let t = Time::try_from(base.prtime - delta_micros).unwrap(); + assert_eq!(Instant::from(t) + DELTA, base.instant); } #[test] - fn negative_time() { + fn past_instant() { + const DELTA: Duration = Duration::from_secs(1); init(); - assert!(Time::try_from(-1).is_err()); + let base = get_base(); + let t = Time::from(base.instant.checked_sub(DELTA).unwrap()); + assert_eq!(Instant::from(t) + DELTA, base.instant); } #[test] @@ -247,11 +258,11 @@ mod test { #[test] // We allow replace_consts here because - // std::u64::max_value() isn't available + // std::u64::MAX isn't available // in all of our targets fn overflow_interval() { init(); - let interval = Interval::from(Duration::from_micros(u64::max_value())); + let interval = Interval::from(Duration::from_micros(u64::MAX)); let res: Res = interval.try_into(); assert!(res.is_err()); } diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index b9721e3d64..f8416ed9a7 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -1,10 +1,16 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] +// Licensed under the Apache License, Version 2.0 or the 
MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![warn(clippy::pedantic)] -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] -use neqo_crypto::constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use neqo_crypto::hkdf; -use neqo_crypto::Aead; +use neqo_crypto::{ + constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, + hkdf, Aead, +}; use test_fixture::fixture_init; const AAD: &[u8] = &[ @@ -34,7 +40,6 @@ fn make_aead(cipher: Cipher) -> Aead { ) .expect("make a secret"); Aead::new( - false, TLS_VERSION_1_3, cipher, &secret, diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index d487062d51..80bf816930 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -1,20 +1,22 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
use neqo_crypto::{ generate_ech_keys, AuthenticationStatus, Client, Error, HandshakeState, SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker, TLS_AES_128_GCM_SHA256, - TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_VERSION_1_3, + TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; -use std::boxed::Box; - mod handshake; +use test_fixture::{fixture_init, now}; + use crate::handshake::{ connect, connect_fail, forward_records, resumption_setup, PermissiveZeroRttChecker, Resumption, ZERO_RTT_TOKEN_DATA, }; -use test_fixture::{fixture_init, now}; #[test] fn make_client() { @@ -155,6 +157,48 @@ fn chacha_client() { ); } +#[test] +fn server_prefers_first_client_share() { + fixture_init(); + let mut client = Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(1) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_X25519); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_X25519); +} + +#[test] +fn server_prefers_second_client_share() { + fixture_init(); + let mut client = Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(1) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + assert_eq!(client.info().unwrap().key_exchange(), 
TLS_GRP_EC_SECP256R1); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); +} + #[test] fn p256_server() { fixture_init(); @@ -170,6 +214,27 @@ fn p256_server() { assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); } +#[test] +fn p256_server_hrr() { + fixture_init(); + let mut client = Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(0) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); +} + #[test] fn alpn() { fixture_init(); @@ -439,7 +504,7 @@ fn ech_retry() { HandshakeState::EchFallbackAuthenticationPending(String::from(PUBLIC_NAME)) ); client.authenticated(AuthenticationStatus::Ok); - let Err(Error::EchRetry(updated_config)) = client.handshake_raw(now(), None) else { + let Err(Error::EchRetry(updated_config)) = client.handshake_raw(now(), None) else { panic!( "Handshake should fail with EchRetry, state is instead {:?}", client.state() diff --git a/neqo-crypto/tests/ext.rs b/neqo-crypto/tests/ext.rs index 02d78603b6..c8732dd014 100644 --- a/neqo-crypto/tests/ext.rs +++ b/neqo-crypto/tests/ext.rs @@ -1,11 +1,16 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
-use neqo_crypto::constants::{HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}; -use neqo_crypto::ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}; -use neqo_crypto::{Client, Server}; -use std::cell::RefCell; -use std::rc::Rc; +use std::{cell::RefCell, rc::Rc}; + +use neqo_crypto::{ + constants::{HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}, + ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}, + Client, Server, +}; use test_fixture::fixture_init; mod handshake; diff --git a/neqo-crypto/tests/handshake.rs b/neqo-crypto/tests/handshake.rs index 779ec5ac22..3cb31337fd 100644 --- a/neqo-crypto/tests/handshake.rs +++ b/neqo-crypto/tests/handshake.rs @@ -1,12 +1,20 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![allow(dead_code)] +#![allow(clippy::missing_panics_doc)] +#![allow(clippy::missing_errors_doc)] + +use std::{mem, time::Instant}; use neqo_common::qinfo; use neqo_crypto::{ AntiReplay, AuthenticationStatus, Client, HandshakeState, RecordList, Res, ResumptionToken, SecretAgent, Server, ZeroRttCheckResult, ZeroRttChecker, }; -use std::mem; -use std::time::Instant; use test_fixture::{anti_replay, fixture_init, now}; /// Consume records until the handshake state changes. 
@@ -127,6 +135,7 @@ fn zero_rtt_setup( } } +#[must_use] pub fn resumption_setup(mode: Resumption) -> (Option, ResumptionToken) { fixture_init(); diff --git a/neqo-crypto/tests/hkdf.rs b/neqo-crypto/tests/hkdf.rs index 10a66f10b7..acb5bbdda8 100644 --- a/neqo-crypto/tests/hkdf.rs +++ b/neqo-crypto/tests/hkdf.rs @@ -1,11 +1,16 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] - -use neqo_crypto::constants::{ - Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, - TLS_VERSION_1_3, +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use neqo_crypto::{ + constants::{ + Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, + TLS_VERSION_1_3, + }, + hkdf, SymKey, }; -use neqo_crypto::{hkdf, SymKey}; use test_fixture::fixture_init; const SALT: &[u8] = &[ diff --git a/neqo-crypto/tests/hp.rs b/neqo-crypto/tests/hp.rs index 8b2ba7612b..da7df2cc19 100644 --- a/neqo-crypto/tests/hp.rs +++ b/neqo-crypto/tests/hp.rs @@ -1,5 +1,10 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::mem; use neqo_crypto::{ constants::{ @@ -9,7 +14,6 @@ use neqo_crypto::{ hkdf, hp::HpKey, }; -use std::mem; use test_fixture::fixture_init; fn make_hp(cipher: Cipher) -> HpKey { @@ -67,14 +71,14 @@ fn chacha20_ctr() { } #[test] -#[should_panic] +#[should_panic(expected = "out of range")] fn aes_short() { let hp = make_hp(TLS_AES_128_GCM_SHA256); mem::drop(hp.mask(&[0; 15])); } #[test] -#[should_panic] +#[should_panic(expected = "out of range")] fn chacha20_short() { let hp = make_hp(TLS_CHACHA20_POLY1305_SHA256); mem::drop(hp.mask(&[0; 15])); diff --git a/neqo-crypto/tests/init.rs b/neqo-crypto/tests/init.rs index 21291ceebb..ee7d808e29 100644 --- a/neqo-crypto/tests/init.rs +++ b/neqo-crypto/tests/init.rs @@ -1,5 +1,8 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. // This uses external interfaces to neqo_crypto rather than being a module // inside of lib.rs. Because all other code uses the test_fixture module, @@ -12,13 +15,7 @@ use neqo_crypto::{assert_initialized, init_db}; // Pull in the NSS internals so that we can ask NSS if it thinks that // it is properly initialized. 
-#[allow( - dead_code, - non_upper_case_globals, - clippy::redundant_static_lifetimes, - clippy::unseparated_literal_suffix, - clippy::upper_case_acronyms -)] +#[allow(dead_code, non_upper_case_globals)] mod nss { include!(concat!(env!("OUT_DIR"), "/nss_init.rs")); } @@ -26,19 +23,54 @@ mod nss { #[cfg(nss_nodb)] #[test] fn init_nodb() { - init(); + neqo_crypto::init().unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); } } +#[cfg(nss_nodb)] +#[test] +fn init_twice_nodb() { + unsafe { + nss::NSS_NoDB_Init(std::ptr::null()); + assert_ne!(nss::NSS_IsInitialized(), 0); + } + // Now do it again + init_nodb(); +} + #[cfg(not(nss_nodb))] #[test] fn init_withdb() { - init_db(::test_fixture::NSS_DB_PATH); + init_db(::test_fixture::NSS_DB_PATH).unwrap(); assert_initialized(); unsafe { - assert!(nss::NSS_IsInitialized() != 0); + assert_ne!(nss::NSS_IsInitialized(), 0); + } +} + +#[cfg(not(nss_nodb))] +#[test] +fn init_twice_withdb() { + use std::{ffi::CString, path::PathBuf}; + + let empty = CString::new("").unwrap(); + let path: PathBuf = ::test_fixture::NSS_DB_PATH.into(); + assert!(path.is_dir()); + let pathstr = path.to_str().unwrap(); + let dircstr = CString::new(pathstr).unwrap(); + unsafe { + nss::NSS_Initialize( + dircstr.as_ptr(), + empty.as_ptr(), + empty.as_ptr(), + nss::SECMOD_DB.as_ptr().cast(), + nss::NSS_INIT_READONLY, + ); + assert_ne!(nss::NSS_IsInitialized(), 0); } + // Now do it again + init_withdb(); } diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 5828f09392..9fc2162fe2 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -1,13 +1,21 @@ -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] -#![cfg(not(feature = "fuzzing"))] +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. -use neqo_crypto::constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use neqo_crypto::{init, selfencrypt::SelfEncrypt, Error}; +#![cfg(not(feature = "disable-encryption"))] + +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, + init, + selfencrypt::SelfEncrypt, + Error, +}; #[test] fn se_create() { - init(); + init().unwrap(); SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).expect("constructor works"); } @@ -15,7 +23,7 @@ const PLAINTEXT: &[u8] = b"PLAINTEXT"; const AAD: &[u8] = b"AAD"; fn sealed() -> (SelfEncrypt, Vec) { - init(); + init().unwrap(); let se = SelfEncrypt::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256).unwrap(); let sealed = se.seal(AAD, PLAINTEXT).expect("sealing works"); (se, sealed) diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 78a77a29cd..27f43fd93f 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,28 +1,35 @@ [package] name = "neqo-http3" -version = "0.6.4" -authors = ["Dragana Damjanovic "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true + +[lints] +workspace = true [dependencies] +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +enumset = { version = "1.1", default-features = false } +log = { version = "0.4", default-features = false } neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } neqo-qpack = { path = "./../neqo-qpack" } -log = {version = "0.4.0", default-features = false} -smallvec = "1.0.0" -qlog = "0.4.0" -sfv = "0.9.1" -url = "2.0" -lazy_static = "1.3.0" -enumset = "1.0.8" +neqo-transport = { path = "./../neqo-transport" } +qlog = { version = 
"0.12", default-features = false } +sfv = { version = "0.9", default-features = false } +smallvec = { version = "1.11", default-features = false } +url = { version = "2.5", default-features = false } [dev-dependencies] test-fixture = { path = "../test-fixture" } [features] -default = ["deny-warnings"] -deny-warnings = [] -fuzzing = ["neqo-transport/fuzzing", "neqo-crypto/fuzzing"] +disable-encryption = ["neqo-transport/disable-encryption", "neqo-crypto/disable-encryption"] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-http3/src/buffered_send_stream.rs b/neqo-http3/src/buffered_send_stream.rs index 2a7d01bb74..4f6761fa80 100644 --- a/neqo-http3/src/buffered_send_stream.rs +++ b/neqo-http3/src/buffered_send_stream.rs @@ -4,10 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::Res; use neqo_common::qtrace; use neqo_transport::{Connection, StreamId}; +use crate::Res; + #[derive(Debug, PartialEq, Eq)] pub enum BufferedStream { Uninitialized, @@ -36,6 +37,7 @@ impl BufferedStream { } /// # Panics + /// /// If the `BufferedStream` is initialized more than one it will panic. pub fn init(&mut self, stream_id: StreamId) { debug_assert!(&Self::Uninitialized == self); @@ -46,6 +48,7 @@ impl BufferedStream { } /// # Panics + /// /// This functon cannot be called before the `BufferedStream` is initialized. pub fn buffer(&mut self, to_buf: &[u8]) { if let Self::Initialized { buf, .. } = self { @@ -56,6 +59,7 @@ impl BufferedStream { } /// # Errors + /// /// Returns `neqo_transport` errors. pub fn send_buffer(&mut self, conn: &mut Connection) -> Res { let label = ::neqo_common::log_subject!(::log::Level::Debug, self); @@ -76,6 +80,7 @@ impl BufferedStream { } /// # Errors + /// /// Returns `neqo_transport` errors. 
pub fn send_atomic(&mut self, conn: &mut Connection, to_send: &[u8]) -> Res { // First try to send anything that is in the buffer. diff --git a/neqo-http3/src/client_events.rs b/neqo-http3/src/client_events.rs index b4fdde8e13..61aba8f9f1 100644 --- a/neqo-http3/src/client_events.rs +++ b/neqo-http3/src/client_events.rs @@ -6,19 +6,18 @@ #![allow(clippy::module_name_repetitions)] -use crate::connection::Http3State; -use crate::settings::HSettingType; -use crate::{ - features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, - CloseType, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents, -}; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + use neqo_common::{event::Provider as EventProvider, Header}; use neqo_crypto::ResumptionToken; use neqo_transport::{AppError, StreamId, StreamType}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; +use crate::{ + connection::Http3State, + features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, + settings::HSettingType, + CloseType, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents, +}; #[derive(Debug, PartialEq, Eq, Clone)] pub enum WebTransportEvent { @@ -62,7 +61,7 @@ pub enum Http3ClientEvent { error: AppError, local: bool, }, - /// Peer has sent a STOP_SENDING. + /// Peer has sent a `STOP_SENDING`. StopSending { stream_id: StreamId, error: AppError, @@ -84,7 +83,7 @@ pub enum Http3ClientEvent { PushDataReadable { push_id: u64 }, /// A push has been canceled. PushCanceled { push_id: u64 }, - /// A push stream was been reset due to a HttpGeneralProtocol error. + /// A push stream was been reset due to a `HttpGeneralProtocol` error. /// Most common case are malformed response headers. PushReset { push_id: u64, error: AppError }, /// New stream can be created @@ -103,7 +102,7 @@ pub enum Http3ClientEvent { GoawayReceived, /// Connection state change. 
StateChange(Http3State), - /// WebTransport events + /// `WebTransport` events WebTransport(WebTransportEvent), } @@ -338,7 +337,7 @@ impl Http3ClientEvents { } pub fn has_push(&self, push_id: u64) -> bool { - for iter in self.events.borrow().iter() { + for iter in &*self.events.borrow() { if matches!(iter, Http3ClientEvent::PushPromise{push_id:x, ..} if *x == push_id) { return true; } diff --git a/neqo-http3/src/conn_params.rs b/neqo-http3/src/conn_params.rs index 1ba2a601ad..23a5d2cc67 100644 --- a/neqo-http3/src/conn_params.rs +++ b/neqo-http3/src/conn_params.rs @@ -4,9 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::cmp::min; + use neqo_qpack::QpackSettings; use neqo_transport::ConnectionParameters; -use std::cmp::min; const QPACK_MAX_TABLE_SIZE_DEFAULT: u64 = 65536; const QPACK_TABLE_SIZE_LIMIT: u64 = (1 << 30) - 1; @@ -53,6 +54,7 @@ impl Http3Parameters { } /// # Panics + /// /// The table size must be smaller than 1 << 30 by the spec. #[must_use] pub fn max_table_size_encoder(mut self, mut max_table: u64) -> Self { @@ -68,6 +70,7 @@ impl Http3Parameters { } /// # Panics + /// /// The table size must be smaller than 1 << 30 by the spec. 
#[must_use] pub fn max_table_size_decoder(mut self, mut max_table: u64) -> Self { diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index f2d0f28806..dd45797baa 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -6,41 +6,43 @@ #![allow(clippy::module_name_repetitions)] -use crate::control_stream_local::ControlStreamLocal; -use crate::control_stream_remote::ControlStreamRemote; -use crate::features::extended_connect::{ - webtransport_session::WebTransportSession, - webtransport_streams::{WebTransportRecvStream, WebTransportSendStream}, - ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectType, -}; -use crate::frames::HFrame; -use crate::push_controller::PushController; -use crate::qpack_decoder_receiver::DecoderRecvStream; -use crate::qpack_encoder_receiver::EncoderRecvStream; -use crate::recv_message::{RecvMessage, RecvMessageInfo}; -use crate::request_target::{AsRequestTarget, RequestTarget}; -use crate::send_message::SendMessage; -use crate::settings::{HSettingType, HSettings, HttpZeroRttChecker}; -use crate::stream_type_reader::NewStreamHeadReader; -use crate::{ - client_events::Http3ClientEvents, CloseType, Http3Parameters, Http3StreamType, - HttpRecvStreamEvents, NewStreamType, Priority, PriorityHandler, ReceiveOutput, RecvStream, - RecvStreamEvents, SendStream, SendStreamEvents, +use std::{ + cell::RefCell, + collections::{BTreeSet, HashMap}, + fmt::Debug, + mem, + rc::Rc, }; + use neqo_common::{qdebug, qerror, qinfo, qtrace, qwarn, Decoder, Header, MessageType, Role}; -use neqo_qpack::decoder::QPackDecoder; -use neqo_qpack::encoder::QPackEncoder; +use neqo_qpack::{decoder::QPackDecoder, encoder::QPackEncoder}; use neqo_transport::{ streams::SendOrder, AppError, Connection, ConnectionError, DatagramTracking, State, StreamId, StreamType, ZeroRttState, }; -use std::cell::RefCell; -use std::collections::{BTreeSet, HashMap}; -use std::fmt::Debug; -use std::mem; -use std::rc::Rc; -use crate::{Error, 
Res}; +use crate::{ + client_events::Http3ClientEvents, + control_stream_local::ControlStreamLocal, + control_stream_remote::ControlStreamRemote, + features::extended_connect::{ + webtransport_session::WebTransportSession, + webtransport_streams::{WebTransportRecvStream, WebTransportSendStream}, + ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectType, + }, + frames::HFrame, + push_controller::PushController, + qpack_decoder_receiver::DecoderRecvStream, + qpack_encoder_receiver::EncoderRecvStream, + recv_message::{RecvMessage, RecvMessageInfo}, + request_target::{AsRequestTarget, RequestTarget}, + send_message::SendMessage, + settings::{HSettingType, HSettings, HttpZeroRttChecker}, + stream_type_reader::NewStreamHeadReader, + CloseType, Error, Http3Parameters, Http3StreamType, HttpRecvStreamEvents, NewStreamType, + Priority, PriorityHandler, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, + SendStreamEvents, +}; pub(crate) struct RequestDescription<'b, 't, T> where @@ -79,8 +81,8 @@ enum Http3RemoteSettingsState { /// - `ZeroRtt`: 0-RTT has been enabled and is active /// - Connected /// - GoingAway(StreamId): The connection has received a `GOAWAY` frame -/// - Closing(ConnectionError): The connection is closed. The closing has been initiated by this -/// end of the connection, e.g., the `CONNECTION_CLOSE` frame has been sent. In this state, the +/// - Closing(ConnectionError): The connection is closed. The closing has been initiated by this end +/// of the connection, e.g., the `CONNECTION_CLOSE` frame has been sent. In this state, the /// connection waits a certain amount of time to retransmit the `CONNECTION_CLOSE` frame if /// needed. /// - Closed(ConnectionError): This is the final close state: closing has been initialized by the @@ -352,7 +354,7 @@ impl Http3Connection { /// This function creates and initializes, i.e. send stream type, the control and qpack /// streams. 
fn initialize_http3_connection(&mut self, conn: &mut Connection) -> Res<()> { - qinfo!([self], "Initialize the http3 connection."); + qdebug!([self], "Initialize the http3 connection."); self.control_stream_local.create(conn)?; self.send_settings(); @@ -384,7 +386,8 @@ impl Http3Connection { Ok(()) } - /// Inform a `HttpConnection` that a stream has data to send and that `send` should be called for the stream. + /// Inform an [`Http3Connection`] that a stream has data to send and that + /// [`SendStream::send`] should be called for the stream. pub fn stream_has_pending_data(&mut self, stream_id: StreamId) { self.streams_with_pending_data.insert(stream_id); } @@ -502,8 +505,8 @@ impl Http3Connection { /// stream and unidi stream that are still do not have a type. /// The function cannot handle: /// 1) a `Push(_)`, `Htttp` or `WebTransportStream(_)` stream - /// 2) frames `MaxPushId`, `PriorityUpdateRequest`, `PriorityUpdateRequestPush` or `Goaway` - /// must be handled by `Http3Client`/`Server`. + /// 2) frames `MaxPushId`, `PriorityUpdateRequest`, `PriorityUpdateRequestPush` or `Goaway` must + /// be handled by `Http3Client`/`Server`. /// The function returns `ReceiveOutput`. pub fn handle_stream_readable( &mut self, @@ -579,8 +582,8 @@ impl Http3Connection { Ok(()) } - /// This is called when `neqo_transport::Connection` state has been change to take proper actions in - /// the HTTP3 layer. + /// This is called when `neqo_transport::Connection` state has been change to take proper + /// actions in the HTTP3 layer. pub fn handle_state_change(&mut self, conn: &mut Connection, state: &State) -> Res { qdebug!([self], "Handle state change {:?}", state); match state { @@ -626,7 +629,8 @@ impl Http3Connection { } } - /// This is called when 0RTT has been reset to clear `send_streams`, `recv_streams` and settings. + /// This is called when 0RTT has been reset to clear `send_streams`, `recv_streams` and + /// settings. 
pub fn handle_zero_rtt_rejected(&mut self) -> Res<()> { if self.state == Http3State::ZeroRtt { self.state = Http3State::Initializing; @@ -700,7 +704,7 @@ impl Http3Connection { ); } NewStreamType::Decoder => { - qinfo!([self], "A new remote qpack encoder stream {}", stream_id); + qdebug!([self], "A new remote qpack encoder stream {}", stream_id); self.check_stream_exists(Http3StreamType::Decoder)?; self.recv_streams.insert( stream_id, @@ -711,7 +715,7 @@ impl Http3Connection { ); } NewStreamType::Encoder => { - qinfo!([self], "A new remote qpack decoder stream {}", stream_id); + qdebug!([self], "A new remote qpack decoder stream {}", stream_id); self.check_stream_exists(Http3StreamType::Encoder)?; self.recv_streams.insert( stream_id, @@ -762,7 +766,7 @@ impl Http3Connection { /// This is called when an application closes the connection. pub fn close(&mut self, error: AppError) { - qinfo!([self], "Close connection error {:?}.", error); + qdebug!([self], "Close connection error {:?}.", error); self.state = Http3State::Closing(ConnectionError::Application(error)); if (!self.send_streams.is_empty() || !self.recv_streams.is_empty()) && (error == 0) { qwarn!("close(0) called when streams still active"); @@ -774,16 +778,16 @@ impl Http3Connection { /// This function will not handle the output of the function completely, but only /// handle the indication that a stream is closed. There are 2 cases: /// - an error occurred or - /// - the stream is done, i.e. the second value in `output` tuple is true if - /// the stream is done and can be removed from the `recv_streams` + /// - the stream is done, i.e. the second value in `output` tuple is true if the stream is done + /// and can be removed from the `recv_streams` /// How it is handling `output`: /// - if the stream is done, it removes the stream from `recv_streams` /// - if the stream is not done and there is no error, return `output` and the caller will /// handle it. 
/// - in case of an error: - /// - if it is only a stream error and the stream is not critical, send `STOP_SENDING` - /// frame, remove the stream from `recv_streams` and inform the listener that the stream - /// has been reset. + /// - if it is only a stream error and the stream is not critical, send `STOP_SENDING` frame, + /// remove the stream from `recv_streams` and inform the listener that the stream has been + /// reset. /// - otherwise this is a connection error. In this case, propagate the error to the caller /// that will handle it properly. fn handle_stream_manipulation_output( @@ -831,9 +835,6 @@ impl Http3Connection { final_headers.push(Header::new(":protocol", conn_type.string())); } - if let Some(priority_header) = request.priority.header() { - final_headers.push(priority_header); - } final_headers.extend_from_slice(request.headers); Ok(final_headers) } @@ -861,7 +862,8 @@ impl Http3Connection { } fn create_bidi_transport_stream(&self, conn: &mut Connection) -> Res { - // Requests cannot be created when a connection is in states: Initializing, GoingAway, Closing and Closed. + // Requests cannot be created when a connection is in states: Initializing, GoingAway, + // Closing and Closed. match self.state() { Http3State::GoingAway(..) | Http3State::Closing(..) | Http3State::Closed(..) => { return Err(Error::AlreadyClosed) @@ -927,8 +929,9 @@ impl Http3Connection { )), ); - // Call immediately send so that at least headers get sent. This will make Firefox faster, since - // it can send request body immediately in most cases and does not need to do a complete process loop. + // Call immediately send so that at least headers get sent. This will make Firefox faster, + // since it can send request body immediately in most cases and does not need to do + // a complete process loop. self.send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)? 
@@ -936,18 +939,20 @@ impl Http3Connection { Ok(()) } - /// Stream data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Stream data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happens while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happens while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, conn: &mut Connection, stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self .recv_streams .get_mut(&stream_id) @@ -1004,7 +1009,9 @@ impl Http3Connection { } /// Set the stream `SendOrder`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if the stream id doesn't exist pub fn stream_set_sendorder( conn: &mut Connection, @@ -1018,7 +1025,9 @@ impl Http3Connection { /// Set the stream Fairness. Fair streams will share bandwidth with other /// streams of the same sendOrder group (or the unordered group). Unfair streams /// will give bandwidth preferentially to the lowest streamId with data to send. + /// /// # Errors + /// /// Returns `InvalidStreamId` if the stream id doesn't exist pub fn stream_set_fairness( conn: &mut Connection, @@ -1082,14 +1091,14 @@ impl Http3Connection { /// This is called when an application wants to close the sending side of a stream. 
pub fn stream_close_send(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> { - qinfo!([self], "Close the sending side for stream {}.", stream_id); + qdebug!([self], "Close the sending side for stream {}.", stream_id); debug_assert!(self.state.active()); let send_stream = self .send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)?; - // The following function may return InvalidStreamId from the transport layer if the stream has been closed - // already. It is ok to ignore it here. + // The following function may return InvalidStreamId from the transport layer if the stream + // has been closed already. It is ok to ignore it here. mem::drop(send_stream.close(conn)); if send_stream.done() { self.remove_send_stream(stream_id, conn); @@ -1184,7 +1193,8 @@ impl Http3Connection { .is_ok() { mem::drop(self.stream_close_send(conn, stream_id)); - // TODO issue 1294: add a timer to clean up the recv_stream if the peer does not do that in a short time. + // TODO issue 1294: add a timer to clean up the recv_stream if the peer does not + // do that in a short time. self.streams_with_pending_data.insert(stream_id); } else { self.cancel_fetch(stream_id, Error::HttpRequestRejected.code(), conn)?; @@ -1392,7 +1402,7 @@ impl Http3Connection { /// `PriorityUpdateRequestPush` which handling is specific to the client and server, we must /// give them to the specific client/server handler. fn handle_control_frame(&mut self, f: HFrame) -> Res> { - qinfo!([self], "Handle a control frame {:?}", f); + qdebug!([self], "Handle a control frame {:?}", f); if !matches!(f, HFrame::Settings { .. 
}) && !matches!( self.settings_state, @@ -1423,7 +1433,7 @@ impl Http3Connection { } fn handle_settings(&mut self, new_settings: HSettings) -> Res<()> { - qinfo!([self], "Handle SETTINGS frame."); + qdebug!([self], "Handle SETTINGS frame."); match &self.settings_state { Http3RemoteSettingsState::NotReceived => { self.set_qpack_settings(&new_settings)?; @@ -1571,8 +1581,8 @@ impl Http3Connection { for id in recv { qtrace!("Remove the extended connect sub receiver stream {}", id); - // Use CloseType::ResetRemote so that an event will be sent. CloseType::LocalError would have - // the same effect. + // Use CloseType::ResetRemote so that an event will be sent. CloseType::LocalError would + // have the same effect. if let Some(mut s) = self.recv_streams.remove(&id) { mem::drop(s.reset(CloseType::ResetRemote(Error::HttpRequestCancelled.code()))); } diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 8d0d78922a..4c8772d14a 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -4,16 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{ - client_events::{Http3ClientEvent, Http3ClientEvents}, - connection::{Http3Connection, Http3State, RequestDescription}, - frames::HFrame, - push_controller::{PushController, RecvPushEvents}, - recv_message::{RecvMessage, RecvMessageInfo}, - request_target::AsRequestTarget, - settings::HSettings, - Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, ReceiveOutput, +use std::{ + cell::RefCell, + fmt::{Debug, Display}, + iter, mem, + net::SocketAddr, + rc::Rc, + time::Instant, }; + use neqo_common::{ event::Provider as EventProvider, hex, hex_with_len, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Decoder, Encoder, Header, MessageType, Role, @@ -25,20 +24,21 @@ use neqo_transport::{ DatagramTracking, Output, RecvStreamStats, SendStreamStats, Stats as TransportStats, StreamId, StreamType, Version, ZeroRttState, }; -use std::{ - cell::RefCell, - convert::TryFrom, - fmt::{Debug, Display}, - mem, - net::SocketAddr, - rc::Rc, - time::Instant, -}; -use crate::{Error, Res}; +use crate::{ + client_events::{Http3ClientEvent, Http3ClientEvents}, + connection::{Http3Connection, Http3State, RequestDescription}, + frames::HFrame, + push_controller::{PushController, RecvPushEvents}, + recv_message::{RecvMessage, RecvMessageInfo}, + request_target::AsRequestTarget, + settings::HSettings, + Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, + ReceiveOutput, Res, +}; -// This is used for filtering send_streams and recv_Streams with a stream_ids greater than or equal a given id. -// Only the same type (bidirectional or unidirectionsl) streams are filtered. +// This is used for filtering send_streams and recv_Streams with a stream_ids greater than or equal +// a given id. Only the same type (bidirectional or unidirectionsl) streams are filtered. 
fn id_gte(base: StreamId) -> impl FnMut((&StreamId, &U)) -> Option + 'static where U: ?Sized, @@ -71,43 +71,43 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// The API is used for: /// - create and close an endpoint: -/// - [`new`](struct.Http3Client.html#method.new) -/// - [`new_with_conn`](struct.Http3Client.html#method.new_with_conn) -/// - [`close`](struct.Http3Client.html#method.close) +/// - [`Http3Client::new`] +/// - [`Http3Client::new_with_conn`] +/// - [`Http3Client::close`] /// - configuring an endpoint: -/// - [`authenticated`](struct.Http3Client.html#method.authenticated) -/// - [`enable_ech`](struct.Http3Client.html#method.enable_ech) -/// - [`enable_resumption`](struct.Http3Client.html#method.enable_resumption) -/// - [`initiate_key_update`](struct.Http3Client.html#method.initiate_key_update) -/// - [`set_qlog`](struct.Http3Client.html#method.set_qlog) +/// - [`Http3Client::authenticated`] +/// - [`Http3Client::enable_ech`] +/// - [`Http3Client::enable_resumption`] +/// - [`Http3Client::initiate_key_update`] +/// - [`Http3Client::set_qlog`] /// - retrieving information about a connection: -/// - [`peer_certificate`](struct.Http3Client.html#method.peer_certificate) -/// - [`qpack_decoder_stats`](struct.Http3Client.html#method.qpack_decoder_stats) -/// - [`qpack_encoder_stats`](struct.Http3Client.html#method.qpack_encoder_stats) -/// - [`transport_stats`](struct.Http3Client.html#method.transport_stats) -/// - [`state`](struct.Http3Client.html#method.state) -/// - [`take_resumption_token`](struct.Http3Client.html#method.take_resumption_token) -/// - [`tls_inf`](struct.Http3Client.html#method.tls_info) +/// - [`Http3Client::peer_certificate`] +/// - [`Http3Client::qpack_decoder_stats`] +/// - [`Http3Client::qpack_encoder_stats`] +/// - [`Http3Client::transport_stats`] +/// - [`Http3Client::state`] +/// - [`Http3Client::take_resumption_token`] +/// - [`Http3Client::tls_info`] /// - driving HTTP/3 session: -/// - 
[`process_output`](struct.Http3Client.html#method.process_output) -/// - [`process_input`](struct.Http3Client.html#method.process_input) -/// - [`process`](struct.Http3Client.html#method.process) +/// - [`Http3Client::process_output`] +/// - [`Http3Client::process_input`] +/// - [`Http3Client::process`] /// - create requests, send/receive data, and cancel requests: -/// - [`fetch`](struct.Http3Client.html#method.fetch) -/// - [`send_data`](struct.Http3Client.html#method.send_data) -/// - [`read_dara`](struct.Http3Client.html#method.read_data) -/// - [`stream_close_send`](struct.Http3Client.html#method.stream_close_send) -/// - [`cancel_fetch`](struct.Http3Client.html#method.cancel_fetch) -/// - [`stream_reset_send`](struct.Http3Client.html#method.stream_reset_send) -/// - [`stream_stop_sending`](struct.Http3Client.html#method.stream_stop_sending) -/// - [`set_stream_max_data`](struct.Http3Client.html#method.set_stream_max_data) +/// - [`Http3Client::fetch`] +/// - [`Http3Client::send_data`] +/// - [`Http3Client::read_data`] +/// - [`Http3Client::stream_close_send`] +/// - [`Http3Client::cancel_fetch`] +/// - [`Http3Client::stream_reset_send`] +/// - [`Http3Client::stream_stop_sending`] +/// - [`Http3Client::set_stream_max_data`] /// - priority feature: -/// - [`priority_update`](struct.Http3Client.html#method.priority_update) +/// - [`Http3Client::priority_update`] /// - `WebTransport` feature: -/// - [`webtransport_create_session`](struct.Http3Client.html#method.webtransport_create_session) -/// - [`webtransport_close_session`](struct.Http3Client.html#method.webtransport_close_session) -/// - [`webtransport_create_stream`](struct.Http3Client.html#method.webtransport_create_sstream) -/// - [`webtransport_enabled`](struct.Http3Client.html#method.webtransport_enabled) +/// - [`Http3Client::webtransport_create_session`] +/// - [`Http3Client::webtransport_close_session`] +/// - [`Http3Client::webtransport_create_stream`] +/// - [`Http3Client::webtransport_enabled`] /// 
/// ## Examples /// @@ -161,7 +161,7 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// } -///``` +/// ``` /// /// ### Creating a `WebTransport` session /// @@ -198,8 +198,7 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// } -/// -///``` +/// ``` /// /// ### `WebTransport`: create a stream, send and receive data on the stream /// @@ -287,7 +286,6 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// ``` -/// pub struct Http3Client { conn: Connection, base_handler: Http3Connection, @@ -303,8 +301,9 @@ impl Display for Http3Client { impl Http3Client { /// # Errors - /// Making a `neqo-transport::connection` may produce an error. This can only be a crypto error if - /// the crypto context can't be created or configured. + /// + /// Making a `neqo-transport::connection` may produce an error. This can only be a crypto error + /// if the crypto context can't be created or configured. pub fn new( server_name: impl Into, cid_manager: Rc>, @@ -391,6 +390,7 @@ impl Http3Client { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Fails when the configuration provided is bad. pub fn enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { self.conn.client_enable_ech(ech_config_list)?; @@ -399,6 +399,10 @@ impl Http3Client { /// Get the connection id, which is useful for disambiguating connections to /// the same origin. + /// + /// # Panics + /// + /// Never, because clients always have this field. #[must_use] pub fn connection_id(&self) -> &ConnectionId { self.conn.odcid().expect("Client always has odcid") @@ -431,21 +435,27 @@ impl Http3Client { .and_then(|t| self.encode_resumption_token(&t)) } - /// This may be call if an application has a resumption token. This must be called before connection starts. + /// This may be call if an application has a resumption token. This must be called before + /// connection starts. 
/// /// The resumption token also contains encoded HTTP/3 settings. The settings will be decoded /// and used until the setting are received from the server. /// /// # Errors + /// /// An error is return if token cannot be decoded or a connection is is a wrong state. + /// /// # Panics + /// /// On closing if the base handler can't handle it (debug only). pub fn enable_resumption(&mut self, now: Instant, token: impl AsRef<[u8]>) -> Res<()> { if self.base_handler.state != Http3State::Initializing { return Err(Error::InvalidState); } let mut dec = Decoder::from(token.as_ref()); - let Some(settings_slice) = dec.decode_vvec() else { return Err(Error::InvalidResumptionToken) }; + let Some(settings_slice) = dec.decode_vvec() else { + return Err(Error::InvalidResumptionToken); + }; qtrace!([self], " settings {}", hex_with_len(settings_slice)); let mut dec_settings = Decoder::from(settings_slice); let mut settings = HSettings::default(); @@ -495,7 +505,9 @@ impl Http3Client { } /// Attempt to force a key update. + /// /// # Errors + /// /// If the connection isn't confirmed, or there is an outstanding key update, this /// returns `Err(Error::TransportError(neqo_transport::Error::KeyUpdateBlocked))`. pub fn initiate_key_update(&mut self) -> Res<()> { @@ -508,9 +520,13 @@ impl Http3Client { /// The function fetches a resource using `method`, `target` and `headers`. A response body /// may be added by calling `send_data`. `stream_close_send` must be sent to finish the request /// even if request data are not sent. + /// /// # Errors + /// /// If a new stream cannot be created an error will be return. + /// /// # Panics + /// /// `SendMessage` implements `http_stream` so it will not panic. pub fn fetch<'x, 't: 'x, T>( &mut self, @@ -546,7 +562,9 @@ impl Http3Client { /// Send an [`PRIORITY_UPDATE`-frame][1] on next `Http3Client::process_output()` call. /// Returns if the priority got changed. 
+ /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist /// /// [1]: https://datatracker.ietf.org/doc/html/draft-kazuho-httpbis-priority-04#section-5.2 @@ -556,7 +574,9 @@ impl Http3Client { /// An application may cancel a stream(request). /// Both sides, the receiviing and sending side, sending and receiving side, will be closed. + /// /// # Errors + /// /// An error will be return if a stream does not exist. pub fn cancel_fetch(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "reset_stream {} error={}.", stream_id, error); @@ -565,15 +585,18 @@ impl Http3Client { } /// This is call when application is done sending a request. + /// /// # Errors + /// /// An error will be return if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> { - qinfo!([self], "Close sending side stream={}.", stream_id); + qdebug!([self], "Close sending side stream={}.", stream_id); self.base_handler .stream_close_send(&mut self.conn, stream_id) } /// # Errors + /// /// An error will be return if a stream does not exist. pub fn stream_reset_send(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "stream_reset_send {} error={}.", stream_id, error); @@ -582,6 +605,7 @@ impl Http3Client { } /// # Errors + /// /// An error will be return if a stream does not exist. pub fn stream_stop_sending(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "stream_stop_sending {} error={}.", stream_id, error); @@ -594,11 +618,13 @@ impl Http3Client { /// headers are supplied through the `fetch` function. /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) 
- /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub fn send_data(&mut self, stream_id: StreamId, buf: &[u8]) -> Res { qinfo!( [self], @@ -613,18 +639,20 @@ impl Http3Client { .send_data(&mut self.conn, buf) } - /// Response data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Response data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happen while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happen while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, now: Instant, stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self.base_handler.read_data(&mut self.conn, stream_id, buf); if let Err(e) = &res { if e.connection_error() { @@ -637,7 +665,9 @@ impl Http3Client { // API: Push streams /// Cancel a push + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn cancel_push(&mut self, push_id: u64) -> Res<()> { self.push_handler @@ -647,9 +677,11 @@ impl Http3Client { /// Push response data are read directly into a buffer supplied as a parameter of this function /// to avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist(`InvalidStreamId`) or an error has happened while - /// reading a stream, e.g. early close, protocol error, etc. 
+ /// + /// It returns an error if a stream does not exist(`InvalidStreamId`) or an error has happened + /// while reading a stream, e.g. early close, protocol error, etc. pub fn push_read_data( &mut self, now: Instant, @@ -666,8 +698,9 @@ impl Http3Client { } // API WebTransport - + // /// # Errors + /// /// If `WebTransport` cannot be created, e.g. the `WebTransport` support is /// not negotiated or the HTTP/3 connection is closed. pub fn webtransport_create_session<'x, 't: 'x, T>( @@ -695,11 +728,14 @@ impl Http3Client { } /// Close `WebTransport` cleanly + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub fn webtransport_close_session( &mut self, session_id: StreamId, @@ -711,6 +747,7 @@ impl Http3Client { } /// # Errors + /// /// This may return an error if the particular session does not exist /// or the connection is not in the active state. pub fn webtransport_create_stream( @@ -728,7 +765,9 @@ impl Http3Client { } /// Send `WebTransport` datagram. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. @@ -745,10 +784,14 @@ impl Http3Client { /// Returns the current max size of a datagram that can fit into a packet. 
/// The value will change over time depending on the encoded size of the - /// packet number, ack frames, etc. + /// packet number, ack frames, etc. + /// /// # Errors + /// /// The function returns `NotAvailable` if datagrams are not enabled. + /// /// # Panics + /// /// This cannot panic. The max varint length is 8. pub fn webtransport_max_datagram_size(&self, session_id: StreamId) -> Res { Ok(self.conn.max_datagram_size()? @@ -756,29 +799,39 @@ impl Http3Client { } /// Sets the `SendOrder` for a given stream + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. + /// /// # Panics + /// /// This cannot panic. pub fn webtransport_set_sendorder( &mut self, stream_id: StreamId, - sendorder: SendOrder, + sendorder: Option, ) -> Res<()> { - Http3Connection::stream_set_sendorder(&mut self.conn, stream_id, Some(sendorder)) + Http3Connection::stream_set_sendorder(&mut self.conn, stream_id, sendorder) } /// Sets the `Fairness` for a given stream + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. + /// /// # Panics + /// /// This cannot panic. pub fn webtransport_set_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { Http3Connection::stream_set_fairness(&mut self.conn, stream_id, fairness) } /// Returns the current `SendStreamStats` of a `WebTransportSendStream`. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn webtransport_send_stream_stats(&mut self, stream_id: StreamId) -> Res { self.base_handler @@ -789,7 +842,9 @@ impl Http3Client { } /// Returns the current `RecvStreamStats` of a `WebTransportRecvStream`. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn webtransport_recv_stream_stats(&mut self, stream_id: StreamId) -> Res { self.base_handler @@ -800,7 +855,7 @@ impl Http3Client { } /// This function combines `process_input` and `process_output` function. 
- pub fn process(&mut self, dgram: Option, now: Instant) -> Output { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { qtrace!([self], "Process."); if let Some(d) = dgram { self.process_input(d, now); @@ -818,17 +873,21 @@ impl Http3Client { /// packets need to be sent or if a timer needs to be updated. /// /// [1]: ../neqo_transport/enum.ConnectionEvent.html - pub fn process_input(&mut self, dgram: Datagram, now: Instant) { - qtrace!([self], "Process input."); - self.conn.process_input(dgram, now); - self.process_http3(now); + pub fn process_input(&mut self, dgram: &Datagram, now: Instant) { + self.process_multiple_input(iter::once(dgram), now); } - /// This should not be used because it gives access to functionalities that may disrupt the - /// proper functioning of the HTTP/3 session. - /// Only used by `neqo-interop`. - pub fn conn(&mut self) -> &mut Connection { - &mut self.conn + pub fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + { + let mut dgrams = dgrams.into_iter().peekable(); + qtrace!([self], "Process multiple datagrams"); + if dgrams.peek().is_none() { + return; + } + self.conn.process_multiple_input(dgrams, now); + self.process_http3(now); } /// Process HTTP3 layer. @@ -865,19 +924,20 @@ impl Http3Client { /// /// `process_output` can return: /// - a [`Output::Datagram(Datagram)`][1]: data that should be sent as a UDP payload, - /// - a [`Output::Callback(Duration)`][1]: the duration of a timer. `process_output` should be called at least after the time expires, + /// - a [`Output::Callback(Duration)`][1]: the duration of a timer. `process_output` should be + /// called at least after the time expires, /// - [`Output::None`][1]: this is returned when `Nttp3Client` is done and can be destroyed. /// /// The application should call this function repeatedly until a timer value or None is /// returned. 
After that, the application should call the function again if a new UDP packet is /// received and processed or the timer value expires. /// - /// The HTTP/3 neqo implementation drives the HTTP/3 and QUC layers, therefore this function + /// The HTTP/3 neqo implementation drives the HTTP/3 and QUIC layers, therefore this function /// will call both layers: /// - First it calls HTTP/3 layer processing (`process_http3`) to make sure the layer writes /// data to QUIC layer or cancels streams if needed. /// - Then QUIC layer processing is called - [`Connection::process_output`][3]. This produces a - /// packet or a timer value. It may also produce ned [`ConnectionEvent`][2]s, e.g. connection + /// packet or a timer value. It may also produce new [`ConnectionEvent`][2]s, e.g. connection /// state-change event. /// - Therefore the HTTP/3 layer processing (`process_http3`) is called again. /// @@ -920,14 +980,14 @@ impl Http3Client { } } - /// This function checks [`ConnectionEvent`][2]s emitted by the QUIC layer, e.g. connection change - /// state events, new incoming stream data is available, a stream is was reset, etc. The HTTP/3 - /// layer needs to handle these events. Most of the events are handled by + /// This function checks [`ConnectionEvent`][2]s emitted by the QUIC layer, e.g. connection + /// change state events, new incoming stream data is available, a stream is was reset, etc. + /// The HTTP/3 layer needs to handle these events. Most of the events are handled by /// [`Http3Connection`][1] by calling appropriate functions, e.g. `handle_state_change`, /// `handle_stream_reset`, etc. [`Http3Connection`][1] handle functionalities that are common /// for the client and server side. Some of the functionalities are specific to the client and - /// they are handled by `Http3Client`. For example, [`ConnectionEvent::RecvStreamReadable`][3] event - /// is handled by `Http3Client::handle_stream_readable`. The function calls + /// they are handled by `Http3Client`. 
For example, [`ConnectionEvent::RecvStreamReadable`][3] + /// event is handled by `Http3Client::handle_stream_readable`. The function calls /// `Http3Connection::handle_stream_readable` and then hands the return value as appropriate /// for the client-side. /// @@ -940,11 +1000,11 @@ impl Http3Client { qdebug!([self], "check_connection_events - event {:?}.", e); match e { ConnectionEvent::NewStream { stream_id } => { - // During this event we only add a new stream to the Http3Connection stream list, - // with NewStreamHeadReader stream handler. + // During this event we only add a new stream to the Http3Connection stream + // list, with NewStreamHeadReader stream handler. // This function will not read from the stream and try to decode the stream. - // RecvStreamReadable will be emitted after this event and reading, i.e. decoding - // of a stream will happen during that event. + // RecvStreamReadable will be emitted after this event and reading, i.e. + // decoding of a stream will happen during that event. self.base_handler.add_new_stream(stream_id); } ConnectionEvent::SendStreamWritable { stream_id } => { @@ -1018,12 +1078,12 @@ impl Http3Client { /// - `ReceiveOutput::NewStream(NewStreamType::WebTransportStream(_))` - because /// `Http3ClientEvents`is needed and events handler is specific to the client. /// - `ReceiveOutput::ControlFrames(control_frames)` - some control frame handling differs - /// between the client and the server: + /// between the client and the server: /// - `HFrame::CancelPush` - only the client-side may receive it, /// - `HFrame::MaxPushId { .. }`, `HFrame::PriorityUpdateRequest { .. } ` and - /// `HFrame::PriorityUpdatePush` can only be receive on the server side, + /// `HFrame::PriorityUpdatePush` can only be receive on the server side, /// - `HFrame::Goaway { stream_id }` needs specific handling by the client by the protocol - /// specification. + /// specification. 
/// /// [1]: https://github.com/mozilla/neqo/blob/main/neqo-http3/src/connection.rs fn handle_stream_readable(&mut self, stream_id: StreamId) -> Res<()> { @@ -1176,7 +1236,9 @@ impl Http3Client { } /// Increases `max_stream_data` for a `stream_id`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. pub fn set_stream_max_data(&mut self, stream_id: StreamId, max_data: u64) -> Res<()> { @@ -1223,6 +1285,21 @@ impl EventProvider for Http3Client { #[cfg(test)] mod tests { + use std::{mem, time::Duration}; + + use neqo_common::{event::Provider, qtrace, Datagram, Decoder, Encoder}; + use neqo_crypto::{AllowZeroRtt, AntiReplay, ResumptionToken}; + use neqo_qpack::{encoder::QPackEncoder, QpackSettings}; + use neqo_transport::{ + ConnectionError, ConnectionEvent, ConnectionParameters, Output, State, StreamId, + StreamType, Version, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE, + }; + use test_fixture::{ + anti_replay, default_server_h3, fixture_init, new_server, now, + CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, DEFAULT_KEYS, + DEFAULT_SERVER_NAME, + }; + use super::{ AuthenticationStatus, Connection, Error, HSettings, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Rc, RefCell, @@ -1233,18 +1310,6 @@ mod tests { settings::{HSetting, HSettingType, H3_RESERVED_SETTINGS}, Http3Server, Priority, RecvStream, }; - use neqo_common::{event::Provider, qtrace, Datagram, Decoder, Encoder}; - use neqo_crypto::{AllowZeroRtt, AntiReplay, ResumptionToken}; - use neqo_qpack::{encoder::QPackEncoder, QpackSettings}; - use neqo_transport::{ - ConnectionError, ConnectionEvent, ConnectionParameters, Output, State, StreamId, - StreamType, Version, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE, - }; - use std::{convert::TryFrom, mem, time::Duration}; - use test_fixture::{ - addr, anti_replay, default_server_h3, fixture_init, new_server, now, - CountingConnectionIdGenerator, DEFAULT_ALPN_H3, DEFAULT_KEYS, 
DEFAULT_SERVER_NAME, - }; fn assert_closed(client: &Http3Client, expected: &Error) { match client.state() { @@ -1265,8 +1330,8 @@ mod tests { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, Http3Parameters::default() .connection_parameters( // Disable compatible upgrade, which complicates tests. @@ -1576,11 +1641,11 @@ mod tests { assert_eq!(client.state(), Http3State::Initializing); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); - let out = server.conn.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); @@ -1593,12 +1658,14 @@ mod tests { fn connect_only_transport_with(client: &mut Http3Client, server: &mut TestServer) { let out = handshake_only(client, server); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); assert_eq!(client.state(), Http3State::Connected); - server.conn.process_input(out.dgram().unwrap(), now()); + server + .conn + .process_input(out.as_dgram_ref().unwrap(), now()); assert!(server.conn.state().connected()); } @@ -1612,8 +1679,10 @@ mod tests { fn send_and_receive_client_settings(client: &mut Http3Client, server: &mut TestServer) { // send and receive client settings - let dgram = client.process(None, now()).dgram(); - server.conn.process_input(dgram.unwrap(), now()); + let out = client.process(None, now()); + server + .conn + 
.process_input(out.as_dgram_ref().unwrap(), now()); server.check_client_control_qpack_streams_no_resumption(); } @@ -1627,8 +1696,8 @@ mod tests { server.create_qpack_streams(); // Send the server's control and qpack streams data. - let dgram = server.conn.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let out = server.conn.process(None, now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); // assert no error occured. assert_eq!(client.state(), Http3State::Connected); @@ -1688,8 +1757,8 @@ mod tests { 0x43, 0xd3, 0xc1, ]; - // For fetch request fetch("GET", "https", "something.com", "/", &[(String::from("myheaders", "myvalue"))]) - // the following request header frame will be sent: + // For fetch request fetch("GET", "https", "something.com", "/", &[(String::from("myheaders", + // "myvalue"))]) the following request header frame will be sent: const EXPECTED_REQUEST_HEADER_FRAME_VERSION2: &[u8] = &[ 0x01, 0x11, 0x02, 0x80, 0xd1, 0xd7, 0x50, 0x89, 0x41, 0xe9, 0x2a, 0x67, 0x35, 0x53, 0x2e, 0x43, 0xd3, 0xc1, 0x10, @@ -1697,8 +1766,8 @@ mod tests { const HTTP_HEADER_FRAME_0: &[u8] = &[0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30]; - // The response header from HTTP_HEADER_FRAME (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30) are - // decoded into: + // The response header from HTTP_HEADER_FRAME (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30) + // are decoded into: fn check_response_header_0(header: &[Header]) { let expected_response_header_0 = &[ Header::new(":status", "200"), @@ -1770,8 +1839,10 @@ mod tests { ) -> StreamId { let request_stream_id = make_request(client, close_sending_side, &[]); - let dgram = client.process(None, now()).dgram(); - server.conn.process_input(dgram.unwrap(), now()); + let out = client.process(None, now()); + server + .conn + .process_input(out.as_dgram_ref().unwrap(), now()); // find the new request/response stream and send frame v on it. 
while let Some(e) = server.conn.next_event() { @@ -1801,7 +1872,7 @@ mod tests { } let dgram = server.conn.process_output(now()).dgram(); if let Some(d) = dgram { - client.process_input(d, now()); + client.process_input(&d, now()); } request_stream_id } @@ -1830,8 +1901,8 @@ mod tests { server.conn.stream_close_send(stream_id).unwrap(); } let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); } const PUSH_PROMISE_DATA: &[u8] = &[ @@ -1869,8 +1940,8 @@ mod tests { let push_stream_id = send_push_data(&mut server.conn, push_id, close_push_stream); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); push_stream_id } @@ -1884,8 +1955,8 @@ mod tests { send_push_promise(&mut server.conn, stream_id, push_id); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); } fn send_cancel_push_and_exchange_packets( @@ -1902,8 +1973,8 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); } const PUSH_DATA: &[u8] = &[ @@ -1915,7 +1986,7 @@ mod tests { // The response header from PUSH_DATA (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x34) are // decoded into: fn check_push_response_header(header: &[Header]) { - let expected_push_response_header 
= vec![ + let expected_push_response_header = [ Header::new(":status", "200"), Header::new("content-length", "4"), ]; @@ -2070,7 +2141,7 @@ mod tests { .stream_close_send(server.control_stream_id.unwrap()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2084,7 +2155,7 @@ mod tests { .stream_reset_send(server.control_stream_id.unwrap(), Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2098,7 +2169,7 @@ mod tests { .stream_reset_send(server.encoder_stream_id.unwrap(), Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2112,7 +2183,7 @@ mod tests { .stream_reset_send(server.decoder_stream_id.unwrap(), Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2126,7 +2197,7 @@ mod tests { .stream_stop_sending(CLIENT_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2140,7 +2211,7 @@ mod tests { .stream_stop_sending(CLIENT_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2154,7 +2225,7 @@ mod tests { 
.stream_stop_sending(CLIENT_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpClosedCriticalStream); } @@ -2171,7 +2242,7 @@ mod tests { .stream_send(control_stream, &[0x0, 0x1, 0x3, 0x0, 0x1, 0x2]); assert_eq!(sent, Ok(6)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpMissingSettings); } @@ -2187,7 +2258,7 @@ mod tests { ); assert_eq!(sent, Ok(8)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2201,7 +2272,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2250,8 +2321,8 @@ mod tests { _ = server.conn.stream_send(push_stream_id, v).unwrap(); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2310,8 +2381,8 @@ mod tests { .stream_send(new_stream_id, &[0x41, 0x19, 0x4, 0x4, 0x6, 0x0, 0x8, 0x0]) .unwrap(); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // check for stop-sending with Error::HttpStreamCreation. 
let mut stop_sending_event_found = false; @@ -2339,7 +2410,7 @@ mod tests { // Generate packet with the above bad h3 input let out = server.conn.process(None, now()); // Process bad input and close the connection. - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); assert_closed(&client, &Error::HttpFrameUnexpected); } @@ -2386,38 +2457,38 @@ mod tests { let mut sent = server.conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // start sending SETTINGS frame sent = server.conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x6]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x8]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); @@ -2425,37 +2496,37 @@ mod tests { sent = 
server.conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x61]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x62]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x63]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); sent = server.conn.stream_send(control_stream, &[0x64]); assert_eq!(sent, Ok(1)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // PUSH_PROMISE on a control stream will cause an error assert_closed(&client, &Error::HttpFrameUnexpected); @@ -2463,7 +2534,8 @@ mod tests { #[test] fn fetch_basic() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. 
let (mut client, mut server, request_stream_id) = connect_and_send_request(true); // send response - 200 Content-Length: 7 @@ -2531,14 +2603,14 @@ mod tests { let d1 = dgram(&mut client.conn); let d2 = dgram(&mut client.conn); - server.conn.process_input(d2, now()); - server.conn.process_input(d1, now()); + server.conn.process_input(&d2, now()); + server.conn.process_input(&d1, now()); let d3 = dgram(&mut server.conn); let d4 = dgram(&mut server.conn); - client.process_input(d4, now()); - client.process_input(d3, now()); + client.process_input(&d4, now()); + client.process_input(&d3, now()); let ack = client.process_output(now()).dgram(); - server.conn.process_input(ack.unwrap(), now()); + server.conn.process_input(&ack.unwrap(), now()); } /// The client should keep a connection alive if it has unanswered requests. @@ -2558,7 +2630,7 @@ mod tests { request_stream_id: StreamId, ) { let out = server.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); while let Some(e) = client.next_event() { match e { @@ -2603,7 +2675,8 @@ mod tests { // Send a request with the request body. #[test] fn fetch_with_data() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2614,7 +2687,7 @@ mod tests { client.stream_close_send(request_stream_id).unwrap(); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // find the new request/response stream and send response on it. 
while let Some(e) = server.conn.next_event() { @@ -2645,9 +2718,11 @@ mod tests { read_response(&mut client, &mut server.conn, request_stream_id); } - // send a request with request body containing request_body. We expect to receive expected_data_frame_header. + // send a request with request body containing request_body. We expect to receive + // expected_data_frame_header. fn fetch_with_data_length_xbytes(request_body: &[u8], expected_data_frame_header: &[u8]) { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2662,8 +2737,8 @@ mod tests { // We need to loop a bit until all data has been sent. let mut out = client.process(None, now()); for _i in 0..20 { - out = server.conn.process(out.dgram(), now()); - out = client.process(out.dgram(), now()); + out = server.conn.process(out.as_dgram_ref(), now()); + out = client.process(out.as_dgram_ref(), now()); } // check request body is received. @@ -2733,7 +2808,8 @@ mod tests { expected_second_data_frame_header: &[u8], expected_second_data_frame: &[u8], ) { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2755,11 +2831,11 @@ mod tests { // We need to loop a bit until all data has been sent. Once for every 1K // of data. 
for _i in 0..SEND_BUFFER_SIZE / 1000 { - out = server.conn.process(out.dgram(), now()); - out = client.process(out.dgram(), now()); + out = server.conn.process(out.as_dgram_ref(), now()); + out = client.process(out.as_dgram_ref(), now()); } - // check received frames and send a response. + // Check received frames and send a response. while let Some(e) = server.conn.next_event() { if let ConnectionEvent::RecvStreamReadable { stream_id } = e { if stream_id == request_stream_id { @@ -2848,7 +2924,8 @@ mod tests { } // Send 2 frames. For the second one we can only send 16383 bytes. - // After the first frame there is exactly 16383+4 bytes left in the send buffer, but we can only send 16383 bytes. + // After the first frame there is exactly 16383+4 bytes left in the send buffer, but we can only + // send 16383 bytes. #[test] fn fetch_two_data_frame_second_16383bytes_place_for_16387() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16410); @@ -2856,7 +2933,8 @@ mod tests { } // Send 2 frames. For the second one we can only send 16383 bytes. - // After the first frame there is exactly 16383+5 bytes left in the send buffer, but we can only send 16383 bytes. + // After the first frame there is exactly 16383+5 bytes left in the send buffer, but we can only + // send 16383 bytes. #[test] fn fetch_two_data_frame_second_16383bytes_place_for_16388() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16411); @@ -2864,7 +2942,8 @@ mod tests { } // Send 2 frames. For the second one we can send 16384 bytes. - // After the first frame there is exactly 16384+5 bytes left in the send buffer, but we can send 16384 bytes. + // After the first frame there is exactly 16384+5 bytes left in the send buffer, but we can send + // 16384 bytes. #[test] fn fetch_two_data_frame_second_16384bytes_place_for_16389() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16412); @@ -2874,7 +2953,8 @@ mod tests { // Test receiving STOP_SENDING with the HttpNoError error code. 
#[test] fn test_stop_sending_early_response() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with early_response. @@ -2951,7 +3031,8 @@ mod tests { // Server sends stop sending and reset. #[test] fn test_stop_sending_other_error_with_reset() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with RequestRejected. @@ -2970,7 +3051,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut reset = false; let mut stop_sending = false; @@ -3014,7 +3095,8 @@ mod tests { // Server sends stop sending with RequestRejected, but it does not send reset. #[test] fn test_stop_sending_other_error_wo_reset() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with RequestRejected. @@ -3026,7 +3108,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut stop_sending = false; @@ -3061,7 +3143,8 @@ mod tests { // in client.events. The events will be removed. #[test] fn test_stop_sending_and_reset_other_error_with_events() { - // Connect exchange headers and send a request. 
Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // send response - 200 Content-Length: 3 @@ -3090,7 +3173,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut reset = false; @@ -3134,7 +3217,8 @@ mod tests { // The events will be removed. #[test] fn test_stop_sending_other_error_with_events() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // send response - 200 Content-Length: 3 @@ -3157,7 +3241,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut stop_sending = false; let mut header_ready = false; @@ -3197,7 +3281,8 @@ mod tests { // Server sends a reset. We will close sending side as well. #[test] fn test_reset_wo_stop_sending() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Send a reset. 
@@ -3209,7 +3294,7 @@ mod tests { ); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut reset = false; @@ -3299,7 +3384,7 @@ mod tests { assert_eq!(request_stream_id_3, 8); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); _ = server .conn @@ -3321,7 +3406,7 @@ mod tests { } } let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let mut stream_reset = false; while let Some(e) = client.next_event() { @@ -3383,7 +3468,7 @@ mod tests { assert_eq!(request_stream_id_3, 8); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // First send a Goaway frame with an higher number _ = server @@ -3392,7 +3477,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Check that there is one reset for stream_id 8 let mut stream_reset_1 = 0; @@ -3478,7 +3563,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::GoingAway(StreamId::new(4))); @@ -3489,7 +3574,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpGeneralProtocol); } @@ -3504,7 +3589,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpId); } @@ -3517,7 +3602,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = 
server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv HeaderReady wo headers with fin. let e = client.events().next().unwrap(); @@ -3615,7 +3700,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv DataReadable wo data with fin while let Some(e) = client.next_event() { @@ -3662,7 +3747,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv HeaderReady with fin. while let Some(e) = client.next_event() { @@ -3713,7 +3798,7 @@ mod tests { .unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv headers wo fin while let Some(e) = client.next_event() { @@ -3740,7 +3825,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Recv no data, but do get fin while let Some(e) = client.next_event() { @@ -3810,7 +3895,7 @@ mod tests { // ok NOW send fin server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // fin wo data should generate DataReadable let e = client.events().next().unwrap(); @@ -3857,7 +3942,7 @@ mod tests { ); } x => { - panic!("event {:?}", x); + panic!("event {x:?}"); } } @@ -3903,7 +3988,7 @@ mod tests { assert!(fin); } x => { - panic!("event {:?}", x); + panic!("event {x:?}"); } } // Stream should now be closed and gone @@ -3934,7 +4019,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder 
instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()); // Send response @@ -3955,10 +4041,10 @@ mod tests { assert!(!client.events().any(header_ready_event)); // Let client receive the encoder instructions. - mem::drop(client.process(encoder_inst_pkt.dgram(), now())); + mem::drop(client.process(encoder_inst_pkt.as_dgram_ref(), now())); let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); mem::drop(client.process(None, now())); let mut recv_header = false; @@ -3975,7 +4061,7 @@ mod tests { assert_eq!(stream_id, request_stream_id); } x => { - panic!("event {:?}", x); + panic!("event {x:?}"); } } } @@ -4002,7 +4088,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()); let mut d = Encoder::default(); @@ -4020,7 +4107,7 @@ mod tests { assert!(!hconn.events().any(header_ready_event)); // Let client receive the encoder instructions. - let _out = hconn.process(encoder_inst_pkt.dgram(), now()); + let _out = hconn.process(encoder_inst_pkt.as_dgram_ref(), now()); let mut recv_header = false; // Now the stream is unblocked. After headers we will receive a fin. 
@@ -4038,7 +4125,7 @@ mod tests { assert!(!interim); recv_header = true; } else { - panic!("event {:?}", e); + panic!("event {e:?}"); } } assert!(recv_header); @@ -4048,7 +4135,7 @@ mod tests { server.send_ticket(now(), &[]).expect("can send ticket"); let out = server.process_output(now()); assert!(out.as_dgram_ref().is_some()); - client.process_input(out.dgram().unwrap(), now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); // We do not have a token so we need to wait for a resumption token timer to trigger. client.process_output(now() + Duration::from_millis(250)); assert_eq!(client.state(), Http3State::Connected); @@ -4092,7 +4179,7 @@ mod tests { assert_eq!(client.state(), Http3State::ZeroRtt); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // Check that control and qpack streams are received and a // SETTINGS frame has been received. @@ -4105,10 +4192,10 @@ mod tests { ); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert!(server.conn.state().connected()); assert!(client.tls_info().unwrap().resumed()); @@ -4127,7 +4214,7 @@ mod tests { assert_eq!(client.state(), Http3State::ZeroRtt); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // Check that control and qpack streams are received and a // SETTINGS frame has been received. 
@@ -4140,11 +4227,11 @@ mod tests { ); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); assert!(server.conn.state().connected()); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); // After the server has been connected, send a response. @@ -4211,9 +4298,9 @@ mod tests { let client_0rtt = client.process(None, now()); assert!(client_0rtt.as_dgram_ref().is_some()); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // Should produce ServerHello etc... - let server_ignored = server.process(client_0rtt.dgram(), now()); + let server_ignored = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_ignored.as_dgram_ref().is_none()); // The server shouldn't receive that 0-RTT data. @@ -4221,7 +4308,7 @@ mod tests { assert!(!server.events().any(recvd_stream_evt)); // Client should get a rejection. - let client_out = client.process(server_hs.dgram(), now()); + let client_out = client.process(server_hs.as_dgram_ref(), now()); assert!(client_out.as_dgram_ref().is_some()); let recvd_0rtt_reject = |e| e == Http3ClientEvent::ZeroRttRejected; assert!(client.events().any(recvd_0rtt_reject)); @@ -4232,7 +4319,7 @@ mod tests { assert_eq!(res.unwrap_err(), Error::InvalidStreamId); // Client will send Setting frame and open new qpack streams. 
- mem::drop(server.process(client_out.dgram(), now())); + mem::drop(server.process(client_out.as_dgram_ref(), now())); TestServer::new_with_conn(server).check_client_control_qpack_streams_no_resumption(); // Check that we can send a request and that the stream_id starts again from 0. @@ -4263,7 +4350,7 @@ mod tests { assert_eq!(client.state(), Http3State::ZeroRtt); assert_eq!(*server.conn.state(), State::Init); - let out = server.conn.process(out.dgram(), now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // Check that control and qpack streams anda SETTINGS frame are received. // Also qpack encoder stream will send "change capacity" instruction because it has @@ -4275,10 +4362,10 @@ mod tests { ); assert_eq!(*server.conn.state(), State::Handshaking); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert_eq!(client.state(), Http3State::Connected); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert!(server.conn.state().connected()); assert!(client.tls_info().unwrap().resumed()); @@ -4294,7 +4381,7 @@ mod tests { assert_eq!(sent.unwrap(), enc.len()); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_eq!(&client.state(), expected_client_state); assert!(server.conn.state().connected()); @@ -4654,7 +4741,7 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); let events: Vec = client.events().collect(); @@ -4766,7 +4853,8 @@ mod tests { #[test] fn no_data_ready_events_after_fin() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. 
let (mut client, mut server, request_stream_id) = connect_and_send_request(true); // send response - 200 Content-Length: 7 @@ -4869,7 +4957,7 @@ mod tests { _ = server.conn.stream_send(request_stream_id, &[0, 0]).unwrap(); server.conn.stream_close_send(request_stream_id).unwrap(); let dgram = server.conn.process_output(now()).dgram(); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let data_readable_event = |e: &_| matches!(e, Http3ClientEvent::DataReadable { stream_id } if *stream_id == request_stream_id); assert_eq!(client.events().filter(data_readable_event).count(), 1); @@ -4893,7 +4981,7 @@ mod tests { server.create_control_stream(); // Send the server's control stream data. let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); server.create_qpack_streams(); let qpack_pkt1 = server.conn.process(None, now()); @@ -4901,7 +4989,7 @@ mod tests { let request_stream_id = make_request(&mut client, true, &[]); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); setup_server_side_encoder(&mut client, &mut server); @@ -4921,7 +5009,7 @@ mod tests { // Send the encoder instructions, let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); // Send response let mut d = Encoder::default(); @@ -4936,13 +5024,13 @@ mod tests { server.conn.stream_close_send(request_stream_id).unwrap(); let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); let header_ready_event = |e| matches!(e, Http3ClientEvent::HeaderReady { .. }); assert!(!client.events().any(header_ready_event)); // Let client receive the encoder instructions. 
- mem::drop(client.process(qpack_pkt1.dgram(), now())); + mem::drop(client.process(qpack_pkt1.as_dgram_ref(), now())); assert!(client.events().any(header_ready_event)); } @@ -4979,7 +5067,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); } @@ -5017,8 +5106,8 @@ mod tests { // Reading push data will stop the client from being idle. _ = send_push_data(&mut server.conn, 0, false); - let dgram = server.conn.process_output(now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let out = server.conn.process_output(now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); let mut buf = [0; 16]; let (read, fin) = client.push_read_data(now(), 0, &mut buf).unwrap(); @@ -5070,7 +5159,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. 
assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.cancel_push(1), Err(Error::InvalidStreamId)); } @@ -5327,7 +5417,7 @@ mod tests { assert_eq!(request_stream_id_2, 4); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id_2, 5); @@ -5363,7 +5453,7 @@ mod tests { assert_eq!(request_stream_id_2, 4); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id_2, 5); @@ -5411,7 +5501,7 @@ mod tests { assert_eq!(request_stream_id_2, 4); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id_2, 5); @@ -5420,7 +5510,7 @@ mod tests { assert!(!client.events().any(push_event)); } - // Test that max_push_id is enforced when a push promise frame is received. + // Test that max_push_id is enforced when a push promise frame is received. 
#[test] fn exceed_max_push_id_promise() { // Connect and send a request @@ -5504,7 +5594,7 @@ mod tests { ); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check max_push_id frame has been received let control_stream_readable = @@ -5522,8 +5612,8 @@ mod tests { send_push_data(&mut server.conn, 8, true); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); assert_eq!(client.state(), Http3State::Connected); @@ -5584,7 +5674,8 @@ mod tests { ))); } - // Test CANCEL_PUSH frame: after cancel push any new PUSH_PROMISE or push stream will be ignored. + // Test CANCEL_PUSH frame: after cancel push any new PUSH_PROMISE or push stream will be + // ignored. #[test] fn cancel_push_ignore_promise() { // Connect and send a request @@ -5600,7 +5691,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5629,7 +5721,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. 
@@ -5657,7 +5750,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5670,7 +5764,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); } - // Test a push stream reset after a new PUSH_PROMISE or/and push stream. The events will be ignored. + // Test a push stream reset after a new PUSH_PROMISE or/and push stream. The events will be + // ignored. #[test] fn cancel_push_stream_after_push_promise_and_push_stream() { // Connect and send a request @@ -5685,13 +5780,14 @@ mod tests { .conn .stream_reset_send(push_stream_id, Error::HttpRequestCancelled.code()) .unwrap(); - let out = server.conn.process(None, now()).dgram(); - client.process(out, now()); + let out = server.conn.process(None, now()); + client.process(out.as_dgram_ref(), now()); // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); @@ -5711,15 +5807,16 @@ mod tests { .conn .stream_reset_send(push_stream_id, Error::HttpRequestCancelled.code()) .unwrap(); - let out = server.conn.process(None, now()).dgram(); - client.process(out, now()); + let out = server.conn.process(None, now()); + client.process(out.as_dgram_ref(), now()); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id, 0); // Assert that we do not have any push event. 
assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); @@ -5738,13 +5835,15 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); } - // Test that push_promise and push data events will be removed after application calls cancel_push. + // Test that push_promise and push data events will be removed after application calls + // cancel_push. #[test] fn app_cancel_push_after_push_promise_and_push_stream() { // Connect and send a request @@ -5755,13 +5854,14 @@ mod tests { send_push_data_and_exchange_packets(&mut client, &mut server, 0, false); assert!(client.cancel_push(0).is_ok()); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. 
@@ -5785,15 +5885,16 @@ mod tests { send_push_data_and_exchange_packets(&mut client, &mut server, 0, false); assert!(client.cancel_push(0).is_ok()); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); send_push_promise_and_exchange_packets(&mut client, &mut server, request_stream_id, 0); // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5827,7 +5928,7 @@ mod tests { .send_encoder_updates(&mut server.conn) .unwrap(); let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); } fn setup_server_side_encoder(client: &mut Http3Client, server: &mut TestServer) { @@ -5875,7 +5976,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()).dgram(); assert!(encoder_inst_pkt.is_some()); @@ -5899,7 +6001,7 @@ mod tests { assert!(!check_push_events(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt, now()); + let _out = client.process(encoder_inst_pkt.as_ref(), now()); // PushPromise is blocked wathing for encoder instructions. 
assert!(check_push_events(&mut client)); @@ -5939,7 +6041,7 @@ mod tests { assert!(check_data_readable(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt, now()); + let _out = client.process(encoder_inst_pkt.as_ref(), now()); // PushPromise is blocked wathing for encoder instructions. assert!(check_push_events(&mut client)); @@ -5981,7 +6083,7 @@ mod tests { assert!(check_header_ready(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt, now()); + let _out = client.process(encoder_inst_pkt.as_ref(), now()); // PushPromise is blocked wathing for encoder instructions. assert!(check_push_events(&mut client)); @@ -6002,7 +6104,7 @@ mod tests { .send_and_insert(&mut server.conn, b"content-length", b"1234") .unwrap(); let encoder_inst_pkt1 = server.conn.process(None, now()).dgram(); - let _out = client.process(encoder_inst_pkt1, now()); + let _out = client.process(encoder_inst_pkt1.as_ref(), now()); // Send a PushPromise that is blocked until encoder_inst_pkt2 is process by the client. let encoder_inst_pkt2 = @@ -6037,7 +6139,7 @@ mod tests { assert!(!check_header_ready(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt2, now()); + let _out = client.process(encoder_inst_pkt2.as_ref(), now()); // The response headers are blocked. assert!(check_header_ready_and_push_promise(&mut client)); @@ -6110,12 +6212,12 @@ mod tests { assert!(!check_header_ready(&mut client)); // Let client receive the encoder instructions. - let _out = client.process(encoder_inst_pkt1, now()); + let _out = client.process(encoder_inst_pkt1.as_ref(), now()); assert!(check_push_events(&mut client)); // Let client receive the encoder instructions. 
- let _out = client.process(encoder_inst_pkt2, now()); + let _out = client.process(encoder_inst_pkt2.as_ref(), now()); assert!(check_header_ready_and_push_promise(&mut client)); } @@ -6150,7 +6252,7 @@ mod tests { .unwrap(); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check that encoder got stream_canceled instruction. let mut inst = [0_u8; 100]; let (amount, fin) = server @@ -6214,7 +6316,7 @@ mod tests { ); // Now read headers. - mem::drop(client.process(encoder_insts.dgram(), now())); + mem::drop(client.process(encoder_insts.as_dgram_ref(), now())); } #[test] @@ -6225,7 +6327,7 @@ mod tests { mem::drop(client.cancel_fetch(request_stream_id, Error::HttpRequestCancelled.code())); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn)); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6273,8 +6375,8 @@ mod tests { .unwrap(); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = server.conn.process(None, now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.conn.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn)); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6310,7 +6412,7 @@ mod tests { assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut 
server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6334,7 +6436,7 @@ mod tests { ); // Exchange encoder instructions - mem::drop(client.process(encoder_instruct, now())); + mem::drop(client.process(encoder_instruct.as_ref(), now())); let header_ready_event = |e| matches!(e, Http3ClientEvent::HeaderReady { .. }); assert!(client.events().any(header_ready_event)); @@ -6347,7 +6449,7 @@ mod tests { assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); } @@ -6373,7 +6475,7 @@ mod tests { // Send the encoder instructions. let out = server.conn.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); // Send PushPromise that will be blocked waiting for decoder instructions. 
mem::drop( @@ -6403,7 +6505,7 @@ mod tests { .unwrap(); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 1); } @@ -6417,7 +6519,7 @@ mod tests { .unwrap(); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); let out = client.process(None, now()); - mem::drop(server.conn.process(out.dgram(), now())); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); mem::drop(server.encoder_receiver.receive(&mut server.conn).unwrap()); assert_eq!(server.encoder.borrow_mut().stats().stream_cancelled_recv, 0); } @@ -6470,7 +6572,7 @@ mod tests { assert!(!client.events().any(header_ready_event)); // Now make the encoder instructions available. - mem::drop(client.process(encoder_insts.dgram(), now())); + mem::drop(client.process(encoder_insts.as_dgram_ref(), now())); // Header blocks for both streams should be ready. let mut count_responses = 0; @@ -6514,7 +6616,7 @@ mod tests { let sent = server.conn.stream_send(control_stream, enc.as_ref()); assert_eq!(sent, Ok(4)); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpSettings); } } @@ -6614,8 +6716,8 @@ mod tests { } ); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check that server has received a reset. 
let stop_sending_event = |e| { @@ -6747,8 +6849,8 @@ mod tests { assert!(client.events().any(push_reset_event)); - let out = client.process(None, now()).dgram(); - mem::drop(server.conn.process(out, now())); + let out = client.process(None, now()); + mem::drop(server.conn.process(out.as_dgram_ref(), now())); // Check that server has received a reset. let stop_sending_event = |e| { @@ -6762,7 +6864,7 @@ mod tests { fn handshake_client_error(client: &mut Http3Client, server: &mut TestServer, error: &Error) { let out = handshake_only(client, server); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(client, error); } @@ -6923,14 +7025,14 @@ mod tests { let is_done = |c: &Http3Client| matches!(c.state(), Http3State::Connected); while !is_done(&mut client) { maybe_authenticate(&mut client); - datagram = client.process(datagram, now()).dgram(); - datagram = server.process(datagram, now()).dgram(); + datagram = client.process(datagram.as_ref(), now()).dgram(); + datagram = server.process(datagram.as_ref(), now()).dgram(); } // exchange qpack settings, server will send a token as well. - datagram = client.process(datagram, now()).dgram(); - datagram = server.process(datagram, now()).dgram(); - mem::drop(client.process(datagram, now()).dgram()); + datagram = client.process(datagram.as_ref(), now()).dgram(); + datagram = server.process(datagram.as_ref(), now()).dgram(); + mem::drop(client.process(datagram.as_ref(), now()).dgram()); client .events() @@ -6984,15 +7086,15 @@ mod tests { // Exchange packets until header-ack is received. // These many packet exchange is needed, to get a header-ack. // TODO this may be optimize at Http3Server. 
- let out = client.process(None, now()).dgram(); - let out = server.process(out, now()).dgram(); - let out = client.process(out, now()).dgram(); - let out = server.process(out, now()).dgram(); - let out = client.process(out, now()).dgram(); - let out = server.process(out, now()).dgram(); - let out = client.process(out, now()).dgram(); - let out = server.process(out, now()).dgram(); - mem::drop(client.process(out, now())); + let out = client.process(None, now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + mem::drop(client.process(out.as_dgram_ref(), now())); // The header ack for the first request has been received. assert_eq!(client.qpack_encoder_stats().header_acks_recv, 1); @@ -7048,7 +7150,7 @@ mod tests { _ = server.conn.stream_send(push_stream_id, &[0]).unwrap(); server.conn.stream_close_send(push_stream_id).unwrap(); let out = server.conn.process(None, now()); - client.process(out.dgram(), now()); + client.process(out.as_dgram_ref(), now()); assert_closed(&client, &Error::HttpGeneralProtocol); } @@ -7072,21 +7174,22 @@ mod tests { let md_before = server.conn.stats().frame_tx.max_data; // sending the http request and most most of the request data - let out = client.process(None, now()).dgram(); - let out = server.conn.process(out, now()).dgram(); + let out = client.process(None, now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // the server responses with an ack, but the max_data didn't change assert_eq!(md_before, server.conn.stats().frame_tx.max_data); - let out = client.process(out, now()).dgram(); - let out = server.conn.process(out, now()).dgram(); + let out = client.process(out.as_dgram_ref(), 
now()); + let out = server.conn.process(out.as_dgram_ref(), now()); // the server increased the max_data during the second read if that isn't the case - // in the future and therefore this asserts fails, the request data on stream 0 could be read - // to cause a max_update frame + // in the future and therefore this asserts fails, the request data on stream 0 could be + // read to cause a max_update frame assert_eq!(md_before + 1, server.conn.stats().frame_tx.max_data); - // make sure that the server didn't receive a priority_update on client control stream (stream_id 2) yet + // make sure that the server didn't receive a priority_update on client control stream + // (stream_id 2) yet let mut buf = [0; 32]; assert_eq!( server.conn.stream_recv(StreamId::new(2), &mut buf), @@ -7094,8 +7197,10 @@ mod tests { ); // the client now sends the priority update - let out = client.process(out, now()).dgram(); - server.conn.process_input(out.unwrap(), now()); + let out = client.process(out.as_dgram_ref(), now()); + server + .conn + .process_input(out.as_dgram_ref().unwrap(), now()); // check that the priority_update arrived at the client control stream let num_read = server.conn.stream_recv(StreamId::new(2), &mut buf).unwrap(); @@ -7123,7 +7228,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()); // Send response @@ -7141,7 +7247,7 @@ mod tests { ); // Let client receive the encoder instructions. - client.process_input(encoder_inst_pkt.dgram().unwrap(), now()); + client.process_input(encoder_inst_pkt.as_dgram_ref().unwrap(), now()); let reset_event = |e| matches!(e, Http3ClientEvent::Reset { stream_id, .. 
} if stream_id == request_stream_id); assert!(client.events().any(reset_event)); diff --git a/neqo-http3/src/connection_server.rs b/neqo-http3/src/connection_server.rs index c8cab52dd0..cc887a26fc 100644 --- a/neqo-http3/src/connection_server.rs +++ b/neqo-http3/src/connection_server.rs @@ -4,21 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::connection::{Http3Connection, Http3State, WebTransportSessionAcceptAction}; -use crate::frames::HFrame; -use crate::recv_message::{RecvMessage, RecvMessageInfo}; -use crate::send_message::SendMessage; -use crate::server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents}; -use crate::{ - Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, - ReceiveOutput, Res, -}; +use std::{rc::Rc, time::Instant}; + use neqo_common::{event::Provider, qdebug, qinfo, qtrace, Header, MessageType, Role}; use neqo_transport::{ AppError, Connection, ConnectionEvent, DatagramTracking, StreamId, StreamType, }; -use std::rc::Rc; -use std::time::Instant; + +use crate::{ + connection::{Http3Connection, Http3State, WebTransportSessionAcceptAction}, + frames::HFrame, + recv_message::{RecvMessage, RecvMessageInfo}, + send_message::SendMessage, + server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents}, + Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, + ReceiveOutput, Res, +}; #[derive(Debug)] pub struct Http3ServerHandler { @@ -48,25 +49,32 @@ impl Http3ServerHandler { } /// Supply a response for a request. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) 
- /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub(crate) fn send_data( &mut self, stream_id: StreamId, data: &[u8], conn: &mut Connection, ) -> Res { - self.base_handler.stream_has_pending_data(stream_id); - self.needs_processing = true; - self.base_handler + let n = self + .base_handler .send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)? - .send_data(conn, data) + .send_data(conn, data)?; + if n > 0 { + self.base_handler.stream_has_pending_data(stream_id); + } + self.needs_processing = true; + Ok(n) } /// Supply response heeaders for a request. @@ -89,19 +97,22 @@ impl Http3ServerHandler { } /// This is called when application is done sending a request. + /// /// # Errors + /// /// An error will be returned if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId, conn: &mut Connection) -> Res<()> { - qinfo!([self], "Close sending side stream={}.", stream_id); + qdebug!([self], "Close sending side stream={}.", stream_id); self.base_handler.stream_close_send(conn, stream_id)?; - self.base_handler.stream_has_pending_data(stream_id); self.needs_processing = true; Ok(()) } /// An application may reset a stream(request). /// Both sides, sending and receiving side, will be closed. + /// /// # Errors + /// /// An error will be return if a stream does not exist. 
pub fn cancel_fetch( &mut self, @@ -154,11 +165,14 @@ impl Http3ServerHandler { } /// Close `WebTransport` cleanly + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub fn webtransport_close_session( &mut self, conn: &mut Connection, @@ -354,7 +368,7 @@ impl Http3ServerHandler { } HFrame::PriorityUpdatePush { element_id, priority } => { // TODO: check if the element_id references a promised push stream or - // is greater than the maximum Push ID. + // is greater than the maximum Push ID. self.events.priority_update(StreamId::from(element_id), priority); Ok(()) } @@ -383,11 +397,13 @@ impl Http3ServerHandler { } } - /// Response data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Response data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happen while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happen while reading a stream, + /// e.g. early close, protocol error, etc. 
pub fn read_data( &mut self, conn: &mut Connection, @@ -395,7 +411,7 @@ impl Http3ServerHandler { stream_id: StreamId, buf: &mut [u8], ) -> Res<(usize, bool)> { - qinfo!([self], "read_data from stream {}.", stream_id); + qdebug!([self], "read_data from stream {}.", stream_id); let res = self.base_handler.read_data(conn, stream_id, buf); if let Err(e) = &res { if e.connection_error() { diff --git a/neqo-http3/src/control_stream_local.rs b/neqo-http3/src/control_stream_local.rs index a1842476e6..2f336c63a4 100644 --- a/neqo-http3/src/control_stream_local.rs +++ b/neqo-http3/src/control_stream_local.rs @@ -4,12 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::HFrame; -use crate::{BufferedStream, Http3StreamType, RecvStream, Res}; +use std::collections::{HashMap, VecDeque}; + use neqo_common::{qtrace, Encoder}; use neqo_transport::{Connection, StreamId, StreamType}; -use std::collections::{HashMap, VecDeque}; -use std::convert::TryFrom; + +use crate::{frames::HFrame, BufferedStream, Http3StreamType, RecvStream, Res}; pub const HTTP3_UNI_STREAM_TYPE_CONTROL: u64 = 0x0; @@ -63,7 +63,9 @@ impl ControlStreamLocal { ) -> Res<()> { // send all necessary priority updates while let Some(update_id) = self.outstanding_priority_update.pop_front() { - let Some(update_stream) = recv_conn.get_mut(&update_id) else { continue }; + let Some(update_stream) = recv_conn.get_mut(&update_id) else { + continue; + }; // can assert and unwrap here, because priority updates can only be added to // HttpStreams in [Http3Connection::queue_update_priority} diff --git a/neqo-http3/src/control_stream_remote.rs b/neqo-http3/src/control_stream_remote.rs index 7b42ed2b11..aef4b4c0a4 100644 --- a/neqo-http3/src/control_stream_remote.rs +++ b/neqo-http3/src/control_stream_remote.rs @@ -4,12 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper}; -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; use neqo_common::qdebug; use neqo_transport::{Connection, StreamId}; -/// The remote control stream is responsible only for reading frames. The frames are handled by `Http3Connection`. +use crate::{ + frames::{FrameReader, HFrame, StreamReaderConnectionWrapper}, + CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream, +}; + +/// The remote control stream is responsible only for reading frames. The frames are handled by +/// `Http3Connection`. #[derive(Debug)] pub(crate) struct ControlStreamRemote { stream_id: StreamId, diff --git a/neqo-http3/src/features/extended_connect/mod.rs b/neqo-http3/src/features/extended_connect/mod.rs index 6be92dabba..77655833f7 100644 --- a/neqo-http3/src/features/extended_connect/mod.rs +++ b/neqo-http3/src/features/extended_connect/mod.rs @@ -9,15 +9,19 @@ pub(crate) mod webtransport_session; pub(crate) mod webtransport_streams; -use crate::client_events::Http3ClientEvents; -use crate::features::NegotiationState; -use crate::settings::{HSettingType, HSettings}; -use crate::{CloseType, Http3StreamInfo, Http3StreamType}; +use std::fmt::Debug; + use neqo_common::Header; use neqo_transport::{AppError, StreamId}; -use std::fmt::Debug; pub(crate) use webtransport_session::WebTransportSession; +use crate::{ + client_events::Http3ClientEvents, + features::NegotiationState, + settings::{HSettingType, HSettings}, + CloseType, Http3StreamInfo, Http3StreamType, +}; + #[derive(Debug, PartialEq, Eq, Clone)] pub enum SessionCloseReason { Error(AppError), diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs index 1b9511b255..27b7d2b2f2 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs +++ 
b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs @@ -4,13 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::features::extended_connect::tests::webtransport::{ - wt_default_parameters, WtTest, DATAGRAM_SIZE, -}; -use crate::{Error, Http3Parameters, WebTransportRequest}; use neqo_common::Encoder; use neqo_transport::Error as TransportError; -use std::convert::TryFrom; + +use crate::{ + features::extended_connect::tests::webtransport::{ + wt_default_parameters, WtTest, DATAGRAM_SIZE, + }, + Error, Http3Parameters, WebTransportRequest, +}; const DGRAM: &[u8] = &[0, 100]; diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs index 4ac5f72b0f..3753c3122d 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs @@ -8,7 +8,15 @@ mod datagrams; mod negotiation; mod sessions; mod streams; +use std::{cell::RefCell, rc::Rc, time::Duration}; + use neqo_common::event::Provider; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{ConnectionParameters, StreamId, StreamType}; +use test_fixture::{ + anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, + DEFAULT_KEYS, DEFAULT_SERVER_NAME, +}; use crate::{ features::extended_connect::SessionCloseReason, Error, Header, Http3Client, Http3ClientEvent, @@ -16,16 +24,6 @@ use crate::{ RecvStreamStats, SendStreamStats, WebTransportEvent, WebTransportRequest, WebTransportServerEvent, WebTransportSessionAcceptAction, }; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{ConnectionParameters, StreamId, StreamType}; -use std::cell::RefCell; -use std::rc::Rc; -use std::time::Duration; - -use test_fixture::{ - addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, - 
DEFAULT_KEYS, DEFAULT_SERVER_NAME, -}; const DATAGRAM_SIZE: u64 = 1200; @@ -40,8 +38,8 @@ pub fn default_http3_client(client_params: Http3Parameters) -> Http3Client { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, client_params, now(), ) @@ -64,8 +62,8 @@ pub fn default_http3_server(server_params: Http3Parameters) -> Http3Server { fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } @@ -78,28 +76,28 @@ fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { let out = client.process(None, now()); assert_eq!(client.state(), Http3State::Initializing); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(client.events().any(authentication_needed)); client.authenticated(AuthenticationStatus::Ok, now()); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); assert_eq!(client.state(), Http3State::Connected); // Exchange H3 setttings - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = 
server.process(out.dgram(), now()); - std::mem::drop(client.process(out.dgram(), now())); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + std::mem::drop(client.process(out.as_dgram_ref(), now())); } fn connect( @@ -201,10 +199,10 @@ impl WtTest { let mut now = now(); loop { now += RTT / 2; - out = self.client.process(out, now).dgram(); + out = self.client.process(out.as_ref(), now).dgram(); let client_none = out.is_none(); now += RTT / 2; - out = self.server.process(out, now).dgram(); + out = self.server.process(out.as_ref(), now).dgram(); if client_none && out.is_none() { break; } diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs index 23784e5609..27f669861d 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs @@ -4,17 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::time::Duration; + +use neqo_common::{event::Provider, Encoder}; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{Connection, ConnectionError, StreamType}; +use test_fixture::{default_server_h3, now}; + use super::{connect, default_http3_client, default_http3_server, exchange_packets}; use crate::{ settings::{HSetting, HSettingType, HSettings}, Error, HFrame, Http3Client, Http3ClientEvent, Http3Parameters, Http3Server, Http3State, WebTransportEvent, }; -use neqo_common::{event::Provider, Encoder}; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{Connection, ConnectionError, StreamType}; -use std::time::Duration; -use test_fixture::{default_server_h3, now}; fn check_wt_event(client: &mut Http3Client, wt_enable_client: bool, wt_enable_server: bool) { let wt_event = client.events().find_map(|e| { @@ -86,7 +88,7 @@ fn zero_rtt( // exchange token let out = server.process(None, now()); // We do not have a token so we need to wait for a resumption token timer to trigger. 
- std::mem::drop(client.process(out.dgram(), now() + Duration::from_millis(250))); + std::mem::drop(client.process(out.as_dgram_ref(), now() + Duration::from_millis(250))); assert_eq!(client.state(), Http3State::Connected); let token = client .events() @@ -234,8 +236,8 @@ fn zero_rtt_wt_settings() { fn exchange_packets2(client: &mut Http3Client, server: &mut Connection) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs index 65572a1c2a..5f929d0e4b 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs @@ -4,19 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::features::extended_connect::tests::webtransport::{ - default_http3_client, default_http3_server, wt_default_parameters, WtTest, -}; -use crate::{ - features::extended_connect::SessionCloseReason, frames::WebTransportFrame, Error, Header, - Http3ClientEvent, Http3OrWebTransportStream, Http3Server, Http3ServerEvent, Http3State, - Priority, WebTransportEvent, WebTransportServerEvent, WebTransportSessionAcceptAction, -}; +use std::mem; + use neqo_common::{event::Provider, Encoder}; use neqo_transport::StreamType; -use std::mem; use test_fixture::now; +use crate::{ + features::extended_connect::{ + tests::webtransport::{ + default_http3_client, default_http3_server, wt_default_parameters, WtTest, + }, + SessionCloseReason, + }, + frames::WebTransportFrame, + Error, Header, Http3ClientEvent, Http3OrWebTransportStream, Http3Server, Http3ServerEvent, + Http3State, Priority, WebTransportEvent, WebTransportServerEvent, + WebTransportSessionAcceptAction, +}; + #[test] fn wt_session() { let mut wt = WtTest::new(); @@ -419,18 +425,18 @@ fn wt_close_session_cannot_be_sent_at_once() { Err(Error::InvalidStreamId) ); - let out = wt.server.process(None, now()).dgram(); - let out = wt.client.process(out, now()).dgram(); + let out = wt.server.process(None, now()); + let out = wt.client.process(out.as_dgram_ref(), now()); // Client has not received the full CloseSession frame and it can create more streams. 
let unidi_client = wt.create_wt_stream_client(wt_session.stream_id(), StreamType::UniDi); - let out = wt.server.process(out, now()).dgram(); - let out = wt.client.process(out, now()).dgram(); - let out = wt.server.process(out, now()).dgram(); - let out = wt.client.process(out, now()).dgram(); - let out = wt.server.process(out, now()).dgram(); - let _out = wt.client.process(out, now()).dgram(); + let out = wt.server.process(out.as_dgram_ref(), now()); + let out = wt.client.process(out.as_dgram_ref(), now()); + let out = wt.server.process(out.as_dgram_ref(), now()); + let out = wt.client.process(out.as_dgram_ref(), now()); + let out = wt.server.process(out.as_dgram_ref(), now()); + let _out = wt.client.process(out.as_dgram_ref(), now()); wt.check_events_after_closing_session_client( &[], diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs index a50c45d518..b898dbb31e 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs @@ -4,11 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::features::extended_connect::tests::webtransport::WtTest; -use crate::{features::extended_connect::SessionCloseReason, Error}; -use neqo_transport::StreamType; use std::mem; +use neqo_transport::StreamType; + +use crate::{ + features::extended_connect::{tests::webtransport::WtTest, SessionCloseReason}, + Error, +}; + #[test] fn wt_client_stream_uni() { const BUF_CLIENT: &[u8] = &[0; 10]; @@ -287,13 +291,17 @@ fn wt_server_stream_bidi_stop_sending() { // 1) Both sides of a bidirectional client stream are opened. // 2) A client unidirectional stream is opened. // 3) A client unidirectional stream has been closed and both sides consumed the closing info. 
-// 4) A client unidirectional stream has been closed, but only the server has consumed the closing info. -// 5) A client unidirectional stream has been closed, but only the client has consum the closing info. +// 4) A client unidirectional stream has been closed, but only the server has consumed the closing +// info. +// 5) A client unidirectional stream has been closed, but only the client has consum the closing +// info. // 6) Both sides of a bidirectional server stream are opened. // 7) A server unidirectional stream is opened. // 8) A server unidirectional stream has been closed and both sides consumed the closing info. -// 9) A server unidirectional stream has been closed, but only the server has consumed the closing info. -// 10) A server unidirectional stream has been closed, but only the client has consumed the closing info. +// 9) A server unidirectional stream has been closed, but only the server has consumed the closing +// info. +// 10) A server unidirectional stream has been closed, but only the client has consumed the closing +// info. // 11) Both sides of a bidirectional stream have been closed and consumed by both sides. // 12) Both sides of a bidirectional stream have been closed, but not consumed by both sides. // 13) Multiples open streams diff --git a/neqo-http3/src/features/extended_connect/webtransport_session.rs b/neqo-http3/src/features/extended_connect/webtransport_session.rs index c446fd3843..5e89225956 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_session.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_session.rs @@ -4,7 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(clippy::module_name_repetitions)] +use std::{cell::RefCell, collections::BTreeSet, mem, rc::Rc}; + +use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; +use neqo_qpack::{QPackDecoder, QPackEncoder}; +use neqo_transport::{Connection, DatagramTracking, StreamId}; use super::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}; use crate::{ @@ -15,14 +19,6 @@ use crate::{ HttpRecvStreamEvents, Priority, PriorityHandler, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; -use neqo_qpack::{QPackDecoder, QPackEncoder}; -use neqo_transport::{streams::SendOrder, Connection, DatagramTracking, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::collections::BTreeSet; -use std::mem; -use std::rc::Rc; #[derive(Debug, PartialEq)] enum SessionState { @@ -100,6 +96,7 @@ impl WebTransportSession { } /// # Panics + /// /// This function is only called with `RecvStream` and `SendStream` that also implement /// the http specific functions and `http_stream()` will never return `None`. #[must_use] @@ -134,8 +131,11 @@ impl WebTransportSession { } /// # Errors + /// /// The function can only fail if supplied headers are not valid http headers. + /// /// # Panics + /// /// `control_stream_send` implements the http specific functions and `http_stream()` /// will never return `None`. pub fn send_request(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()> { @@ -220,6 +220,7 @@ impl WebTransportSession { } /// # Panics + /// /// This cannot panic because headers are checked before this function called. pub fn maybe_check_headers(&mut self) { if SessionState::Negotiating != self.state { @@ -335,6 +336,7 @@ impl WebTransportSession { } /// # Errors + /// /// It may return an error if the frame is not correctly decoded. 
pub fn read_control_stream(&mut self, conn: &mut Connection) -> Res<()> { let (f, fin) = self @@ -373,8 +375,9 @@ impl WebTransportSession { } /// # Errors - /// Return an error if the stream was closed on the transport layer, but that information is not yet - /// consumed on the http/3 layer. + /// + /// Return an error if the stream was closed on the transport layer, but that information is not + /// yet consumed on the http/3 layer. pub fn close_session(&mut self, conn: &mut Connection, error: u32, message: &str) -> Res<()> { self.state = SessionState::Done; let close_frame = WebTransportFrame::CloseSession { @@ -399,6 +402,7 @@ impl WebTransportSession { } /// # Errors + /// /// Returns an error if the datagram exceeds the remote datagram size limit. pub fn send_datagram( &self, @@ -467,10 +471,6 @@ impl HttpRecvStream for Rc> { fn priority_update_sent(&mut self) { self.borrow_mut().priority_update_sent(); } - - fn any(&self) -> &dyn Any { - self - } } impl SendStream for Rc> { @@ -486,16 +486,6 @@ impl SendStream for Rc> { self.borrow_mut().has_data_to_send() } - fn set_sendorder(&mut self, _conn: &mut Connection, _sendorder: Option) -> Res<()> { - // Not relevant on session - Ok(()) - } - - fn set_fairness(&mut self, _conn: &mut Connection, _fairness: bool) -> Res<()> { - // Not relevant on session - Ok(()) - } - fn stream_writable(&self) {} fn done(&self) -> bool { diff --git a/neqo-http3/src/features/extended_connect/webtransport_streams.rs b/neqo-http3/src/features/extended_connect/webtransport_streams.rs index ca918dce9e..cdc692b8d7 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_streams.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_streams.rs @@ -4,15 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, rc::Rc}; + +use neqo_common::Encoder; +use neqo_transport::{Connection, RecvStreamStats, SendStreamStats, StreamId}; + use super::WebTransportSession; use crate::{ CloseType, Http3StreamInfo, Http3StreamType, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::Encoder; -use neqo_transport::{Connection, RecvStreamStats, SendStreamStats, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; pub const WEBTRANSPORT_UNI_STREAM: u64 = 0x54; pub const WEBTRANSPORT_STREAM: u64 = 0x41; @@ -214,16 +215,6 @@ impl SendStream for WebTransportSendStream { } } - fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option) -> Res<()> { - conn.stream_sendorder(self.stream_id, sendorder) - .map_err(|_| crate::Error::InvalidStreamId) - } - - fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()> { - conn.stream_fairness(self.stream_id, fairness) - .map_err(|_| crate::Error::InvalidStreamId) - } - fn handle_stop_sending(&mut self, close_type: CloseType) { self.set_done(close_type); } diff --git a/neqo-http3/src/features/mod.rs b/neqo-http3/src/features/mod.rs index 0e045ed80b..34e21f50ac 100644 --- a/neqo-http3/src/features/mod.rs +++ b/neqo-http3/src/features/mod.rs @@ -4,23 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{fmt::Debug, mem}; + +use neqo_common::qtrace; + use crate::{ client_events::Http3ClientEvents, settings::{HSettingType, HSettings}, }; -use neqo_common::qtrace; -use std::fmt::Debug; -use std::mem; pub mod extended_connect; /// States: /// - `Disable` - it is not turned on for this connection. -/// - `Negotiating` - the feature is enabled locally, but settings from the peer -/// have not been received yet. +/// - `Negotiating` - the feature is enabled locally, but settings from the peer have not been +/// received yet. 
/// - `Negotiated` - the settings have been received and both sides support the feature. -/// - `NegotiationFailed` - the settings have been received and the peer does not -/// support the feature. +/// - `NegotiationFailed` - the settings have been received and the peer does not support the +/// feature. #[derive(Debug)] pub enum NegotiationState { Disabled, diff --git a/neqo-http3/src/frames/hframe.rs b/neqo-http3/src/frames/hframe.rs index 28ce7608f9..e69f7b449e 100644 --- a/neqo-http3/src/frames/hframe.rs +++ b/neqo-http3/src/frames/hframe.rs @@ -4,12 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res}; +use std::{fmt::Debug, io::Write}; + use neqo_common::{Decoder, Encoder}; use neqo_crypto::random; use neqo_transport::StreamId; -use std::fmt::Debug; -use std::io::Write; + +use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res}; pub(crate) type HFrameType = u64; @@ -73,10 +74,7 @@ impl HFrame { Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID, Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST, Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH, - Self::Grease => { - let r = random(7); - Decoder::from(&r).decode_uint(7).unwrap() * 0x1f + 0x21 - } + Self::Grease => Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21, } } @@ -119,7 +117,7 @@ impl HFrame { } Self::Grease => { // Encode some number of random bytes. 
- let r = random(8); + let r = random::<8>(); enc.encode_vvec(&r[1..usize::from(1 + (r[0] & 0x7))]); } Self::PriorityUpdateRequest { diff --git a/neqo-http3/src/frames/reader.rs b/neqo-http3/src/frames/reader.rs index 9d81f2c1c1..1a086683cf 100644 --- a/neqo-http3/src/frames/reader.rs +++ b/neqo-http3/src/frames/reader.rs @@ -6,34 +6,39 @@ #![allow(clippy::module_name_repetitions)] -use crate::{Error, RecvStream, Res}; +use std::fmt::Debug; + use neqo_common::{ hex_with_len, qtrace, Decoder, IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint, }; use neqo_transport::{Connection, StreamId}; -use std::convert::TryFrom; -use std::fmt::Debug; + +use crate::{Error, RecvStream, Res}; const MAX_READ_SIZE: usize = 4096; pub(crate) trait FrameDecoder { fn is_known_type(frame_type: u64) -> bool; /// # Errors + /// /// Returns `HttpFrameUnexpected` if frames is not alowed, i.e. is a `H3_RESERVED_FRAME_TYPES`. fn frame_type_allowed(_frame_type: u64) -> Res<()> { Ok(()) } + /// # Errors + /// /// If a frame cannot be properly decoded. fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res>; } pub(crate) trait StreamReader { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. - /// Return an error if the stream was closed on the transport layer, but that information is not yet - /// consumed on the http/3 layer. + /// Return an error if the stream was closed on the transport layer, but that information is not + /// yet consumed on the http/3 layer. fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)>; } @@ -50,6 +55,7 @@ impl<'a> StreamReaderConnectionWrapper<'a> { impl<'a> StreamReader for StreamReaderConnectionWrapper<'a> { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. 
fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { let res = self.conn.stream_recv(self.stream_id, buf)?; @@ -70,6 +76,7 @@ impl<'a> StreamReaderRecvStreamWrapper<'a> { impl<'a> StreamReader for StreamReaderRecvStreamWrapper<'a> { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { self.recv_stream.read_data(self.conn, buf) @@ -146,7 +153,9 @@ impl FrameReader { } /// returns true if quic stream was closed. + /// /// # Errors + /// /// May return `HttpFrame` if a frame cannot be decoded. /// and `TransportStreamDoesNotExist` if `stream_recv` fails. pub fn receive>( @@ -186,6 +195,7 @@ impl FrameReader { } /// # Errors + /// /// May return `HttpFrame` if a frame cannot be decoded. fn consume>(&mut self, mut input: Decoder) -> Res> { match &mut self.state { diff --git a/neqo-http3/src/frames/tests/hframe.rs b/neqo-http3/src/frames/tests/hframe.rs index 54b7c94c8e..3da7e7fc36 100644 --- a/neqo-http3/src/frames/tests/hframe.rs +++ b/neqo-http3/src/frames/tests/hframe.rs @@ -4,15 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use neqo_common::{Decoder, Encoder}; +use neqo_transport::StreamId; +use test_fixture::fixture_init; + use super::enc_dec_hframe; use crate::{ frames::HFrame, settings::{HSetting, HSettingType, HSettings}, Priority, }; -use neqo_common::{Decoder, Encoder}; -use neqo_transport::StreamId; -use test_fixture::fixture_init; #[test] fn test_data_frame() { diff --git a/neqo-http3/src/frames/tests/mod.rs b/neqo-http3/src/frames/tests/mod.rs index 092b3039ec..33eea5497a 100644 --- a/neqo-http3/src/frames/tests/mod.rs +++ b/neqo-http3/src/frames/tests/mod.rs @@ -4,15 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::frames::{ - reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, -}; +use std::mem; + use neqo_common::Encoder; use neqo_crypto::AuthenticationStatus; use neqo_transport::StreamType; -use std::mem; use test_fixture::{default_client, default_server, now}; +use crate::frames::{ + reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, +}; + #[allow(clippy::many_single_char_names)] pub(crate) fn enc_dec>(d: &Encoder, st: &str, remaining: usize) -> T { // For data, headers and push_promise we do not read all bytes from the buffer @@ -22,12 +24,12 @@ pub(crate) fn enc_dec>(d: &Encoder, st: &str, remaining: usiz let mut conn_c = default_client(); let mut conn_s = default_server(); let out = conn_c.process(None, now()); - let out = conn_s.process(out.dgram(), now()); - let out = conn_c.process(out.dgram(), now()); - mem::drop(conn_s.process(out.dgram(), now())); + let out = conn_s.process(out.as_dgram_ref(), now()); + let out = conn_c.process(out.as_dgram_ref(), now()); + mem::drop(conn_s.process(out.as_dgram_ref(), now())); conn_c.authenticated(AuthenticationStatus::Ok, now()); let out = conn_c.process(None, now()); - mem::drop(conn_s.process(out.dgram(), now())); + mem::drop(conn_s.process(out.as_dgram_ref(), now())); // create a stream let stream_id = conn_s.stream_create(StreamType::BiDi).unwrap(); @@ -38,7 +40,7 @@ pub(crate) fn enc_dec>(d: &Encoder, st: &str, remaining: usiz let buf = Encoder::from_hex(st); conn_s.stream_send(stream_id, buf.as_ref()).unwrap(); let out = conn_s.process(None, now()); - mem::drop(conn_c.process(out.dgram(), now())); + mem::drop(conn_c.process(out.as_dgram_ref(), now())); let (frame, fin) = fr .receive::(&mut StreamReaderConnectionWrapper::new( diff --git a/neqo-http3/src/frames/tests/reader.rs b/neqo-http3/src/frames/tests/reader.rs index f694e4dbe3..fed1477ba4 100644 --- a/neqo-http3/src/frames/tests/reader.rs +++ 
b/neqo-http3/src/frames/tests/reader.rs @@ -4,6 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{fmt::Debug, mem}; + +use neqo_common::Encoder; +use neqo_transport::{Connection, StreamId, StreamType}; +use test_fixture::{connect, now}; + use crate::{ frames::{ reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, @@ -11,11 +17,6 @@ use crate::{ settings::{HSetting, HSettingType, HSettings}, Error, }; -use neqo_common::Encoder; -use neqo_transport::{Connection, StreamId, StreamType}; -use std::fmt::Debug; -use std::mem; -use test_fixture::{connect, now}; struct FrameReaderTest { pub fr: FrameReader, @@ -39,7 +40,7 @@ impl FrameReaderTest { fn process>(&mut self, v: &[u8]) -> Option { self.conn_s.stream_send(self.stream_id, v).unwrap(); let out = self.conn_s.process(None, now()); - mem::drop(self.conn_c.process(out.dgram(), now())); + mem::drop(self.conn_c.process(out.as_dgram_ref(), now())); let (frame, fin) = self .fr .receive::(&mut StreamReaderConnectionWrapper::new( @@ -230,12 +231,12 @@ fn test_reading_frame + PartialEq + Debug>( } let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); if let FrameReadingTestSend::DataThenFin = test_to_send { fr.conn_s.stream_close_send(fr.stream_id).unwrap(); let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); } let rv = fr.fr.receive::(&mut StreamReaderConnectionWrapper::new( @@ -478,11 +479,11 @@ fn test_frame_reading_when_stream_is_closed_before_sending_data() { fr.conn_s.stream_send(fr.stream_id, &[0x00]).unwrap(); let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); assert_eq!(Ok(()), 
fr.conn_c.stream_close_send(fr.stream_id)); let out = fr.conn_c.process(None, now()); - mem::drop(fr.conn_s.process(out.dgram(), now())); + mem::drop(fr.conn_s.process(out.as_dgram_ref(), now())); assert_eq!( Ok((None, true)), fr.fr @@ -501,11 +502,11 @@ fn test_wt_frame_reading_when_stream_is_closed_before_sending_data() { fr.conn_s.stream_send(fr.stream_id, &[0x00]).unwrap(); let out = fr.conn_s.process(None, now()); - mem::drop(fr.conn_c.process(out.dgram(), now())); + mem::drop(fr.conn_c.process(out.as_dgram_ref(), now())); assert_eq!(Ok(()), fr.conn_c.stream_close_send(fr.stream_id)); let out = fr.conn_c.process(None, now()); - mem::drop(fr.conn_s.process(out.dgram(), now())); + mem::drop(fr.conn_s.process(out.as_dgram_ref(), now())); assert_eq!( Ok((None, true)), fr.fr diff --git a/neqo-http3/src/frames/wtframe.rs b/neqo-http3/src/frames/wtframe.rs index 091c3fabe6..20e9b81936 100644 --- a/neqo-http3/src/frames/wtframe.rs +++ b/neqo-http3/src/frames/wtframe.rs @@ -4,9 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{frames::reader::FrameDecoder, Error, Res}; use neqo_common::{Decoder, Encoder}; -use std::convert::TryFrom; + +use crate::{frames::reader::FrameDecoder, Error, Res}; pub(crate) type WebTransportFrameType = u64; @@ -42,7 +42,9 @@ impl FrameDecoder for WebTransportFrame { } let error = u32::try_from(dec.decode_uint(4).ok_or(Error::HttpMessageError)?).unwrap(); - let Ok(message) = String::from_utf8(dec.decode_remainder().to_vec()) else { return Err(Error::HttpMessageError) }; + let Ok(message) = String::from_utf8(dec.decode_remainder().to_vec()) else { + return Err(Error::HttpMessageError); + }; Ok(Some(WebTransportFrame::CloseSession { error, message })) } else { Ok(None) diff --git a/neqo-http3/src/headers_checks.rs b/neqo-http3/src/headers_checks.rs index 7d679409ad..2dbf43cd32 100644 --- a/neqo-http3/src/headers_checks.rs +++ b/neqo-http3/src/headers_checks.rs @@ -6,10 +6,10 @@ #![allow(clippy::unused_unit)] // see https://github.com/Lymia/enumset/issues/44 -use crate::{Error, MessageType, Res}; use enumset::{enum_set, EnumSet, EnumSetType}; use neqo_common::Header; -use std::convert::TryFrom; + +use crate::{Error, MessageType, Res}; #[derive(EnumSetType, Debug)] enum PseudoHeaderState { @@ -45,7 +45,9 @@ impl TryFrom<(MessageType, &str)> for PseudoHeaderState { } /// Check whether the response is informational(1xx). +/// /// # Errors +/// /// Returns an error if response headers do not contain /// a status header or if the value of the header is 101 or cannot be parsed. pub fn is_interim(headers: &[Header]) -> Res { @@ -89,7 +91,9 @@ fn track_pseudo( /// Checks if request/response headers are well formed, i.e. contain /// allowed pseudo headers and in a right order, etc. +/// /// # Errors +/// /// Returns an error if headers are not well formed. 
pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> { let mut method_value: Option<&str> = None; @@ -155,7 +159,9 @@ pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> { /// Checks if trailers are well formed, i.e. pseudo headers are not /// allowed in trailers. +/// /// # Errors +/// /// Returns an error if trailers are not well formed. pub fn trailers_valid(headers: &[Header]) -> Res<()> { for header in headers { @@ -168,9 +174,10 @@ pub fn trailers_valid(headers: &[Header]) -> Res<()> { #[cfg(test)] mod tests { + use neqo_common::Header; + use super::headers_valid; use crate::MessageType; - use neqo_common::Header; fn create_connect_headers() -> Vec
{ vec![ diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index 76be301a8e..8272151cc1 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. /*! @@ -35,7 +34,7 @@ supported and can be enabled using [`Http3Parameters`](struct.Http3Parameters.ht The crate does not create an OS level UDP socket, it produces, i.e. encodes, data that should be sent as a payload in a UDP packet and consumes data received on the UDP socket. For example, -[`std::net::UdpSocket`](std::net::UdpSocket) or [`mio::net::UdpSocket`](https://crates.io/crates/mio) +[`std::net::UdpSocket`] or [`mio::net::UdpSocket`](https://crates.io/crates/mio) could be used for creating UDP sockets. The application is responsible for creating a socket, polling the socket, and sending and receiving @@ -160,14 +159,8 @@ mod server_events; mod settings; mod stream_type_reader; -use neqo_qpack::Error as QpackError; -pub use neqo_transport::{streams::SendOrder, Output, StreamId}; -use neqo_transport::{ - AppError, Connection, Error as TransportError, RecvStreamStats, SendStreamStats, -}; -use std::fmt::Debug; +use std::{cell::RefCell, fmt::Debug, rc::Rc}; -use crate::priority::PriorityHandler; use buffered_send_stream::BufferedStream; pub use client_events::{Http3ClientEvent, WebTransportEvent}; pub use conn_params::Http3Parameters; @@ -177,23 +170,28 @@ use features::extended_connect::WebTransportSession; use frames::HFrame; pub use neqo_common::Header; use neqo_common::MessageType; +use neqo_qpack::Error as QpackError; +pub use neqo_transport::{streams::SendOrder, Output, StreamId}; +use neqo_transport::{ + AppError, Connection, Error as TransportError, RecvStreamStats, SendStreamStats, +}; pub use priority::Priority; pub use 
server::Http3Server; pub use server_events::{ Http3OrWebTransportStream, Http3ServerEvent, WebTransportRequest, WebTransportServerEvent, }; -use std::any::Any; -use std::cell::RefCell; -use std::rc::Rc; use stream_type_reader::NewStreamType; +use crate::priority::PriorityHandler; + type Res = Result; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Error { HttpNoError, HttpGeneralProtocol, - HttpGeneralProtocolStream, //this is the same as the above but it should only close a stream not a connection. + HttpGeneralProtocolStream, /* this is the same as the above but it should only close a + * stream not a connection. */ // When using this error, you need to provide a value that is unique, which // will allow the specific error to be identified. This will be validated in CI. HttpInternal(u16), @@ -288,6 +286,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_send_errors(err: &Error) -> Self { @@ -304,6 +303,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_create_errors(err: &TransportError) -> Self { @@ -318,6 +318,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_recv_errors(err: &Error) -> Self { @@ -345,8 +346,11 @@ impl Error { } /// # Errors - /// Any error is mapped to the indicated type. + /// + /// Any error is mapped to the indicated type. + /// /// # Panics + /// /// On internal errors, in debug mode. 
fn map_error(r: Result>, err: Self) -> Result { r.map_err(|e| { @@ -428,20 +432,15 @@ pub enum Http3StreamType { } #[must_use] -#[derive(PartialEq, Eq, Debug)] +#[derive(Default, PartialEq, Eq, Debug)] enum ReceiveOutput { + #[default] NoOutput, ControlFrames(Vec), UnblockedStreams(Vec), NewStream(NewStreamType), } -impl Default for ReceiveOutput { - fn default() -> Self { - Self::NoOutput - } -} - trait Stream: Debug { fn stream_type(&self) -> Http3StreamType; } @@ -450,16 +449,23 @@ trait RecvStream: Stream { /// The stream reads data from the corresponding quic stream and returns `ReceiveOutput`. /// The function also returns true as the second parameter if the stream is done and /// could be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn receive(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)>; + /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, etc. fn reset(&mut self, close_type: CloseType) -> Res<()>; + /// The function allows an app to read directly from the quic stream. The function /// returns the number of bytes written into `buf` and true/false if the stream is /// completely done and can be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn read_data(&mut self, _conn: &mut Connection, _buf: &mut [u8]) -> Res<(usize, bool)> { Err(Error::InvalidStreamId) @@ -483,7 +489,9 @@ trait HttpRecvStream: RecvStream { /// This function is similar to the receive function and has the same output, i.e. /// a `ReceiveOutput` enum and bool. The bool is true if the stream is completely done /// and can be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. 
fn header_unblocked(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)>; @@ -495,8 +503,6 @@ trait HttpRecvStream: RecvStream { fn extended_connect_wait_for_response(&self) -> bool { false } - - fn any(&self) -> &dyn Any; } #[derive(Debug, PartialEq, Eq, Copy, Clone)] @@ -552,22 +558,26 @@ trait HttpRecvStreamEvents: RecvStreamEvents { trait SendStream: Stream { /// # Errors - /// Error my occur during sending data, e.g. protocol error, etc. + /// + /// Error may occur during sending data, e.g. protocol error, etc. fn send(&mut self, conn: &mut Connection) -> Res<()>; fn has_data_to_send(&self) -> bool; fn stream_writable(&self); fn done(&self) -> bool; - fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option) -> Res<()>; - fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()>; + /// # Errors - /// Error my occur during sending data, e.g. protocol error, etc. + /// + /// Error may occur during sending data, e.g. protocol error, etc. fn send_data(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res; /// # Errors - /// It may happen that the transport stream is already close. This is unlikely. + /// + /// It may happen that the transport stream is already closed. This is unlikely. fn close(&mut self, conn: &mut Connection) -> Res<()>; + /// # Errors - /// It may happen that the transport stream is already close. This is unlikely. + /// + /// It may happen that the transport stream is already closed. This is unlikely. fn close_with_message( &mut self, _conn: &mut Connection, @@ -576,6 +586,7 @@ trait SendStream: Stream { ) -> Res<()> { Err(Error::InvalidStreamId) } + /// This function is called when sending side is closed abruptly by the peer or /// the application. fn handle_stop_sending(&mut self, close_type: CloseType); @@ -584,7 +595,8 @@ trait SendStream: Stream { } /// # Errors - /// It may happen that the transport stream is already close. This is unlikely. 
+ /// + /// It may happen that the transport stream is already closed. This is unlikely. fn send_data_atomic(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<()> { Err(Error::InvalidStreamId) } @@ -599,11 +611,12 @@ trait HttpSendStream: SendStream { /// This function is used to supply headers to a http message. The /// function is used for request headers, response headers, 1xx response and /// trailers. + /// /// # Errors + /// /// This can also return an error if the underlying stream is closed. fn send_headers(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()>; fn set_new_listener(&mut self, _conn_events: Box) {} - fn any(&self) -> &dyn Any; } trait SendStreamEvents: Debug { diff --git a/neqo-http3/src/priority.rs b/neqo-http3/src/priority.rs index 6a391de578..76a2cb9a85 100644 --- a/neqo-http3/src/priority.rs +++ b/neqo-http3/src/priority.rs @@ -1,8 +1,15 @@ -use crate::{frames::HFrame, Error, Header, Res}; +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt; + use neqo_transport::StreamId; use sfv::{BareItem, Item, ListEntry, Parser}; -use std::convert::TryFrom; -use std::fmt; + +use crate::{frames::HFrame, Error, Header, Res}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct Priority { @@ -21,6 +28,7 @@ impl Default for Priority { impl Priority { /// # Panics + /// /// If an invalid urgency (>7 is given) #[must_use] pub fn new(urgency: u8, incremental: bool) -> Priority { @@ -44,9 +52,13 @@ impl Priority { } /// Constructs a priority from raw bytes (either a field value of frame content). + /// /// # Errors + /// /// When the contained syntax is invalid. + /// /// # Panics + /// /// Never, but the compiler is not smart enough to work that out. 
pub fn from_bytes(bytes: &[u8]) -> Res { let dict = Parser::parse_dictionary(bytes).map_err(|_| Error::HttpFrame)?; @@ -149,10 +161,10 @@ impl PriorityHandler { #[cfg(test)] mod test { - use crate::priority::PriorityHandler; - use crate::{HFrame, Priority}; use neqo_transport::StreamId; + use crate::{priority::PriorityHandler, HFrame, Priority}; + #[test] fn priority_updates_ignore_same() { let mut p = PriorityHandler::new(false, Priority::new(5, false)); @@ -183,7 +195,8 @@ mod test { let mut p = PriorityHandler::new(false, Priority::new(5, false)); assert!(p.maybe_update_priority(Priority::new(6, false))); assert!(p.maybe_update_priority(Priority::new(7, false))); - // updating two times with a different priority -> the last priority update should be in the next frame + // updating two times with a different priority -> the last priority update should be in the + // next frame let expected = HFrame::PriorityUpdateRequest { element_id: 4, priority: Priority::new(7, false), diff --git a/neqo-http3/src/push_controller.rs b/neqo-http3/src/push_controller.rs index 79ebab4efc..ab6afccdf6 100644 --- a/neqo-http3/src/push_controller.rs +++ b/neqo-http3/src/push_controller.rs @@ -1,30 +1,35 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::client_events::{Http3ClientEvent, Http3ClientEvents}; -use crate::connection::Http3Connection; -use crate::frames::HFrame; -use crate::{CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, Res}; +use std::{ + cell::RefCell, + collections::VecDeque, + fmt::{Debug, Display}, + mem, + rc::Rc, + slice::SliceIndex, +}; + use neqo_common::{qerror, qinfo, qtrace, Header}; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::fmt::Display; -use std::mem; -use std::rc::Rc; -use std::slice::SliceIndex; + +use crate::{ + client_events::{Http3ClientEvent, Http3ClientEvents}, + connection::Http3Connection, + frames::HFrame, + CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, Res, +}; /// `PushStates`: -/// `Init`: there is no push stream nor a push promise. This state is only used to keep track of opened and closed -/// push streams. +/// `Init`: there is no push stream nor a push promise. This state is only used to keep track of +/// opened and closed push streams. /// `PushPromise`: the push has only ever receive a pushpromise frame -/// `OnlyPushStream`: there is only a push stream. All push stream events, i.e. `PushHeaderReady` and -/// `PushDataReadable` will be delayed until a push promise is received (they are kept in -/// `events`). +/// `OnlyPushStream`: there is only a push stream. All push stream events, i.e. `PushHeaderReady` +/// and `PushDataReadable` will be delayed until a push promise is received +/// (they are kept in `events`). /// `Active`: there is a push steam and at least one push promise frame. /// `Close`: the push stream has been closed or reset already. 
#[derive(Debug, PartialEq, Clone)] @@ -93,9 +98,7 @@ impl ActivePushStreams { None | Some(PushState::Closed) => None, Some(s) => { let res = mem::replace(s, PushState::Closed); - while self.push_streams.get(0).is_some() - && *self.push_streams.get(0).unwrap() == PushState::Closed - { + while let Some(PushState::Closed) = self.push_streams.front() { self.push_streams.pop_front(); self.first_push_id += 1; } @@ -124,21 +127,22 @@ impl ActivePushStreams { /// `PushController` keeps information about push stream states. /// -/// A `PushStream` calls `add_new_push_stream` that may change the push state from Init to `OnlyPushStream` or from -/// `PushPromise` to `Active`. If a stream has already been closed `add_new_push_stream` returns false (the `PushStream` -/// will close the transport stream). +/// A `PushStream` calls `add_new_push_stream` that may change the push state from Init to +/// `OnlyPushStream` or from `PushPromise` to `Active`. If a stream has already been closed +/// `add_new_push_stream` returns false (the `PushStream` will close the transport stream). /// A `PushStream` calls `push_stream_reset` if the transport stream has been canceled. /// When a push stream is done it calls `close`. /// /// The `PushController` handles: -/// `PUSH_PROMISE` frame: frames may change the push state from Init to `PushPromise` and from `OnlyPushStream` to -/// `Active`. Frames for a closed steams are ignored. -/// `CANCEL_PUSH` frame: (`handle_cancel_push` will be called). If a push is in state `PushPromise` or `Active`, any -/// posted events will be removed and a `PushCanceled` event will be posted. If a push is in -/// state `OnlyPushStream` or `Active` the transport stream and the `PushStream` will be closed. -/// The frame will be ignored for already closed pushes. -/// Application calling cancel: the actions are similar to the `CANCEL_PUSH` frame. The difference is that -/// `PushCanceled` will not be posted and a `CANCEL_PUSH` frame may be sent. 
+/// `PUSH_PROMISE` frame: frames may change the push state from Init to `PushPromise` and from +/// `OnlyPushStream` to `Active`. Frames for a closed steams are ignored. +/// `CANCEL_PUSH` frame: (`handle_cancel_push` will be called). If a push is in state `PushPromise` +/// or `Active`, any posted events will be removed and a `PushCanceled` event +/// will be posted. If a push is in state `OnlyPushStream` or `Active` the +/// transport stream and the `PushStream` will be closed. The frame will be +/// ignored for already closed pushes. Application calling cancel: the actions are similar to the +/// `CANCEL_PUSH` frame. The difference is that `PushCanceled` will not +/// be posted and a `CANCEL_PUSH` frame may be sent. #[derive(Debug)] pub(crate) struct PushController { max_concurent_push: u64, @@ -147,8 +151,8 @@ pub(crate) struct PushController { // We keep a stream until the stream has been closed. push_streams: ActivePushStreams, // The keeps the next consecutive push_id that should be open. - // All push_id < next_push_id_to_open are in the push_stream lists. If they are not in the list they have - // been already closed. + // All push_id < next_push_id_to_open are in the push_stream lists. If they are not in the list + // they have been already closed. conn_events: Http3ClientEvents, } @@ -171,7 +175,9 @@ impl Display for PushController { impl PushController { /// A new `push_promise` has been received. + /// /// # Errors + /// /// `HttpId` if `push_id` greater than it is allowed has been received. pub fn new_push_promise( &mut self, @@ -340,8 +346,9 @@ impl PushController { match self.push_streams.get(push_id) { None => { qtrace!("Push has already been closed."); - // If we have some events for the push_id in the event queue, the caller still does not - // not know that the push has been closed. Otherwise return InvalidStreamId. 
+ // If we have some events for the push_id in the event queue, the caller still does + // not not know that the push has been closed. Otherwise return + // InvalidStreamId. if self.conn_events.has_push(push_id) { self.conn_events.remove_events_for_push_id(push_id); Ok(()) diff --git a/neqo-http3/src/qlog.rs b/neqo-http3/src/qlog.rs index 3d43a2c906..81f9245a3c 100644 --- a/neqo-http3/src/qlog.rs +++ b/neqo-http3/src/qlog.rs @@ -4,35 +4,38 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::convert::TryFrom; - -use qlog::{self, event::Event, H3DataRecipient}; +// Functions that handle capturing QLOG traces. use neqo_common::qlog::NeqoQlog; use neqo_transport::StreamId; +use qlog::events::{DataRecipient, EventData}; pub fn h3_data_moved_up(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) { - qlog.add_event(|| { - Some(Event::h3_data_moved( - stream_id.to_string(), - None, - Some(u64::try_from(amount).unwrap()), - Some(H3DataRecipient::Transport), - Some(H3DataRecipient::Application), - None, - )) + qlog.add_event_data(|| { + let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved { + stream_id: Some(stream_id.as_u64()), + offset: None, + length: Some(u64::try_from(amount).unwrap()), + from: Some(DataRecipient::Transport), + to: Some(DataRecipient::Application), + raw: None, + }); + + Some(ev_data) }); } pub fn h3_data_moved_down(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) { - qlog.add_event(|| { - Some(Event::h3_data_moved( - stream_id.to_string(), - None, - Some(u64::try_from(amount).unwrap()), - Some(H3DataRecipient::Application), - Some(H3DataRecipient::Transport), - None, - )) + qlog.add_event_data(|| { + let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved { + stream_id: Some(stream_id.as_u64()), + offset: None, + length: Some(u64::try_from(amount).unwrap()), + from: Some(DataRecipient::Application), + to: Some(DataRecipient::Transport), + raw: None, + }); + 
+ Some(ev_data) }); } diff --git a/neqo-http3/src/qpack_decoder_receiver.rs b/neqo-http3/src/qpack_decoder_receiver.rs index 3cdfdf74cd..46b9ca590b 100644 --- a/neqo-http3/src/qpack_decoder_receiver.rs +++ b/neqo-http3/src/qpack_decoder_receiver.rs @@ -4,11 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; +use std::{cell::RefCell, rc::Rc}; + use neqo_qpack::QPackDecoder; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; + +use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; #[derive(Debug)] pub(crate) struct DecoderRecvStream { diff --git a/neqo-http3/src/qpack_encoder_receiver.rs b/neqo-http3/src/qpack_encoder_receiver.rs index efe234173f..76c779bcf2 100644 --- a/neqo-http3/src/qpack_encoder_receiver.rs +++ b/neqo-http3/src/qpack_encoder_receiver.rs @@ -4,11 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; +use std::{cell::RefCell, rc::Rc}; + use neqo_qpack::QPackEncoder; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; + +use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; #[derive(Debug)] pub(crate) struct EncoderRecvStream { diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index dd27c51337..55970849ef 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -4,24 +4,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS}; -use crate::push_controller::PushController; +use std::{cell::RefCell, cmp::min, collections::VecDeque, fmt::Debug, rc::Rc}; + +use neqo_common::{qdebug, qinfo, qtrace, Header}; +use neqo_qpack::decoder::QPackDecoder; +use neqo_transport::{Connection, StreamId}; + use crate::{ + frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS}, headers_checks::{headers_valid, is_interim}, priority::PriorityHandler, + push_controller::PushController, qlog, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpRecvStream, HttpRecvStreamEvents, MessageType, Priority, ReceiveOutput, RecvStream, Res, Stream, }; -use neqo_common::{qdebug, qinfo, qtrace, Header}; -use neqo_qpack::decoder::QPackDecoder; -use neqo_transport::{Connection, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::cmp::min; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::rc::Rc; #[allow(clippy::module_name_repetitions)] pub(crate) struct RecvMessageInfo { @@ -275,7 +271,7 @@ impl RecvMessage { } (None, false) => break Ok(()), (Some(frame), fin) => { - qinfo!( + qdebug!( [self], "A new frame has been received: {:?}; state={:?} fin={}", frame, @@ -348,7 +344,8 @@ impl RecvMessage { panic!("Stream readable after being closed!"); } RecvMessageState::ExtendedConnect => { - // Ignore read event, this request is waiting to be picked up by a new WebTransportSession + // Ignore read event, this request is waiting to be picked up by a new + // WebTransportSession break Ok(()); } }; @@ -495,8 +492,4 @@ impl HttpRecvStream for RecvMessage { fn extended_connect_wait_for_response(&self) -> bool { matches!(self.state, RecvMessageState::ExtendedConnect) } - - fn any(&self) -> &dyn Any { - self - } } diff --git a/neqo-http3/src/request_target.rs b/neqo-http3/src/request_target.rs index a58445b5d7..28bc22ac2d 100644 --- 
a/neqo-http3/src/request_target.rs +++ b/neqo-http3/src/request_target.rs @@ -7,6 +7,7 @@ #![allow(clippy::module_name_repetitions)] use std::fmt::{Debug, Formatter}; + use url::{ParseError, Url}; pub trait RequestTarget: Debug { @@ -58,7 +59,9 @@ pub trait AsRequestTarget<'x> { type Target: RequestTarget; type Error; /// Produce a `RequestTarget` that refers to `self`. + /// /// # Errors + /// /// This method can generate an error of type `Self::Error` /// if the conversion is unsuccessful. fn as_request_target(&'x self) -> Result; diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index deb0cf3c34..15965c44f6 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -4,23 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::HFrame; +use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; + +use neqo_common::{qdebug, qtrace, Encoder, Header, MessageType}; +use neqo_qpack::encoder::QPackEncoder; +use neqo_transport::{Connection, StreamId}; + use crate::{ + frames::HFrame, headers_checks::{headers_valid, is_interim, trailers_valid}, qlog, BufferedStream, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpSendStream, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; -use neqo_qpack::encoder::QPackEncoder; -use neqo_transport::{streams::SendOrder, Connection, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::cmp::min; -use std::fmt::Debug; -use std::mem; -use std::rc::Rc; - const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2 const MAX_DATA_HEADER_SIZE_2_LIMIT: usize = MAX_DATA_HEADER_SIZE_2 + 3; // 63 + 3 (size of the next buffer data frame header) const MAX_DATA_HEADER_SIZE_3: usize = (1 << 14) - 1; // Maximal amount of data with DATA frame header size 3 @@ -123,7 +119,7 @@ impl SendMessage { 
encoder: Rc>, conn_events: Box, ) -> Self { - qinfo!("Create a request stream_id={}", stream_id); + qdebug!("Create a request stream_id={}", stream_id); Self { state: MessageState::WaitingForHeaders, message_type, @@ -135,6 +131,7 @@ impl SendMessage { } /// # Errors + /// /// `ClosedCriticalStream` if the encoder stream is closed. /// `InternalError` if an unexpected error occurred. fn encode( @@ -196,7 +193,7 @@ impl SendStream for SendMessage { min(buf.len(), available - 9) }; - qinfo!( + qdebug!( [self], "send_request_body: available={} to_send={}.", available, @@ -237,11 +234,13 @@ impl SendStream for SendMessage { } /// # Errors + /// /// `InternalError` if an unexpected error occurred. /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) 
fn send(&mut self, conn: &mut Connection) -> Res<()> { let sent = Error::map_error(self.stream.send_buffer(conn), Error::HttpInternal(5))?; qlog::h3_data_moved_down(conn.qlog_mut(), self.stream_id(), sent); @@ -271,16 +270,6 @@ impl SendStream for SendMessage { self.stream.has_buffered_data() } - fn set_sendorder(&mut self, _conn: &mut Connection, _sendorder: Option) -> Res<()> { - // Not relevant for SendMessage - Ok(()) - } - - fn set_fairness(&mut self, _conn: &mut Connection, _fairness: bool) -> Res<()> { - // Not relevant for SendMessage - Ok(()) - } - fn close(&mut self, conn: &mut Connection) -> Res<()> { self.state.fin()?; if !self.stream.has_buffered_data() { @@ -303,7 +292,6 @@ impl SendStream for SendMessage { Some(self) } - #[allow(clippy::drop_copy)] fn send_data_atomic(&mut self, conn: &mut Connection, buf: &[u8]) -> Res<()> { let data_frame = HFrame::Data { len: buf.len() as u64, @@ -312,7 +300,7 @@ impl SendStream for SendMessage { data_frame.encode(&mut enc); self.stream.buffer(enc.as_ref()); self.stream.buffer(buf); - mem::drop(self.stream.send_buffer(conn)?); + _ = self.stream.send_buffer(conn)?; Ok(()) } } @@ -334,10 +322,6 @@ impl HttpSendStream for SendMessage { self.stream_type = Http3StreamType::ExtendedConnect; self.conn_events = conn_events; } - - fn any(&self) -> &dyn Any { - self - } } impl ::std::fmt::Display for SendMessage { diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index 0ee1301564..1396a4e4cf 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -6,6 +6,21 @@ #![allow(clippy::module_name_repetitions)] +use std::{ + cell::{RefCell, RefMut}, + collections::HashMap, + path::PathBuf, + rc::Rc, + time::Instant, +}; + +use neqo_common::{qtrace, Datagram}; +use neqo_crypto::{AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttChecker}; +use neqo_transport::{ + server::{ActiveConnectionRef, Server, ValidateAddress}, + ConnectionIdGenerator, Output, +}; + use crate::{ connection::Http3State, 
connection_server::Http3ServerHandler, @@ -16,19 +31,6 @@ use crate::{ settings::HttpZeroRttChecker, Http3Parameters, Http3StreamInfo, Res, }; -use neqo_common::{qtrace, Datagram}; -use neqo_crypto::{AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttChecker}; -use neqo_transport::{ - server::{ActiveConnectionRef, Server, ValidateAddress}, - ConnectionIdGenerator, Output, -}; -use std::{ - cell::{RefCell, RefMut}, - collections::HashMap, - path::PathBuf, - rc::Rc, - time::Instant, -}; type HandlerRef = Rc>; @@ -49,6 +51,7 @@ impl ::std::fmt::Display for Http3Server { impl Http3Server { /// # Errors + /// /// Making a `neqo_transport::Server` may produce an error. This can only be a crypto error if /// the socket can't be created or configured. pub fn new( @@ -92,6 +95,7 @@ impl Http3Server { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Only when NSS can't serialize a configuration. pub fn enable_ech( &mut self, @@ -109,7 +113,7 @@ impl Http3Server { self.server.ech_config() } - pub fn process(&mut self, dgram: Option, now: Instant) -> Output { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { qtrace!([self], "Process."); let out = self.server.process(dgram, now); self.process_http3(now); @@ -119,7 +123,7 @@ impl Http3Server { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) } - _ => self.server.process(None, now), + _ => self.server.process(Option::<&Datagram>::None, now), } } @@ -147,7 +151,7 @@ impl Http3Server { active_conns.dedup(); active_conns .iter() - .for_each(|conn| self.server.add_to_waiting(conn.clone())); + .for_each(|conn| self.server.add_to_waiting(conn)); for mut conn in active_conns { self.process_events(&mut conn, now); } @@ -309,24 +313,26 @@ fn prepare_data( #[cfg(test)] mod tests { - use super::{Http3Server, Http3ServerEvent, Http3State, Rc, RefCell}; - use crate::{Error, HFrame, Header, Http3Parameters, Priority}; + use std::{ + collections::HashMap, + mem, + ops::{Deref, 
DerefMut}, + }; + use neqo_common::{event::Provider, Encoder}; use neqo_crypto::{AuthenticationStatus, ZeroRttCheckResult, ZeroRttChecker}; use neqo_qpack::{encoder::QPackEncoder, QpackSettings}; use neqo_transport::{ Connection, ConnectionError, ConnectionEvent, State, StreamId, StreamType, ZeroRttState, }; - use std::{ - collections::HashMap, - mem, - ops::{Deref, DerefMut}, - }; use test_fixture::{ anti_replay, default_client, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN, DEFAULT_KEYS, }; + use super::{Http3Server, Http3ServerEvent, Http3State, Rc, RefCell}; + use crate::{Error, HFrame, Header, Http3Parameters, Priority}; + const DEFAULT_SETTINGS: QpackSettings = QpackSettings { max_table_size_encoder: 100, max_table_size_decoder: 100, @@ -399,29 +405,29 @@ mod tests { const SERVER_SIDE_DECODER_STREAM_ID: StreamId = StreamId::new(11); fn connect_transport(server: &mut Http3Server, client: &mut Connection, resume: bool) { - let c1 = client.process(None, now()).dgram(); - let s1 = server.process(c1, now()).dgram(); - let c2 = client.process(s1, now()).dgram(); + let c1 = client.process(None, now()); + let s1 = server.process(c1.as_dgram_ref(), now()); + let c2 = client.process(s1.as_dgram_ref(), now()); let needs_auth = client .events() .any(|e| e == ConnectionEvent::AuthenticationNeeded); let c2 = if needs_auth { assert!(!resume); // c2 should just be an ACK, so absorb that. 
- let s_ack = server.process(c2, now()).dgram(); - assert!(s_ack.is_none()); + let s_ack = server.process(c2.as_dgram_ref(), now()); + assert!(s_ack.as_dgram_ref().is_none()); client.authenticated(AuthenticationStatus::Ok, now()); - client.process(None, now()).dgram() + client.process(None, now()) } else { assert!(resume); c2 }; assert!(client.state().connected()); - let s2 = server.process(c2, now()).dgram(); + let s2 = server.process(c2.as_dgram_ref(), now()); assert_connected(server); - let c3 = client.process(s2, now()).dgram(); - assert!(c3.is_none()); + let c3 = client.process(s2.as_dgram_ref(), now()); + assert!(c3.as_dgram_ref().is_none()); } // Start a client/server and check setting frame. @@ -556,8 +562,8 @@ mod tests { sent = neqo_trans_conn.stream_send(decoder_stream, &[0x3]); assert_eq!(sent, Ok(1)); let out1 = neqo_trans_conn.process(None, now()); - let out2 = server.process(out1.dgram(), now()); - mem::drop(neqo_trans_conn.process(out2.dgram(), now())); + let out2 = server.process(out1.as_dgram_ref(), now()); + mem::drop(neqo_trans_conn.process(out2.as_dgram_ref(), now())); // assert no error occured. assert_not_closed(server); @@ -588,7 +594,7 @@ mod tests { let control = peer_conn.control_stream_id; peer_conn.stream_close_send(control).unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -603,7 +609,7 @@ mod tests { let sent = neqo_trans_conn.stream_send(control_stream, &[0x0, 0xd, 0x1, 0xf]); assert_eq!(sent, Ok(4)); let out = neqo_trans_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpMissingSettings); } @@ -615,7 +621,7 @@ mod tests { // send the second SETTINGS frame. 
peer_conn.control_send(&[0x4, 0x6, 0x1, 0x40, 0x64, 0x7, 0x40, 0x64]); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpFrameUnexpected); } @@ -630,7 +636,7 @@ mod tests { frame.encode(&mut e); peer_conn.control_send(e.as_ref()); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // check if the given connection got closed on invalid stream ids if valid { assert_not_closed(&mut hconn); @@ -673,7 +679,7 @@ mod tests { peer_conn.control_send(v); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpFrameUnexpected); } @@ -707,10 +713,10 @@ mod tests { .stream_send(new_stream_id, &[0x41, 0x19, 0x4, 0x4, 0x6, 0x0, 0x8, 0x0]) .unwrap(); let out = peer_conn.process(None, now()); - let out = hconn.process(out.dgram(), now()); - mem::drop(peer_conn.process(out.dgram(), now())); + let out = hconn.process(out.as_dgram_ref(), now()); + mem::drop(peer_conn.process(out.as_dgram_ref(), now())); let out = hconn.process(None, now()); - mem::drop(peer_conn.process(out.dgram(), now())); + mem::drop(peer_conn.process(out.as_dgram_ref(), now())); // check for stop-sending with Error::HttpStreamCreation. let mut stop_sending_event_found = false; @@ -738,12 +744,12 @@ mod tests { let push_stream_id = peer_conn.stream_create(StreamType::UniDi).unwrap(); _ = peer_conn.stream_send(push_stream_id, &[0x1]).unwrap(); let out = peer_conn.process(None, now()); - let out = hconn.process(out.dgram(), now()); - mem::drop(peer_conn.conn.process(out.dgram(), now())); + let out = hconn.process(out.as_dgram_ref(), now()); + mem::drop(peer_conn.conn.process(out.as_dgram_ref(), now())); assert_closed(&mut hconn, &Error::HttpStreamCreation); } - //// Test reading of a slowly streamed frame. 
bytes are received one by one + /// Test reading of a slowly streamed frame. bytes are received one by one #[test] fn test_server_frame_reading() { let (mut hconn, mut peer_conn) = connect_and_receive_settings(); @@ -755,38 +761,38 @@ mod tests { let mut sent = peer_conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // start sending SETTINGS frame sent = peer_conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x6]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x8]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x0]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_not_closed(&mut hconn); @@ -794,37 +800,37 @@ mod tests { sent = peer_conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = 
peer_conn.stream_send(control_stream, &[0x5]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x4]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x61]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x62]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x63]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); sent = peer_conn.stream_send(control_stream, &[0x64]); assert_eq!(sent, Ok(1)); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // PUSH_PROMISE on a control stream will cause an error assert_closed(&mut hconn, &Error::HttpFrameUnexpected); @@ -840,7 +846,7 @@ mod tests { peer_conn.stream_close_send(stream_id).unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpFrame); } @@ -892,7 +898,7 @@ mod tests { peer_conn.stream_close_send(stream_id).unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check connection event. There should be 1 Header and 2 data events. 
let mut headers_frames = 0; @@ -943,7 +949,7 @@ mod tests { .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check connection event. There should be 1 Header and no data events. let mut headers_frames = 0; @@ -987,8 +993,8 @@ mod tests { .unwrap(); peer_conn.stream_close_send(stream_id).unwrap(); - let out = peer_conn.process(out.dgram(), now()); - hconn.process(out.dgram(), now()); + let out = peer_conn.process(out.as_dgram_ref(), now()); + hconn.process(out.as_dgram_ref(), now()); while let Some(event) = hconn.next_event() { match event { @@ -1020,7 +1026,7 @@ mod tests { .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check connection event. There should be 1 Header and no data events. // The server will reset the stream. @@ -1052,8 +1058,8 @@ mod tests { } let out = hconn.process(None, now()); - let out = peer_conn.process(out.dgram(), now()); - hconn.process(out.dgram(), now()); + let out = peer_conn.process(out.as_dgram_ref(), now()); + hconn.process(out.as_dgram_ref(), now()); // Check that STOP_SENDING and REET has been received. 
let mut reset = 0; @@ -1085,7 +1091,7 @@ mod tests { .stream_reset_send(CLIENT_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1098,7 +1104,7 @@ mod tests { .stream_reset_send(CLIENT_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1111,7 +1117,7 @@ mod tests { .stream_reset_send(CLIENT_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1125,7 +1131,7 @@ mod tests { .stream_stop_sending(SERVER_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1138,7 +1144,7 @@ mod tests { .stream_stop_sending(SERVER_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1151,7 +1157,7 @@ mod tests { .stream_stop_sending(SERVER_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code()) .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); + hconn.process(out.as_dgram_ref(), now()); assert_closed(&mut hconn, &Error::HttpClosedCriticalStream); } @@ -1259,17 +1265,17 @@ mod tests { .unwrap(); let out = peer_conn.process(None, now()); - hconn.process(out.dgram(), now()); 
+ hconn.process(out.as_dgram_ref(), now()); let mut requests = HashMap::new(); while let Some(event) = hconn.next_event() { match event { Http3ServerEvent::Headers { stream, .. } => { - assert!(requests.get(&stream).is_none()); + assert!(!requests.contains_key(&stream)); requests.insert(stream, 0); } Http3ServerEvent::Data { stream, .. } => { - assert!(requests.get(&stream).is_some()); + assert!(requests.contains_key(&stream)); } Http3ServerEvent::DataWritable { .. } | Http3ServerEvent::StreamReset { .. } diff --git a/neqo-http3/src/server_connection_events.rs b/neqo-http3/src/server_connection_events.rs index f56288e204..cbc8e6d56e 100644 --- a/neqo-http3/src/server_connection_events.rs +++ b/neqo-http3/src/server_connection_events.rs @@ -4,17 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::connection::Http3State; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + +use neqo_common::Header; +use neqo_transport::{AppError, StreamId}; + use crate::{ + connection::Http3State, features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, CloseType, Http3StreamInfo, HttpRecvStreamEvents, Priority, RecvStreamEvents, SendStreamEvents, }; -use neqo_common::Header; -use neqo_transport::AppError; -use neqo_transport::StreamId; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; #[derive(Debug, PartialEq, Eq, Clone)] pub(crate) enum Http3ServerConnEvent { diff --git a/neqo-http3/src/server_events.rs b/neqo-http3/src/server_events.rs index e0cc84ed4c..214a48c757 100644 --- a/neqo-http3/src/server_events.rs +++ b/neqo-http3/src/server_events.rs @@ -6,20 +6,24 @@ #![allow(clippy::module_name_repetitions)] -use crate::connection::{Http3State, WebTransportSessionAcceptAction}; -use crate::connection_server::Http3ServerHandler; -use crate::{ - features::extended_connect::SessionCloseReason, Http3StreamInfo, Http3StreamType, Priority, Res, +use 
std::{ + cell::RefCell, + collections::VecDeque, + ops::{Deref, DerefMut}, + rc::Rc, +}; + +use neqo_common::{qdebug, Encoder, Header}; +use neqo_transport::{ + server::ActiveConnectionRef, AppError, Connection, DatagramTracking, StreamId, StreamType, }; -use neqo_common::{qdebug, qinfo, Encoder, Header}; -use neqo_transport::server::ActiveConnectionRef; -use neqo_transport::{AppError, Connection, DatagramTracking, StreamId, StreamType}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::ops::{Deref, DerefMut}; -use std::rc::Rc; +use crate::{ + connection::{Http3State, WebTransportSessionAcceptAction}, + connection_server::Http3ServerHandler, + features::extended_connect::SessionCloseReason, + Http3StreamInfo, Http3StreamType, Priority, Res, +}; #[derive(Debug, Clone)] pub struct StreamHandler { @@ -57,7 +61,9 @@ impl StreamHandler { } /// Supply a response header to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_headers(&mut self, headers: &[Header]) -> Res<()> { self.handler.borrow_mut().send_headers( @@ -68,7 +74,9 @@ impl StreamHandler { } /// Supply response data to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_data(&mut self, buf: &[u8]) -> Res { self.handler @@ -77,7 +85,9 @@ impl StreamHandler { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_close_send(&mut self) -> Res<()> { self.handler @@ -86,7 +96,9 @@ impl StreamHandler { } /// Request a peer to stop sending a stream. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_stop_sending(&mut self, app_error: AppError) -> Res<()> { qdebug!( @@ -103,7 +115,9 @@ impl StreamHandler { } /// Reset sending side of a stream. 
+ /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_reset_send(&mut self, app_error: AppError) -> Res<()> { qdebug!( @@ -120,7 +134,9 @@ impl StreamHandler { } /// Reset a stream/request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore pub fn cancel_fetch(&mut self, app_error: AppError) -> Res<()> { qdebug!([self], "reset error:{}.", app_error); @@ -159,25 +175,31 @@ impl Http3OrWebTransportStream { } /// Supply a response header to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_headers(&mut self, headers: &[Header]) -> Res<()> { self.stream_handler.send_headers(headers) } /// Supply response data to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_data(&mut self, data: &[u8]) -> Res { - qinfo!([self], "Set new response."); + qdebug!([self], "Set new response."); self.stream_handler.send_data(data) } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_close_send(&mut self) -> Res<()> { - qinfo!([self], "Set new response."); + qdebug!([self], "Set new response."); self.stream_handler.stream_close_send() } } @@ -243,10 +265,12 @@ impl WebTransportRequest { } /// Respond to a `WebTransport` session request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn response(&mut self, accept: &WebTransportSessionAcceptAction) -> Res<()> { - qinfo!([self], "Set a response for a WebTransport session."); + qdebug!([self], "Set a response for a WebTransport session."); self.stream_handler .handler .borrow_mut() @@ -258,6 +282,7 @@ impl WebTransportRequest { } /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. 
/// Also return an error if the stream was closed on the transport layer, /// but that information is not yet consumed on the http/3 layer. @@ -279,7 +304,9 @@ impl WebTransportRequest { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn create_stream(&mut self, stream_type: StreamType) -> Res { let session_id = self.stream_handler.stream_id(); @@ -301,7 +328,9 @@ impl WebTransportRequest { } /// Send `WebTransport` datagram. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. @@ -326,9 +355,13 @@ impl WebTransportRequest { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the /// packet number, ack frames, etc. + /// /// # Errors + /// /// The function returns `NotAvailable` if datagrams are not enabled. + /// /// # Panics + /// /// This cannot panic. The max varint length is 8. pub fn max_datagram_size(&self) -> Res { let max_size = self.stream_handler.conn.borrow().max_datagram_size()?; diff --git a/neqo-http3/src/settings.rs b/neqo-http3/src/settings.rs index 1e952dae6d..9cd4b994b7 100644 --- a/neqo-http3/src/settings.rs +++ b/neqo-http3/src/settings.rs @@ -6,10 +6,12 @@ #![allow(clippy::module_name_repetitions)] -use crate::{Error, Http3Parameters, Res}; +use std::ops::Deref; + use neqo_common::{Decoder, Encoder}; use neqo_crypto::{ZeroRttCheckResult, ZeroRttChecker}; -use std::ops::Deref; + +use crate::{Error, Http3Parameters, Res}; type SettingsType = u64; @@ -120,6 +122,7 @@ impl HSettings { } /// # Errors + /// /// Returns an error if settings types are reserved of settings value are not permitted. 
pub fn decode_frame_contents(&mut self, dec: &mut Decoder) -> Res<()> { while dec.remaining() > 0 { diff --git a/neqo-http3/src/stream_type_reader.rs b/neqo-http3/src/stream_type_reader.rs index 364064f26b..f36181d3b1 100644 --- a/neqo-http3/src/stream_type_reader.rs +++ b/neqo-http3/src/stream_type_reader.rs @@ -6,14 +6,15 @@ #![allow(clippy::module_name_repetitions)] -use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL; -use crate::frames::H3_FRAME_TYPE_HEADERS; -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; use neqo_common::{qtrace, Decoder, IncrementalDecoderUint, Role}; -use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER; -use neqo_qpack::encoder::QPACK_UNI_STREAM_TYPE_ENCODER; +use neqo_qpack::{decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STREAM_TYPE_ENCODER}; use neqo_transport::{Connection, StreamId, StreamType}; +use crate::{ + control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, CloseType, + Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream, +}; + pub(crate) const HTTP3_UNI_STREAM_TYPE_PUSH: u64 = 0x1; pub(crate) const WEBTRANSPORT_UNI_STREAM: u64 = 0x54; pub(crate) const WEBTRANSPORT_STREAM: u64 = 0x41; @@ -33,7 +34,9 @@ impl NewStreamType { /// Get the final `NewStreamType` from a stream type. All streams, except Push stream, /// are identified by the type only. This function will return None for the Push stream /// because it needs the ID besides the type. - /// # Error + /// + /// # Errors + /// /// Push streams received by the server are not allowed and this function will return /// `HttpStreamCreation` error. fn final_stream_type( @@ -67,12 +70,11 @@ impl NewStreamType { /// `NewStreamHeadReader` reads the head of an unidirectional stream to identify the stream. /// There are 2 type of streams: -/// - streams identified by the single type (varint encoded). Most streams belong to -/// this category. 
The `NewStreamHeadReader` will switch from `ReadType`to `Done` state. -/// - streams identified by the type and the ID (both varint encoded). For example, a -/// push stream is identified by the type and `PushId`. After reading the type in -/// the `ReadType` state, `NewStreamHeadReader` changes to `ReadId` state and from there -/// to `Done` state +/// - streams identified by the single type (varint encoded). Most streams belong to this category. +/// The `NewStreamHeadReader` will switch from `ReadType`to `Done` state. +/// - streams identified by the type and the ID (both varint encoded). For example, a push stream +/// is identified by the type and `PushId`. After reading the type in the `ReadType` state, +/// `NewStreamHeadReader` changes to `ReadId` state and from there to `Done` state #[derive(Debug)] pub(crate) enum NewStreamHeadReader { ReadType { @@ -140,12 +142,12 @@ impl NewStreamHeadReader { role, stream_id, .. } => { // final_stream_type may return: - // - an error if a stream type is not allowed for the role, e.g. Push - // stream received at the server. + // - an error if a stream type is not allowed for the role, e.g. Push stream + // received at the server. // - a final type if a stream is only identify by the type // - None - if a stream is not identified by the type only, but it needs - // additional data from the header to produce the final type, e.g. - // a push stream needs pushId as well. + // additional data from the header to produce the final type, e.g. a push + // stream needs pushId as well. 
let final_type = NewStreamType::final_stream_type(output, stream_id.stream_type(), *role); match (&final_type, fin) { @@ -234,20 +236,23 @@ impl RecvStream for NewStreamHeadReader { #[cfg(test)] mod tests { - use super::{ - NewStreamHeadReader, HTTP3_UNI_STREAM_TYPE_PUSH, WEBTRANSPORT_STREAM, - WEBTRANSPORT_UNI_STREAM, + use std::mem; + + use neqo_common::{Encoder, Role}; + use neqo_qpack::{ + decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STREAM_TYPE_ENCODER, }; use neqo_transport::{Connection, StreamId, StreamType}; - use std::mem; use test_fixture::{connect, now}; - use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL; - use crate::frames::H3_FRAME_TYPE_HEADERS; - use crate::{CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res}; - use neqo_common::{Encoder, Role}; - use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER; - use neqo_qpack::encoder::QPACK_UNI_STREAM_TYPE_ENCODER; + use super::{ + NewStreamHeadReader, HTTP3_UNI_STREAM_TYPE_PUSH, WEBTRANSPORT_STREAM, + WEBTRANSPORT_UNI_STREAM, + }; + use crate::{ + control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, + CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res, + }; struct Test { conn_c: Connection, @@ -262,7 +267,7 @@ mod tests { // create a stream let stream_id = conn_s.stream_create(stream_type).unwrap(); let out = conn_s.process(None, now()); - mem::drop(conn_c.process(out.dgram(), now())); + mem::drop(conn_c.process(out.as_dgram_ref(), now())); Self { conn_c, @@ -285,7 +290,7 @@ mod tests { .stream_send(self.stream_id, &enc[i..=i]) .unwrap(); let out = self.conn_s.process(None, now()); - mem::drop(self.conn_c.process(out.dgram(), now())); + mem::drop(self.conn_c.process(out.as_dgram_ref(), now())); assert_eq!( self.decoder.receive(&mut self.conn_c).unwrap(), (ReceiveOutput::NoOutput, false) @@ -299,7 +304,7 @@ mod tests { self.conn_s.stream_close_send(self.stream_id).unwrap(); } let out = self.conn_s.process(None, now()); - 
mem::drop(self.conn_c.process(out.dgram(), now())); + mem::drop(self.conn_c.process(out.dgram().as_ref(), now())); assert_eq!(&self.decoder.receive(&mut self.conn_c), outcome); assert_eq!(self.decoder.done(), done); } @@ -397,7 +402,8 @@ mod tests { let mut t = Test::new(StreamType::UniDi, Role::Server); t.decode( - &[H3_FRAME_TYPE_HEADERS], // this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which is not aallowed on the server side. + &[H3_FRAME_TYPE_HEADERS], /* this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which + * is not aallowed on the server side. */ false, &Err(Error::HttpStreamCreation), true, @@ -413,7 +419,8 @@ mod tests { let mut t = Test::new(StreamType::UniDi, Role::Client); t.decode( - &[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], // this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH + &[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], /* this is the same as a + * HTTP3_UNI_STREAM_TYPE_PUSH */ false, &Ok(( ReceiveOutput::NewStream(NewStreamType::Push(0xaaaa_aaaa)), diff --git a/neqo-http3/tests/httpconn.rs b/neqo-http3/tests/httpconn.rs index c78b3f0be8..a0b2bcdb80 100644 --- a/neqo-http3/tests/httpconn.rs +++ b/neqo-http3/tests/httpconn.rs @@ -6,6 +6,11 @@ #![allow(unused_assignments)] +use std::{ + mem, + time::{Duration, Instant}, +}; + use neqo_common::{event::Provider, qtrace, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_http3::{ @@ -13,8 +18,6 @@ use neqo_http3::{ Http3ServerEvent, Http3State, Priority, }; use neqo_transport::{ConnectionError, ConnectionParameters, Error, Output, StreamType}; -use std::mem; -use std::time::{Duration, Instant}; use test_fixture::*; const RESPONSE_DATA: &[u8] = &[0x61, 0x62, 0x63]; @@ -94,19 +97,19 @@ fn process_client_events(conn: &mut Http3Client) { fn connect_peers(hconn_c: &mut Http3Client, hconn_s: &mut Http3Server) -> Option { assert_eq!(hconn_c.state(), Http3State::Initializing); let out = hconn_c.process(None, now()); // Initial - let out = hconn_s.process(out.dgram(), now()); // Initial + 
Handshake - let out = hconn_c.process(out.dgram(), now()); // ACK - mem::drop(hconn_s.process(out.dgram(), now())); //consume ACK + let out = hconn_s.process(out.as_dgram_ref(), now()); // Initial + Handshake + let out = hconn_c.process(out.as_dgram_ref(), now()); // ACK + mem::drop(hconn_s.process(out.as_dgram_ref(), now())); // consume ACK let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); hconn_c.authenticated(AuthenticationStatus::Ok, now()); let out = hconn_c.process(None, now()); // Handshake assert_eq!(hconn_c.state(), Http3State::Connected); - let out = hconn_s.process(out.dgram(), now()); // Handshake - let out = hconn_c.process(out.dgram(), now()); - let out = hconn_s.process(out.dgram(), now()); + let out = hconn_s.process(out.as_dgram_ref(), now()); // Handshake + let out = hconn_c.process(out.as_dgram_ref(), now()); + let out = hconn_s.process(out.as_dgram_ref(), now()); // assert!(hconn_s.settings_received); - let out = hconn_c.process(out.dgram(), now()); + let out = hconn_c.process(out.as_dgram_ref(), now()); // assert!(hconn_c.settings_received); out.dgram() @@ -122,11 +125,11 @@ fn connect_peers_with_network_propagation_delay( let mut now = now(); let out = hconn_c.process(None, now); // Initial now += net_delay; - let out = hconn_s.process(out.dgram(), now); // Initial + Handshake + let out = hconn_s.process(out.as_dgram_ref(), now); // Initial + Handshake now += net_delay; - let out = hconn_c.process(out.dgram(), now); // ACK + let out = hconn_c.process(out.as_dgram_ref(), now); // ACK now += net_delay; - let out = hconn_s.process(out.dgram(), now); //consume ACK + let out = hconn_s.process(out.as_dgram_ref(), now); // consume ACK assert!(out.dgram().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); @@ -135,13 +138,13 @@ fn 
connect_peers_with_network_propagation_delay( let out = hconn_c.process(None, now); // Handshake assert_eq!(hconn_c.state(), Http3State::Connected); now += net_delay; - let out = hconn_s.process(out.dgram(), now); // HANDSHAKE_DONE + let out = hconn_s.process(out.as_dgram_ref(), now); // HANDSHAKE_DONE now += net_delay; - let out = hconn_c.process(out.dgram(), now); // Consume HANDSHAKE_DONE, send control streams. + let out = hconn_c.process(out.as_dgram_ref(), now); // Consume HANDSHAKE_DONE, send control streams. now += net_delay; - let out = hconn_s.process(out.dgram(), now); // consume and send control streams. + let out = hconn_s.process(out.as_dgram_ref(), now); // consume and send control streams. now += net_delay; - let out = hconn_c.process(out.dgram(), now); // consume control streams. + let out = hconn_c.process(out.as_dgram_ref(), now); // consume control streams. (out.dgram(), now) } @@ -156,8 +159,8 @@ fn connect() -> (Http3Client, Http3Server, Option) { fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server, out_ex: Option) { let mut out = out_ex; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } @@ -185,17 +188,17 @@ fn test_fetch() { .unwrap(); assert_eq!(req, 0); hconn_c.stream_close_send(req).unwrap(); - let out = hconn_c.process(dgram, now()); + let out = hconn_c.process(dgram.as_ref(), now()); qtrace!("-----server"); - let out = hconn_s.process(out.dgram(), now()); - mem::drop(hconn_c.process(out.dgram(), now())); + let out = hconn_s.process(out.as_dgram_ref(), now()); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); process_server_events(&mut hconn_s); let out = hconn_s.process(None, now()); qtrace!("-----client"); - mem::drop(hconn_c.process(out.dgram(), now())); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); let out = 
hconn_s.process(None, now()); - mem::drop(hconn_c.process(out.dgram(), now())); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); process_client_events(&mut hconn_c); } @@ -214,10 +217,10 @@ fn test_103_response() { .unwrap(); assert_eq!(req, 0); hconn_c.stream_close_send(req).unwrap(); - let out = hconn_c.process(dgram, now()); + let out = hconn_c.process(dgram.as_ref(), now()); - let out = hconn_s.process(out.dgram(), now()); - mem::drop(hconn_c.process(out.dgram(), now())); + let out = hconn_s.process(out.as_dgram_ref(), now()); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); let mut request = receive_request(&mut hconn_s).unwrap(); let info_headers = [ @@ -228,7 +231,7 @@ fn test_103_response() { request.send_headers(&info_headers).unwrap(); let out = hconn_s.process(None, now()); - mem::drop(hconn_c.process(out.dgram(), now())); + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); let info_headers_event = |e| { matches!(e, Http3ClientEvent::HeaderReady { headers, @@ -239,8 +242,8 @@ fn test_103_response() { set_response(&mut request); let out = hconn_s.process(None, now()); - mem::drop(hconn_c.process(out.dgram(), now())); - process_client_events(&mut hconn_c) + mem::drop(hconn_c.process(out.as_dgram_ref(), now())); + process_client_events(&mut hconn_c); } #[test] @@ -371,8 +374,8 @@ fn zerortt() { .unwrap(); hconn_c.stream_close_send(req).unwrap(); - let out = hconn_c.process(dgram, now()); - let out = hconn_s.process(out.dgram(), now()); + let out = hconn_c.process(dgram.as_ref(), now()); + let out = hconn_s.process(out.as_dgram_ref(), now()); let mut request_stream = None; let mut zerortt_state_change = false; @@ -436,7 +439,7 @@ fn fetch_noresponse_will_idletimeout() { .unwrap(); assert_eq!(req, 0); hconn_c.stream_close_send(req).unwrap(); - let _out = hconn_c.process(dgram, now); + let _out = hconn_c.process(dgram.as_ref(), now); qtrace!("-----server"); let mut done = false; diff --git a/neqo-http3/tests/priority.rs 
b/neqo-http3/tests/priority.rs index df9259ad4b..77d19e6fcf 100644 --- a/neqo-http3/tests/priority.rs +++ b/neqo-http3/tests/priority.rs @@ -4,22 +4,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use neqo_common::event::Provider; +use std::time::Instant; +use neqo_common::event::Provider; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ Header, Http3Client, Http3ClientEvent, Http3Server, Http3ServerEvent, Http3State, Priority, }; - -use std::time::Instant; use test_fixture::*; fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); let client_done = out.is_none(); - out = server.process(out, now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() && client_done { break; } @@ -32,26 +31,26 @@ fn connect_with(client: &mut Http3Client, server: &mut Http3Server) { let out = client.process(None, now()); assert_eq!(client.state(), Http3State::Initializing); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(client.events().any(authentication_needed)); client.authenticated(AuthenticationStatus::Ok, now()); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); assert_eq!(client.state(), Http3State::Connected); // Exchange H3 setttings - let out = server.process(out.dgram(), 
now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - _ = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + _ = server.process(out.as_dgram_ref(), now()); } fn connect() -> (Http3Client, Http3Server) { @@ -69,7 +68,7 @@ fn priority_update() { Instant::now(), "GET", &("https", "something.com", "/"), - &[], + &[Header::new("priority", "u=4,i")], Priority::new(4, true), ) .unwrap(); @@ -99,7 +98,7 @@ fn priority_update() { assert_eq!(&headers, expected_headers); assert!(!fin); } - other => panic!("unexpected server event: {:?}", other), + other => panic!("unexpected server event: {other:?}"), } let update_priority = Priority::new(3, false); @@ -130,7 +129,7 @@ fn priority_update_dont_send_for_cancelled_stream() { Instant::now(), "GET", &("https", "something.com", "/"), - &[], + &[Header::new("priority", "u=5")], Priority::new(5, false), ) .unwrap(); diff --git a/neqo-http3/tests/send_message.rs b/neqo-http3/tests/send_message.rs index ef4a571dff..fbf9a7a3ea 100644 --- a/neqo-http3/tests/send_message.rs +++ b/neqo-http3/tests/send_message.rs @@ -4,7 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use lazy_static::lazy_static; +use std::sync::OnceLock; + use neqo_common::event::Provider; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ @@ -15,21 +16,21 @@ use test_fixture::*; const RESPONSE_DATA: &[u8] = &[0x61, 0x62, 0x63]; -lazy_static! { - static ref RESPONSE_HEADER_NO_DATA: Vec
= - vec![Header::new(":status", "200"), Header::new("something", "3")]; +fn response_header_no_data() -> &'static Vec
{ + static HEADERS: OnceLock> = OnceLock::new(); + HEADERS.get_or_init(|| vec![Header::new(":status", "200"), Header::new("something", "3")]) } -lazy_static! { - static ref RESPONSE_HEADER_103: Vec
= - vec![Header::new(":status", "103"), Header::new("link", "...")]; +fn response_header_103() -> &'static Vec
{ + static HEADERS: OnceLock> = OnceLock::new(); + HEADERS.get_or_init(|| vec![Header::new(":status", "103"), Header::new("link", "...")]) } fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } @@ -68,7 +69,7 @@ fn send_trailers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> { } fn send_informational_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> { - request.send_headers(&RESPONSE_HEADER_103) + request.send_headers(response_header_103()) } fn send_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> { @@ -90,7 +91,7 @@ fn process_client_events(conn: &mut Http3Client) { Header::new(":status", "200"), Header::new("content-length", "3"), ]) - || (headers.as_ref() == *RESPONSE_HEADER_103) + || (headers.as_ref() == *response_header_103()) ); assert!(!fin); response_header_found = true; @@ -116,7 +117,7 @@ fn process_client_events_no_data(conn: &mut Http3Client) { while let Some(event) = conn.next_event() { match event { Http3ClientEvent::HeaderReady { headers, fin, .. 
} => { - assert_eq!(headers.as_ref(), *RESPONSE_HEADER_NO_DATA); + assert_eq!(headers.as_ref(), *response_header_no_data()); fin_received = fin; response_header_found = true; } @@ -201,7 +202,7 @@ fn response_trailers3() { #[test] fn response_trailers_no_data() { let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request(); - request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap(); + request.send_headers(response_header_no_data()).unwrap(); exchange_packets(&mut hconn_c, &mut hconn_s); send_trailers(&mut request).unwrap(); exchange_packets(&mut hconn_c, &mut hconn_s); @@ -258,10 +259,10 @@ fn trailers_after_close() { #[test] fn multiple_response_headers() { let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request(); - request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap(); + request.send_headers(response_header_no_data()).unwrap(); assert_eq!( - request.send_headers(&RESPONSE_HEADER_NO_DATA), + request.send_headers(response_header_no_data()), Err(Error::InvalidHeader) ); @@ -273,7 +274,7 @@ fn multiple_response_headers() { #[test] fn informational_after_response_headers() { let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request(); - request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap(); + request.send_headers(response_header_no_data()).unwrap(); assert_eq!( send_informational_headers(&mut request), @@ -307,7 +308,7 @@ fn non_trailers_headers_after_data() { exchange_packets(&mut hconn_c, &mut hconn_s); assert_eq!( - request.send_headers(&RESPONSE_HEADER_NO_DATA), + request.send_headers(response_header_no_data()), Err(Error::InvalidHeader) ); diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index e0556708f1..b1e18a5a98 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -4,6 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, rc::Rc}; + use neqo_common::{event::Provider, Header}; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ @@ -12,10 +14,8 @@ use neqo_http3::{ WebTransportSessionAcceptAction, }; use neqo_transport::{StreamId, StreamType}; -use std::cell::RefCell; -use std::rc::Rc; use test_fixture::{ - addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, + anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, }; @@ -24,8 +24,8 @@ fn connect() -> (Http3Client, Http3Server) { let mut client = Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, Http3Parameters::default().webtransport(true), now(), ) @@ -44,16 +44,16 @@ fn connect() -> (Http3Client, Http3Server) { let out = client.process(None, now()); assert_eq!(client.state(), Http3State::Initializing); - let out = server.process(out.dgram(), now()); - let out = client.process(out.dgram(), now()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); + let out = client.process(out.as_dgram_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(client.events().any(authentication_needed)); client.authenticated(AuthenticationStatus::Ok, now()); - let mut out = client.process(out.dgram(), now()).dgram(); + let mut out = client.process(out.as_dgram_ref(), now()).dgram(); let connected = |e| matches!(e, Http3ClientEvent::StateChange(Http3State::Connected)); assert!(client.events().any(connected)); @@ -61,9 +61,9 @@ fn connect() -> (Http3Client, Http3Server) { // Exchange H3 setttings loop { - out = server.process(out, now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); let dgram_present = 
out.is_some(); - out = client.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); if out.is_none() && !dgram_present { break; } @@ -74,8 +74,8 @@ fn connect() -> (Http3Client, Http3Server) { fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { let mut out = None; loop { - out = client.process(out, now()).dgram(); - out = server.process(out, now()).dgram(); + out = client.process(out.as_ref(), now()).dgram(); + out = server.process(out.as_ref(), now()).dgram(); if out.is_none() { break; } diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml deleted file mode 100644 index d569992185..0000000000 --- a/neqo-interop/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "neqo-interop" -version = "0.6.4" -authors = ["EKR "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" - -[dependencies] -neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } -neqo-common = { path="./../neqo-common" } -neqo-http3 = { path = "./../neqo-http3" } -neqo-qpack = { path = "./../neqo-qpack" } - -structopt = "0.3.7" -lazy_static = "1.3.0" - -[features] -default = ["deny-warnings"] -deny-warnings = [] diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs deleted file mode 100644 index 0469c7bab0..0000000000 --- a/neqo-interop/src/main.rs +++ /dev/null @@ -1,933 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] - -use neqo_common::{event::Provider, hex, Datagram}; -use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; -use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; -use neqo_transport::{ - Connection, ConnectionError, ConnectionEvent, ConnectionParameters, EmptyConnectionIdGenerator, - Error, Output, State, StreamId, StreamType, -}; - -use std::{ - cell::RefCell, - cmp::min, - collections::HashSet, - mem, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs, UdpSocket}, - rc::Rc, -}; -// use std::path::PathBuf; -use std::{ - str::FromStr, - string::ParseError, - sync::Mutex, - thread, - time::{Duration, Instant}, -}; -use structopt::StructOpt; - -#[derive(Debug, StructOpt, Clone)] -#[structopt(name = "neqo-interop", about = "A QUIC interop client.")] -struct Args { - #[structopt(short = "p", long)] - // Peers to include - include: Vec, - - #[structopt(short = "P", long)] - exclude: Vec, - - #[structopt(short = "t", long)] - include_tests: Vec, - - #[structopt(short = "T", long)] - exclude_tests: Vec, - - #[structopt(long, default_value = "5")] - timeout: u64, -} - -trait Handler { - fn handle(&mut self, client: &mut Connection) -> bool; - fn rewrite_out(&mut self, _dgram: &Datagram) -> Option { - None - } -} - -fn emit_datagram(socket: &UdpSocket, d: Datagram) { - let sent = socket.send(&d[..]).expect("Error sending datagram"); - if sent != d.len() { - eprintln!("Unable to send all {} bytes of datagram", d.len()); - } -} - -lazy_static::lazy_static! 
{ - static ref TEST_TIMEOUT: Mutex = Mutex::new(Duration::from_secs(5)); -} - -struct Timer { - end: Instant, -} -impl Timer { - pub fn new() -> Self { - Self { - end: Instant::now() + *TEST_TIMEOUT.lock().unwrap(), - } - } - - pub fn set_timeout(t: Duration) { - *TEST_TIMEOUT.lock().unwrap() = t; - } - - pub fn check(&self) -> Result { - if let Some(d) = self.end.checked_duration_since(Instant::now()) { - if d.as_nanos() > 0 { - Ok(d) - } else { - Err(String::from("Timed out")) - } - } else { - Err(String::from("Timed out")) - } - } -} - -fn process_loop( - nctx: &NetworkCtx, - client: &mut Connection, - handler: &mut dyn Handler, -) -> Result { - let buf = &mut [0u8; 2048]; - let timer = Timer::new(); - - loop { - if let State::Closed(..) = client.state() { - return Ok(client.state().clone()); - } - - loop { - let output = client.process_output(Instant::now()); - match output { - Output::Datagram(dgram) => { - let dgram = handler.rewrite_out(&dgram).unwrap_or(dgram); - emit_datagram(&nctx.socket, dgram); - } - Output::Callback(duration) => { - let delay = min(timer.check()?, duration); - nctx.socket.set_read_timeout(Some(delay)).unwrap(); - break; - } - Output::None => { - return Ok(client.state().clone()); - } - } - } - - if !handler.handle(client) { - return Ok(client.state().clone()); - } - - let sz = match nctx.socket.recv(&mut buf[..]) { - Ok(sz) => sz, - Err(e) => { - return Err(String::from(match e.kind() { - std::io::ErrorKind::WouldBlock => continue, - _ => "Read error", - })); - } - }; - - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let received = Datagram::new(nctx.remote_addr, nctx.local_addr, &buf[..sz]); - client.process_input(received, Instant::now()); - } - } -} - -struct PreConnectHandler {} -impl Handler for PreConnectHandler { - fn handle(&mut self, client: &mut Connection) -> bool { - let authentication_needed = |e| matches!(e, ConnectionEvent::AuthenticationNeeded); - if 
client.events().any(authentication_needed) { - client.authenticated(AuthenticationStatus::Ok, Instant::now()); - } - !matches!(client.state(), State::Connected | State::Closing { .. }) - } -} - -// HTTP/0.9 IMPLEMENTATION -#[derive(Default)] -struct H9Handler { - rbytes: usize, - rsfin: bool, - streams: HashSet, -} - -// This is a bit fancier than actually needed. -impl Handler for H9Handler { - fn handle(&mut self, client: &mut Connection) -> bool { - let mut data = vec![0; 4000]; - while let Some(event) = client.next_event() { - eprintln!("Event: {:?}", event); - match event { - ConnectionEvent::RecvStreamReadable { stream_id } => { - if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {}", stream_id); - return false; - } - - let (sz, fin) = client - .stream_recv(stream_id, &mut data) - .expect("Read should succeed"); - data.truncate(sz); - eprintln!("Length={}", sz); - self.rbytes += sz; - if fin { - eprintln!("", stream_id); - client.close(Instant::now(), 0, "kthxbye!"); - self.rsfin = true; - return false; - } - } - ConnectionEvent::SendStreamWritable { stream_id } => { - eprintln!("stream {} writable", stream_id) - } - _ => { - eprintln!("Unexpected event {:?}", event); - } - } - } - - true - } -} - -// HTTP/3 IMPLEMENTATION -#[derive(Debug)] -struct Headers { - pub h: Vec
, -} - -// dragana: this is a very stupid parser. -// headers should be in form "[(something1, something2), (something3, something4)]" -impl FromStr for Headers { - type Err = ParseError; - - fn from_str(s: &str) -> Result { - let mut res = Self { h: Vec::new() }; - let h1: Vec<&str> = s - .trim_matches(|p| p == '[' || p == ']') - .split(')') - .collect(); - - for h in h1 { - let h2: Vec<&str> = h - .trim_matches(|p| p == ',') - .trim() - .trim_matches(|p| p == '(' || p == ')') - .split(',') - .collect(); - - if h2.len() == 2 { - res.h.push(Header::new(h2[0].trim(), h2[1].trim())); - } - } - - Ok(res) - } -} - -struct H3Handler { - streams: HashSet, - h3: Http3Client, - host: String, - path: String, - token: Option, -} - -// TODO(ekr@rtfm.com): Figure out how to merge this. -fn process_loop_h3( - nctx: &NetworkCtx, - handler: &mut H3Handler, - connect: bool, - close: bool, -) -> Result { - let buf = &mut [0u8; 2048]; - let timer = Timer::new(); - - loop { - if let State::Closed(..) = handler.h3.conn().state() { - return Ok(handler.h3.conn().state().clone()); - } - - if connect { - if let Http3State::Connected = handler.h3.state() { - return Ok(handler.h3.conn().state().clone()); - } - } - - loop { - let output = handler.h3.conn().process_output(Instant::now()); - match output { - Output::Datagram(dgram) => emit_datagram(&nctx.socket, dgram), - Output::Callback(duration) => { - let delay = min(timer.check()?, duration); - nctx.socket.set_read_timeout(Some(delay)).unwrap(); - break; - } - Output::None => { - return Ok(handler.h3.conn().state().clone()); - } - } - } - if !handler.handle(close) { - return Ok(handler.h3.conn().state().clone()); - } - - let sz = match nctx.socket.recv(&mut buf[..]) { - Ok(sz) => sz, - Err(e) => { - return Err(String::from(match e.kind() { - std::io::ErrorKind::WouldBlock => continue, - _ => "Read error", - })); - } - }; - - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - continue; - } - if sz > 0 { - let 
received = Datagram::new(nctx.remote_addr, nctx.local_addr, &buf[..sz]); - handler.h3.process_input(received, Instant::now()); - } - } -} - -// This is a bit fancier than actually needed. -impl H3Handler { - fn handle(&mut self, close: bool) -> bool { - let mut data = vec![0; 4000]; - while let Some(event) = self.h3.next_event() { - match event { - Http3ClientEvent::HeaderReady { - stream_id, - headers, - fin, - .. - } => { - if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {}", stream_id); - return false; - } - - eprintln!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); - } - Http3ClientEvent::DataReadable { stream_id } => { - if !self.streams.contains(&stream_id) { - eprintln!("Data on unexpected stream: {}", stream_id); - return false; - } - - let (_sz, fin) = self - .h3 - .read_data(Instant::now(), stream_id, &mut data) - .expect("Read should succeed"); - if let Ok(txt) = String::from_utf8(data.clone()) { - eprintln!("READ[{}]: {}", stream_id, txt); - } else { - eprintln!("READ[{}]: 0x{}", stream_id, hex(&data)); - } - if fin { - eprintln!("", stream_id); - if close { - self.h3.close(Instant::now(), 0, "kthxbye!"); - } - return false; - } - } - Http3ClientEvent::ResumptionToken(token) => { - self.token = Some(token); - } - _ => {} - } - } - - true - } -} - -struct Peer { - label: &'static str, - host: &'static str, - port: u16, -} - -impl Peer { - fn addr(&self) -> SocketAddr { - self.to_socket_addrs() - .expect("Remote address error") - .next() - .expect("No remote addresses") - } - - fn bind(&self) -> SocketAddr { - match self.addr() { - SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0), - SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), - } - } - - fn test_enabled(&self, _test: &Test) -> bool { - true - } -} - -impl ToSocketAddrs for Peer { - type Iter = ::std::vec::IntoIter; - fn to_socket_addrs(&self) -> ::std::io::Result { - // This is idiotic. 
There is no path from hostname: String to IpAddr. - // And no means of controlling name resolution either. - std::fmt::format(format_args!("{}:{}", self.host, self.port)).to_socket_addrs() - } -} - -#[allow(clippy::upper_case_acronyms)] -#[derive(Debug, PartialEq)] -enum Test { - Connect, - H9, - H3, - VN, - R, - Z, - D, -} - -impl Test { - fn alpn(&self) -> Vec { - match self { - Self::H3 | Self::R | Self::Z | Self::D => vec![String::from("h3-28")], - _ => vec![String::from("hq-28")], - } - } - - fn label(&self) -> String { - String::from(match self { - Self::Connect => "connect", - Self::H9 => "h9", - Self::H3 => "h3", - Self::VN => "vn", - Self::R => "r", - Self::Z => "z", - Self::D => "d", - }) - } - - fn letters(&self) -> Vec { - match self { - Self::Connect => vec!['H'], - Self::H9 => vec!['D', 'C'], - Self::H3 => vec!['3', 'C', 'D'], - Self::VN => vec!['V'], - Self::R => vec!['R'], - Self::Z => vec!['Z'], - Self::D => vec!['d'], - } - } -} - -struct NetworkCtx { - local_addr: SocketAddr, - remote_addr: SocketAddr, - socket: UdpSocket, -} - -fn test_connect(nctx: &NetworkCtx, test: &Test, peer: &Peer) -> Result { - let mut client = Connection::new_client( - peer.host, - &test.alpn(), - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - nctx.local_addr, - nctx.remote_addr, - ConnectionParameters::default(), - Instant::now(), - ) - .expect("must succeed"); - // Temporary here to help out the type inference engine - let mut h = PreConnectHandler {}; - let res = process_loop(nctx, &mut client, &mut h); - - let st = match res { - Ok(st) => st, - Err(e) => { - return Err(format!("ERROR: {}", e)); - } - }; - - if st.connected() { - Ok(client) - } else { - Err(format!("{:?}", st)) - } -} - -fn test_h9(nctx: &NetworkCtx, client: &mut Connection) -> Result<(), String> { - let client_stream_id = client.stream_create(StreamType::BiDi).unwrap(); - let req: String = "GET /10\r\n".to_string(); - client - .stream_send(client_stream_id, req.as_bytes()) - 
.unwrap(); - let mut hc = H9Handler::default(); - hc.streams.insert(client_stream_id); - let res = process_loop(nctx, client, &mut hc); - - if let Err(e) = res { - return Err(format!("ERROR: {}", e)); - } - if hc.rbytes == 0 { - return Err(String::from("Empty response")); - } - if !hc.rsfin { - return Err(String::from("No FIN")); - } - Ok(()) -} - -fn connect_h3(nctx: &NetworkCtx, peer: &Peer, client: Connection) -> Result { - let mut hc = H3Handler { - streams: HashSet::new(), - h3: Http3Client::new_with_conn( - client, - Http3Parameters::default() - .max_table_size_encoder(16384) - .max_table_size_decoder(16384) - .max_blocked_streams(10) - .max_concurrent_push_streams(10), - ), - host: String::from(peer.host), - path: String::from("/"), - token: None, - }; - - if let Err(e) = process_loop_h3(nctx, &mut hc, true, false) { - return Err(format!("ERROR: {}", e)); - } - Ok(hc) -} - -fn test_h3(nctx: &NetworkCtx, peer: &Peer, client: Connection, test: &Test) -> Result<(), String> { - let mut hc = connect_h3(nctx, peer, client)?; - - let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[], - Priority::default(), - ) - .unwrap(); - hc.h3.stream_close_send(client_stream_id).unwrap(); - - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, *test != Test::D) { - return Err(format!("ERROR: {}", e)); - } - - if *test == Test::D { - // Send another request, when the first one was send we probably did not have the peer's qpack parameter. 
- let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[Header::new("something1", "something2")], - Priority::default(), - ) - .unwrap(); - hc.h3.stream_close_send(client_stream_id).unwrap(); - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {}", e)); - } - - if hc.h3.qpack_decoder_stats().dynamic_table_references == 0 { - return Err("ERROR: qpack decoder does not use the dynamic table.".into()); - } - if hc.h3.qpack_encoder_stats().dynamic_table_references == 0 { - return Err("ERROR: qpack encoder does not use the dynamic table.".into()); - } - } - - Ok(()) -} - -// Return true if 0RTT was negotiated. -fn test_h3_rz( - nctx: &NetworkCtx, - peer: &Peer, - client: Connection, - test: &Test, -) -> Result<(), String> { - let mut hc = connect_h3(nctx, peer, client)?; - - // Exchange some data to get http3 control streams and a resumption token. - let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[], - Priority::default(), - ) - .unwrap(); - hc.h3.stream_close_send(client_stream_id).unwrap(); - - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {}", e)); - } - - // get resumption ticket - let res_token = hc.token.ok_or("ERROR: no resumption token")?; - - let handler = Http3Client::new( - peer.host, - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - nctx.local_addr, - nctx.remote_addr, - Http3Parameters::default() - .max_table_size_encoder(16384) - .max_table_size_decoder(16384) - .max_blocked_streams(10) - .max_concurrent_push_streams(0), - Instant::now(), - ); - if handler.is_err() { - return Err(String::from("ERROR: creating a client failed")); - } - - let mut hc = H3Handler { - streams: HashSet::new(), - h3: handler.unwrap(), - host: String::from(peer.host), - path: String::from("/"), 
- token: None, - }; - - hc.h3.enable_resumption(Instant::now(), res_token).unwrap(); - - if *test == Test::Z { - println!("Test 0RTT"); - if Http3State::ZeroRtt != hc.h3.state() { - return Err("ERROR: zerortt not negotiated".into()); - } - - // SendH3 data during 0rtt - let client_stream_id = hc - .h3 - .fetch( - Instant::now(), - "GET", - &("https", &hc.host, &hc.path), - &[], - Priority::default(), - ) - .unwrap(); - mem::drop(hc.h3.stream_close_send(client_stream_id)); - hc.streams.insert(client_stream_id); - if let Err(e) = process_loop_h3(nctx, &mut hc, false, true) { - return Err(format!("ERROR: {}", e)); - } - - let recvd_0rtt_reject = |e| e == Http3ClientEvent::ZeroRttRejected; - if hc.h3.events().any(recvd_0rtt_reject) { - return Err("ERROR: 0RTT rejected".into()); - } - } else { - println!("Test resumption"); - if let Err(e) = process_loop_h3(nctx, &mut hc, true, true) { - return Err(format!("ERROR: {}", e)); - } - } - - if !hc.h3.conn().stats().resumed { - return Err("ERROR: resumption failed".into()); - } - Ok(()) -} - -struct VnHandler {} - -impl Handler for VnHandler { - fn handle(&mut self, client: &mut Connection) -> bool { - !matches!(client.state(), State::Connected | State::Closing { .. 
}) - } - - fn rewrite_out(&mut self, d: &Datagram) -> Option { - let mut payload = d[..].to_vec(); - payload[1] = 0x1a; - Some(Datagram::new(d.source(), d.destination(), payload)) - } -} - -fn test_vn(nctx: &NetworkCtx, peer: &Peer) -> Connection { - let mut client = Connection::new_client( - peer.host, - &["hq-28"], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - nctx.local_addr, - nctx.remote_addr, - ConnectionParameters::default(), - Instant::now(), - ) - .expect("must succeed"); - // Temporary here to help out the type inference engine - let mut h = VnHandler {}; - let _res = process_loop(nctx, &mut client, &mut h); - client -} - -fn run_test<'t>(peer: &Peer, test: &'t Test) -> (&'t Test, String) { - let socket = UdpSocket::bind(peer.bind()).expect("Unable to bind UDP socket"); - socket.connect(peer).expect("Unable to connect UDP socket"); - - let local_addr = socket.local_addr().expect("Socket local address not bound"); - let remote_addr = peer.addr(); - - let nctx = NetworkCtx { - local_addr, - remote_addr, - socket, - }; - - if let Test::VN = test { - let client = test_vn(&nctx, peer); - return match client.state() { - State::Closed(ConnectionError::Transport(Error::VersionNegotiation)) => { - (test, String::from("OK")) - } - _ => (test, format!("ERROR: Wrong state {:?}", client.state())), - }; - } - - let mut client = match test_connect(&nctx, test, peer) { - Ok(client) => client, - Err(e) => return (test, e), - }; - - let res = match test { - Test::Connect => { - return (test, String::from("OK")); - } - Test::H9 => test_h9(&nctx, &mut client), - Test::H3 => test_h3(&nctx, peer, client, test), - Test::VN => unimplemented!(), - Test::R => test_h3_rz(&nctx, peer, client, test), - Test::Z => test_h3_rz(&nctx, peer, client, test), - Test::D => test_h3(&nctx, peer, client, test), - }; - - if let Err(e) = res { - return (test, e); - } - - (test, String::from("OK")) -} - -fn run_peer(args: &Args, peer: &'static Peer) -> Vec<(&'static Test, 
String)> { - let mut results: Vec<(&'static Test, String)> = Vec::new(); - - eprintln!("Running tests for {}", peer.label); - - let mut children = Vec::new(); - - for test in &TESTS { - if !peer.test_enabled(test) { - continue; - } - - if !args.include_tests.is_empty() && !args.include_tests.contains(&test.label()) { - continue; - } - if args.exclude_tests.contains(&test.label()) { - continue; - } - - let child = thread::spawn(move || run_test(peer, test)); - children.push((test, child)); - } - - for child in children { - match child.1.join() { - Ok(e) => { - eprintln!("Test complete {:?}, {:?}", child.0, e); - results.push(e) - } - Err(_) => { - eprintln!("Thread crashed {:?}", child.0); - results.push((child.0, String::from("CRASHED"))); - } - } - } - - eprintln!("Tests for {} complete {:?}", peer.label, results); - results -} - -const PEERS: &[Peer] = &[ - Peer { - label: "quiche", - host: "quic.tech", - port: 4433, - }, - Peer { - label: "quiche2", - host: "quic.tech", - port: 8443, - }, - Peer { - label: "quiche3", - host: "quic.tech", - port: 8444, - }, - Peer { - label: "quant", - host: "quant.eggert.org", - port: 4433, - }, - Peer { - label: "quicly", - host: "quic.examp1e.net", - port: 443, - }, - Peer { - label: "quicly2", - host: "quic.examp1e.net", - port: 4433, - }, - Peer { - label: "local", - host: "127.0.0.1", - port: 4433, - }, - Peer { - label: "applequic", - host: "[2a00:79e1:abc:301:fca8:166e:525f:9b5c]", - port: 4433, - }, - Peer { - label: "f5", - host: "f5quic.com", - port: 4433, - }, - Peer { - label: "msft", - host: "quic.westus.cloudapp.azure.com", - port: 443, - }, - Peer { - label: "mvfst", - host: "fb.mvfst.net", - port: 443, - }, - Peer { - label: "google", - host: "quic.rocks", - port: 4433, - }, - Peer { - label: "ngtcp2", - host: "nghttp2.org", - port: 4433, - }, - Peer { - label: "picoquic", - host: "test.privateoctopus.com", - port: 4433, - }, - Peer { - label: "ats", - host: "quic.ogre.com", - port: 4433, - }, - Peer { - label: 
"cloudflare", - host: "www.cloudflare.com", - port: 443, - }, - Peer { - label: "litespeed", - host: "http3-test.litespeedtech.com", - port: 4433, - }, -]; - -const TESTS: [Test; 7] = [ - Test::Connect, - Test::H9, - Test::H3, - Test::VN, - Test::R, - Test::Z, - Test::D, -]; - -fn main() { - let _tests = vec![Test::Connect]; - - let args = Args::from_args(); - init(); - Timer::set_timeout(Duration::from_secs(args.timeout)); - - let mut children = Vec::new(); - - // Start all the children. - for peer in PEERS { - if !args.include.is_empty() && !args.include.contains(&String::from(peer.label)) { - continue; - } - if args.exclude.contains(&String::from(peer.label)) { - continue; - } - - let at = args.clone(); - let child = thread::spawn(move || run_peer(&at, peer)); - children.push((peer, child)); - } - - // Now wait for them. - for child in children { - let res = child.1.join().unwrap(); - let mut all_letters = HashSet::new(); - for r in &res { - for l in r.0.letters() { - if r.1 == "OK" { - all_letters.insert(l); - } - } - } - let mut letter_str = String::from(""); - for l in &['V', 'H', 'D', 'C', 'R', 'Z', 'S', '3'] { - if all_letters.contains(l) { - letter_str.push(*l); - } - } - println!("{}: {} -> {:?}", child.0.label, letter_str, res); - } -} diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 033b2d71a1..c3e2ab8a66 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,23 +1,28 @@ [package] name = "neqo-qpack" -version = "0.6.4" -authors = ["Dragana Damjanovic "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true + +[lints] +workspace = true [dependencies] +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +log = { version = "0.4", default-features = false } neqo-common = { path = 
"./../neqo-common" } -neqo-transport = { path = "./../neqo-transport" } neqo-crypto = { path = "./../neqo-crypto" } -log = {version = "0.4.0", default-features = false} -static_assertions = "1.1.0" -qlog = "0.4.0" -lazy_static = "1.3.0" +neqo-transport = { path = "./../neqo-transport" } +qlog = { version = "0.12", default-features = false } +static_assertions = { version = "1.1", default-features = false } [dev-dependencies] test-fixture = { path = "../test-fixture" } -[features] -default = ["deny-warnings"] -deny-warnings = [] +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/neqo-qpack/src/decoder.rs b/neqo-qpack/src/decoder.rs index 5971545938..b2cfb6629a 100644 --- a/neqo-qpack/src/decoder.rs +++ b/neqo-qpack/src/decoder.rs @@ -4,6 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use neqo_common::{qdebug, Header}; +use neqo_transport::{Connection, StreamId}; + use crate::{ decoder_instructions::DecoderInstruction, encoder_instructions::{DecodedEncoderInstruction, EncoderInstructionReader}, @@ -14,9 +17,6 @@ use crate::{ table::HeaderTable, Error, QpackSettings, Res, }; -use neqo_common::{qdebug, Header}; -use neqo_transport::{Connection, StreamId}; -use std::convert::TryFrom; pub const QPACK_UNI_STREAM_TYPE_DECODER: u64 = 0x3; @@ -30,12 +30,13 @@ pub struct QPackDecoder { local_stream_id: Option, max_table_size: u64, max_blocked_streams: usize, - blocked_streams: Vec<(StreamId, u64)>, //stream_id and requested inserts count. + blocked_streams: Vec<(StreamId, u64)>, // stream_id and requested inserts count. stats: Stats, } impl QPackDecoder { /// # Panics + /// /// If settings include invalid values. 
#[must_use] pub fn new(qpack_settings: &QpackSettings) -> Self { @@ -50,7 +51,7 @@ impl QPackDecoder { send_buf, local_stream_id: None, max_table_size: qpack_settings.max_table_size_decoder, - max_blocked_streams: usize::try_from(qpack_settings.max_blocked_streams).unwrap(), + max_blocked_streams: usize::from(qpack_settings.max_blocked_streams), blocked_streams: Vec::new(), stats: Stats::default(), } @@ -67,6 +68,7 @@ impl QPackDecoder { } /// # Panics + /// /// If the number of blocked streams is too large. #[must_use] pub fn get_blocked_streams(&self) -> u16 { @@ -74,7 +76,9 @@ impl QPackDecoder { } /// returns a list of unblocked streams + /// /// # Errors + /// /// May return: `ClosedCriticalStream` if stream has been closed or `EncoderStream` /// in case of any other transport error. pub fn receive(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res> { @@ -164,8 +168,11 @@ impl QPackDecoder { } /// # Errors + /// /// May return an error in case of any transport error. TODO: define transport errors. + /// /// # Panics + /// /// Never, but rust doesn't know that. #[allow(clippy::map_err_ignore)] pub fn send(&mut self, conn: &mut Connection) -> Res<()> { @@ -186,6 +193,7 @@ impl QPackDecoder { } /// # Errors + /// /// May return `DecompressionFailed` if header block is incorrect or incomplete. pub fn refers_dynamic_table(&self, buf: &[u8]) -> Res { HeaderDecoder::new(buf).refers_dynamic_table(self.max_entries, self.table.base()) @@ -193,9 +201,13 @@ impl QPackDecoder { /// This function returns None if the stream is blocked waiting for table insertions. /// 'buf' must contain the complete header block. + /// /// # Errors + /// /// May return `DecompressionFailed` if header block is incorrect or incomplete. + /// /// # Panics + /// /// When there is a programming error. pub fn decode_header_block( &mut self, @@ -236,6 +248,7 @@ impl QPackDecoder { } /// # Panics + /// /// When a stream has already been added. 
pub fn add_send_stream(&mut self, stream_id: StreamId) { assert!( @@ -272,13 +285,15 @@ fn map_error(err: &Error) -> Error { #[cfg(test)] mod tests { - use super::{Connection, Error, QPackDecoder, Res}; - use crate::QpackSettings; + use std::mem; + use neqo_common::Header; use neqo_transport::{StreamId, StreamType}; - use std::{convert::TryFrom, mem}; use test_fixture::now; + use super::{Connection, Error, QPackDecoder, Res}; + use crate::QpackSettings; + const STREAM_0: StreamId = StreamId::new(0); struct TestDecoder { @@ -319,7 +334,7 @@ mod tests { .stream_send(decoder.recv_stream_id, encoder_instruction) .unwrap(); let out = decoder.peer_conn.process(None, now()); - mem::drop(decoder.conn.process(out.dgram(), now())); + mem::drop(decoder.conn.process(out.as_dgram_ref(), now())); assert_eq!( decoder .decoder @@ -331,7 +346,7 @@ mod tests { fn send_instructions_and_check(decoder: &mut TestDecoder, decoder_instruction: &[u8]) { decoder.decoder.send(&mut decoder.conn).unwrap(); let out = decoder.conn.process(None, now()); - mem::drop(decoder.peer_conn.process(out.dgram(), now())); + mem::drop(decoder.peer_conn.process(out.as_dgram_ref(), now())); let mut buf = [0_u8; 100]; let (amount, fin) = decoder .peer_conn @@ -434,7 +449,8 @@ mod tests { ); } - // this test tests header decoding, the header acks command and the insert count increment command. + // this test tests header decoding, the header acks command and the insert count increment + // command. #[test] fn test_duplicate() { let mut decoder = connect(); @@ -467,8 +483,8 @@ mod tests { fn test_encode_incr_encode_header_ack_some() { // 1. Decoder receives an instruction (header and value both as literal) // 2. Decoder process the instruction and sends an increment instruction. - // 3. Decoder receives another two instruction (header and value both as literal) and - // a header block. + // 3. Decoder receives another two instruction (header and value both as literal) and a + // header block. // 4. 
Now it sends only a header ack and an increment instruction with increment==1. let headers = vec![ Header::new("my-headera", "my-valuea"), @@ -504,8 +520,8 @@ mod tests { fn test_encode_incr_encode_header_ack_all() { // 1. Decoder receives an instruction (header and value both as literal) // 2. Decoder process the instruction and sends an increment instruction. - // 3. Decoder receives another instruction (header and value both as literal) and - // a header block. + // 3. Decoder receives another instruction (header and value both as literal) and a header + // block. // 4. Now it sends only a header ack. let headers = vec![ Header::new("my-headera", "my-valuea"), @@ -604,7 +620,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -683,7 +700,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], diff --git a/neqo-qpack/src/decoder_instructions.rs b/neqo-qpack/src/decoder_instructions.rs index eb8a331f3a..029cd61db6 100644 --- a/neqo-qpack/src/decoder_instructions.rs +++ b/neqo-qpack/src/decoder_instructions.rs @@ -4,15 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::prefix::{ - DECODER_HEADER_ACK, DECODER_INSERT_COUNT_INCREMENT, DECODER_STREAM_CANCELLATION, -}; -use crate::qpack_send_buf::QpackData; -use crate::reader::{IntReader, ReadByte}; -use crate::Res; +use std::mem; + use neqo_common::{qdebug, qtrace}; use neqo_transport::StreamId; -use std::mem; + +use crate::{ + prefix::{DECODER_HEADER_ACK, DECODER_INSERT_COUNT_INCREMENT, DECODER_STREAM_CANCELLATION}, + qpack_send_buf::QpackData, + reader::{IntReader, ReadByte}, + Res, +}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum DecoderInstruction { @@ -81,10 +83,11 @@ impl DecoderInstructionReader { } } - /// ### Errors - /// 1) `NeedMoreData` if the reader needs more data - /// 2) `ClosedCriticalStream` - /// 3) other errors will be translated to `DecoderStream` by the caller of this function. + /// # Errors + /// + /// 1) `NeedMoreData` if the reader needs more data + /// 2) `ClosedCriticalStream` + /// 3) other errors will be translated to `DecoderStream` by the caller of this function. pub fn read_instructions(&mut self, recv: &mut R) -> Res { qdebug!([self], "read a new instraction"); loop { @@ -137,11 +140,11 @@ impl DecoderInstructionReader { #[cfg(test)] mod test { - use super::{DecoderInstruction, DecoderInstructionReader, QpackData}; - use crate::reader::test_receiver::TestReceiver; - use crate::Error; use neqo_transport::StreamId; + use super::{DecoderInstruction, DecoderInstructionReader, QpackData}; + use crate::{reader::test_receiver::TestReceiver, Error}; + fn test_encoding_decoding(instruction: DecoderInstruction) { let mut buf = QpackData::default(); instruction.marshal(&mut buf); diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index a5ebd01666..c90570ccdc 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -4,19 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::decoder_instructions::{DecoderInstruction, DecoderInstructionReader}; -use crate::encoder_instructions::EncoderInstruction; -use crate::header_block::HeaderEncoder; -use crate::qlog; -use crate::qpack_send_buf::QpackData; -use crate::reader::ReceiverConnWrapper; -use crate::stats::Stats; -use crate::table::{HeaderTable, LookupResult, ADDITIONAL_TABLE_ENTRY_SIZE}; -use crate::{Error, QpackSettings, Res}; +use std::collections::{HashMap, HashSet, VecDeque}; + use neqo_common::{qdebug, qerror, qlog::NeqoQlog, qtrace, Header}; use neqo_transport::{Connection, Error as TransportError, StreamId}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; + +use crate::{ + decoder_instructions::{DecoderInstruction, DecoderInstructionReader}, + encoder_instructions::EncoderInstruction, + header_block::HeaderEncoder, + qlog, + qpack_send_buf::QpackData, + reader::ReceiverConnWrapper, + stats::Stats, + table::{HeaderTable, LookupResult, ADDITIONAL_TABLE_ENTRY_SIZE}, + Error, QpackSettings, Res, +}; pub const QPACK_UNI_STREAM_TYPE_ENCODER: u64 = 0x2; @@ -45,9 +48,9 @@ pub struct QPackEncoder { local_stream: LocalStreamState, max_blocked_streams: u16, // Remember header blocks that are referring to dynamic table. - // There can be multiple header blocks in one stream, headers, trailer, push stream request, etc. - // This HashMap maps a stream ID to a list of header blocks. Each header block is a list of - // referenced dynamic table entries. + // There can be multiple header blocks in one stream, headers, trailer, push stream request, + // etc. This HashMap maps a stream ID to a list of header blocks. Each header block is a + // list of referenced dynamic table entries. unacked_header_blocks: HashMap>>, blocked_stream_cnt: u16, use_huffman: bool, @@ -75,7 +78,9 @@ impl QPackEncoder { /// This function is use for setting encoders table max capacity. The value is received as /// a `SETTINGS_QPACK_MAX_TABLE_CAPACITY` setting parameter. 
+ /// /// # Errors + /// /// `EncoderStream` if value is too big. /// `ChangeCapacity` if table capacity cannot be reduced. pub fn set_max_capacity(&mut self, cap: u64) -> Res<()> { @@ -103,7 +108,9 @@ impl QPackEncoder { /// This function is use for setting encoders max blocked streams. The value is received as /// a `SETTINGS_QPACK_BLOCKED_STREAMS` setting parameter. + /// /// # Errors + /// /// `EncoderStream` if value is too big. pub fn set_max_blocked_streams(&mut self, blocked_streams: u64) -> Res<()> { self.max_blocked_streams = u16::try_from(blocked_streams).or(Err(Error::EncoderStream))?; @@ -111,7 +118,9 @@ impl QPackEncoder { } /// Reads decoder instructions. + /// /// # Errors + /// /// May return: `ClosedCriticalStream` if stream has been closed or `DecoderStream` /// in case of any other transport error. pub fn receive(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> { @@ -221,14 +230,20 @@ impl QPackEncoder { } } - /// Inserts a new entry into a table and sends the corresponding instruction to a peer. An entry is added only - /// if it is possible to send the corresponding instruction immediately, i.e. the encoder stream is not - /// blocked by the flow control (or stream internal buffer(this is very unlikely)). - /// ### Errors + /// Inserts a new entry into a table and sends the corresponding instruction to a peer. An entry + /// is added only if it is possible to send the corresponding instruction immediately, i.e. + /// the encoder stream is not blocked by the flow control (or stream internal buffer(this is + /// very unlikely)). + /// + /// # Errors + /// /// `EncoderStreamBlocked` if the encoder stream is blocked by the flow control. /// `DynamicTableFull` if the dynamic table does not have enough space for the entry. - /// The function can return transport errors: `InvalidStreamId`, `InvalidInput` and `FinalSizeError`. + /// The function can return transport errors: `InvalidStreamId`, `InvalidInput` and + /// `FinalSizeError`. 
+ /// /// # Panics + /// /// When the insertion fails (it should not). pub fn send_and_insert( &mut self, @@ -279,7 +294,8 @@ impl QPackEncoder { stream_id: StreamId, ) -> Res<()> { if let Some(cap) = self.next_capacity { - // Check if it is possible to reduce the capacity, e.g. if enough space can be make free for the reduction. + // Check if it is possible to reduce the capacity, e.g. if enough space can be make free + // for the reduction. if cap < self.table.capacity() && !self.table.can_evict_to(cap) { return Err(Error::DynamicTableFull); } @@ -293,7 +309,7 @@ impl QPackEncoder { false, "can_evict_to should have checked and make sure this operation is possible" ); - return Err(Error::InternalError(1)); + return Err(Error::InternalError); } self.max_entries = cap / 32; self.next_capacity = None; @@ -302,7 +318,9 @@ impl QPackEncoder { } /// Sends any qpack encoder instructions. + /// /// # Errors + /// /// returns `EncoderStream` in case of an error. pub fn send_encoder_updates(&mut self, conn: &mut Connection) -> Res<()> { match self.local_stream { @@ -338,10 +356,14 @@ impl QPackEncoder { } /// Encodes headers + /// /// # Errors + /// /// `ClosedCriticalStream` if the encoder stream is closed. /// `InternalError` if an unexpected error occurred. + /// /// # Panics + /// /// If there is a programming error. pub fn encode_header_block( &mut self, @@ -358,11 +380,9 @@ impl QPackEncoder { // to write to the encoder stream AND if it can't uses // literal instructions. // The errors can be: - // 1) `EncoderStreamBlocked` - this is an error that - // can occur. + // 1) `EncoderStreamBlocked` - this is an error that can occur. // 2) `InternalError` - this is unexpected error. - // 3) `ClosedCriticalStream` - this is error that should - // close the HTTP/3 session. + // 3) `ClosedCriticalStream` - this is error that should close the HTTP/3 session. // The last 2 errors are ignored here and will be picked up // by the main loop. 
encoder_blocked = true; @@ -376,7 +396,7 @@ impl QPackEncoder { let mut ref_entries = HashSet::new(); - for iter in h.iter() { + for iter in h { let name = iter.name().as_bytes().to_vec(); let value = iter.value().as_bytes().to_vec(); qtrace!("encoding {:x?} {:x?}.", name, value); @@ -406,8 +426,9 @@ impl QPackEncoder { self.table.add_ref(index); } } else if can_block && !encoder_blocked { - // Insert using an InsertWithNameLiteral instruction. This entry name does not match any name in the - // tables therefore we cannot use any other instruction. + // Insert using an InsertWithNameLiteral instruction. This entry name does not match + // any name in the tables therefore we cannot use any other + // instruction. if let Ok(index) = self.send_and_insert(conn, &name, &value) { encoded_h.encode_indexed_dynamic(index); ref_entries.insert(index); @@ -417,16 +438,15 @@ impl QPackEncoder { // to write to the encoder stream AND if it can't uses // literal instructions. // The errors can be: - // 1) `EncoderStreamBlocked` - this is an error that - // can occur. - // 2) `DynamicTableFull` - this is an error that - // can occur. + // 1) `EncoderStreamBlocked` - this is an error that can occur. + // 2) `DynamicTableFull` - this is an error that can occur. // 3) `InternalError` - this is unexpected error. - // 4) `ClosedCriticalStream` - this is error that should - // close the HTTP/3 session. + // 4) `ClosedCriticalStream` - this is error that should close the HTTP/3 + // session. // The last 2 errors are ignored here and will be picked up // by the main loop. - // As soon as one of the instructions cannot be written or the table is full, do not try again. + // As soon as one of the instructions cannot be written or the table is full, do + // not try again. 
encoder_blocked = true; encoded_h.encode_literal_with_name_literal(&name, &value); } @@ -450,7 +470,7 @@ impl QPackEncoder { if !ref_entries.is_empty() { self.unacked_header_blocks .entry(stream_id) - .or_insert_with(VecDeque::new) + .or_default() .push_front(ref_entries); self.stats.dynamic_table_references += 1; } @@ -458,7 +478,9 @@ impl QPackEncoder { } /// Encoder stream has been created. Add the stream id. + /// /// # Panics + /// /// If a stream has already been added. pub fn add_send_stream(&mut self, stream_id: StreamId) { if self.local_stream == LocalStreamState::NoStream { @@ -505,19 +527,21 @@ fn map_stream_send_atomic_error(err: &TransportError) -> Error { } _ => { debug_assert!(false, "Unexpected error"); - Error::InternalError(2) + Error::InternalError } } } #[cfg(test)] mod tests { - use super::{Connection, Error, Header, QPackEncoder, Res}; - use crate::QpackSettings; - use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use std::mem; + + use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use test_fixture::{default_client, default_server, handshake, new_server, now, DEFAULT_ALPN}; + use super::{Connection, Error, Header, QPackEncoder, Res}; + use crate::QpackSettings; + struct TestEncoder { encoder: QPackEncoder, send_stream_id: StreamId, @@ -529,7 +553,8 @@ mod tests { impl TestEncoder { pub fn change_capacity(&mut self, capacity: u64) -> Res<()> { self.encoder.set_max_capacity(capacity).unwrap(); - // We will try to really change the table only when we send the change capacity instruction. + // We will try to really change the table only when we send the change capacity + // instruction. 
self.encoder.send_encoder_updates(&mut self.conn) } @@ -556,8 +581,8 @@ mod tests { pub fn send_instructions(&mut self, encoder_instruction: &[u8]) { self.encoder.send_encoder_updates(&mut self.conn).unwrap(); let out = self.conn.process(None, now()); - let out2 = self.peer_conn.process(out.dgram(), now()); - mem::drop(self.conn.process(out2.dgram(), now())); + let out2 = self.peer_conn.process(out.as_dgram_ref(), now()); + mem::drop(self.conn.process(out2.as_dgram_ref(), now())); let mut buf = [0_u8; 100]; let (amount, fin) = self .peer_conn @@ -619,7 +644,7 @@ mod tests { .stream_send(encoder.recv_stream_id, decoder_instruction) .unwrap(); let out = encoder.peer_conn.process(None, now()); - mem::drop(encoder.conn.process(out.dgram(), now())); + mem::drop(encoder.conn.process(out.as_dgram_ref(), now())); assert!(encoder .encoder .read_instructions(&mut encoder.conn, encoder.recv_stream_id) @@ -722,7 +747,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -796,7 +822,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -870,7 +897,8 @@ mod tests { assert!(res.is_ok()); encoder.send_instructions(HEADER_CONTENT_LENGTH_VALUE_1_NAME_LITERAL); - // insert "content-length: 12345 which will fail because the ntry in the table cannot be evicted. + // insert "content-length: 12345 which will fail because the ntry in the table cannot be + // evicted. 
let res = encoder .encoder @@ -921,7 +949,8 @@ mod tests { assert_eq!(&buf[..], ENCODE_INDEXED_REF_DYNAMIC); encoder.send_instructions(&[]); - // insert "content-length: 12345 which will fail because the entry in the table cannot be evicted + // insert "content-length: 12345 which will fail because the entry in the table cannot be + // evicted let res = encoder .encoder @@ -1004,8 +1033,8 @@ mod tests { encoder.send_instructions(&[]); - // The next one will not use the dynamic entry because it is exceeding the max_blocked_streams - // limit. + // The next one will not use the dynamic entry because it is exceeding the + // max_blocked_streams limit. let buf = encoder.encoder.encode_header_block( &mut encoder.conn, &[Header::new("content-length", "1234")], @@ -1099,7 +1128,8 @@ mod tests { assert_eq!(encoder.encoder.blocked_stream_cnt(), 1); - // The next one will not create a new entry because the encoder is on max_blocked_streams limit. + // The next one will not create a new entry because the encoder is on max_blocked_streams + // limit. let buf = encoder.encoder.encode_header_block( &mut encoder.conn, &[Header::new("name2", "value2")], @@ -1274,8 +1304,8 @@ mod tests { assert_eq!(encoder.encoder.blocked_stream_cnt(), 2); // receive a stream cancel for the first stream. - // This will remove the first stream as blocking but it will not mark the instruction as acked. - // and the second steam will still be blocking. + // This will remove the first stream as blocking but it will not mark the instruction as + // acked. and the second steam will still be blocking. recv_instruction(&mut encoder, STREAM_CANCELED_ID_1); // The stream is not blocking anymore because header ack also acks the instruction. @@ -1507,9 +1537,10 @@ mod tests { assert!(encoder.encoder.set_max_capacity(1000).is_ok()); encoder.send_instructions(CAP_INSTRUCTION_1000); - // Encode a header block with 2 headers. The first header will be added to the dynamic table. 
- // The second will not be added to the dynamic table, because the corresponding instruction - // cannot be written immediately due to the flow control limit. + // Encode a header block with 2 headers. The first header will be added to the dynamic + // table. The second will not be added to the dynamic table, because the + // corresponding instruction cannot be written immediately due to the flow control + // limit. let buf1 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ @@ -1524,7 +1555,8 @@ mod tests { // Assert that the second header is encoded as a literal with a name literal assert_eq!(buf1[3] & 0xf0, 0x20); - // Try to encode another header block. Here both headers will be encoded as a literal with a name literal + // Try to encode another header block. Here both headers will be encoded as a literal with a + // name literal let buf2 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ @@ -1540,10 +1572,10 @@ mod tests { // exchange a flow control update. let out = encoder.peer_conn.process(None, now()); - mem::drop(encoder.conn.process(out.dgram(), now())); + mem::drop(encoder.conn.process(out.as_dgram_ref(), now())); - // Try writing a new header block. Now, headers will be added to the dynamic table again, because - // instructions can be sent. + // Try writing a new header block. Now, headers will be added to the dynamic table again, + // because instructions can be sent. let buf3 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ @@ -1587,7 +1619,7 @@ mod tests { .send_encoder_updates(&mut encoder.conn) .unwrap(); let out = encoder.conn.process(None, now()); - mem::drop(encoder.peer_conn.process(out.dgram(), now())); + mem::drop(encoder.peer_conn.process(out.as_dgram_ref(), now())); // receive an insert count increment. 
recv_instruction(&mut encoder, &[0x01]); diff --git a/neqo-qpack/src/encoder_instructions.rs b/neqo-qpack/src/encoder_instructions.rs index 93be06bf7f..5564af969e 100644 --- a/neqo-qpack/src/encoder_instructions.rs +++ b/neqo-qpack/src/encoder_instructions.rs @@ -4,16 +4,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::prefix::{ - ENCODER_CAPACITY, ENCODER_DUPLICATE, ENCODER_INSERT_WITH_NAME_LITERAL, - ENCODER_INSERT_WITH_NAME_REF_DYNAMIC, ENCODER_INSERT_WITH_NAME_REF_STATIC, NO_PREFIX, -}; -use crate::qpack_send_buf::QpackData; -use crate::reader::{IntReader, LiteralReader, ReadByte, Reader}; -use crate::Res; -use neqo_common::{qdebug, qtrace}; use std::mem; +use neqo_common::{qdebug, qtrace}; + +use crate::{ + prefix::{ + ENCODER_CAPACITY, ENCODER_DUPLICATE, ENCODER_INSERT_WITH_NAME_LITERAL, + ENCODER_INSERT_WITH_NAME_REF_DYNAMIC, ENCODER_INSERT_WITH_NAME_REF_STATIC, NO_PREFIX, + }, + qpack_send_buf::QpackData, + reader::{IntReader, LiteralReader, ReadByte, Reader}, + Res, +}; + // The encoder only uses InsertWithNameLiteral, therefore clippy is complaining about dead_code. // We may decide to use othe instruction in the future. // All instructions are used for testing, therefore they are defined. @@ -183,10 +187,11 @@ impl EncoderInstructionReader { Ok(()) } - /// ### Errors - /// 1) `NeedMoreData` if the reader needs more data - /// 2) `ClosedCriticalStream` - /// 3) other errors will be translated to `EncoderStream` by the caller of this function. + /// # Errors + /// + /// 1) `NeedMoreData` if the reader needs more data + /// 2) `ClosedCriticalStream` + /// 3) other errors will be translated to `EncoderStream` by the caller of this function. 
pub fn read_instructions( &mut self, recv: &mut T, @@ -265,8 +270,7 @@ impl EncoderInstructionReader { mod test { use super::{EncoderInstruction, EncoderInstructionReader, QpackData}; - use crate::reader::test_receiver::TestReceiver; - use crate::Error; + use crate::{reader::test_receiver::TestReceiver, Error}; fn test_encoding_decoding(instruction: &EncoderInstruction, use_huffman: bool) { let mut buf = QpackData::default(); diff --git a/neqo-qpack/src/header_block.rs b/neqo-qpack/src/header_block.rs index 38f8738df9..2e15bdf1fe 100644 --- a/neqo-qpack/src/header_block.rs +++ b/neqo-qpack/src/header_block.rs @@ -4,19 +4,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::prefix::{ - BASE_PREFIX_NEGATIVE, BASE_PREFIX_POSITIVE, HEADER_FIELD_INDEX_DYNAMIC, - HEADER_FIELD_INDEX_DYNAMIC_POST, HEADER_FIELD_INDEX_STATIC, HEADER_FIELD_LITERAL_NAME_LITERAL, - HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC, HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST, - HEADER_FIELD_LITERAL_NAME_REF_STATIC, NO_PREFIX, +use std::{ + mem, + ops::{Deref, Div}, }; -use crate::qpack_send_buf::QpackData; -use crate::reader::{to_string, ReceiverBufferWrapper}; -use crate::table::HeaderTable; -use crate::{Error, Res}; + use neqo_common::{qtrace, Header}; -use std::mem; -use std::ops::{Deref, Div}; + +use crate::{ + prefix::{ + BASE_PREFIX_NEGATIVE, BASE_PREFIX_POSITIVE, HEADER_FIELD_INDEX_DYNAMIC, + HEADER_FIELD_INDEX_DYNAMIC_POST, HEADER_FIELD_INDEX_STATIC, + HEADER_FIELD_LITERAL_NAME_LITERAL, HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC, + HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST, HEADER_FIELD_LITERAL_NAME_REF_STATIC, + NO_PREFIX, + }, + qpack_send_buf::QpackData, + reader::{parse_utf8, ReceiverBufferWrapper}, + table::HeaderTable, + Error, Res, +}; #[derive(Default, Debug, PartialEq)] pub struct HeaderEncoder { @@ -331,8 +338,8 @@ impl<'a> HeaderDecoder<'a> { qtrace!([self], "decoder static indexed {}.", index); let entry = 
HeaderTable::get_static(index)?; Ok(Header::new( - to_string(entry.name())?, - to_string(entry.value())?, + parse_utf8(entry.name())?, + parse_utf8(entry.value())?, )) } @@ -343,8 +350,8 @@ impl<'a> HeaderDecoder<'a> { qtrace!([self], "decoder dynamic indexed {}.", index); let entry = table.get_dynamic(index, self.base, false)?; Ok(Header::new( - to_string(entry.name())?, - to_string(entry.value())?, + parse_utf8(entry.name())?, + parse_utf8(entry.value())?, )) } @@ -355,8 +362,8 @@ impl<'a> HeaderDecoder<'a> { qtrace!([self], "decode post-based {}.", index); let entry = table.get_dynamic(index, self.base, true)?; Ok(Header::new( - to_string(entry.name())?, - to_string(entry.value())?, + parse_utf8(entry.name())?, + parse_utf8(entry.value())?, )) } @@ -371,7 +378,7 @@ impl<'a> HeaderDecoder<'a> { .read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_STATIC.len())?; Ok(Header::new( - to_string(HeaderTable::get_static(index)?.name())?, + parse_utf8(HeaderTable::get_static(index)?.name())?, self.buf.read_literal_from_buffer(0)?, )) } @@ -387,7 +394,7 @@ impl<'a> HeaderDecoder<'a> { .read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC.len())?; Ok(Header::new( - to_string(table.get_dynamic(index, self.base, false)?.name())?, + parse_utf8(table.get_dynamic(index, self.base, false)?.name())?, self.buf.read_literal_from_buffer(0)?, )) } @@ -400,7 +407,7 @@ impl<'a> HeaderDecoder<'a> { .read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST.len())?; Ok(Header::new( - to_string(table.get_dynamic(index, self.base, true)?.name())?, + parse_utf8(table.get_dynamic(index, self.base, true)?.name())?, self.buf.read_literal_from_buffer(0)?, )) } diff --git a/neqo-qpack/src/huffman.rs b/neqo-qpack/src/huffman.rs index 31657ca826..30bb880438 100644 --- a/neqo-qpack/src/huffman.rs +++ b/neqo-qpack/src/huffman.rs @@ -4,10 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT}; -use crate::huffman_table::HUFFMAN_TABLE; -use crate::{Error, Res}; -use std::convert::TryFrom; +use crate::{ + huffman_decode_helper::{huffman_decoder_root, HuffmanDecoderNode}, + huffman_table::HUFFMAN_TABLE, + Error, Res, +}; struct BitReader<'a> { input: &'a [u8], @@ -65,9 +66,14 @@ impl<'a> BitReader<'a> { } /// Decodes huffman encoded input. +/// /// # Errors -/// This function may return `HuffmanDecompressionFailed` if `input` is not a correct huffman-encoded array of bits. +/// +/// This function may return `HuffmanDecompressionFailed` if `input` is not a correct +/// huffman-encoded array of bits. +/// /// # Panics +/// /// Never, but rust can't know that. pub fn decode_huffman(input: &[u8]) -> Res> { let mut reader = BitReader::new(input); @@ -85,7 +91,7 @@ pub fn decode_huffman(input: &[u8]) -> Res> { } fn decode_character(reader: &mut BitReader) -> Res> { - let mut node: &HuffmanDecoderNode = &HUFFMAN_DECODE_ROOT; + let mut node: &HuffmanDecoderNode = huffman_decoder_root(); let mut i = 0; while node.value.is_none() { match reader.read_bit() { @@ -109,6 +115,7 @@ fn decode_character(reader: &mut BitReader) -> Res> { } /// # Panics +/// /// Never, but rust doesn't know that. #[must_use] pub fn encode_huffman(input: &[u8]) -> Vec { diff --git a/neqo-qpack/src/huffman_decode_helper.rs b/neqo-qpack/src/huffman_decode_helper.rs index 7589ebd11a..939312ab22 100644 --- a/neqo-qpack/src/huffman_decode_helper.rs +++ b/neqo-qpack/src/huffman_decode_helper.rs @@ -4,17 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::sync::OnceLock; + use crate::huffman_table::HUFFMAN_TABLE; -use lazy_static::lazy_static; -use std::convert::TryFrom; pub struct HuffmanDecoderNode { pub next: [Option>; 2], pub value: Option, } -lazy_static! 
{ - pub static ref HUFFMAN_DECODE_ROOT: HuffmanDecoderNode = make_huffman_tree(0, 0); +pub fn huffman_decoder_root() -> &'static HuffmanDecoderNode { + static ROOT: OnceLock = OnceLock::new(); + ROOT.get_or_init(|| make_huffman_tree(0, 0)) } fn make_huffman_tree(prefix: u32, len: u8) -> HuffmanDecoderNode { diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index c2d3bd8359..10ee5df61c 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -4,10 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] -// This is because of Encoder and Decoder structs. TODO: think about a better namings for crate and structs. -#![allow(clippy::module_name_repetitions)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. pub mod decoder; mod decoder_instructions; @@ -44,13 +41,14 @@ pub enum Error { EncoderStream, DecoderStream, ClosedCriticalStream, - InternalError(u16), + InternalError, // These are internal errors, they will be transformed into one of the above. - NeedMoreData, // Return when an input stream does not have more data that a decoder needs.(It does not mean that a stream is closed.) + NeedMoreData, /* Return when an input stream does not have more data that a decoder + * needs.(It does not mean that a stream is closed.) */ HeaderLookup, HuffmanDecompressionFailed, - ToStringFailed, + BadUtf8, ChangeCapacity, DynamicTableFull, IncrementAck, @@ -78,7 +76,8 @@ impl Error { } /// # Errors - /// Any error is mapped to the indicated type. + /// + /// Any error is mapped to the indicated type. 
fn map_error(r: Result, err: Self) -> Result { r.map_err(|e| { if matches!(e, Self::ClosedCriticalStream) { diff --git a/neqo-qpack/src/prefix.rs b/neqo-qpack/src/prefix.rs index ee0826850d..0085de0df9 100644 --- a/neqo-qpack/src/prefix.rs +++ b/neqo-qpack/src/prefix.rs @@ -6,6 +6,8 @@ #[derive(Copy, Clone, Debug)] pub struct Prefix { + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] prefix: u8, len: u8, mask: u8, @@ -14,9 +16,10 @@ pub struct Prefix { impl Prefix { pub fn new(prefix: u8, len: u8) -> Self { // len should never be larger than 7. - // Most of Prefixes are instantiated as consts bellow. The only place where this construcrtor is used - // is in tests and when literals are encoded and the Huffman bit is added to one of the consts bellow. - // create_prefix guaranty that all const have len < 7 so we can safely assert that len is <=7. + // Most of Prefixes are instantiated as consts bellow. The only place where this + // construcrtor is used is in tests and when literals are encoded and the Huffman + // bit is added to one of the consts bellow. create_prefix guaranty that all const + // have len < 7 so we can safely assert that len is <=7. 
assert!(len <= 7); assert!((len == 0) || (prefix & ((1 << (8 - len)) - 1) == 0)); Self { @@ -108,7 +111,7 @@ create_prefix!(ENCODER_INSERT_WITH_NAME_LITERAL, 0x40, 2); create_prefix!(ENCODER_DUPLICATE, 0x00, 3); //===================================================================== -//Header block encoding prefixes +// Header block encoding prefixes //===================================================================== create_prefix!(BASE_PREFIX_POSITIVE, 0x00, 1); @@ -135,5 +138,6 @@ create_prefix!(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC, 0x40, 4, 0xD0); create_prefix!(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST, 0x00, 5, 0xF0); // | 0 | 0 | 1 | N | H | Index(3+) | -// N is ignored and H is not relevant for decoding this prefix, therefore the mask is 1110 0000 = 0xE0 +// N is ignored and H is not relevant for decoding this prefix, therefore the mask is 1110 0000 = +// 0xE0 create_prefix!(HEADER_FIELD_LITERAL_NAME_LITERAL, 0x20, 4, 0xE0); diff --git a/neqo-qpack/src/qlog.rs b/neqo-qpack/src/qlog.rs index 11f9dbc0b3..8d48efb0aa 100644 --- a/neqo-qpack/src/qlog.rs +++ b/neqo-qpack/src/qlog.rs @@ -6,23 +6,31 @@ // Functions that handle capturing QLOG traces. 
-use neqo_common::hex; -use neqo_common::qlog::NeqoQlog; -use qlog::{event::Event, QPackInstruction, QpackInstructionTypeName}; +use neqo_common::{hex, qlog::NeqoQlog}; +use qlog::events::{ + qpack::{QPackInstruction, QpackInstructionParsed, QpackInstructionTypeName}, + EventData, RawInfo, +}; pub fn qpack_read_insert_count_increment_instruction( qlog: &mut NeqoQlog, increment: u64, data: &[u8], ) { - qlog.add_event(|| { - Some(Event::qpack_instruction_received( - QPackInstruction::InsertCountIncrementInstruction { + qlog.add_event_data(|| { + let raw = RawInfo { + length: Some(8), + payload_length: None, + data: Some(hex(data)), + }; + let ev_data = EventData::QpackInstructionParsed(QpackInstructionParsed { + instruction: QPackInstruction::InsertCountIncrementInstruction { instruction_type: QpackInstructionTypeName::InsertCountIncrementInstruction, increment, }, - Some(8.to_string()), - Some(hex(data)), - )) + raw: Some(raw), + }); + + Some(ev_data) }); } diff --git a/neqo-qpack/src/qpack_send_buf.rs b/neqo-qpack/src/qpack_send_buf.rs index 4fbdbf12bd..c0b8d7af1b 100644 --- a/neqo-qpack/src/qpack_send_buf.rs +++ b/neqo-qpack/src/qpack_send_buf.rs @@ -4,12 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::huffman::encode_huffman; -use crate::prefix::Prefix; -use neqo_common::Encoder; -use std::convert::TryFrom; use std::ops::Deref; +use neqo_common::Encoder; + +use crate::{huffman::encode_huffman, prefix::Prefix}; + #[derive(Default, Debug, PartialEq)] pub(crate) struct QpackData { buf: Vec, diff --git a/neqo-qpack/src/reader.rs b/neqo-qpack/src/reader.rs index 386a25ffc1..0173ed7888 100644 --- a/neqo-qpack/src/reader.rs +++ b/neqo-qpack/src/reader.rs @@ -4,26 +4,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::huffman::decode_huffman; -use crate::prefix::Prefix; -use crate::{Error, Res}; +use std::{mem, str}; + use neqo_common::{qdebug, qerror}; use neqo_transport::{Connection, StreamId}; -use std::convert::TryInto; -use std::mem; -use std::str; + +use crate::{huffman::decode_huffman, prefix::Prefix, Error, Res}; pub trait ReadByte { /// # Errors - /// Return error occurred while reading a byte. - /// The exact error depends on trait implementation. + /// + /// Return error occurred while reading a byte. + /// The exact error depends on trait implementation. fn read_byte(&mut self) -> Res; } pub trait Reader { /// # Errors - /// Return error occurred while reading date into a buffer. - /// The exact error depends on trait implementation. + /// + /// Return error occurred while reading date into a buffer. + /// The exact error depends on trait implementation. fn read(&mut self, buf: &mut [u8]) -> Res; } @@ -130,9 +130,9 @@ impl<'a> ReceiverBufferWrapper<'a> { .try_into() .or(Err(Error::DecompressionFailed))?; if use_huffman { - Ok(to_string(&decode_huffman(self.slice(length)?)?)?) + Ok(parse_utf8(&decode_huffman(self.slice(length)?)?)?.to_string()) } else { - Ok(to_string(self.slice(length)?)?) + Ok(parse_utf8(self.slice(length)?)?.to_string()) } } @@ -158,7 +158,9 @@ pub struct IntReader { impl IntReader { /// `IntReader` is created by suppling the first byte anf prefix length. /// A varint may take only one byte, In that case already the first by has set state to done. + /// /// # Panics + /// /// When `prefix_len` is 8 or larger. #[must_use] pub fn new(first_byte: u8, prefix_len: u8) -> Self { @@ -178,6 +180,7 @@ impl IntReader { } /// # Panics + /// /// Never, but rust doesn't know that. #[must_use] pub fn make(first_byte: u8, prefixes: &[Prefix]) -> Self { @@ -191,7 +194,9 @@ impl IntReader { /// This function reads bytes until the varint is decoded or until stream/buffer does not /// have any more date. 
+ /// /// # Errors + /// /// Possible errors are: /// 1) `NeedMoreData` if the reader needs more data, /// 2) `IntegerOverflow`, @@ -218,20 +223,19 @@ impl IntReader { } } -#[derive(Debug)] +#[derive(Debug, Default)] enum LiteralReaderState { + #[default] ReadHuffman, - ReadLength { reader: IntReader }, - ReadLiteral { offset: usize }, + ReadLength { + reader: IntReader, + }, + ReadLiteral { + offset: usize, + }, Done, } -impl Default for LiteralReaderState { - fn default() -> Self { - Self::ReadHuffman - } -} - /// This is decoder of a literal with a prefix: /// 1) ignores `prefix_len` bits of the first byte, /// 2) reads "huffman bit" @@ -249,7 +253,9 @@ impl LiteralReader { /// Creates `LiteralReader` with the first byte. This constructor is always used /// when a litreral has a prefix. /// For literals without a prefix please use the default constructor. + /// /// # Panics + /// /// If `prefix_len` is 8 or more. #[must_use] pub fn new_with_first_byte(first_byte: u8, prefix_len: u8) -> Self { @@ -265,13 +271,17 @@ impl LiteralReader { /// This function reads bytes until the literal is decoded or until stream/buffer does not /// have any more date ready. + /// /// # Errors + /// /// Possible errors are: /// 1) `NeedMoreData` if the reader needs more data, /// 2) `IntegerOverflow` /// 3) Any `ReadByte`'s error /// It returns value if reading the literal is done or None if it needs more data. + /// /// # Panics + /// /// When this object is complete. pub fn read(&mut self, s: &mut T) -> Res> { loop { @@ -313,21 +323,21 @@ impl LiteralReader { /// This is a helper function used only by `ReceiverBufferWrapper`, therefore it returns /// `DecompressionFailed` if any error happens. +/// /// # Errors -/// If an parsing error occurred, the function returns `ToStringFailed`. 
-pub fn to_string(v: &[u8]) -> Res { - match str::from_utf8(v) { - Ok(s) => Ok(s.to_string()), - Err(_) => Err(Error::ToStringFailed), - } +/// +/// If an parsing error occurred, the function returns `BadUtf8`. +pub fn parse_utf8(v: &[u8]) -> Res<&str> { + str::from_utf8(v).map_err(|_| Error::BadUtf8) } #[cfg(test)] pub(crate) mod test_receiver { - use super::{Error, ReadByte, Reader, Res}; use std::collections::VecDeque; + use super::{Error, ReadByte, Reader, Res}; + #[derive(Default)] pub struct TestReceiver { buf: VecDeque, @@ -365,11 +375,12 @@ pub(crate) mod test_receiver { #[cfg(test)] mod tests { + use test_receiver::TestReceiver; + use super::{ - str, test_receiver, to_string, Error, IntReader, LiteralReader, ReadByte, + parse_utf8, str, test_receiver, Error, IntReader, LiteralReader, ReadByte, ReceiverBufferWrapper, Res, }; - use test_receiver::TestReceiver; const TEST_CASES_NUMBERS: [(&[u8], u8, u64); 7] = [ (&[0xEA], 3, 10), @@ -531,7 +542,7 @@ mod tests { let mut test_receiver: TestReceiver = TestReceiver::default(); test_receiver.write(&buf[1..]); assert_eq!( - to_string(&reader.read(&mut test_receiver).unwrap()).unwrap(), + parse_utf8(&reader.read(&mut test_receiver).unwrap()).unwrap(), *value ); } diff --git a/neqo-qpack/src/table.rs b/neqo-qpack/src/table.rs index 8b2d70edce..d5275ec98f 100644 --- a/neqo-qpack/src/table.rs +++ b/neqo-qpack/src/table.rs @@ -4,11 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::static_table::{StaticTableEntry, HEADER_STATIC_TABLE}; -use crate::{Error, Res}; -use neqo_common::qtrace; use std::collections::VecDeque; -use std::convert::TryFrom; + +use neqo_common::qtrace; + +use crate::{ + static_table::{StaticTableEntry, HEADER_STATIC_TABLE}, + Error, Res, +}; pub const ADDITIONAL_TABLE_ENTRY_SIZE: usize = 32; @@ -91,7 +94,7 @@ impl HeaderTable { capacity: 0, used: 0, base: 0, - acked_inserts_cnt: if encoder { 0 } else { u64::max_value() }, + acked_inserts_cnt: if encoder { 0 } else { u64::MAX }, } } @@ -106,9 +109,12 @@ impl HeaderTable { } /// Change the dynamic table capacity. - /// ### Errors + /// + /// # Errors + /// /// `ChangeCapacity` if table capacity cannot be reduced. - /// The table cannot be reduce if there are entries that are referred at the moment or their inserts are unacked. + /// The table cannot be reduce if there are entries that are referred at the moment or their + /// inserts are unacked. pub fn set_capacity(&mut self, cap: u64) -> Res<()> { qtrace!([self], "set capacity to {}", cap); if !self.evict_to(cap) { @@ -119,7 +125,9 @@ impl HeaderTable { } /// Get a static entry with `index`. - /// ### Errors + /// + /// # Errors + /// /// `HeaderLookup` if the index does not exist in the static table. pub fn get_static(index: u64) -> Res<&'static StaticTableEntry> { let inx = usize::try_from(index).or(Err(Error::HeaderLookup))?; @@ -151,7 +159,9 @@ impl HeaderTable { } /// Get a entry in the dynamic table. - /// ### Errors + /// + /// # Errors + /// /// `HeaderLookup` if entry does not exist. pub fn get_dynamic(&self, index: u64, base: u64, post: bool) -> Res<&DynamicTableEntry> { let inx = if post { @@ -186,8 +196,8 @@ impl HeaderTable { } /// Look for a header pair. 
- /// The function returns `LookupResult`: `index`, `static_table` (if it is a static table entry) and `value_matches` - /// (if the header value matches as well not only header name) + /// The function returns `LookupResult`: `index`, `static_table` (if it is a static table entry) + /// and `value_matches` (if the header value matches as well not only header name) pub fn lookup(&mut self, name: &[u8], value: &[u8], can_block: bool) -> Option { qtrace!( [self], @@ -197,7 +207,7 @@ impl HeaderTable { can_block ); let mut name_match = None; - for iter in HEADER_STATIC_TABLE.iter() { + for iter in HEADER_STATIC_TABLE { if iter.name() == name { if iter.value() == value { return Some(LookupResult { @@ -280,9 +290,11 @@ impl HeaderTable { } /// Insert a new entry. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. + /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. pub fn insert(&mut self, name: &[u8], value: &[u8]) -> Res { qtrace!([self], "insert name={:?} value={:?}", name, value); let entry = DynamicTableEntry { @@ -304,9 +316,11 @@ impl HeaderTable { } /// Insert a new entry with the name refer to by a index to static or dynamic table. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. + /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. /// `HeaderLookup` if the index dos not exits in the static/dynamic table. pub fn insert_with_name_ref( &mut self, @@ -336,9 +350,11 @@ impl HeaderTable { } /// Duplicate an entry. 
- /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. + /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. /// `HeaderLookup` if the index dos not exits in the static/dynamic table. pub fn duplicate(&mut self, index: u64) -> Res { qtrace!([self], "duplicate entry={}", index); @@ -355,7 +371,9 @@ impl HeaderTable { } /// Increment number of acknowledge entries. - /// ### Errors + /// + /// # Errors + /// /// `IncrementAck` if ack is greater than actual number of inserts. pub fn increment_acked(&mut self, increment: u64) -> Res<()> { qtrace!([self], "increment acked by {}", increment); diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml deleted file mode 100644 index 74e597c95e..0000000000 --- a/neqo-server/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "neqo-server" -version = "0.6.4" -authors = ["Dragana Damjanovic "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" - -[dependencies] -neqo-crypto = { path = "./../neqo-crypto" } -neqo-transport = { path = "./../neqo-transport" } -neqo-common = { path="./../neqo-common" } -neqo-http3 = { path = "./../neqo-http3" } -neqo-qpack = { path = "./../neqo-qpack" } -structopt = "0.3.7" -regex = "1" -mio = "0.6.17" -mio-extras = "2.0.5" -log = {version = "0.4.0", default-features = false} -qlog = "0.4.0" - -[features] -default = ["deny-warnings"] -deny-warnings = [] diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs deleted file mode 100644 index 79c54adaa2..0000000000 --- a/neqo-server/src/main.rs +++ /dev/null @@ -1,828 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] - -use std::{ - cell::RefCell, - cmp::min, - collections::{HashMap, HashSet}, - convert::TryFrom, - fmt::{self, Display}, - fs::OpenOptions, - io, - io::Read, - mem, - net::{SocketAddr, ToSocketAddrs}, - path::PathBuf, - process::exit, - rc::Rc, - str::FromStr, - time::{Duration, Instant}, -}; - -use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; -use mio_extras::timer::{Builder, Timeout, Timer}; -use neqo_transport::ConnectionIdGenerator; -use structopt::StructOpt; - -use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header}; -use neqo_crypto::{ - constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, - generate_ech_keys, init_db, random, AntiReplay, Cipher, -}; -use neqo_http3::{ - Error, Http3OrWebTransportStream, Http3Parameters, Http3Server, Http3ServerEvent, StreamId, -}; -use neqo_transport::{ - server::ValidateAddress, tparams::PreferredAddress, CongestionControlAlgorithm, - ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, Version, -}; - -use crate::old_https::Http09Server; - -const TIMER_TOKEN: Token = Token(0xffff_ffff); -const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10); - -mod old_https; - -#[derive(Debug)] -pub enum ServerError { - ArgumentError(&'static str), - Http3Error(neqo_http3::Error), - IoError(io::Error), - QlogError, - TransportError(neqo_transport::Error), -} - -impl From for ServerError { - fn from(err: io::Error) -> Self { - Self::IoError(err) - } -} - -impl From for ServerError { - fn from(err: neqo_http3::Error) -> Self { - Self::Http3Error(err) - } -} - -impl From for ServerError { - fn from(_err: qlog::Error) -> Self { - Self::QlogError - } -} - -impl From for ServerError { - fn from(err: neqo_transport::Error) -> Self { - Self::TransportError(err) - } -} - -impl Display for ServerError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Error: 
{:?}", self)?; - Ok(()) - } -} - -#[derive(Debug, StructOpt)] -#[structopt(name = "neqo-server", about = "A basic HTTP3 server.")] -struct Args { - /// List of IP:port to listen on - #[structopt(default_value = "[::]:4433")] - hosts: Vec, - - #[structopt(name = "encoder-table-size", long, default_value = "16384")] - max_table_size_encoder: u64, - - #[structopt(name = "decoder-table-size", long, default_value = "16384")] - max_table_size_decoder: u64, - - #[structopt(short = "b", long, default_value = "10")] - max_blocked_streams: u16, - - #[structopt( - short = "d", - long, - default_value = "./test-fixture/db", - parse(from_os_str) - )] - /// NSS database directory. - db: PathBuf, - - #[structopt(short = "k", long, default_value = "key")] - /// Name of key from NSS database. - key: String, - - #[structopt(short = "a", long, default_value = "h3")] - /// ALPN labels to negotiate. - /// - /// This server still only does HTTP3 no matter what the ALPN says. - alpn: String, - - #[structopt(name = "qlog-dir", long)] - /// Enable QLOG logging and QLOG traces to this directory - qlog_dir: Option, - - #[structopt(name = "qns-test", long)] - /// Enable special behavior for use with QUIC Network Simulator - qns_test: Option, - - #[structopt(name = "use-old-http", short = "o", long)] - /// Use http 0.9 instead of HTTP/3 - use_old_http: bool, - - #[structopt(flatten)] - quic_parameters: QuicParameters, - - #[structopt(name = "retry", long)] - /// Force a retry - retry: bool, - - #[structopt(short = "c", long, number_of_values = 1)] - /// The set of TLS cipher suites to enable. - /// From: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256. - ciphers: Vec, - - #[structopt(name = "ech", long)] - /// Enable encrypted client hello (ECH). - /// This generates a new set of ECH keys when it is invoked. - /// The resulting configuration is printed to stdout in hexadecimal format. 
- ech: bool, -} - -impl Args { - fn get_ciphers(&self) -> Vec { - self.ciphers - .iter() - .filter_map(|c| match c.as_str() { - "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), - "TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384), - "TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256), - _ => None, - }) - .collect::>() - } - - fn listen_addresses(&self) -> Vec { - self.hosts - .iter() - .filter_map(|host| host.to_socket_addrs().ok()) - .flatten() - .chain(self.quic_parameters.preferred_address_v4()) - .chain(self.quic_parameters.preferred_address_v6()) - .collect() - } - - fn now(&self) -> Instant { - if self.qns_test.is_some() { - // When NSS starts its anti-replay it blocks any acceptance of 0-RTT for a - // single period. This ensures that an attacker that is able to force a - // server to reboot is unable to use that to flush the anti-replay buffers - // and have something replayed. - // - // However, this is a massive inconvenience for us when we are testing. - // As we can't initialize `AntiReplay` in the past (see `neqo_common::time` - // for why), fast forward time here so that the connections get times from - // in the future. - // - // This is NOT SAFE. Don't do this. - Instant::now() + ANTI_REPLAY_WINDOW - } else { - Instant::now() - } - } -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -struct VersionArg(Version); -impl FromStr for VersionArg { - type Err = ServerError; - - fn from_str(s: &str) -> Result { - let v = u32::from_str_radix(s, 16) - .map_err(|_| ServerError::ArgumentError("versions need to be specified in hex"))?; - Ok(Self(Version::try_from(v).map_err(|_| { - ServerError::ArgumentError("unknown version") - })?)) - } -} - -#[derive(Debug, StructOpt)] -struct QuicParameters { - #[structopt( - short = "V", - long, - multiple = true, - use_delimiter = true, - number_of_values = 1 - )] - /// A list of versions to support in order of preference, in hex. 
- quic_version: Vec, - - #[structopt(long, default_value = "16")] - /// Set the MAX_STREAMS_BIDI limit. - max_streams_bidi: u64, - - #[structopt(long, default_value = "16")] - /// Set the MAX_STREAMS_UNI limit. - max_streams_uni: u64, - - #[structopt(long = "idle", default_value = "30")] - /// The idle timeout for connections, in seconds. - idle_timeout: u64, - - #[structopt(long = "cc", default_value = "newreno")] - /// The congestion controller to use. - congestion_control: CongestionControlAlgorithm, - - #[structopt(name = "preferred-address-v4", long)] - /// An IPv4 address for the server preferred address. - preferred_address_v4: Option, - - #[structopt(name = "preferred-address-v6", long)] - /// An IPv6 address for the server preferred address. - preferred_address_v6: Option, -} - -impl QuicParameters { - fn get_sock_addr(opt: &Option, v: &str, f: F) -> Option - where - F: FnMut(&SocketAddr) -> bool, - { - let addr = opt - .iter() - .flat_map(|spa| spa.to_socket_addrs().ok()) - .flatten() - .find(f); - if opt.is_some() != addr.is_some() { - panic!( - "unable to resolve '{}' to an {} address", - opt.as_ref().unwrap(), - v - ); - } - addr - } - - fn preferred_address_v4(&self) -> Option { - Self::get_sock_addr(&self.preferred_address_v4, "IPv4", |addr| addr.is_ipv4()) - } - - fn preferred_address_v6(&self) -> Option { - Self::get_sock_addr(&self.preferred_address_v6, "IPv6", |addr| addr.is_ipv6()) - } - - fn preferred_address(&self) -> Option { - let v4 = self.preferred_address_v4(); - let v6 = self.preferred_address_v6(); - if v4.is_none() && v6.is_none() { - None - } else { - Some(PreferredAddress::new(v4, v6)) - } - } - - fn get(&self) -> ConnectionParameters { - let mut params = ConnectionParameters::default() - .max_streams(StreamType::BiDi, self.max_streams_bidi) - .max_streams(StreamType::UniDi, self.max_streams_uni) - .idle_timeout(Duration::from_secs(self.idle_timeout)) - .cc_algorithm(self.congestion_control); - if let Some(pa) = 
self.preferred_address() { - params = params.preferred_address(pa); - } - - if let Some(first) = self.quic_version.first() { - params = params.versions(first.0, self.quic_version.iter().map(|&v| v.0).collect()); - } - params - } -} - -fn emit_packet(socket: &mut UdpSocket, out_dgram: Datagram) { - let sent = socket - .send_to(&out_dgram, &out_dgram.destination()) - .expect("Error sending datagram"); - if sent != out_dgram.len() { - eprintln!("Unable to send all {} bytes of datagram", out_dgram.len()); - } -} - -fn qns_read_response(filename: &str) -> Option> { - let mut file_path = PathBuf::from("/www"); - file_path.push(filename.trim_matches(|p| p == '/')); - - OpenOptions::new() - .read(true) - .open(&file_path) - .map_err(|_e| eprintln!("Could not open {}", file_path.display())) - .ok() - .and_then(|mut f| { - let mut data = Vec::new(); - match f.read_to_end(&mut data) { - Ok(sz) => { - println!("{} bytes read from {}", sz, file_path.display()); - Some(data) - } - Err(e) => { - eprintln!("Error reading data: {:?}", e); - None - } - } - }) -} - -trait HttpServer: Display { - fn process(&mut self, dgram: Option, now: Instant) -> Output; - fn process_events(&mut self, args: &Args, now: Instant); - fn set_qlog_dir(&mut self, dir: Option); - fn set_ciphers(&mut self, ciphers: &[Cipher]); - fn validate_address(&mut self, when: ValidateAddress); - fn enable_ech(&mut self) -> &[u8]; -} - -struct ResponseData { - data: Vec, - offset: usize, - remaining: usize, -} - -impl From<&[u8]> for ResponseData { - fn from(data: &[u8]) -> Self { - Self::from(data.to_vec()) - } -} - -impl From> for ResponseData { - fn from(data: Vec) -> Self { - let remaining = data.len(); - Self { - data, - offset: 0, - remaining, - } - } -} - -impl ResponseData { - fn repeat(buf: &[u8], total: usize) -> Self { - Self { - data: buf.to_owned(), - offset: 0, - remaining: total, - } - } - - fn send(&mut self, stream: &mut Http3OrWebTransportStream) { - while self.remaining > 0 { - let end = 
min(self.data.len(), self.offset + self.remaining); - let slice = &self.data[self.offset..end]; - match stream.send_data(slice) { - Ok(0) => { - return; - } - Ok(sent) => { - self.remaining -= sent; - self.offset = (self.offset + sent) % self.data.len(); - } - Err(e) => { - qwarn!("Error writing to stream {}: {:?}", stream, e); - return; - } - } - } - } - - fn done(&self) -> bool { - self.remaining == 0 - } -} - -struct SimpleServer { - server: Http3Server, - /// Progress writing to each stream. - remaining_data: HashMap, -} - -impl SimpleServer { - const MESSAGE: &'static [u8] = b"I am the very model of a modern Major-General,\n\ - I've information vegetable, animal, and mineral,\n\ - I know the kings of England, and I quote the fights historical\n\ - From Marathon to Waterloo, in order categorical;\n\ - I'm very well acquainted, too, with matters mathematical,\n\ - I understand equations, both the simple and quadratical,\n\ - About binomial theorem, I'm teeming with a lot o' news,\n\ - With many cheerful facts about the square of the hypotenuse.\n"; - - pub fn new( - args: &Args, - anti_replay: AntiReplay, - cid_mgr: Rc>, - ) -> Self { - let server = Http3Server::new( - args.now(), - &[args.key.clone()], - &[args.alpn.clone()], - anti_replay, - cid_mgr, - Http3Parameters::default() - .connection_parameters(args.quic_parameters.get()) - .max_table_size_encoder(args.max_table_size_encoder) - .max_table_size_decoder(args.max_table_size_decoder) - .max_blocked_streams(args.max_blocked_streams), - None, - ) - .expect("We cannot make a server!"); - Self { - server, - remaining_data: HashMap::new(), - } - } -} - -impl Display for SimpleServer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.server.fmt(f) - } -} - -impl HttpServer for SimpleServer { - fn process(&mut self, dgram: Option, now: Instant) -> Output { - self.server.process(dgram, now) - } - - fn process_events(&mut self, args: &Args, _now: Instant) { - while let Some(event) = 
self.server.next_event() { - match event { - Http3ServerEvent::Headers { - mut stream, - headers, - fin, - } => { - println!("Headers (request={} fin={}): {:?}", stream, fin, headers); - - let mut response = - if let Some(path) = headers.iter().find(|&h| h.name() == ":path") { - if args.qns_test.is_some() { - if let Some(data) = qns_read_response(path.value()) { - ResponseData::from(data) - } else { - ResponseData::from(Self::MESSAGE) - } - } else if let Ok(count) = - path.value().trim_matches(|p| p == '/').parse::() - { - ResponseData::repeat(Self::MESSAGE, count) - } else { - ResponseData::from(Self::MESSAGE) - } - } else { - stream - .cancel_fetch(Error::HttpRequestIncomplete.code()) - .unwrap(); - continue; - }; - - stream - .send_headers(&[ - Header::new(":status", "200"), - Header::new("content-length", response.remaining), - ]) - .unwrap(); - response.send(&mut stream); - if response.done() { - stream.stream_close_send().unwrap(); - } else { - self.remaining_data.insert(stream.stream_id(), response); - } - } - Http3ServerEvent::DataWritable { mut stream } => { - if let Some(remaining) = self.remaining_data.get_mut(&stream.stream_id()) { - remaining.send(&mut stream); - if remaining.done() { - self.remaining_data.remove(&stream.stream_id()); - stream.stream_close_send().unwrap(); - } - } - } - - Http3ServerEvent::Data { stream, data, fin } => { - println!("Data (request={} fin={}): {:?}", stream, fin, data); - } - _ => {} - } - } - } - - fn set_qlog_dir(&mut self, dir: Option) { - self.server.set_qlog_dir(dir) - } - - fn validate_address(&mut self, v: ValidateAddress) { - self.server.set_validation(v); - } - - fn set_ciphers(&mut self, ciphers: &[Cipher]) { - self.server.set_ciphers(ciphers); - } - - fn enable_ech(&mut self) -> &[u8] { - let (sk, pk) = generate_ech_keys().expect("should create ECH keys"); - self.server - .enable_ech(random(1)[0], "public.example", &sk, &pk) - .unwrap(); - self.server.ech_config() - } -} - -fn read_dgram( - socket: &mut 
UdpSocket, - local_address: &SocketAddr, -) -> Result, io::Error> { - let buf = &mut [0u8; 2048]; - let (sz, remote_addr) = match socket.recv_from(&mut buf[..]) { - Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return Ok(None), - Err(err) => { - eprintln!("UDP recv error: {:?}", err); - return Err(err); - } - Ok(res) => res, - }; - - if sz == buf.len() { - eprintln!("Might have received more than {} bytes", buf.len()); - } - - if sz == 0 { - eprintln!("zero length datagram received?"); - Ok(None) - } else { - Ok(Some(Datagram::new(remote_addr, *local_address, &buf[..sz]))) - } -} - -struct ServersRunner { - args: Args, - poll: Poll, - hosts: Vec, - server: Box, - timeout: Option, - sockets: Vec, - active_sockets: HashSet, - timer: Timer, -} - -impl ServersRunner { - pub fn new(args: Args) -> Result { - let server = Self::create_server(&args); - let mut runner = Self { - args, - poll: Poll::new()?, - hosts: Vec::new(), - server, - timeout: None, - sockets: Vec::new(), - active_sockets: HashSet::new(), - timer: Builder::default() - .tick_duration(Duration::from_millis(1)) - .build::(), - }; - runner.init()?; - Ok(runner) - } - - /// Init Poll for all hosts. Create sockets, and a map of the - /// socketaddrs to instances of the HttpServer handling that addr. 
- fn init(&mut self) -> Result<(), io::Error> { - self.hosts = self.args.listen_addresses(); - if self.hosts.is_empty() { - eprintln!("No valid hosts defined"); - return Err(io::Error::new(io::ErrorKind::InvalidInput, "No hosts")); - } - - for (i, host) in self.hosts.iter().enumerate() { - let socket = match UdpSocket::bind(host) { - Err(err) => { - eprintln!("Unable to bind UDP socket: {}", err); - return Err(err); - } - Ok(s) => s, - }; - - let local_addr = match socket.local_addr() { - Err(err) => { - eprintln!("Socket local address not bound: {}", err); - return Err(err); - } - Ok(s) => s, - }; - - let also_v4 = if socket.only_v6().unwrap_or(true) { - "" - } else { - " as well as V4" - }; - println!( - "Server waiting for connection on: {:?}{}", - local_addr, also_v4 - ); - - self.poll.register( - &socket, - Token(i), - Ready::readable() | Ready::writable(), - PollOpt::edge(), - )?; - - self.sockets.push(socket); - } - - self.poll - .register(&self.timer, TIMER_TOKEN, Ready::readable(), PollOpt::edge())?; - - Ok(()) - } - - fn create_server(args: &Args) -> Box { - // Note: this is the exception to the case where we use `Args::now`. 
- let anti_replay = AntiReplay::new(Instant::now(), ANTI_REPLAY_WINDOW, 7, 14) - .expect("unable to setup anti-replay"); - let cid_mgr = Rc::new(RefCell::new(RandomConnectionIdGenerator::new(10))); - - let mut svr: Box = if args.use_old_http { - Box::new( - Http09Server::new( - args.now(), - &[args.key.clone()], - &[args.alpn.clone()], - anti_replay, - cid_mgr, - args.quic_parameters.get(), - ) - .expect("We cannot make a server!"), - ) - } else { - Box::new(SimpleServer::new(args, anti_replay, cid_mgr)) - }; - svr.set_ciphers(&args.get_ciphers()); - svr.set_qlog_dir(args.qlog_dir.clone()); - if args.retry { - svr.validate_address(ValidateAddress::Always); - } - if args.ech { - let cfg = svr.enable_ech(); - println!("ECHConfigList: {}", hex(cfg)); - } - svr - } - - /// Tries to find a socket, but then just falls back to sending from the first. - fn find_socket(&mut self, addr: SocketAddr) -> &mut UdpSocket { - let (first, rest) = self.sockets.split_first_mut().unwrap(); - rest.iter_mut() - .find(|s| { - s.local_addr() - .ok() - .map_or(false, |socket_addr| socket_addr == addr) - }) - .unwrap_or(first) - } - - fn process(&mut self, inx: usize, dgram: Option) -> bool { - match self.server.process(dgram, self.args.now()) { - Output::Datagram(dgram) => { - let socket = self.find_socket(dgram.source()); - emit_packet(socket, dgram); - true - } - Output::Callback(new_timeout) => { - if let Some(to) = &self.timeout { - self.timer.cancel_timeout(to); - } - - qinfo!("Setting timeout of {:?} for socket {}", new_timeout, inx); - self.timeout = Some(self.timer.set_timeout(new_timeout, inx)); - false - } - Output::None => { - qdebug!("Output::None"); - false - } - } - } - - fn process_datagrams_and_events( - &mut self, - inx: usize, - read_socket: bool, - ) -> Result<(), io::Error> { - if self.sockets.get_mut(inx).is_some() { - if read_socket { - loop { - let socket = self.sockets.get_mut(inx).unwrap(); - let dgram = read_dgram(socket, &self.hosts[inx])?; - if dgram.is_none() { 
- break; - } - _ = self.process(inx, dgram); - } - } else { - _ = self.process(inx, None); - } - self.server.process_events(&self.args, self.args.now()); - if self.process(inx, None) { - self.active_sockets.insert(inx); - } - } - Ok(()) - } - - fn process_active_conns(&mut self) -> Result<(), io::Error> { - let curr_active = mem::take(&mut self.active_sockets); - for inx in curr_active { - self.process_datagrams_and_events(inx, false)?; - } - Ok(()) - } - - fn process_timeout(&mut self) -> Result<(), io::Error> { - while let Some(inx) = self.timer.poll() { - qinfo!("Timer expired for {:?}", inx); - self.process_datagrams_and_events(inx, false)?; - } - Ok(()) - } - - pub fn run(&mut self) -> Result<(), io::Error> { - let mut events = Events::with_capacity(1024); - loop { - // If there are active servers do not block in poll. - self.poll.poll( - &mut events, - if self.active_sockets.is_empty() { - None - } else { - Some(Duration::from_millis(0)) - }, - )?; - - for event in &events { - if event.token() == TIMER_TOKEN { - self.process_timeout()?; - } else { - if !event.readiness().is_readable() { - continue; - } - self.process_datagrams_and_events(event.token().0, true)?; - } - } - self.process_active_conns()?; - } - } -} - -fn main() -> Result<(), io::Error> { - const HQ_INTEROP: &str = "hq-interop"; - - let mut args = Args::from_args(); - assert!(!args.key.is_empty(), "Need at least one key"); - - init_db(args.db.clone()); - - if let Some(testcase) = args.qns_test.as_ref() { - match testcase.as_str() { - "http3" => (), - "zerortt" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); - args.quic_parameters.max_streams_bidi = 100; - } - "handshake" | "transfer" | "resumption" | "multiconnect" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); - } - "chacha20" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); - args.ciphers.clear(); - args.ciphers - 
.extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); - } - "retry" => { - args.use_old_http = true; - args.alpn = String::from(HQ_INTEROP); - args.retry = true; - } - _ => exit(127), - } - } - - let mut servers_runner = ServersRunner::new(args)?; - servers_runner.run() -} diff --git a/neqo-transport/.gitignore b/neqo-transport/.gitignore deleted file mode 100644 index aa085cd807..0000000000 --- a/neqo-transport/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -Cargo.lock -/target -**/*.rs.bk diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index aa6f1ad1b9..125da11508 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,24 +1,49 @@ [package] name = "neqo-transport" -version = "0.6.4" -authors = ["EKR ", "Andy Grover "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true + +[lints] +workspace = true [dependencies] -neqo-crypto = { path = "../neqo-crypto" } +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +indexmap = { version = "1.9", default-features = false } +log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } -lazy_static = "1.3.0" -log = {version = "0.4.0", default-features = false} -smallvec = "1.0.0" -qlog = "0.4.0" -indexmap = "1.0" +neqo-crypto = { path = "../neqo-crypto" } +qlog = { version = "0.12", default-features = false } +smallvec = { version = "1.11", default-features = false } [dev-dependencies] +criterion = { version = "0.5", default-features = false, features = ["html_reports"] } +enum-map = { version = "2.7", default-features = false } test-fixture = { path = "../test-fixture" } [features] -default = ["deny-warnings"] -deny-warnings = [] -fuzzing = ["neqo-crypto/fuzzing"] +bench = [] +disable-encryption = 
["neqo-crypto/disable-encryption"] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false + +[[bench]] +name = "transfer" +harness = false +required-features = ["bench"] + +[[bench]] +name = "rx_stream_orderer" +harness = false +required-features = ["bench"] + +[[bench]] +name = "range_tracker" +harness = false +required-features = ["bench"] diff --git a/neqo-transport/TODO b/neqo-transport/TODO deleted file mode 100755 index 151dbd1753..0000000000 --- a/neqo-transport/TODO +++ /dev/null @@ -1,9 +0,0 @@ -Use stream events in h3 // grover or dragana? -harmonize our rust usage: - - use foo::* or use foo::{bar, baz} and ordering/grouping - - remove extern crate - - sort #[derive()] args -cleanup public API -write docs for public API -write docs for everything else -CI diff --git a/neqo-transport/benches/range_tracker.rs b/neqo-transport/benches/range_tracker.rs new file mode 100644 index 0000000000..ee611cf4ea --- /dev/null +++ b/neqo-transport/benches/range_tracker.rs @@ -0,0 +1,52 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use criterion::{criterion_group, criterion_main, Criterion}; // black_box +use neqo_transport::send_stream::RangeTracker; + +const CHUNK: u64 = 1000; +const END: u64 = 100_000; +fn build_coalesce(len: u64) -> RangeTracker { + let mut used = RangeTracker::default(); + let chunk = usize::try_from(CHUNK).expect("should fit"); + used.mark_acked(0, chunk); + used.mark_sent(CHUNK, usize::try_from(END).expect("should fit")); + // leave a gap or it will coalesce here + for i in 2..=len { + // These do not get immediately coalesced when marking since they're not at the end or start + used.mark_acked(i * CHUNK, chunk); + } + used +} + +fn coalesce(c: &mut Criterion, count: u64) { + let chunk = usize::try_from(CHUNK).expect("should fit"); + c.bench_function( + &format!("coalesce_acked_from_zero {count}+1 entries"), + |b| { + b.iter_batched_ref( + || build_coalesce(count), + |used| { + used.mark_acked(CHUNK, chunk); + let tail = (count + 1) * CHUNK; + used.mark_sent(tail, chunk); + used.mark_acked(tail, chunk); + }, + criterion::BatchSize::SmallInput, + ); + }, + ); +} + +fn benchmark_coalesce(c: &mut Criterion) { + coalesce(c, 1); + coalesce(c, 3); + coalesce(c, 10); + coalesce(c, 1000); +} + +criterion_group!(benches, benchmark_coalesce); +criterion_main!(benches); diff --git a/neqo-transport/benches/rx_stream_orderer.rs b/neqo-transport/benches/rx_stream_orderer.rs new file mode 100644 index 0000000000..d58e11ee86 --- /dev/null +++ b/neqo-transport/benches/rx_stream_orderer.rs @@ -0,0 +1,26 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use criterion::{criterion_group, criterion_main, Criterion}; +use neqo_transport::recv_stream::RxStreamOrderer; + +fn rx_stream_orderer() { + let mut rx = RxStreamOrderer::new(); + let data: &[u8] = &[0; 1337]; + + for i in 0..100_000 { + rx.inbound_frame(i * 1337, data); + } +} + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("RxStreamOrderer::inbound_frame()", |b| { + b.iter(rx_stream_orderer); + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs new file mode 100644 index 0000000000..32959f6cb5 --- /dev/null +++ b/neqo-transport/benches/transfer.rs @@ -0,0 +1,74 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion, Throughput}; +use test_fixture::{ + boxed, + sim::{ + connection::{ConnectionNode, ReceiveData, SendData}, + network::{Delay, TailDrop}, + Simulator, + }, +}; + +const ZERO: Duration = Duration::from_millis(0); +const JITTER: Duration = Duration::from_millis(10); +const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte + +fn benchmark_transfer(c: &mut Criterion, label: &str, seed: &Option>) { + let mut group = c.benchmark_group("transfer"); + group.throughput(Throughput::Bytes(u64::try_from(TRANSFER_AMOUNT).unwrap())); + group.noise_threshold(0.03); + group.bench_function(label, |b| { + b.iter_batched( + || { + let nodes = boxed![ + ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), + TailDrop::dsl_uplink(), + Delay::new(ZERO..JITTER), + ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), + TailDrop::dsl_downlink(), + Delay::new(ZERO..JITTER), + ]; + let mut sim = Simulator::new(label, nodes); + if let Some(seed) = &seed { + 
sim.seed_str(seed); + } + sim.setup() + }, + |sim| { + sim.run(); + }, + SmallInput, + ); + }); + group.finish(); +} + +fn benchmark_transfer_variable(c: &mut Criterion) { + benchmark_transfer( + c, + "Run multiple transfers with varying seeds", + &std::env::var("SIMULATION_SEED").ok(), + ); +} + +fn benchmark_transfer_fixed(c: &mut Criterion) { + benchmark_transfer( + c, + "Run multiple transfers with the same seed", + &Some("62df6933ba1f543cece01db8f27fb2025529b27f93df39e19f006e1db3b8c843"), + ); +} + +criterion_group! { + name = transfer; + config = Criterion::default().warm_up_time(Duration::from_secs(5)).measurement_time(Duration::from_secs(15)); + targets = benchmark_transfer_variable, benchmark_transfer_fixed +} +criterion_main!(transfer); diff --git a/neqo-transport/src/ackrate.rs b/neqo-transport/src/ackrate.rs index 6c4ae44f86..d5923805d9 100644 --- a/neqo-transport/src/ackrate.rs +++ b/neqo-transport/src/ackrate.rs @@ -5,18 +5,15 @@ // except according to those terms. // Management of the peer's ack rate. -#![deny(clippy::pedantic)] -use crate::connection::params::ACK_RATIO_SCALE; -use crate::frame::FRAME_TYPE_ACK_FREQUENCY; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; +use std::{cmp::max, time::Duration}; use neqo_common::qtrace; -use std::cmp::max; -use std::convert::TryFrom; -use std::time::Duration; + +use crate::{ + connection::params::ACK_RATIO_SCALE, frame::FRAME_TYPE_ACK_FREQUENCY, packet::PacketBuilder, + recovery::RecoveryToken, stats::FrameStats, +}; #[derive(Debug, Clone)] pub struct AckRate { diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index fcb8106742..f596cfc3cb 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -6,32 +6,31 @@ // This file implements functions necessary for address validation. 
+use std::{ + net::{IpAddr, SocketAddr}, + time::{Duration, Instant}, +}; + use neqo_common::{qinfo, qtrace, Decoder, Encoder, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, selfencrypt::SelfEncrypt, }; - -use crate::cid::ConnectionId; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::{Error, Res}; - use smallvec::SmallVec; -use std::convert::TryFrom; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; -/// A prefix we add to Retry tokens to distinguish them from NEW_TOKEN tokens. +use crate::{ + cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Res, +}; + +/// A prefix we add to Retry tokens to distinguish them from `NEW_TOKEN` tokens. const TOKEN_IDENTIFIER_RETRY: &[u8] = &[0x52, 0x65, 0x74, 0x72, 0x79]; -/// A prefix on NEW_TOKEN tokens, that is maximally Hamming distant from NEW_TOKEN. +/// A prefix on `NEW_TOKEN` tokens, that is maximally Hamming distant from `NEW_TOKEN`. /// Together, these need to have a low probability of collision, even if there is /// corruption of individual bits in transit. const TOKEN_IDENTIFIER_NEW_TOKEN: &[u8] = &[0xad, 0x9a, 0x8b, 0x8d, 0x86]; -/// The maximum number of tokens we'll save from NEW_TOKEN frames. -/// This should be the same as the value of MAX_TICKETS in neqo-crypto. +/// The maximum number of tokens we'll save from `NEW_TOKEN` frames. +/// This should be the same as the value of `MAX_TICKETS` in neqo-crypto. const MAX_NEW_TOKEN: usize = 4; /// The number of tokens we'll track for the purposes of looking for duplicates. /// This is based on how many might be received over a period where could be @@ -44,9 +43,9 @@ const MAX_SAVED_TOKENS: usize = 8; pub enum ValidateAddress { /// Require address validation never. Never, - /// Require address validation unless a NEW_TOKEN token is provided. + /// Require address validation unless a `NEW_TOKEN` token is provided. 
NoToken, - /// Require address validation even if a NEW_TOKEN token is provided. + /// Require address validation even if a `NEW_TOKEN` token is provided. Always, } @@ -143,7 +142,7 @@ impl AddressValidation { self.generate_token(Some(dcid), peer_address, now) } - /// This generates a token for use with NEW_TOKEN. + /// This generates a token for use with `NEW_TOKEN`. pub fn generate_new_token(&self, peer_address: SocketAddr, now: Instant) -> Res> { self.generate_token(None, peer_address, now) } @@ -184,7 +183,7 @@ impl AddressValidation { /// Less than one difference per byte indicates that it is likely not a Retry. /// This generous interpretation allows for a lot of damage in transit. /// Note that if this check fails, then the token will be treated like it came - /// from NEW_TOKEN instead. If there truly is corruption of packets that causes + /// from `NEW_TOKEN` instead. If there truly is corruption of packets that causes /// validation failure, it will be a failure that we try to recover from. fn is_likely_retry(token: &[u8]) -> bool { let mut difference = 0; @@ -210,10 +209,9 @@ impl AddressValidation { if self.validation == ValidateAddress::Never { qinfo!("AddressValidation: no token; accepting"); return AddressValidationResult::Pass; - } else { - qinfo!("AddressValidation: no token; validating"); - return AddressValidationResult::Validate; } + qinfo!("AddressValidation: no token; validating"); + return AddressValidationResult::Validate; } if token.len() <= TOKEN_IDENTIFIER_RETRY.len() { // Treat bad tokens strictly. @@ -231,7 +229,7 @@ impl AddressValidation { qinfo!("AddressValidation: valid Retry token for {}", cid); AddressValidationResult::ValidRetry(cid) } else { - panic!("AddressValidation: Retry token with small CID {}", cid); + panic!("AddressValidation: Retry token with small CID {cid}"); } } else if cid.is_empty() { // An empty connection ID means NEW_TOKEN. 
@@ -243,7 +241,7 @@ impl AddressValidation { AddressValidationResult::Pass } } else { - panic!("AddressValidation: NEW_TOKEN token with CID {}", cid); + panic!("AddressValidation: NEW_TOKEN token with CID {cid}"); } } else { // From here on, we have a token that we couldn't decrypt. @@ -351,14 +349,13 @@ impl NewTokenState { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { if let Self::Server(ref mut sender) = self { - sender.write_frames(builder, tokens, stats)?; + sender.write_frames(builder, tokens, stats); } - Ok(()) } - /// If this a server, buffer a NEW_TOKEN for sending. + /// If this a server, buffer a `NEW_TOKEN` for sending. /// If this is a client, panic. pub fn send_new_token(&mut self, token: Vec) { if let Self::Server(ref mut sender) = self { @@ -368,7 +365,7 @@ impl NewTokenState { } } - /// If this a server, process a lost signal for a NEW_TOKEN frame. + /// If this a server, process a lost signal for a `NEW_TOKEN` frame. /// If this is a client, panic. pub fn lost(&mut self, seqno: usize) { if let Self::Server(ref mut sender) = self { @@ -378,7 +375,7 @@ impl NewTokenState { } } - /// If this a server, process remove the acknowledged NEW_TOKEN frame. + /// If this a server, process remove the acknowledged `NEW_TOKEN` frame. /// If this is a client, panic. pub fn acked(&mut self, seqno: usize) { if let Self::Server(ref mut sender) = self { @@ -403,7 +400,7 @@ impl NewTokenFrameStatus { #[derive(Default)] pub struct NewTokenSender { - /// The unacknowledged NEW_TOKEN frames we are yet to send. + /// The unacknowledged `NEW_TOKEN` frames we are yet to send. tokens: Vec, /// A sequence number that is used to track individual tokens /// by reference (so that recovery tokens can be simple). 
@@ -426,26 +423,22 @@ impl NewTokenSender { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { - for t in self.tokens.iter_mut() { + ) { + for t in &mut self.tokens { if t.needs_sending && t.len() <= builder.remaining() { t.needs_sending = false; builder.encode_varint(crate::frame::FRAME_TYPE_NEW_TOKEN); builder.encode_vvec(&t.token); - if builder.len() > builder.limit() { - return Err(Error::InternalError(7)); - } tokens.push(RecoveryToken::NewToken(t.seqno)); stats.new_token += 1; } } - Ok(()) } pub fn lost(&mut self, seqno: usize) { - for t in self.tokens.iter_mut() { + for t in &mut self.tokens { if t.seqno == seqno { t.needs_sending = true; break; @@ -460,9 +453,10 @@ impl NewTokenSender { #[cfg(test)] mod tests { - use super::NewTokenState; use neqo_common::Role; + use super::NewTokenState; + const ONE: &[u8] = &[1, 2, 3]; const TWO: &[u8] = &[4, 5]; diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index fbaa5e1227..f8bcee6722 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -5,18 +5,24 @@ // except according to those terms. 
// Congestion control -#![deny(clippy::pedantic)] -use std::cmp::{max, min}; -use std::fmt::{self, Debug, Display}; -use std::time::{Duration, Instant}; +use std::{ + cmp::{max, min}, + fmt::{self, Debug, Display}, + time::{Duration, Instant}, +}; use super::CongestionControl; - -use crate::cc::MAX_DATAGRAM_SIZE; -use crate::qlog::{self, QlogMetric}; -use crate::sender::PACING_BURST_SIZE; -use crate::tracking::SentPacket; +use crate::{ + cc::MAX_DATAGRAM_SIZE, + packet::PacketNumber, + qlog::{self, QlogMetric}, + rtt::RttEstimate, + sender::PACING_BURST_SIZE, + tracking::SentPacket, +}; +#[rustfmt::skip] // to keep `::` and thus prevent conflict with `crate::qlog` +use ::qlog::events::{quic::CongestionStateUpdated, EventData}; use neqo_common::{const_max, const_min, qdebug, qinfo, qlog::NeqoQlog, qtrace}; pub const CWND_INITIAL_PKTS: usize = 10; @@ -107,7 +113,15 @@ pub struct ClassicCongestionControl { bytes_in_flight: usize, acked_bytes: usize, ssthresh: usize, - recovery_start: Option, + recovery_start: Option, + /// `first_app_limited` indicates the packet number after which the application might be + /// underutilizing the congestion window. When underutilizing the congestion window due to not + /// sending out enough data, we SHOULD NOT increase the congestion window.[1] Packets sent + /// before this point are deemed to fully utilize the congestion window and count towards + /// increasing the congestion window. + /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc9002#section-7.8 + first_app_limited: PacketNumber, qlog: NeqoQlog, } @@ -146,24 +160,28 @@ impl CongestionControl for ClassicCongestionControl { } // Multi-packet version of OnPacketAckedCC - fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant) { - // Check whether we are app limited before acked packets are removed - // from bytes_in_flight. 
- let is_app_limited = self.app_limited(); - qtrace!( - [self], - "limited={}, bytes_in_flight={}, cwnd={}, state={:?} pacing_burst_size={}", - is_app_limited, - self.bytes_in_flight, - self.congestion_window, - self.state, - MAX_DATAGRAM_SIZE * PACING_BURST_SIZE, - ); - + fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], rtt_est: &RttEstimate, now: Instant) { + let mut is_app_limited = true; let mut new_acked = 0; - for pkt in acked_pkts.iter().filter(|pkt| pkt.cc_outstanding()) { - assert!(self.bytes_in_flight >= pkt.size); - self.bytes_in_flight -= pkt.size; + for pkt in acked_pkts { + qdebug!( + "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}", + self, + pkt.pn, + pkt.size, + i32::from(!pkt.cc_outstanding()), + i32::from(pkt.lost()), + rtt_est, + ); + if !pkt.cc_outstanding() { + continue; + } + if pkt.pn < self.first_app_limited { + is_app_limited = false; + } + // BIF is set to 0 on a path change, but in case that was because of a simple rebinding + // event, we may still get ACKs for packets sent before the rebinding. + self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.size); if !self.after_recovery_start(pkt) { // Do not increase congestion window for packets sent before @@ -181,18 +199,17 @@ impl CongestionControl for ClassicCongestionControl { if is_app_limited { self.cc_algorithm.on_app_limited(); + qdebug!("on_packets_acked this={:p}, limited=1, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); return; } - qtrace!([self], "ACK received, acked_bytes = {}", self.acked_bytes); - // Slow start, up to the slow start threshold. 
if self.congestion_window < self.ssthresh { self.acked_bytes += new_acked; let increase = min(self.ssthresh - self.congestion_window, self.acked_bytes); self.congestion_window += increase; self.acked_bytes -= increase; - qinfo!([self], "slow start += {}", increase); + qdebug!([self], "slow start += {}", increase); if self.congestion_window == self.ssthresh { // This doesn't look like it is necessary, but it can happen // after persistent congestion. @@ -206,7 +223,7 @@ impl CongestionControl for ClassicCongestionControl { let bytes_for_increase = self.cc_algorithm.bytes_for_cwnd_increase( self.congestion_window, new_acked, - min_rtt, + rtt_est.minimum(), now, ); debug_assert!(bytes_for_increase > 0); @@ -233,6 +250,7 @@ impl CongestionControl for ClassicCongestionControl { QlogMetric::BytesInFlight(self.bytes_in_flight), ], ); + qdebug!([self], "on_packets_acked this={:p}, limited=0, bytes_in_flight={}, cwnd={}, state={:?}, new_acked={}", self, self.bytes_in_flight, self.congestion_window, self.state, new_acked); } /// Update congestion controller state based on lost packets. @@ -248,16 +266,21 @@ impl CongestionControl for ClassicCongestionControl { } for pkt in lost_packets.iter().filter(|pkt| pkt.cc_in_flight()) { - assert!(self.bytes_in_flight >= pkt.size); - self.bytes_in_flight -= pkt.size; + qdebug!( + "packet_lost this={:p}, pn={}, ps={}", + self, + pkt.pn, + pkt.size + ); + // BIF is set to 0 on a path change, but in case that was because of a simple rebinding + // event, we may still declare packets lost that were sent before the rebinding. 
+ self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.size); } qlog::metrics_updated( &mut self.qlog, &[QlogMetric::BytesInFlight(self.bytes_in_flight)], ); - qdebug!([self], "Pkts lost {}", lost_packets.len()); - let congestion = self.on_congestion_event(lost_packets.last().unwrap()); let persistent_congestion = self.detect_persistent_congestion( first_rtt_sample_time, @@ -265,6 +288,13 @@ impl CongestionControl for ClassicCongestionControl { pto, lost_packets, ); + qdebug!( + "on_packets_lost this={:p}, bytes_in_flight={}, cwnd={}, state={:?}", + self, + self.bytes_in_flight, + self.congestion_window, + self.state + ); congestion || persistent_congestion } @@ -291,21 +321,27 @@ impl CongestionControl for ClassicCongestionControl { fn on_packet_sent(&mut self, pkt: &SentPacket) { // Record the recovery time and exit any transient state. if self.state.transient() { - self.recovery_start = Some(pkt.time_sent); + self.recovery_start = Some(pkt.pn); self.state.update(); } if !pkt.cc_in_flight() { return; } + if !self.app_limited() { + // Given the current non-app-limited condition, we're fully utilizing the congestion + // window. Assume that all in-flight packets up to this one are NOT app-limited. + // However, subsequent packets might be app-limited. Set `first_app_limited` to the + // next packet number. 
+ self.first_app_limited = pkt.pn + 1; + } self.bytes_in_flight += pkt.size; qdebug!( - [self], - "Pkt Sent len {}, bif {}, cwnd {}", - pkt.size, - self.bytes_in_flight, - self.congestion_window + "packet_sent this={:p}, pn={}, ps={}", + self, + pkt.pn, + pkt.size ); qlog::metrics_updated( &mut self.qlog, @@ -330,6 +366,7 @@ impl ClassicCongestionControl { ssthresh: usize::MAX, recovery_start: None, qlog: NeqoQlog::disabled(), + first_app_limited: 0, } } @@ -363,15 +400,17 @@ impl ClassicCongestionControl { if self.state != state { qdebug!([self], "state -> {:?}", state); let old_state = self.state; - self.qlog.add_event(|| { + self.qlog.add_event_data(|| { // No need to tell qlog about exit from transient states. if old_state.transient() { None } else { - Some(::qlog::event::Event::congestion_state_updated( - Some(old_state.to_qlog().to_owned()), - state.to_qlog().to_owned(), - )) + let ev_data = EventData::CongestionStateUpdated(CongestionStateUpdated { + old: Some(old_state.to_qlog().to_owned()), + new: state.to_qlog().to_owned(), + trigger: None, + }); + Some(ev_data) } }); self.state = state; @@ -413,7 +452,11 @@ impl ClassicCongestionControl { continue; } if let Some(t) = start { - if p.time_sent.checked_duration_since(t).unwrap() > pc_period { + let elapsed = p + .time_sent + .checked_duration_since(t) + .expect("time is monotonic"); + if elapsed > pc_period { qinfo!([self], "persistent congestion"); self.congestion_window = CWND_MIN; self.acked_bytes = 0; @@ -433,10 +476,13 @@ impl ClassicCongestionControl { #[must_use] fn after_recovery_start(&mut self, packet: &SentPacket) -> bool { - // At the start of the first recovery period, if the state is - // transient, all packets will have been sent before recovery. - self.recovery_start - .map_or(!self.state.transient(), |t| packet.time_sent >= t) + // At the start of the recovery period, the state is transient and + // all packets will have been sent before recovery. 
When sending out + // the first packet we transition to the non-transient `Recovery` + // state and update the variable `self.recovery_start`. Before the + // first recovery, all packets were sent after the recovery event, + // allowing to reduce the cwnd on congestion events. + !self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn >= pn) } /// Handle a congestion event. @@ -454,7 +500,7 @@ impl ClassicCongestionControl { self.congestion_window = max(cwnd, CWND_MIN); self.acked_bytes = acked_bytes; self.ssthresh = self.congestion_window; - qinfo!( + qdebug!( [self], "Cong event -> recovery; cwnd {}, ssthresh {}", self.congestion_window, @@ -472,7 +518,6 @@ impl ClassicCongestionControl { true } - #[allow(clippy::unused_self)] fn app_limited(&self) -> bool { if self.bytes_in_flight >= self.congestion_window { false @@ -491,22 +536,29 @@ impl ClassicCongestionControl { #[cfg(test)] mod tests { + use std::time::{Duration, Instant}; + + use neqo_common::qinfo; + use test_fixture::now; + use super::{ ClassicCongestionControl, WindowAdjustment, CWND_INITIAL, CWND_MIN, PERSISTENT_CONG_THRESH, }; - use crate::cc::cubic::{Cubic, CUBIC_BETA_USIZE_DIVISOR, CUBIC_BETA_USIZE_QUOTIENT}; - use crate::cc::new_reno::NewReno; - use crate::cc::{ - CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, + use crate::{ + cc::{ + classic_cc::State, + cubic::{Cubic, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR}, + new_reno::NewReno, + CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, + }, + packet::{PacketNumber, PacketType}, + rtt::RttEstimate, + tracking::SentPacket, }; - use crate::packet::{PacketNumber, PacketType}; - use crate::tracking::SentPacket; - use std::convert::TryFrom; - use std::time::{Duration, Instant}; - use test_fixture::now; const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); + const RTT_ESTIMATE: RttEstimate = 
RttEstimate::from_duration(Duration::from_millis(98)); const ZERO: Duration = Duration::from_secs(0); const EPSILON: Duration = Duration::from_nanos(1); const GAP: Duration = Duration::from_secs(1); @@ -580,7 +632,7 @@ mod tests { ); persistent_congestion_by_algorithm( CongestionControlAlgorithm::Cubic, - CWND_INITIAL * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR, + CWND_INITIAL * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR, lost_packets, persistent_expected, ); @@ -923,7 +975,7 @@ mod tests { /// The code asserts on ordering errors. #[test] - #[should_panic] + #[should_panic(expected = "time is monotonic")] fn persistent_congestion_unsorted_newreno() { let lost = make_lost(&[PERSISTENT_CONG_THRESH + 2, 1]); assert!(!persistent_congestion_by_pto( @@ -936,7 +988,7 @@ mod tests { /// The code asserts on ordering errors. #[test] - #[should_panic] + #[should_panic(expected = "time is monotonic")] fn persistent_congestion_unsorted_cubic() { let lost = make_lost(&[PERSISTENT_CONG_THRESH + 2, 1]); assert!(!persistent_congestion_by_pto( @@ -949,131 +1001,183 @@ mod tests { #[test] fn app_limited_slow_start() { - const LESS_THAN_CWND_PKTS: usize = 4; + const BELOW_APP_LIMIT_PKTS: usize = 5; + const ABOVE_APP_LIMIT_PKTS: usize = BELOW_APP_LIMIT_PKTS + 1; let mut cc = ClassicCongestionControl::new(NewReno::default()); - - for i in 0..CWND_INITIAL_PKTS { - let sent = SentPacket::new( - PacketType::Short, - u64::try_from(i).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size - ); - cc.on_packet_sent(&sent); + let cwnd = cc.congestion_window; + let mut now = now(); + let mut next_pn = 0; + + // simulate packet bursts below app_limit + for packet_burst_size in 1..=BELOW_APP_LIMIT_PKTS { + // always stay below app_limit during sent. 
+ let mut pkts = Vec::new(); + for _ in 0..packet_burst_size { + let p = SentPacket::new( + PacketType::Short, + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size + ); + next_pn += 1; + cc.on_packet_sent(&p); + pkts.push(p); + } + assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); + now += RTT; + cc.on_packets_acked(&pkts, &RTT_ESTIMATE, now); + assert_eq!(cc.bytes_in_flight(), 0); + assert_eq!(cc.acked_bytes, 0); + assert_eq!(cwnd, cc.congestion_window); // CWND doesn't grow because we're app limited } - assert_eq!(cc.bytes_in_flight(), CWND_INITIAL); - for i in 0..LESS_THAN_CWND_PKTS { - let acked = SentPacket::new( + // Fully utilize the congestion window by sending enough packets to + // have `bytes_in_flight` above the `app_limited` threshold. + let mut pkts = Vec::new(); + for _ in 0..ABOVE_APP_LIMIT_PKTS { + let p = SentPacket::new( PacketType::Short, - u64::try_from(i).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size ); - cc.on_packets_acked(&[acked], RTT, now()); - - assert_eq!( - cc.bytes_in_flight(), - (CWND_INITIAL_PKTS - i - 1) * MAX_DATAGRAM_SIZE - ); - assert_eq!(cc.cwnd(), (CWND_INITIAL_PKTS + i + 1) * MAX_DATAGRAM_SIZE); + next_pn += 1; + cc.on_packet_sent(&p); + pkts.push(p); } - - // Now we are app limited - for i in 4..CWND_INITIAL_PKTS { - let p = [SentPacket::new( - PacketType::Short, - u64::try_from(i).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size - )]; - cc.on_packets_acked(&p, RTT, now()); + assert_eq!( + cc.bytes_in_flight(), + ABOVE_APP_LIMIT_PKTS * MAX_DATAGRAM_SIZE + ); + now += RTT; + // Check if congestion window gets increased for all packets currently in flight + for (i, pkt) in 
pkts.into_iter().enumerate() { + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), - (CWND_INITIAL_PKTS - i - 1) * MAX_DATAGRAM_SIZE + (ABOVE_APP_LIMIT_PKTS - i - 1) * MAX_DATAGRAM_SIZE ); - assert_eq!(cc.cwnd(), (CWND_INITIAL_PKTS + 4) * MAX_DATAGRAM_SIZE); + // increase acked_bytes with each packet + qinfo!("{} {}", cc.congestion_window, cwnd + i * MAX_DATAGRAM_SIZE); + assert_eq!(cc.congestion_window, cwnd + (i + 1) * MAX_DATAGRAM_SIZE); + assert_eq!(cc.acked_bytes, 0); } } #[test] fn app_limited_congestion_avoidance() { const CWND_PKTS_CA: usize = CWND_INITIAL_PKTS / 2; + const BELOW_APP_LIMIT_PKTS: usize = CWND_PKTS_CA - 2; + const ABOVE_APP_LIMIT_PKTS: usize = BELOW_APP_LIMIT_PKTS + 1; let mut cc = ClassicCongestionControl::new(NewReno::default()); + let mut now = now(); // Change state to congestion avoidance by introducing loss. let p_lost = SentPacket::new( PacketType::Short, 1, // pn - now(), // time sent + now, // time sent true, // ack eliciting Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); cc.on_packet_sent(&p_lost); cwnd_is_default(&cc); - cc.on_packets_lost(Some(now()), None, PTO, &[p_lost]); + now += PTO; + cc.on_packets_lost(Some(now), None, PTO, &[p_lost]); cwnd_is_halved(&cc); let p_not_lost = SentPacket::new( PacketType::Short, - 1, // pn - now(), // time sent + 2, // pn + now, // time sent true, // ack eliciting Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); cc.on_packet_sent(&p_not_lost); - cc.on_packets_acked(&[p_not_lost], RTT, now()); + now += RTT; + cc.on_packets_acked(&[p_not_lost], &RTT_ESTIMATE, now); cwnd_is_halved(&cc); // cc is app limited therefore cwnd in not increased. assert_eq!(cc.acked_bytes, 0); // Now we are in the congestion avoidance state. + assert_eq!(cc.state, State::CongestionAvoidance); + // simulate packet bursts below app_limit + let mut next_pn = 3; + for packet_burst_size in 1..=BELOW_APP_LIMIT_PKTS { + // always stay below app_limit during sent. 
+ let mut pkts = Vec::new(); + for _ in 0..packet_burst_size { + let p = SentPacket::new( + PacketType::Short, + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size + ); + next_pn += 1; + cc.on_packet_sent(&p); + pkts.push(p); + } + assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); + now += RTT; + for (i, pkt) in pkts.into_iter().enumerate() { + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); + + assert_eq!( + cc.bytes_in_flight(), + (packet_burst_size - i - 1) * MAX_DATAGRAM_SIZE + ); + cwnd_is_halved(&cc); // CWND doesn't grow because we're app limited + assert_eq!(cc.acked_bytes, 0); + } + } + + // Fully utilize the congestion window by sending enough packets to + // have `bytes_in_flight` above the `app_limited` threshold. let mut pkts = Vec::new(); - for i in 0..CWND_PKTS_CA { + for _ in 0..ABOVE_APP_LIMIT_PKTS { let p = SentPacket::new( PacketType::Short, - u64::try_from(i + 3).unwrap(), // pn - now(), // time sent - true, // ack eliciting - Vec::new(), // tokens - MAX_DATAGRAM_SIZE, // size + next_pn, // pn + now, // time sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size ); + next_pn += 1; cc.on_packet_sent(&p); pkts.push(p); } - assert_eq!(cc.bytes_in_flight(), CWND_INITIAL / 2); - - for i in 0..CWND_PKTS_CA - 2 { - cc.on_packets_acked(&pkts[i..=i], RTT, now()); - - assert_eq!( - cc.bytes_in_flight(), - (CWND_PKTS_CA - i - 1) * MAX_DATAGRAM_SIZE - ); - assert_eq!(cc.cwnd(), CWND_PKTS_CA * MAX_DATAGRAM_SIZE); - assert_eq!(cc.acked_bytes, MAX_DATAGRAM_SIZE * (i + 1)); - } - - // Now we are app limited - for i in CWND_PKTS_CA - 2..CWND_PKTS_CA { - cc.on_packets_acked(&pkts[i..=i], RTT, now()); + assert_eq!( + cc.bytes_in_flight(), + ABOVE_APP_LIMIT_PKTS * MAX_DATAGRAM_SIZE + ); + now += RTT; + let mut last_acked_bytes = 0; + // Check if congestion window gets increased for all packets currently in flight + for (i, pkt) in 
pkts.into_iter().enumerate() { + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), - (CWND_PKTS_CA - i - 1) * MAX_DATAGRAM_SIZE + (ABOVE_APP_LIMIT_PKTS - i - 1) * MAX_DATAGRAM_SIZE ); - assert_eq!(cc.cwnd(), CWND_PKTS_CA * MAX_DATAGRAM_SIZE); - assert_eq!(cc.acked_bytes, MAX_DATAGRAM_SIZE * 3); + // The cwnd doesn't increase, but the acked_bytes do, which will eventually lead to an + // increase, once the number of bytes reaches the necessary level + cwnd_is_halved(&cc); + // increase acked_bytes with each packet + assert_ne!(cc.acked_bytes, last_acked_bytes); + last_acked_bytes = cc.acked_bytes; } } } diff --git a/neqo-transport/src/cc/cubic.rs b/neqo-transport/src/cc/cubic.rs index ab3fedb74f..058a4c2aa4 100644 --- a/neqo-transport/src/cc/cubic.rs +++ b/neqo-transport/src/cc/cubic.rs @@ -4,14 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(clippy::pedantic)] +use std::{ + fmt::{self, Display}, + time::{Duration, Instant}, +}; -use std::fmt::{self, Display}; -use std::time::{Duration, Instant}; +use neqo_common::qtrace; use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE_F64}; -use neqo_common::qtrace; -use std::convert::TryFrom; // CUBIC congestion control @@ -21,7 +21,7 @@ pub const CUBIC_C: f64 = 0.4; pub const CUBIC_ALPHA: f64 = 3.0 * (1.0 - 0.7) / (1.0 + 0.7); // CUBIC_BETA = 0.7; -pub const CUBIC_BETA_USIZE_QUOTIENT: usize = 7; +pub const CUBIC_BETA_USIZE_DIVIDEND: usize = 7; pub const CUBIC_BETA_USIZE_DIVISOR: usize = 10; /// The fast convergence ratio further reduces the congestion window when a congestion event @@ -39,9 +39,9 @@ const EXPONENTIAL_GROWTH_REDUCTION: f64 = 2.0; /// This has the effect of reducing larger values to `1<<53`. /// If you have a congestion window that large, something is probably wrong. 
fn convert_to_f64(v: usize) -> f64 { - let mut f_64 = f64::try_from(u32::try_from(v >> 21).unwrap_or(u32::MAX)).unwrap(); + let mut f_64 = f64::from(u32::try_from(v >> 21).unwrap_or(u32::MAX)); f_64 *= 2_097_152.0; // f_64 <<= 21 - f_64 += f64::try_from(u32::try_from(v & 0x1f_ffff).unwrap()).unwrap(); + f_64 += f64::from(u32::try_from(v & 0x1f_ffff).unwrap()); f_64 } @@ -163,8 +163,8 @@ impl WindowAdjustment for Cubic { // of `MAX_DATAGRAM_SIZE` to match the increase of `target - cwnd / cwnd` as defined // in the specification (Sections 4.4 and 4.5). // The amount of data required therefore reduces asymptotically as the target increases. - // If the target is not significantly higher than the congestion window, require a very large - // amount of acknowledged data (effectively block increases). + // If the target is not significantly higher than the congestion window, require a very + // large amount of acknowledged data (effectively block increases). let mut acked_to_increase = MAX_DATAGRAM_SIZE_F64 * curr_cwnd_f64 / (target_cwnd - curr_cwnd_f64).max(1.0); @@ -178,9 +178,10 @@ impl WindowAdjustment for Cubic { fn reduce_cwnd(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize) { let curr_cwnd_f64 = convert_to_f64(curr_cwnd); // Fast Convergence - // If congestion event occurs before the maximum congestion window before the last congestion event, - // we reduce the the maximum congestion window and thereby W_max. - // check cwnd + MAX_DATAGRAM_SIZE instead of cwnd because with cwnd in bytes, cwnd may be slightly off. + // If congestion event occurs before the maximum congestion window before the last + // congestion event, we reduce the the maximum congestion window and thereby W_max. + // check cwnd + MAX_DATAGRAM_SIZE instead of cwnd because with cwnd in bytes, cwnd may be + // slightly off. 
self.last_max_cwnd = if curr_cwnd_f64 + MAX_DATAGRAM_SIZE_F64 < self.last_max_cwnd { curr_cwnd_f64 * CUBIC_FAST_CONVERGENCE } else { @@ -188,8 +189,8 @@ impl WindowAdjustment for Cubic { }; self.ca_epoch_start = None; ( - curr_cwnd * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR, - acked_bytes * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR, + curr_cwnd * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR, + acked_bytes * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR, ) } diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index 5cd5676747..486d15e67e 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -5,10 +5,6 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] - -use crate::{path::PATH_MTU_V6, tracking::SentPacket, Error}; -use neqo_common::qlog::NeqoQlog; use std::{ fmt::{Debug, Display}, @@ -16,11 +12,17 @@ use std::{ time::{Duration, Instant}, }; +use neqo_common::qlog::NeqoQlog; + +use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; + mod classic_cc; mod cubic; mod new_reno; -pub use classic_cc::{ClassicCongestionControl, CWND_INITIAL, CWND_INITIAL_PKTS, CWND_MIN}; +pub use classic_cc::ClassicCongestionControl; +#[cfg(test)] +pub use classic_cc::{CWND_INITIAL, CWND_INITIAL_PKTS, CWND_MIN}; pub use cubic::Cubic; pub use new_reno::NewReno; @@ -40,7 +42,7 @@ pub trait CongestionControl: Display + Debug { #[must_use] fn cwnd_avail(&self) -> usize; - fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant); + fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], rtt_est: &RttEstimate, now: Instant); /// Returns true if the congestion window was reduced. 
fn on_packets_lost( diff --git a/neqo-transport/src/cc/new_reno.rs b/neqo-transport/src/cc/new_reno.rs index d34cdfbab9..47d0d56f37 100644 --- a/neqo-transport/src/cc/new_reno.rs +++ b/neqo-transport/src/cc/new_reno.rs @@ -5,12 +5,13 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] -use std::fmt::{self, Display}; +use std::{ + fmt::{self, Display}, + time::{Duration, Instant}, +}; use crate::cc::classic_cc::WindowAdjustment; -use std::time::{Duration, Instant}; #[derive(Debug, Default)] pub struct NewReno {} diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index a737f90f7c..2e0200fd6d 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -7,32 +7,35 @@ #![allow(clippy::cast_possible_truncation)] #![allow(clippy::cast_sign_loss)] +use std::{ + ops::Sub, + time::{Duration, Instant}, +}; + +use test_fixture::now; + use crate::{ cc::{ classic_cc::{ClassicCongestionControl, CWND_INITIAL}, cubic::{ - Cubic, CUBIC_ALPHA, CUBIC_BETA_USIZE_DIVISOR, CUBIC_BETA_USIZE_QUOTIENT, CUBIC_C, + Cubic, CUBIC_ALPHA, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR, CUBIC_C, CUBIC_FAST_CONVERGENCE, }, CongestionControl, MAX_DATAGRAM_SIZE, MAX_DATAGRAM_SIZE_F64, }, packet::PacketType, + rtt::RttEstimate, tracking::SentPacket, }; -use std::{ - convert::TryFrom, - ops::Sub, - time::{Duration, Instant}, -}; -use test_fixture::now; const RTT: Duration = Duration::from_millis(100); +const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(100)); const CWND_INITIAL_F64: f64 = 10.0 * MAX_DATAGRAM_SIZE_F64; const CWND_INITIAL_10_F64: f64 = 10.0 * CWND_INITIAL_F64; const CWND_INITIAL_10: usize = 10 * CWND_INITIAL; -const CWND_AFTER_LOSS: usize = CWND_INITIAL * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR; +const CWND_AFTER_LOSS: usize = CWND_INITIAL * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR; const CWND_AFTER_LOSS_SLOW_START: 
usize = - (CWND_INITIAL + MAX_DATAGRAM_SIZE) * CUBIC_BETA_USIZE_QUOTIENT / CUBIC_BETA_USIZE_DIVISOR; + (CWND_INITIAL + MAX_DATAGRAM_SIZE) * CUBIC_BETA_USIZE_DIVIDEND / CUBIC_BETA_USIZE_DIVISOR; fn fill_cwnd(cc: &mut ClassicCongestionControl, mut next_pn: u64, now: Instant) -> u64 { while cc.bytes_in_flight() < cc.cwnd() { @@ -59,7 +62,7 @@ fn ack_packet(cc: &mut ClassicCongestionControl, pn: u64, now: Instant) { Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); - cc.on_packets_acked(&[acked], RTT, now); + cc.on_packets_acked(&[acked], &RTT_ESTIMATE, now); } fn packet_lost(cc: &mut ClassicCongestionControl, pn: u64) { @@ -76,9 +79,7 @@ fn packet_lost(cc: &mut ClassicCongestionControl, pn: u64) { } fn expected_tcp_acks(cwnd_rtt_start: usize) -> u64 { - (f64::try_from(i32::try_from(cwnd_rtt_start).unwrap()).unwrap() - / MAX_DATAGRAM_SIZE_F64 - / CUBIC_ALPHA) + (f64::from(i32::try_from(cwnd_rtt_start).unwrap()) / MAX_DATAGRAM_SIZE_F64 / CUBIC_ALPHA) .round() as u64 } @@ -109,7 +110,7 @@ fn tcp_phase() { for _ in 0..num_tcp_increases { let cwnd_rtt_start = cubic.cwnd(); - //Expected acks during a period of RTT / CUBIC_ALPHA. + // Expected acks during a period of RTT / CUBIC_ALPHA. let acks = expected_tcp_acks(cwnd_rtt_start); // The time between acks if they are ideally paced over a RTT. let time_increase = RTT / u32::try_from(cwnd_rtt_start / MAX_DATAGRAM_SIZE).unwrap(); @@ -145,9 +146,10 @@ fn tcp_phase() { let expected_ack_tcp_increase = expected_tcp_acks(cwnd_rtt_start); assert!(num_acks < expected_ack_tcp_increase); - // This first increase after a TCP phase may be shorter than what it would take by a regular cubic phase, - // because of the proper byte counting and the credit it already had before entering this phase. Therefore - // We will perform another round and compare it to expected increase using the cubic equation. 
+ // This first increase after a TCP phase may be shorter than what it would take by a regular + // cubic phase, because of the proper byte counting and the credit it already had before + // entering this phase. Therefore We will perform another round and compare it to expected + // increase using the cubic equation. let cwnd_rtt_start_after_tcp = cubic.cwnd(); let elapsed_time = now - start_time; @@ -167,12 +169,12 @@ fn tcp_phase() { let expected_ack_tcp_increase2 = expected_tcp_acks(cwnd_rtt_start_after_tcp); assert!(num_acks2 < expected_ack_tcp_increase2); - // The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be calculates from: - // W_cubic(elapsed_time + t_to_increase) - W_cubis(elapsed_time) = MAX_DATAGRAM_SIZE => - // CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL - - // CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL = MAX_DATAGRAM_SIZE => - // t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) - elapsed_time - // (t_to_increase is in seconds) + // The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be + // calculates from: W_cubic(elapsed_time + t_to_increase) - W_cubis(elapsed_time) = + // MAX_DATAGRAM_SIZE => CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE + + // CWND_INITIAL - CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL = + // MAX_DATAGRAM_SIZE => t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) - + // elapsed_time (t_to_increase is in seconds) // number of ack needed is t_to_increase / time_increase. let expected_ack_cubic_increase = ((((1.0 + CUBIC_C * (elapsed_time).as_secs_f64().powi(3)) / CUBIC_C).cbrt() @@ -180,15 +182,16 @@ fn tcp_phase() { / time_increase.as_secs_f64()) .ceil() as u64; // num_acks is very close to the calculated value. The exact value is hard to calculate - // because the proportional increase(i.e. 
curr_cwnd_f64 / (target - curr_cwnd_f64) * MAX_DATAGRAM_SIZE_F64) - // and the byte counting. + // because the proportional increase(i.e. curr_cwnd_f64 / (target - curr_cwnd_f64) * + // MAX_DATAGRAM_SIZE_F64) and the byte counting. assert_eq!(num_acks2, expected_ack_cubic_increase + 2); } #[test] fn cubic_phase() { let mut cubic = ClassicCongestionControl::new(Cubic::default()); - // Set last_max_cwnd to a higher number make sure that cc is the cubic phase (cwnd is calculated by the cubic equation). + // Set last_max_cwnd to a higher number make sure that cc is the cubic phase (cwnd is calculated + // by the cubic equation). cubic.set_last_max_cwnd(CWND_INITIAL_10_F64); // Set ssthresh to something small to make sure that cc is in the congection avoidance phase. cubic.set_ssthresh(1); @@ -205,7 +208,7 @@ fn cubic_phase() { let num_rtts_w_max = (k / RTT.as_secs_f64()).round() as u64; for _ in 0..num_rtts_w_max { let cwnd_rtt_start = cubic.cwnd(); - //Expected acks + // Expected acks let acks = cwnd_rtt_start / MAX_DATAGRAM_SIZE; let time_increase = RTT / u32::try_from(acks).unwrap(); for _ in 0..acks { @@ -264,7 +267,8 @@ fn congestion_event_congestion_avoidance() { // Set ssthresh to something small to make sure that cc is in the congection avoidance phase. cubic.set_ssthresh(1); - // Set last_max_cwnd to something smaller than cwnd so that the fast convergence is not triggered. + // Set last_max_cwnd to something smaller than cwnd so that the fast convergence is not + // triggered. cubic.set_last_max_cwnd(3.0 * MAX_DATAGRAM_SIZE_F64); _ = fill_cwnd(&mut cubic, 0, now()); diff --git a/neqo-transport/src/cc/tests/mod.rs b/neqo-transport/src/cc/tests/mod.rs index 238a7ad012..879693fb24 100644 --- a/neqo-transport/src/cc/tests/mod.rs +++ b/neqo-transport/src/cc/tests/mod.rs @@ -1,3 +1,4 @@ +// Licensed under the Apache License, Version 2.0 or the MIT license // , at your // option. 
This file may not be copied, modified, or distributed diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index 0b678ca55e..4cc20de5a7 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -5,17 +5,24 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] -use crate::cc::new_reno::NewReno; -use crate::cc::{ClassicCongestionControl, CongestionControl, CWND_INITIAL, MAX_DATAGRAM_SIZE}; -use crate::packet::PacketType; -use crate::tracking::SentPacket; use std::time::Duration; + use test_fixture::now; +use crate::{ + cc::{ + new_reno::NewReno, ClassicCongestionControl, CongestionControl, CWND_INITIAL, + MAX_DATAGRAM_SIZE, + }, + packet::PacketType, + rtt::RttEstimate, + tracking::SentPacket, +}; + const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); +const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(98)); fn cwnd_is_default(cc: &ClassicCongestionControl) { assert_eq!(cc.cwnd(), CWND_INITIAL); @@ -117,7 +124,7 @@ fn issue_876() { assert_eq!(cc.bytes_in_flight(), 6 * MAX_DATAGRAM_SIZE - 5); // and ack it. 
cwnd increases slightly - cc.on_packets_acked(&sent_packets[6..], RTT, time_now); + cc.on_packets_acked(&sent_packets[6..], &RTT_ESTIMATE, time_now); assert_eq!(cc.acked_bytes(), sent_packets[6].size); cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 5 * MAX_DATAGRAM_SIZE - 2); @@ -129,3 +136,83 @@ fn issue_876() { cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 4 * MAX_DATAGRAM_SIZE); } + +#[test] +// https://github.com/mozilla/neqo/pull/1465 +fn issue_1465() { + let mut cc = ClassicCongestionControl::new(NewReno::default()); + let mut pn = 0; + let mut now = now(); + let mut next_packet = |now| { + let p = SentPacket::new( + PacketType::Short, + pn, // pn + now, // time_sent + true, // ack eliciting + Vec::new(), // tokens + MAX_DATAGRAM_SIZE, // size + ); + pn += 1; + p + }; + let mut send_next = |cc: &mut ClassicCongestionControl, now| { + let p = next_packet(now); + cc.on_packet_sent(&p); + p + }; + + let p1 = send_next(&mut cc, now); + let p2 = send_next(&mut cc, now); + let p3 = send_next(&mut cc, now); + + assert_eq!(cc.acked_bytes(), 0); + cwnd_is_default(&cc); + assert_eq!(cc.bytes_in_flight(), 3 * MAX_DATAGRAM_SIZE); + + // advance one rtt to detect lost packet there this simplifies the timers, because + // on_packet_loss would only be called after RTO, but that is not relevant to the problem + now += RTT; + cc.on_packets_lost(Some(now), None, PTO, &[p1]); + + // We are now in recovery + assert!(cc.recovery_packet()); + assert_eq!(cc.acked_bytes(), 0); + cwnd_is_halved(&cc); + assert_eq!(cc.bytes_in_flight(), 2 * MAX_DATAGRAM_SIZE); + + // Don't reduce the cwnd again on second packet loss + cc.on_packets_lost(Some(now), None, PTO, &[p3]); + assert_eq!(cc.acked_bytes(), 0); + cwnd_is_halved(&cc); // still the same as after first packet loss + assert_eq!(cc.bytes_in_flight(), MAX_DATAGRAM_SIZE); + + // the acked packets before on_packet_sent were the cause of + // https://github.com/mozilla/neqo/pull/1465 + cc.on_packets_acked(&[p2], 
&RTT_ESTIMATE, now); + + assert_eq!(cc.bytes_in_flight(), 0); + + // send out recovery packet and get it acked to get out of recovery state + let p4 = send_next(&mut cc, now); + cc.on_packet_sent(&p4); + now += RTT; + cc.on_packets_acked(&[p4], &RTT_ESTIMATE, now); + + // do the same as in the first rtt but now the bug appears + let p5 = send_next(&mut cc, now); + let p6 = send_next(&mut cc, now); + now += RTT; + + let cur_cwnd = cc.cwnd(); + cc.on_packets_lost(Some(now), None, PTO, &[p5]); + + // go back into recovery + assert!(cc.recovery_packet()); + assert_eq!(cc.cwnd(), cur_cwnd / 2); + assert_eq!(cc.acked_bytes(), 0); + assert_eq!(cc.bytes_in_flight(), 2 * MAX_DATAGRAM_SIZE); + + // this shouldn't introduce further cwnd reduction, but it did before https://github.com/mozilla/neqo/pull/1465 + cc.on_packets_lost(Some(now), None, PTO, &[p6]); + assert_eq!(cc.cwnd(), cur_cwnd / 2); +} diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 38157419de..6b3a95eaf0 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -6,24 +6,22 @@ // Representation and management of connection IDs. 
-use crate::frame::FRAME_TYPE_NEW_CONNECTION_ID; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::{Error, Res}; +use std::{ + borrow::Borrow, + cell::{Ref, RefCell}, + cmp::{max, min}, + ops::Deref, + rc::Rc, +}; use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder}; -use neqo_crypto::random; - -use smallvec::SmallVec; -use std::borrow::Borrow; -use std::cell::{Ref, RefCell}; -use std::cmp::max; -use std::cmp::min; -use std::convert::AsRef; -use std::convert::TryFrom; -use std::ops::Deref; -use std::rc::Rc; +use neqo_crypto::{random, randomize}; +use smallvec::{smallvec, SmallVec}; + +use crate::{ + frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken, + stats::FrameStats, Error, Res, +}; pub const MAX_CONNECTION_ID_LEN: usize = 20; pub const LOCAL_ACTIVE_CID_LIMIT: usize = 8; @@ -40,19 +38,26 @@ pub struct ConnectionId { } impl ConnectionId { + /// # Panics + /// When `len` is larger than `MAX_CONNECTION_ID_LEN`. + #[must_use] pub fn generate(len: usize) -> Self { assert!(matches!(len, 0..=MAX_CONNECTION_ID_LEN)); - Self::from(random(len)) + let mut cid = smallvec![0; len]; + randomize(&mut cid); + Self { cid } } // Apply a wee bit of greasing here in picking a length between 8 and 20 bytes long. + #[must_use] pub fn generate_initial() -> Self { - let v = random(1); + let v = random::<1>()[0]; // Bias selection toward picking 8 (>50% of the time). 
- let len: usize = max(8, 5 + (v[0] & (v[0] >> 4))).into(); + let len: usize = max(8, 5 + (v & (v >> 4))).into(); Self::generate(len) } + #[must_use] pub fn as_cid_ref(&self) -> ConnectionIdRef { ConnectionIdRef::from(&self.cid[..]) } @@ -76,20 +81,14 @@ impl From> for ConnectionId { } } -impl From> for ConnectionId { - fn from(cid: Vec) -> Self { - Self::from(SmallVec::from(cid)) - } -} - impl + ?Sized> From<&T> for ConnectionId { fn from(buf: &T) -> Self { Self::from(SmallVec::from(buf.as_ref())) } } -impl<'a> From<&ConnectionIdRef<'a>> for ConnectionId { - fn from(cidref: &ConnectionIdRef<'a>) -> Self { +impl<'a> From> for ConnectionId { + fn from(cidref: ConnectionIdRef<'a>) -> Self { Self::from(SmallVec::from(cidref.cid)) } } @@ -120,7 +119,7 @@ impl<'a> PartialEq> for ConnectionId { } } -#[derive(Hash, Eq, PartialEq)] +#[derive(Hash, Eq, PartialEq, Clone, Copy)] pub struct ConnectionIdRef<'a> { cid: &'a [u8], } @@ -202,7 +201,7 @@ impl ConnectionIdGenerator for EmptyConnectionIdGenerator { } } -/// An RandomConnectionIdGenerator produces connection IDs of +/// An `RandomConnectionIdGenerator` produces connection IDs of /// a fixed length and random content. No effort is made to /// prevent collisions. pub struct RandomConnectionIdGenerator { @@ -210,6 +209,7 @@ pub struct RandomConnectionIdGenerator { } impl RandomConnectionIdGenerator { + #[must_use] pub fn new(len: usize) -> Self { Self { len } } @@ -223,7 +223,9 @@ impl ConnectionIdDecoder for RandomConnectionIdGenerator { impl ConnectionIdGenerator for RandomConnectionIdGenerator { fn generate_cid(&mut self) -> Option { - Some(ConnectionId::from(&random(self.len))) + let mut buf = smallvec![0; self.len]; + randomize(&mut buf); + Some(ConnectionId::from(buf)) } fn as_decoder(&self) -> &dyn ConnectionIdDecoder { @@ -235,7 +237,7 @@ impl ConnectionIdGenerator for RandomConnectionIdGenerator { } } -/// A single connection ID, as saved from NEW_CONNECTION_ID. 
+/// A single connection ID, as saved from `NEW_CONNECTION_ID`. /// This is templated so that the connection ID entries from a peer can be /// saved with a stateless reset token. Local entries don't need that. #[derive(Debug, PartialEq, Eq, Clone)] @@ -251,8 +253,8 @@ pub struct ConnectionIdEntry { impl ConnectionIdEntry<[u8; 16]> { /// Create a random stateless reset token so that it is hard to guess the correct /// value and reset the connection. - fn random_srt() -> [u8; 16] { - <[u8; 16]>::try_from(&random(16)[..]).unwrap() + pub fn random_srt() -> [u8; 16] { + random::<16>() } /// Create the first entry, which won't have a stateless reset token. @@ -295,6 +297,23 @@ impl ConnectionIdEntry<[u8; 16]> { pub fn sequence_number(&self) -> u64 { self.seqno } + + /// Write the entry out in a `NEW_CONNECTION_ID` frame. + /// Returns `true` if the frame was written, `false` if there is insufficient space. + pub fn write(&self, builder: &mut PacketBuilder, stats: &mut FrameStats) -> bool { + let len = 1 + Encoder::varint_len(self.seqno) + 1 + 1 + self.cid.len() + 16; + if builder.remaining() < len { + return false; + } + + builder.encode_varint(FRAME_TYPE_NEW_CONNECTION_ID); + builder.encode_varint(self.seqno); + builder.encode_varint(0u64); + builder.encode_vec(1, &self.cid); + builder.encode(&self.srt); + stats.new_connection_id += 1; + true + } } impl ConnectionIdEntry<()> { @@ -324,6 +343,10 @@ impl ConnectionIdEntry { pub fn connection_id(&self) -> &ConnectionId { &self.cid } + + pub fn reset_token(&self) -> &SRT { + &self.srt + } } pub type RemoteConnectionIdEntry = ConnectionIdEntry<[u8; 16]>; @@ -340,8 +363,8 @@ impl ConnectionIdStore { self.cids.retain(|c| c.seqno != seqno); } - pub fn contains(&self, cid: &ConnectionIdRef) -> bool { - self.cids.iter().any(|c| &c.cid == cid) + pub fn contains(&self, cid: ConnectionIdRef) -> bool { + self.cids.iter().any(|c| c.cid == cid) } pub fn next(&mut self) -> Option> { @@ -418,15 +441,16 @@ pub struct ConnectionIdManager { 
/// The `ConnectionIdGenerator` instance that is used to create connection IDs. generator: Rc>, /// The connection IDs that we will accept. - /// This includes any we advertise in `NEW_CONNECTION_ID` that haven't been bound to a path yet. - /// During the handshake at the server, it also includes the randomized DCID pick by the client. + /// This includes any we advertise in `NEW_CONNECTION_ID` that haven't been bound to a path + /// yet. During the handshake at the server, it also includes the randomized DCID pick by + /// the client. connection_ids: ConnectionIdStore<()>, /// The maximum number of connection IDs this will accept. This is at least 2 and won't /// be more than `LOCAL_ACTIVE_CID_LIMIT`. limit: usize, /// The next sequence number that will be used for sending `NEW_CONNECTION_ID` frames. next_seqno: u64, - /// Outstanding, but lost NEW_CONNECTION_ID frames will be stored here. + /// Outstanding, but lost `NEW_CONNECTION_ID` frames will be stored here. lost_new_connection_id: Vec>, } @@ -472,14 +496,14 @@ impl ConnectionIdManager { .add_local(ConnectionIdEntry::new(self.next_seqno, cid.clone(), ())); self.next_seqno += 1; - let srt = <[u8; 16]>::try_from(&random(16)[..]).unwrap(); + let srt = ConnectionIdEntry::random_srt(); Ok((cid, srt)) } else { Err(Error::ConnectionIdsExhausted) } } - pub fn is_valid(&self, cid: &ConnectionIdRef) -> bool { + pub fn is_valid(&self, cid: ConnectionIdRef) -> bool { self.connection_ids.contains(cid) } @@ -512,43 +536,19 @@ impl ConnectionIdManager { ); } - fn write_entry( - &mut self, - entry: &ConnectionIdEntry<[u8; 16]>, - builder: &mut PacketBuilder, - stats: &mut FrameStats, - ) -> Res { - let len = 1 + Encoder::varint_len(entry.seqno) + 1 + 1 + entry.cid.len() + 16; - if builder.remaining() < len { - return Ok(false); - } - - builder.encode_varint(FRAME_TYPE_NEW_CONNECTION_ID); - builder.encode_varint(entry.seqno); - builder.encode_varint(0u64); - builder.encode_vec(1, &entry.cid); - builder.encode(&entry.srt); - 
if builder.len() > builder.limit() { - return Err(Error::InternalError(8)); - } - - stats.new_connection_id += 1; - Ok(true) - } - pub fn write_frames( &mut self, builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { if self.generator.deref().borrow().generates_empty_cids() { debug_assert_eq!(self.generator.borrow_mut().generate_cid().unwrap().len(), 0); - return Ok(()); + return; } while let Some(entry) = self.lost_new_connection_id.pop() { - if self.write_entry(&entry, builder, stats)? { + if entry.write(builder, stats) { tokens.push(RecoveryToken::NewConnectionId(entry)); } else { // This shouldn't happen often. @@ -565,7 +565,7 @@ impl ConnectionIdManager { if let Some(cid) = maybe_cid { assert_ne!(cid.len(), 0); // TODO: generate the stateless reset tokens from the connection ID and a key. - let srt = <[u8; 16]>::try_from(&random(16)[..]).unwrap(); + let srt = ConnectionIdEntry::random_srt(); let seqno = self.next_seqno; self.next_seqno += 1; @@ -573,11 +573,10 @@ impl ConnectionIdManager { .add_local(ConnectionIdEntry::new(seqno, cid.clone(), ())); let entry = ConnectionIdEntry::new(seqno, cid, srt); - debug_assert!(self.write_entry(&entry, builder, stats)?); + entry.write(builder, stats); tokens.push(RecoveryToken::NewConnectionId(entry)); } } - Ok(()) } pub fn lost(&mut self, entry: &ConnectionIdEntry<[u8; 16]>) { @@ -592,17 +591,19 @@ impl ConnectionIdManager { #[cfg(test)] mod tests { - use super::*; use test_fixture::fixture_init; + use crate::{cid::MAX_CONNECTION_ID_LEN, ConnectionId}; + #[test] fn generate_initial_cid() { fixture_init(); for _ in 0..100 { let cid = ConnectionId::generate_initial(); - if !matches!(cid.len(), 8..=MAX_CONNECTION_ID_LEN) { - panic!("connection ID {:?}", cid); - } + assert!( + matches!(cid.len(), 8..=MAX_CONNECTION_ID_LEN), + "connection ID length {cid:?}", + ); } } } diff --git a/neqo-transport/src/dump.rs b/neqo-transport/src/connection/dump.rs similarity index 56% rename from 
neqo-transport/src/dump.rs rename to neqo-transport/src/connection/dump.rs index fceb6b6f5d..12d337c570 100644 --- a/neqo-transport/src/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -7,14 +7,17 @@ // Enable just this file for logging to just see packets. // e.g. "RUST_LOG=neqo_transport::dump neqo-client ..." -use crate::connection::Connection; -use crate::frame::Frame; -use crate::packet::{PacketNumber, PacketType}; -use crate::path::PathRef; -use neqo_common::{qdebug, Decoder}; - use std::fmt::Write; +use neqo_common::{qdebug, Decoder, IpTos}; + +use crate::{ + connection::Connection, + frame::Frame, + packet::{PacketNumber, PacketType}, + path::PathRef, +}; + #[allow(clippy::module_name_repetitions)] pub fn dump_packet( conn: &Connection, @@ -23,24 +26,31 @@ pub fn dump_packet( pt: PacketType, pn: PacketNumber, payload: &[u8], + tos: IpTos, ) { - if ::log::Level::Debug > ::log::max_level() { + if log::STATIC_MAX_LEVEL == log::LevelFilter::Off || !log::log_enabled!(log::Level::Debug) { return; } - let mut s = String::from(""); + let mut s = String::new(); let mut d = Decoder::from(payload); while d.remaining() > 0 { - let f = match Frame::decode(&mut d) { - Ok(f) => f, - Err(_) => { - s.push_str(" [broken]..."); - break; - } + let Ok(f) = Frame::decode(&mut d) else { + s.push_str(" [broken]..."); + break; }; - if let Some(x) = f.dump() { + let x = f.dump(); + if !x.is_empty() { write!(&mut s, "\n {} {}", dir, &x).unwrap(); } } - qdebug!([conn], "pn={} type={:?} {}{}", pn, pt, path.borrow(), s); + qdebug!( + [conn], + "pn={} type={:?} {} {:?}{}", + pn, + pt, + path.borrow(), + tos, + s + ); } diff --git a/neqo-transport/src/connection/idle.rs b/neqo-transport/src/connection/idle.rs index 5b1bd857dc..e33f3defb3 100644 --- a/neqo-transport/src/connection/idle.rs +++ b/neqo-transport/src/connection/idle.rs @@ -4,9 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + cmp::{max, min}, + time::{Duration, Instant}, +}; + +use neqo_common::qtrace; + use crate::recovery::RecoveryToken; -use std::cmp::{max, min}; -use std::time::{Duration, Instant}; #[derive(Debug, Clone)] /// There's a little bit of different behavior for resetting idle timeout. See @@ -53,6 +58,10 @@ impl IdleTimeout { } else { max(self.timeout, pto * 3) }; + qtrace!( + "IdleTimeout::expiry@{now:?} pto={pto:?}, ka={keep_alive} => {t:?}", + t = start + delay + ); start + delay } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index c27f6ecde1..33743e6c7a 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -9,26 +9,24 @@ use std::{ cell::RefCell, cmp::{max, min}, - convert::TryFrom, fmt::{self, Debug}, - mem, + iter, mem, net::{IpAddr, SocketAddr}, ops::RangeInclusive, rc::{Rc, Weak}, time::{Duration, Instant}, }; -use smallvec::SmallVec; - use neqo_common::{ event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, - qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, + qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, IpTos, Role, }; use neqo_crypto::{ - agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, + agent::CertificateInfo, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, Server, ZeroRttChecker, }; +use smallvec::SmallVec; use crate::{ addr_valid::{AddressValidation, NewTokenState}, @@ -36,13 +34,7 @@ use crate::{ ConnectionId, ConnectionIdEntry, ConnectionIdGenerator, ConnectionIdManager, ConnectionIdRef, ConnectionIdStore, LOCAL_ACTIVE_CID_LIMIT, }, -}; - -use crate::recv_stream::RecvStreamStats; -pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; -use crate::{ crypto::{Crypto, CryptoDxState, CryptoSpace}, - dump::*, 
events::{ConnectionEvent, ConnectionEvents, OutgoingDatagramOutcome}, frame::{ CloseError, Frame, FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, @@ -53,7 +45,9 @@ use crate::{ qlog, quic_datagrams::{DatagramTracking, QuicDatagrams}, recovery::{LossRecovery, RecoveryToken, SendProfile}, + recv_stream::RecvStreamStats, rtt::GRANULARITY, + send_stream::SendStream, stats::{Stats, StatsCell}, stream_id::StreamType, streams::{SendOrder, Streams}, @@ -66,6 +60,7 @@ use crate::{ AppError, ConnectionError, Error, Res, StreamId, }; +mod dump; mod idle; pub mod params; mod saved; @@ -73,15 +68,17 @@ mod state; #[cfg(test)] pub mod test_internal; +use dump::dump_packet; use idle::IdleTimeout; +pub use params::ConnectionParameters; use params::PreferredAddressConfig; -pub use params::{ConnectionParameters, ACK_RATIO_SCALE}; +#[cfg(test)] +pub use params::ACK_RATIO_SCALE; use saved::SavedDatagrams; use state::StateSignaling; pub use state::{ClosingFrame, State}; -#[derive(Debug, Default)] -struct Packet(Vec); +pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; /// The number of Initial packets that the client will send in response /// to receiving an undecryptable packet during the early part of the @@ -98,7 +95,7 @@ pub enum ZeroRttState { } #[derive(Clone, Debug, PartialEq, Eq)] -/// Type returned from process() and `process_output()`. Users are required to +/// Type returned from `process()` and `process_output()`. Users are required to /// call these repeatedly until `Callback` or `None` is returned. pub enum Output { /// Connection requires no action. @@ -121,6 +118,7 @@ impl Output { } /// Get a reference to the Datagram, if any. + #[must_use] pub fn as_dgram_ref(&self) -> Option<&Datagram> { match self { Self::Datagram(dg) => Some(dg), @@ -138,7 +136,7 @@ impl Output { } } -/// Used by inner functions like Connection::output. +/// Used by inner functions like `Connection::output`. 
enum SendOption { /// Yes, please send this datagram. Yes(Datagram), @@ -259,7 +257,7 @@ pub struct Connection { /// Some packets were received, but not tracked. received_untracked: bool, - /// This is responsible for the QuicDatagrams' handling: + /// This is responsible for the `QuicDatagrams`' handling: /// quic_datagrams: QuicDatagrams, @@ -273,8 +271,8 @@ pub struct Connection { new_token: NewTokenState, stats: StatsCell, qlog: NeqoQlog, - /// A session ticket was received without NEW_TOKEN, - /// this is when that turns into an event without NEW_TOKEN. + /// A session ticket was received without `NEW_TOKEN`, + /// this is when that turns into an event without `NEW_TOKEN`. release_resumption_token_timer: Option, conn_params: ConnectionParameters, hrtime: hrtime::Handle, @@ -304,6 +302,8 @@ impl Connection { const LOOSE_TIMER_RESOLUTION: Duration = Duration::from_millis(50); /// Create a new QUIC connection with Client role. + /// # Errors + /// When NSS fails and an agent cannot be created. pub fn new_client( server_name: impl Into, protocols: &[impl AsRef], @@ -331,6 +331,7 @@ impl Connection { local_addr, remote_addr, c.conn_params.get_cc_algorithm(), + c.conn_params.pacing_enabled(), NeqoQlog::default(), now, ); @@ -339,6 +340,8 @@ impl Connection { } /// Create a new QUIC connection with Server role. + /// # Errors + /// When NSS fails and an agent cannot be created. pub fn new_server( certs: &[impl AsRef], protocols: &[impl AsRef], @@ -380,7 +383,6 @@ impl Connection { agent, protocols.iter().map(P::as_ref).map(String::from).collect(), Rc::clone(&tphandler), - conn_params.is_fuzzing(), )?; let stats = StatsCell::default(); @@ -424,10 +426,12 @@ impl Connection { #[cfg(test)] test_frame_writer: None, }; - c.stats.borrow_mut().init(format!("{}", c)); + c.stats.borrow_mut().init(format!("{c}")); Ok(c) } + /// # Errors + /// When the operation fails. 
pub fn server_enable_0rtt( &mut self, anti_replay: &AntiReplay, @@ -437,6 +441,8 @@ impl Connection { .server_enable_0rtt(self.tps.clone(), anti_replay, zero_rtt_checker) } + /// # Errors + /// When the operation fails. pub fn server_enable_ech( &mut self, config: u8, @@ -448,10 +454,13 @@ impl Connection { } /// Get the active ECH configuration, which is empty if ECH is disabled. + #[must_use] pub fn ech_config(&self) -> &[u8] { self.crypto.ech_config() } + /// # Errors + /// When the operation fails. pub fn client_enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { self.crypto.client_enable_ech(ech_config_list) } @@ -469,8 +478,9 @@ impl Connection { } /// Get the original destination connection id for this connection. This - /// will always be present for Role::Client but not if Role::Server is in - /// State::Init. + /// will always be present for `Role::Client` but not if `Role::Server` is in + /// `State::Init`. + #[must_use] pub fn odcid(&self) -> Option<&ConnectionId> { self.original_destination_cid.as_ref() } @@ -478,6 +488,9 @@ impl Connection { /// Set a local transport parameter, possibly overriding a default value. /// This only sets transport parameters without dealing with other aspects of /// setting the value. + /// + /// # Errors + /// When the transport parameter is invalid. /// # Panics /// This panics if the transport parameter is known to this crate. pub fn set_local_tparam(&self, tp: TransportParameterId, value: TransportParameter) -> Res<()> { @@ -496,14 +509,14 @@ impl Connection { } /// `odcid` is their original choice for our CID, which we get from the Retry token. - /// `remote_cid` is the value from the Source Connection ID field of - /// an incoming packet: what the peer wants us to use now. - /// `retry_cid` is what we asked them to use when we sent the Retry. + /// `remote_cid` is the value from the Source Connection ID field of an incoming packet: what + /// the peer wants us to use now. 
`retry_cid` is what we asked them to use when we sent the + /// Retry. pub(crate) fn set_retry_cids( &mut self, - odcid: ConnectionId, + odcid: &ConnectionId, remote_cid: ConnectionId, - retry_cid: ConnectionId, + retry_cid: &ConnectionId, ) { debug_assert_eq!(self.role, Role::Server); qtrace!( @@ -532,12 +545,16 @@ impl Connection { /// Set ALPN preferences. Strings that appear earlier in the list are given /// higher preference. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn set_alpn(&mut self, protocols: &[impl AsRef]) -> Res<()> { self.crypto.tls.set_alpn(protocols)?; Ok(()) } /// Enable a set of ciphers. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn set_ciphers(&mut self, ciphers: &[Cipher]) -> Res<()> { if self.state != State::Init { qerror!([self], "Cannot enable ciphers in state {:?}", self.state); @@ -547,6 +564,30 @@ impl Connection { Ok(()) } + /// Enable a set of key exchange groups. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. + pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> { + if self.state != State::Init { + qerror!([self], "Cannot enable groups in state {:?}", self.state); + return Err(Error::ConnectionState); + } + self.crypto.tls.set_groups(groups)?; + Ok(()) + } + + /// Set the number of additional key shares to send in the client hello. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. 
+ pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { + if self.state != State::Init { + qerror!([self], "Cannot enable groups in state {:?}", self.state); + return Err(Error::ConnectionState); + } + self.crypto.tls.send_additional_key_shares(count)?; + Ok(()) + } + fn make_resumption_token(&mut self) -> ResumptionToken { debug_assert_eq!(self.role, Role::Client); debug_assert!(self.crypto.has_resumption_token()); @@ -624,7 +665,9 @@ impl Connection { /// problem for short-lived connections, where the connection is closed before any events are /// released. This function retrieves the token, without waiting for a `NEW_TOKEN` frame to /// arrive. + /// /// # Panics + /// /// If this is called on a server. pub fn take_resumption_token(&mut self, now: Instant) -> Option { assert_eq!(self.role, Role::Client); @@ -644,6 +687,8 @@ impl Connection { /// This can only be called once and only on the client. /// After calling the function, it should be possible to attempt 0-RTT /// if the token supports that. + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn enable_resumption(&mut self, now: Instant, token: impl AsRef<[u8]>) -> Res<()> { if self.state != State::Init { qerror!([self], "set token in state {:?}", self.state); @@ -660,8 +705,9 @@ impl Connection { ); let mut dec = Decoder::from(token.as_ref()); - let version = - Version::try_from(dec.decode_uint(4).ok_or(Error::InvalidResumptionToken)? 
as u32)?; + let version = Version::try_from(u32::try_from( + dec.decode_uint(4).ok_or(Error::InvalidResumptionToken)?, + )?)?; qtrace!([self], " version {:?}", version); if !self.conn_params.get_versions().all().contains(&version) { return Err(Error::DisabledVersion); @@ -709,13 +755,15 @@ impl Connection { Ok(()) } - pub(crate) fn set_validation(&mut self, validation: Rc>) { + pub(crate) fn set_validation(&mut self, validation: &Rc>) { qtrace!([self], "Enabling NEW_TOKEN"); assert_eq!(self.role, Role::Server); - self.address_validation = AddressValidationInfo::Server(Rc::downgrade(&validation)); + self.address_validation = AddressValidationInfo::Server(Rc::downgrade(validation)); } - /// Send a TLS session ticket AND a NEW_TOKEN frame (if possible). + /// Send a TLS session ticket AND a `NEW_TOKEN` frame (if possible). + /// # Errors + /// When the operation fails, which is usually due to bad inputs or bad connection state. pub fn send_ticket(&mut self, now: Instant, extra: &[u8]) -> Res<()> { if self.role == Role::Client { return Err(Error::WrongRole); @@ -729,7 +777,7 @@ impl Connection { }); enc.encode(extra); let records = s.send_ticket(now, enc.as_ref())?; - qinfo!([self], "send session ticket {}", hex(&enc)); + qdebug!([self], "send session ticket {}", hex(&enc)); self.crypto.buffer_records(records)?; } else { unreachable!(); @@ -751,15 +799,19 @@ impl Connection { } } + #[must_use] pub fn tls_info(&self) -> Option<&SecretAgentInfo> { self.crypto.tls.info() } + /// # Errors + /// When there is no information to obtain. pub fn tls_preinfo(&self) -> Res { Ok(self.crypto.tls.preinfo()?) } /// Get the peer's certificate chain and other info. + #[must_use] pub fn peer_certificate(&self) -> Option { self.crypto.tls.peer_certificate() } @@ -771,7 +823,7 @@ impl Connection { /// the connection to fail. However, if no packets have been /// exchanged, it's not OK. 
pub fn authenticated(&mut self, status: AuthenticationStatus, now: Instant) { - qinfo!([self], "Authenticated {:?}", status); + qdebug!([self], "Authenticated {:?}", status); self.crypto.tls.authenticated(status); let res = self.handshake(now, self.version, PacketNumberSpace::Handshake, None); self.absorb_error(now, res); @@ -779,26 +831,31 @@ impl Connection { } /// Get the role of the connection. + #[must_use] pub fn role(&self) -> Role { self.role } /// Get the state of the connection. + #[must_use] pub fn state(&self) -> &State { &self.state } /// The QUIC version in use. + #[must_use] pub fn version(&self) -> Version { self.version } /// Get the 0-RTT state of the connection. + #[must_use] pub fn zero_rtt_state(&self) -> ZeroRttState { self.zero_rtt_state } /// Get a snapshot of collected statistics. + #[must_use] pub fn stats(&self) -> Stats { let mut v = self.stats.borrow().clone(); if let Some(p) = self.paths.primary_fallible() { @@ -820,7 +877,7 @@ impl Connection { ) -> Res { if let Err(v) = &res { #[cfg(debug_assertions)] - let msg = format!("{:?}", v); + let msg = format!("{v:?}"); #[cfg(not(debug_assertions))] let msg = ""; let error = ConnectionError::Transport(v.clone()); @@ -831,8 +888,8 @@ impl Connection { qwarn!([self], "Closing again after error {:?}", err); } State::Init => { - // We have not even sent anything just close the connection without sending any error. - // This may happen when client_start fails. + // We have not even sent anything just close the connection without sending any + // error. This may happen when client_start fails. self.set_state(State::Closed(error)); } State::WaitInitial => { @@ -865,7 +922,7 @@ impl Connection { res } - /// For use with process_input(). Errors there can be ignored, but this + /// For use with `process_input()`. Errors there can be ignored, but this /// needs to ensure that the state is updated. 
fn absorb_error(&mut self, now: Instant, res: Res) -> Option { self.capture_error(None, now, 0, res).ok() @@ -920,8 +977,23 @@ impl Connection { } /// Process new input datagrams on the connection. - pub fn process_input(&mut self, d: Datagram, now: Instant) { - self.input(d, now, now); + pub fn process_input(&mut self, d: &Datagram, now: Instant) { + self.process_multiple_input(iter::once(d), now); + } + + /// Process new input datagrams on the connection. + pub fn process_multiple_input<'a, I>(&mut self, dgrams: I, now: Instant) + where + I: IntoIterator, + { + let mut dgrams = dgrams.into_iter().peekable(); + if dgrams.peek().is_none() { + return; + } + + for d in dgrams { + self.input(d, now, now); + } self.process_saved(now); self.streams.cleanup_closed_streams(); } @@ -1003,7 +1075,7 @@ impl Connection { let res = self.client_start(now); self.absorb_error(now, res); } - (State::Init, Role::Server) | (State::WaitInitial, Role::Server) => { + (State::Init | State::WaitInitial, Role::Server) => { return Output::None; } _ => { @@ -1025,7 +1097,7 @@ impl Connection { /// Process input and generate output. 
#[must_use = "Output of the process function must be handled"] - pub fn process(&mut self, dgram: Option, now: Instant) -> Output { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { if let Some(d) = dgram { self.input(d, now, now); self.process_saved(now); @@ -1078,7 +1150,7 @@ impl Connection { fn discard_keys(&mut self, space: PacketNumberSpace, now: Instant) { if self.crypto.discard(space) { - qinfo!([self], "Drop packet number space {}", space); + qdebug!([self], "Drop packet number space {}", space); let primary = self.paths.primary(); self.loss_recovery.discard(&primary, space, now); self.acks.drop_space(space); @@ -1124,18 +1196,24 @@ impl Connection { debug_assert!(self.crypto.states.rx_hp(self.version, cspace).is_some()); for saved in self.saved_datagrams.take_saved() { qtrace!([self], "input saved @{:?}: {:?}", saved.t, saved.d); - self.input(saved.d, saved.t, now); + self.input(&saved.d, saved.t, now); } } } /// In case a datagram arrives that we can only partially process, save any /// part that we don't have keys for. 
- fn save_datagram(&mut self, cspace: CryptoSpace, d: Datagram, remaining: usize, now: Instant) { + fn save_datagram(&mut self, cspace: CryptoSpace, d: &Datagram, remaining: usize, now: Instant) { let d = if remaining < d.len() { - Datagram::new(d.source(), d.destination(), &d[d.len() - remaining..]) + Datagram::new( + d.source(), + d.destination(), + d.tos(), + d.ttl(), + &d[d.len() - remaining..], + ) } else { - d + d.clone() }; self.saved_datagrams.save(cspace, d, now); self.stats.borrow_mut().saved_datagrams += 1; @@ -1168,6 +1246,12 @@ impl Connection { .get_versions_mut() .set_initial(self.conn_params.get_versions().initial()); mem::swap(self, &mut c); + qlog::client_version_information_negotiated( + &mut self.qlog, + self.conn_params.get_versions().all(), + supported, + version, + ); Ok(()) } else { qinfo!([self], "Version negotiation: failed with {:?}", supported); @@ -1181,6 +1265,7 @@ impl Connection { /// Perform any processing that we might have to do on packets prior to /// attempting to remove protection. + #[allow(clippy::too_many_lines)] // Yeah, it's a work in progress. fn preprocess_packet( &mut self, packet: &PublicPacket, @@ -1188,7 +1273,7 @@ impl Connection { dcid: Option<&ConnectionId>, now: Instant, ) -> Res { - if dcid.map_or(false, |d| d != packet.dcid()) { + if dcid.map_or(false, |d| d != &packet.dcid()) { self.stats .borrow_mut() .pkt_dropped("Coalesced packet has different DCID"); @@ -1235,43 +1320,34 @@ impl Connection { self.tps.borrow_mut().local.set_bytes( tparams::ORIGINAL_DESTINATION_CONNECTION_ID, packet.dcid().to_vec(), - ) + ); } } (PacketType::VersionNegotiation, State::WaitInitial, Role::Client) => { - match packet.supported_versions() { - Ok(versions) => { - if versions.is_empty() - || versions.contains(&self.version().wire_version()) - || versions.contains(&0) - || packet.scid() != self.odcid().unwrap() - || matches!( - self.address_validation, - AddressValidationInfo::Retry { .. 
} - ) - { - // Ignore VersionNegotiation packets that contain the current version. - // Or don't have the right connection ID. - // Or are received after a Retry. - self.stats.borrow_mut().pkt_dropped("Invalid VN"); - return Ok(PreprocessResult::End); - } - + if let Ok(versions) = packet.supported_versions() { + if versions.is_empty() + || versions.contains(&self.version().wire_version()) + || versions.contains(&0) + || &packet.scid() != self.odcid().unwrap() + || matches!(self.address_validation, AddressValidationInfo::Retry { .. }) + { + // Ignore VersionNegotiation packets that contain the current version. + // Or don't have the right connection ID. + // Or are received after a Retry. + self.stats.borrow_mut().pkt_dropped("Invalid VN"); + } else { self.version_negotiation(&versions, now)?; - return Ok(PreprocessResult::End); } - Err(_) => { - self.stats.borrow_mut().pkt_dropped("VN with no versions"); - return Ok(PreprocessResult::End); - } - } + } else { + self.stats.borrow_mut().pkt_dropped("VN with no versions"); + }; + return Ok(PreprocessResult::End); } (PacketType::Retry, State::WaitInitial, Role::Client) => { self.handle_retry(packet, now); return Ok(PreprocessResult::Next); } - (PacketType::Handshake, State::WaitInitial, Role::Client) - | (PacketType::Short, State::WaitInitial, Role::Client) => { + (PacketType::Handshake | PacketType::Short, State::WaitInitial, Role::Client) => { // This packet can't be processed now, but it could be a sign // that Initial packets were lost. // Resend Initial CRYPTO frames immediately a few times just @@ -1284,9 +1360,7 @@ impl Connection { self.crypto.resend_unacked(PacketNumberSpace::Initial); } } - (PacketType::VersionNegotiation, ..) - | (PacketType::Retry, ..) - | (PacketType::OtherVersion, ..) => { + (PacketType::VersionNegotiation | PacketType::Retry | PacketType::OtherVersion, ..) 
=> { self.stats .borrow_mut() .pkt_dropped(format!("{:?}", packet.packet_type())); @@ -1304,17 +1378,17 @@ } State::WaitInitial => PreprocessResult::Continue, State::WaitVersion | State::Handshaking | State::Connected | State::Confirmed => { - if !self.cid_manager.is_valid(packet.dcid()) { - self.stats - .borrow_mut() - .pkt_dropped(format!("Invalid DCID {:?}", packet.dcid())); - PreprocessResult::Next - } else { + if self.cid_manager.is_valid(packet.dcid()) { if self.role == Role::Server && packet.packet_type() == PacketType::Handshake { // Server has received a Handshake packet -> discard Initial keys and states self.discard_keys(PacketNumberSpace::Initial, now); } PreprocessResult::Continue + } else { + self.stats + .borrow_mut() + .pkt_dropped(format!("Invalid DCID {:?}", packet.dcid())); + PreprocessResult::Next } } State::Closing { .. } => { @@ -1334,7 +1408,7 @@ Ok(res) } - /// After a Initial, Handshake, ZeroRtt, or Short packet is successfully processed. + /// After an Initial, Handshake, `ZeroRtt`, or Short packet is successfully processed. fn postprocess_packet( &mut self, path: &PathRef, @@ -1351,7 +1425,7 @@ self.handle_migration(path, d, migrate, now); } else if self.role != Role::Client && (packet.packet_type() == PacketType::Handshake - || (packet.dcid().len() >= 8 && packet.dcid() == &self.local_initial_source_cid)) + || (packet.dcid().len() >= 8 && packet.dcid() == self.local_initial_source_cid)) { // We only allow one path during setup, so apply handshake // path validation to this path. @@ -1361,12 +1435,13 @@ /// Take a datagram as input. This reports an error if the packet was bad. /// This takes two times: when the datagram was received, and the current time. - fn input(&mut self, d: Datagram, received: Instant, now: Instant) { + fn input(&mut self, d: &Datagram, received: Instant, now: Instant) { // First determine the path.
let path = self.paths.find_path_with_rebinding( d.destination(), d.source(), self.conn_params.get_cc_algorithm(), + self.conn_params.pacing_enabled(), now, ); path.borrow_mut().add_received(d.len()); @@ -1374,7 +1449,7 @@ impl Connection { self.capture_error(Some(path), now, 0, res).ok(); } - fn input_path(&mut self, path: &PathRef, d: Datagram, now: Instant) -> Res<()> { + fn input_path(&mut self, path: &PathRef, d: &Datagram, now: Instant) -> Res<()> { let mut slc = &d[..]; let mut dcid = None; @@ -1413,6 +1488,7 @@ impl Connection { payload.packet_type(), payload.pn(), &payload[..], + d.tos(), ); qlog::packet_received(&mut self.qlog, &packet, &payload); @@ -1422,7 +1498,7 @@ impl Connection { self.stats.borrow_mut().dups_rx += 1; } else { match self.process_packet(path, &payload, now) { - Ok(migrate) => self.postprocess_packet(path, &d, &packet, migrate, now), + Ok(migrate) => self.postprocess_packet(path, d, &packet, migrate, now), Err(e) => { self.ensure_error_path(path, &packet, now); return Err(e); @@ -1454,7 +1530,7 @@ impl Connection { // Decryption failure, or not having keys is not fatal. // If the state isn't available, or we can't decrypt the packet, drop // the rest of the datagram on the floor, but don't generate an error. - self.check_stateless_reset(path, &d, dcid.is_none(), now)?; + self.check_stateless_reset(path, d, dcid.is_none(), now)?; self.stats.borrow_mut().pkt_dropped("Decryption failure"); qlog::packet_dropped(&mut self.qlog, &packet); } @@ -1462,7 +1538,7 @@ impl Connection { slc = remainder; dcid = Some(ConnectionId::from(packet.dcid())); } - self.check_stateless_reset(path, &d, dcid.is_none(), now)?; + self.check_stateless_reset(path, d, dcid.is_none(), now)?; Ok(()) } @@ -1473,6 +1549,10 @@ impl Connection { packet: &DecryptedPacket, now: Instant, ) -> Res { + (!packet.is_empty()) + .then_some(()) + .ok_or(Error::ProtocolViolation)?; + // TODO(ekr@rtfm.com): Have the server blow away the initial // crypto state if this fails? 
Otherwise, we will get a panic // on the assert for doesn't exist. @@ -1481,24 +1561,8 @@ impl Connection { let mut ack_eliciting = false; let mut probing = true; let mut d = Decoder::from(&packet[..]); - let mut consecutive_padding = 0; while d.remaining() > 0 { - let mut f = Frame::decode(&mut d)?; - - // Skip padding - while f == Frame::Padding && d.remaining() > 0 { - consecutive_padding += 1; - f = Frame::decode(&mut d)?; - } - if consecutive_padding > 0 { - qdebug!( - [self], - "PADDING frame repeated {} times", - consecutive_padding - ); - consecutive_padding = 0; - } - + let f = Frame::decode(&mut d)?; ack_eliciting |= f.ack_eliciting(); probing &= f.path_probing(); let t = f.get_type(); @@ -1533,7 +1597,6 @@ impl Connection { /// During connection setup, the first path needs to be setup. /// This uses the connection IDs that were provided during the handshake /// to setup that path. - #[allow(clippy::or_fun_call)] // Remove when MSRV >= 1.59 fn setup_handshake_path(&mut self, path: &PathRef, now: Instant) { self.paths.make_permanent( path, @@ -1573,7 +1636,7 @@ impl Connection { } } - /// After an error, a permanent path is needed to send the CONNECTION_CLOSE. + /// After an error, a permanent path is needed to send the `CONNECTION_CLOSE`. /// This attempts to ensure that this exists. As the connection is now /// temporary, there is no reason to do anything special here. fn ensure_error_path(&mut self, path: &PathRef, packet: &PublicPacket, now: Instant) { @@ -1634,6 +1697,7 @@ impl Connection { /// Either way, the path is probed and will be abandoned if the probe fails. /// /// # Errors + /// /// Fails if this is not a client, not confirmed, or there are not enough connection /// IDs available to use. 
pub fn migrate( @@ -1672,9 +1736,13 @@ impl Connection { return Err(Error::InvalidMigration); } - let path = self - .paths - .find_path(local, remote, self.conn_params.get_cc_algorithm(), now); + let path = self.paths.find_path( + local, + remote, + self.conn_params.get_cc_algorithm(), + self.conn_params.pacing_enabled(), + now, + ); self.ensure_permanent(&path)?; qinfo!( [self], @@ -1707,8 +1775,8 @@ impl Connection { // be needed to work out how to get addresses from a different family. let prev = self.paths.primary().borrow().remote_address(); let remote = match prev.ip() { - IpAddr::V4(_) => addr.ipv4(), - IpAddr::V6(_) => addr.ipv6(), + IpAddr::V4(_) => addr.ipv4().map(SocketAddr::V4), + IpAddr::V6(_) => addr.ipv6().map(SocketAddr::V6), }; if let Some(remote) = remote { @@ -1758,7 +1826,7 @@ impl Connection { | State::Connected | State::Confirmed => { if let Some(path) = self.paths.select_path() { - let res = self.output_path(&path, now); + let res = self.output_path(&path, now, &None); self.capture_error(Some(path), now, 0, res) } else { Ok(SendOption::default()) @@ -1767,7 +1835,16 @@ impl Connection { State::Closing { .. } | State::Draining { .. } | State::Closed(_) => { if let Some(details) = self.state_signaling.close_frame() { let path = Rc::clone(details.path()); - let res = self.output_close(details); + // In some error cases, we will not be able to make a new, permanent path. + // For example, if we run out of connection IDs and the error results from + // a packet on a new path, we avoid sending (and the privacy risk) rather + // than reuse a connection ID. 
+ let res = if path.borrow().is_temporary() { + assert!(!cfg!(test), "attempting to close with a temporary path"); + Err(Error::InternalError) + } else { + self.output_path(&path, now, &Some(details)) + }; self.capture_error(Some(path), now, 0, res) } else { Ok(SendOption::default()) @@ -1844,149 +1921,88 @@ impl Connection { } } - fn output_close(&mut self, close: ClosingFrame) -> Res { - let mut encoder = Encoder::with_capacity(256); - let grease_quic_bit = self.can_grease_quic_bit(); - let version = self.version(); - for space in PacketNumberSpace::iter() { - let (cspace, tx) = - if let Some(crypto) = self.crypto.states.select_tx_mut(self.version, *space) { - crypto - } else { - continue; - }; - - let path = close.path().borrow(); - let (_, mut builder) = Self::build_packet_header( - &path, - cspace, - encoder, - tx, - &AddressValidationInfo::None, - version, - grease_quic_bit, - ); - _ = Self::add_packet_number( - &mut builder, - tx, - self.loss_recovery.largest_acknowledged_pn(*space), - ); - // The builder will set the limit to 0 if there isn't enough space for the header. - if builder.is_full() { - encoder = builder.abort(); - break; - } - builder.set_limit(min(path.amplification_limit(), path.mtu()) - tx.expansion()); - debug_assert!(builder.limit() <= 2048); - - // ConnectionError::Application is only allowed at 1RTT. - let sanitized = if *space == PacketNumberSpace::ApplicationData { - None - } else { - close.sanitize() - }; - sanitized - .as_ref() - .unwrap_or(&close) - .write_frame(&mut builder); - if builder.len() > builder.limit() { - return Err(Error::InternalError(10)); - } - encoder = builder.build(tx)?; - } - - Ok(SendOption::Yes(close.path().borrow().datagram(encoder))) - } - /// Write the frames that are exchanged in the application data space. /// The order of calls here determines the relative priority of frames. 
fn write_appdata_frames( &mut self, builder: &mut PacketBuilder, tokens: &mut Vec, - ) -> Res<()> { + ) { + let stats = &mut self.stats.borrow_mut(); + let frame_stats = &mut stats.frame_tx; if self.role == Role::Server { - if let Some(t) = self.state_signaling.write_done(builder)? { + if let Some(t) = self.state_signaling.write_done(builder) { tokens.push(t); - self.stats.borrow_mut().frame_tx.handshake_done += 1; + frame_stats.handshake_done += 1; } } - { - let stats = &mut self.stats.borrow_mut().frame_tx; - - + for prio in [ + TransmissionPriority::Critical, + TransmissionPriority::Important, + ] { self.streams - .write_frames(TransmissionPriority::Critical, builder, tokens, stats); + .write_frames(prio, builder, tokens, frame_stats); if builder.is_full() { - return Ok(()); - } - - self.streams - .write_frames(TransmissionPriority::Important, builder, tokens, stats); - if builder.is_full() { - return Ok(()); + return; } + } - // NEW_CONNECTION_ID, RETIRE_CONNECTION_ID, and ACK_FREQUENCY. - self.cid_manager.write_frames(builder, tokens, stats)?; - if builder.is_full() { - return Ok(()); - } - self.paths.write_frames(builder, tokens, stats)?; - if builder.is_full() { - return Ok(()); - } + // NEW_CONNECTION_ID, RETIRE_CONNECTION_ID, and ACK_FREQUENCY. 
+ self.cid_manager.write_frames(builder, tokens, frame_stats); + if builder.is_full() { + return; + } - self.streams - .write_frames(TransmissionPriority::High, builder, tokens, stats); - if builder.is_full() { - return Ok(()); - } + self.paths.write_frames(builder, tokens, frame_stats); + if builder.is_full() { + return; + } + for prio in [TransmissionPriority::High, TransmissionPriority::Normal] { self.streams - .write_frames(TransmissionPriority::Normal, builder, tokens, stats); + .write_frames(prio, builder, tokens, &mut stats.frame_tx); if builder.is_full() { - return Ok(()); + return; } } // Check if there is a Datagram to be written - // Currently we're giving them priority over user streams; they could be moved - // to after them (after Normal) - self.quic_datagrams - .write_frames(builder, tokens, &mut self.stats.borrow_mut()); + self.quic_datagrams.write_frames(builder, tokens, stats); if builder.is_full() { - return Ok(()); + return; } - { - let stats = &mut self.stats.borrow_mut().frame_tx; - - // CRYPTO here only includes NewSessionTicket, plus NEW_TOKEN. - // Both of these are only used for resumption and so can be relatively low priority. - self.crypto - .write_frame(PacketNumberSpace::ApplicationData, builder, tokens, stats)?; - if builder.is_full() { - return Ok(()); - } - self.new_token.write_frames(builder, tokens, stats)?; - if builder.is_full() { - return Ok(()); - } + // CRYPTO here only includes NewSessionTicket, plus NEW_TOKEN. + // Both of these are only used for resumption and so can be relatively low priority. 
+ let frame_stats = &mut stats.frame_tx; + self.crypto.write_frame( + PacketNumberSpace::ApplicationData, + builder, + tokens, + frame_stats, + ); + if builder.is_full() { + return; + } - self.streams - .write_frames(TransmissionPriority::Low, builder, tokens, stats); + self.new_token.write_frames(builder, tokens, frame_stats); + if builder.is_full() { + return; } + self.streams + .write_frames(TransmissionPriority::Low, builder, tokens, frame_stats); + #[cfg(test)] { + if builder.is_full() { + return; + } if let Some(w) = &mut self.test_frame_writer { w.write_frames(builder); } } - - Ok(()) } // Maybe send a probe. Return true if the packet was ack-eliciting. @@ -2047,15 +2063,21 @@ impl Connection { profile: &SendProfile, builder: &mut PacketBuilder, now: Instant, - ) -> Res<(Vec, bool, bool)> { + ) -> (Vec, bool, bool) { let mut tokens = Vec::new(); let primary = path.borrow().is_primary(); let mut ack_eliciting = false; if primary { let stats = &mut self.stats.borrow_mut().frame_tx; - self.acks - .write_frame(space, now, builder, &mut tokens, stats)?; + self.acks.write_frame( + space, + now, + path.borrow().rtt().estimate(), + builder, + &mut tokens, + stats, + ); } let ack_end = builder.len(); @@ -2070,23 +2092,22 @@ impl Connection { &mut self.stats.borrow_mut().frame_tx, full_mtu, now, - )? { + ) { builder.enable_padding(true); } } if profile.ack_only(space) { // If we are CC limited we can only send acks! 
- return Ok((tokens, false, false)); + return (tokens, false, false); } if primary { if space == PacketNumberSpace::ApplicationData { - self.write_appdata_frames(builder, &mut tokens)?; + self.write_appdata_frames(builder, &mut tokens); } else { let stats = &mut self.stats.borrow_mut().frame_tx; - self.crypto - .write_frame(space, builder, &mut tokens, stats)?; + self.crypto.write_frame(space, builder, &mut tokens, stats); } } @@ -2110,12 +2131,18 @@ impl Connection { }; stats.all += tokens.len(); - Ok((tokens, ack_eliciting, padded)) + (tokens, ack_eliciting, padded) } /// Build a datagram, possibly from multiple packets (for different PN /// spaces) and each containing 1+ frames. - fn output_path(&mut self, path: &PathRef, now: Instant) -> Res { + #[allow(clippy::too_many_lines)] // Yeah, that's just the way it is. + fn output_path( + &mut self, + path: &PathRef, + now: Instant, + closing_frame: &Option, + ) -> Res { let mut initial_sent = None; let mut needs_padding = false; let grease_quic_bit = self.can_grease_quic_bit(); @@ -2131,12 +2158,9 @@ impl Connection { let mut encoder = Encoder::with_capacity(profile.limit()); for space in PacketNumberSpace::iter() { // Ensure we have tx crypto state for this epoch, or skip it. - let (cspace, tx) = - if let Some(crypto) = self.crypto.states.select_tx_mut(self.version, *space) { - crypto - } else { - continue; - }; + let Some((cspace, tx)) = self.crypto.states.select_tx_mut(self.version, *space) else { + continue; + }; let header_start = encoder.len(); let (pt, mut builder) = Self::build_packet_header( @@ -2171,8 +2195,23 @@ impl Connection { // Add frames to the packet. let payload_start = builder.len(); - let (tokens, ack_eliciting, padded) = - self.write_frames(path, *space, &profile, &mut builder, now)?; + let (mut tokens, mut ack_eliciting, mut padded) = (Vec::new(), false, false); + if let Some(ref close) = closing_frame { + // ConnectionError::Application is only allowed at 1RTT. 
+ let sanitized = if *space == PacketNumberSpace::ApplicationData { + None + } else { + close.sanitize() + }; + sanitized + .as_ref() + .unwrap_or(close) + .write_frame(&mut builder); + self.stats.borrow_mut().frame_tx.connection_close += 1; + } else { + (tokens, ack_eliciting, padded) = + self.write_frames(path, *space, &profile, &mut builder, now); + } if builder.packet_empty() { // Nothing to include in this packet. encoder = builder.abort(); @@ -2186,6 +2225,7 @@ impl Connection { pt, pn, &builder.as_ref()[payload_start..], + IpTos::default(), // TODO: set from path ); qlog::packet_sent( &mut self.qlog, @@ -2238,6 +2278,7 @@ impl Connection { } if encoder.is_empty() { + qdebug!("TX blocked, profile={:?} ", profile); Ok(SendOption::No(profile.paced())) } else { // Perform additional padding for Initial packets as necessary. @@ -2251,6 +2292,8 @@ impl Connection { mtu ); initial.size += mtu - packets.len(); + // These zeros aren't padding frames, they are an invalid all-zero coalesced + // packet, which is why we don't increase `frame_tx.padding` count here. packets.resize(mtu, 0); } self.loss_recovery.on_packet_sent(path, initial); @@ -2260,6 +2303,8 @@ impl Connection { } } + /// # Errors + /// When connection state is not valid. 
pub fn initiate_key_update(&mut self) -> Res<()> { if self.state == State::Confirmed { let la = self @@ -2273,14 +2318,16 @@ impl Connection { } #[cfg(test)] + #[must_use] pub fn get_epochs(&self) -> (Option, Option) { self.crypto.states.get_epochs() } fn client_start(&mut self, now: Instant) -> Res<()> { - qinfo!([self], "client_start"); + qdebug!([self], "client_start"); debug_assert_eq!(self.role, Role::Client); qlog::client_connection_started(&mut self.qlog, &self.paths.primary()); + qlog::client_version_information_initiated(&mut self.qlog, self.conn_params.get_versions()); self.handshake(now, self.version, PacketNumberSpace::Initial, None)?; self.set_state(State::WaitInitial); @@ -2330,6 +2377,7 @@ impl Connection { ); } + #[must_use] pub fn is_stream_id_allowed(&self, stream_id: StreamId) -> bool { self.streams.is_stream_id_allowed(stream_id) } @@ -2357,7 +2405,7 @@ impl Connection { } else { // The other side didn't provide a stateless reset token. // That's OK, they can try guessing this. - <[u8; 16]>::try_from(&random(16)[..]).unwrap() + ConnectionIdEntry::random_srt() }; self.paths .primary() @@ -2508,7 +2556,7 @@ impl Connection { fn confirm_version(&mut self, v: Version) { if self.version != v { - qinfo!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v); + qdebug!([self], "Compatible upgrade {:?} ==> {:?}", self.version, v); } self.crypto.confirm_version(v); self.version = v; @@ -2538,10 +2586,16 @@ impl Connection { ) -> Res<()> { qtrace!([self], "Handshake space={} data={:0x?}", space, data); + let was_authentication_pending = + *self.crypto.tls.state() == HandshakeState::AuthenticationPending; let try_update = data.is_some(); match self.crypto.handshake(now, space, data)? 
{ HandshakeState::Authenticated(_) | HandshakeState::InProgress => (), - HandshakeState::AuthenticationPending => self.events.authentication_needed(), + HandshakeState::AuthenticationPending => { + if !was_authentication_pending { + self.events.authentication_needed(); + } + } HandshakeState::EchFallbackAuthenticationPending(public_name) => self .events .ech_fallback_authentication_needed(public_name.clone()), @@ -2576,6 +2630,7 @@ impl Connection { Ok(()) } + #[allow(clippy::too_many_lines)] // Yep, but it's a nice big match, which is basically lots of little functions. fn input_frame( &mut self, path: &PathRef, @@ -2593,12 +2648,11 @@ impl Connection { if frame.is_stream() { return self .streams - .input_frame(frame, &mut self.stats.borrow_mut().frame_rx); + .input_frame(&frame, &mut self.stats.borrow_mut().frame_rx); } match frame { - Frame::Padding => { - // Note: This counts contiguous padding as a single frame. - self.stats.borrow_mut().frame_rx.padding += 1; + Frame::Padding(length) => { + self.stats.borrow_mut().frame_rx.padding += usize::from(length); } Frame::Ping => { // If we get a PING and there are outstanding CRYPTO frames, @@ -2629,7 +2683,7 @@ impl Connection { &data ); self.stats.borrow_mut().frame_rx.crypto += 1; - self.crypto.streams.inbound_frame(space, offset, data); + self.crypto.streams.inbound_frame(space, offset, data)?; if self.crypto.streams.data_ready(space) { let mut buf = Vec::new(); let read = self.crypto.streams.read_to_end(space, &mut buf); @@ -2801,7 +2855,7 @@ impl Connection { R: IntoIterator> + Debug, R::IntoIter: ExactSizeIterator, { - qinfo!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); + qdebug!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges); let (acked_packets, lost_packets) = self.loss_recovery.on_ack_received( &self.paths.primary(), @@ -2855,7 +2909,7 @@ impl Connection { } fn set_connected(&mut self, now: Instant) -> Res<()> { - qinfo!([self], "TLS connection complete"); + qdebug!([self], "TLS 
connection complete"); if self.crypto.tls.info().map(SecretAgentInfo::alpn).is_none() { qwarn!([self], "No ALPN. Closing connection."); // 120 = no_application_protocol @@ -2898,13 +2952,13 @@ impl Connection { fn set_state(&mut self, state: State) { if state > self.state { - qinfo!([self], "State change from {:?} -> {:?}", self.state, state); + qdebug!([self], "State change from {:?} -> {:?}", self.state, state); self.state = state.clone(); if self.state.closed() { self.streams.clear_streams(); } self.events.connection_state_change(state); - qlog::connection_state_updated(&mut self.qlog, &self.state) + qlog::connection_state_updated(&mut self.qlog, &self.state); } else if mem::discriminant(&state) != mem::discriminant(&self.state) { // Only tolerate a regression in state if the new state is closing // and the connection is already closed. @@ -2918,7 +2972,9 @@ impl Connection { /// Create a stream. /// Returns new stream id + /// /// # Errors + /// /// `ConnectionState` if the connecton stat does not allow to create streams. /// `StreamLimitError` if we are limiied by server's stream concurence. pub fn stream_create(&mut self, st: StreamType) -> Res { @@ -2940,7 +2996,9 @@ impl Connection { } /// Set the priority of a stream. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist. pub fn stream_priority( &mut self, @@ -2954,9 +3012,10 @@ impl Connection { Ok(()) } - /// Set the SendOrder of a stream. Re-enqueues to keep the ordering correct + /// Set the `SendOrder` of a stream. Re-enqueues to keep the ordering correct + /// /// # Errors - /// Returns InvalidStreamId if the stream id doesn't exist + /// When the stream does not exist. pub fn stream_sendorder( &mut self, stream_id: StreamId, @@ -2966,16 +3025,23 @@ impl Connection { } /// Set the Fairness of a stream + /// /// # Errors - /// Returns InvalidStreamId if the stream id doesn't exist + /// When the stream does not exist. 
pub fn stream_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { self.streams.set_fairness(stream_id, fairness) } + /// # Errors + /// When the stream does not exist. pub fn send_stream_stats(&self, stream_id: StreamId) -> Res { - self.streams.get_send_stream(stream_id).map(|s| s.stats()) + self.streams + .get_send_stream(stream_id) + .map(SendStream::stats) } + /// # Errors + /// When the stream does not exist. pub fn recv_stream_stats(&mut self, stream_id: StreamId) -> Res { let stream = self.streams.get_recv_stream_mut(stream_id)?; @@ -2985,7 +3051,9 @@ impl Connection { /// Send data on a stream. /// Returns how many bytes were successfully sent. Could be less /// than total, based on receiver credit space available, etc. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist, /// `InvalidInput` if length of `data` is zero, /// `FinalSizeError` if the stream has already been closed. @@ -2993,10 +3061,12 @@ impl Connection { self.streams.get_send_stream_mut(stream_id)?.send(data) } - /// Send all data or nothing on a stream. May cause DATA_BLOCKED or - /// STREAM_DATA_BLOCKED frames to be sent. + /// Send all data or nothing on a stream. May cause `DATA_BLOCKED` or + /// `STREAM_DATA_BLOCKED` frames to be sent. /// Returns true if data was successfully sent, otherwise false. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist, /// `InvalidInput` if length of `data` is zero, /// `FinalSizeError` if the stream has already been closed. @@ -3016,20 +3086,26 @@ impl Connection { val.map(|v| v == data.len()) } - /// Bytes that stream_send() is guaranteed to accept for sending. + /// Bytes that `stream_send()` is guaranteed to accept for sending. /// i.e. that will not be blocked by flow credits or send buffer max /// capacity. + /// # Errors + /// When the stream ID is invalid. 
pub fn stream_avail_send_space(&self, stream_id: StreamId) -> Res { Ok(self.streams.get_send_stream(stream_id)?.avail()) } /// Close the stream. Enqueued data will be sent. + /// # Errors + /// When the stream ID is invalid. pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> { self.streams.get_send_stream_mut(stream_id)?.close(); Ok(()) } /// Abandon transmission of in-flight and future stream data. + /// # Errors + /// When the stream ID is invalid. pub fn stream_reset_send(&mut self, stream_id: StreamId, err: AppError) -> Res<()> { self.streams.get_send_stream_mut(stream_id)?.reset(err); Ok(()) @@ -3037,7 +3113,9 @@ impl Connection { /// Read buffered data from stream. bool says whether read bytes includes /// the final data on stream. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. /// `NoMoreData` if data and fin bit were previously read by the application. pub fn stream_recv(&mut self, stream_id: StreamId, data: &mut [u8]) -> Res<(usize, bool)> { @@ -3048,6 +3126,8 @@ impl Connection { } /// Application is no longer interested in this stream. + /// # Errors + /// When the stream ID is invalid. pub fn stream_stop_sending(&mut self, stream_id: StreamId, err: AppError) -> Res<()> { let stream = self.streams.get_recv_stream_mut(stream_id)?; @@ -3056,7 +3136,9 @@ impl Connection { } /// Increases `max_stream_data` for a `stream_id`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. pub fn set_stream_max_data(&mut self, stream_id: StreamId, max_data: u64) -> Res<()> { @@ -3070,13 +3152,16 @@ impl Connection { /// (if `keep` is `true`) or no longer important (if `keep` is `false`). If any /// stream is marked this way, PING frames will be used to keep the connection /// alive, even when there is no activity. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. 
pub fn stream_keep_alive(&mut self, stream_id: StreamId, keep: bool) -> Res<()> { self.streams.keep_alive(stream_id, keep) } + #[must_use] pub fn remote_datagram_size(&self) -> u64 { self.quic_datagrams.remote_datagram_size() } @@ -3084,21 +3169,22 @@ impl Connection { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the /// packet number, ack frames, etc. - /// # Error + /// + /// # Errors /// The function returns `NotAvailable` if datagrams are not enabled. + /// # Panics + /// Basically never, because that unwrap won't fail. pub fn max_datagram_size(&self) -> Res { let max_dgram_size = self.quic_datagrams.remote_datagram_size(); if max_dgram_size == 0 { return Err(Error::NotAvailable); } let version = self.version(); - let (cspace, tx) = if let Some(crypto) = self + let Some((cspace, tx)) = self .crypto .states .select_tx(self.version, PacketNumberSpace::ApplicationData) - { - crypto - } else { + else { return Err(Error::NotAvailable); }; let path = self.paths.primary_fallible().ok_or(Error::NotAvailable)?; @@ -3127,7 +3213,9 @@ impl Connection { } /// Queue a datagram for sending. - /// # Error + /// + /// # Errors + /// /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. The funcion does not check if the /// datagram can fit into a packet (i.e. MTU limit). This is checked during @@ -3136,7 +3224,6 @@ impl Connection { /// to check the estimated max datagram size and to use smaller datagrams. /// `max_datagram_size` is just a current estimate and will change over /// time depending on the encoded size of the packet number, ack frames, etc. 
- pub fn send_datagram(&mut self, buf: &[u8], id: impl Into) -> Res<()> { self.quic_datagrams .add_datagram(buf, id.into(), &mut self.stats.borrow_mut()) diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index e6617b5adc..d8aa617024 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -4,18 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT}; +use std::{cmp::max, time::Duration}; + pub use crate::recovery::FAST_PTO_SCALE; -use crate::recv_stream::RECV_BUFFER_SIZE; -use crate::rtt::GRANULARITY; -use crate::stream_id::StreamType; -use crate::tparams::{self, PreferredAddress, TransportParameter, TransportParametersHandler}; -use crate::tracking::DEFAULT_ACK_DELAY; -use crate::version::{Version, VersionConfig}; -use crate::{CongestionControlAlgorithm, Res}; -use std::cmp::max; -use std::convert::TryFrom; -use std::time::Duration; +use crate::{ + connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT}, + recv_stream::RECV_BUFFER_SIZE, + rtt::GRANULARITY, + stream_id::StreamType, + tparams::{self, PreferredAddress, TransportParameter, TransportParametersHandler}, + tracking::DEFAULT_ACK_DELAY, + version::{Version, VersionConfig}, + CongestionControlAlgorithm, Res, +}; const LOCAL_MAX_DATA: u64 = 0x3FFF_FFFF_FFFF_FFFF; // 2^62-1 const LOCAL_STREAM_LIMIT_BIDI: u64 = 16; @@ -30,7 +31,7 @@ const DEFAULT_IDLE_TIMEOUT: Duration = Duration::from_secs(30); const MAX_QUEUED_DATAGRAMS_DEFAULT: usize = 10; /// What to do with preferred addresses. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub enum PreferredAddressConfig { /// Disabled, whether for client or server. Disabled, @@ -40,7 +41,7 @@ pub enum PreferredAddressConfig { Address(PreferredAddress), } -/// ConnectionParameters use for setting intitial value for QUIC parameters. 
+/// `ConnectionParameters` use for setting initial value for QUIC parameters. /// This collects configuration like initial limits, protocol version, and /// congestion control algorithm. #[derive(Debug, Clone)] @@ -49,11 +50,14 @@ pub struct ConnectionParameters { cc_algorithm: CongestionControlAlgorithm, /// Initial connection-level flow control limit. max_data: u64, - /// Initial flow control limit for receiving data on bidirectional streams that the peer creates. + /// Initial flow control limit for receiving data on bidirectional streams that the peer + /// creates. max_stream_data_bidi_remote: u64, - /// Initial flow control limit for receiving data on bidirectional streams that this endpoint creates. + /// Initial flow control limit for receiving data on bidirectional streams that this endpoint + /// creates. max_stream_data_bidi_local: u64, - /// Initial flow control limit for receiving data on unidirectional streams that the peer creates. + /// Initial flow control limit for receiving data on unidirectional streams that the peer + /// creates. max_stream_data_uni: u64, /// Initial limit on bidirectional streams that the peer creates. max_streams_bidi: u64, @@ -73,8 +77,8 @@ pub struct ConnectionParameters { outgoing_datagram_queue: usize, incoming_datagram_queue: usize, fast_pto: u8, - fuzzing: bool, grease: bool, + pacing: bool, } impl Default for ConnectionParameters { @@ -95,13 +99,14 @@ outgoing_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, incoming_datagram_queue: MAX_QUEUED_DATAGRAMS_DEFAULT, fast_pto: FAST_PTO_SCALE, - fuzzing: false, grease: true, + pacing: true, } } } impl ConnectionParameters { + #[must_use] pub fn get_versions(&self) -> &VersionConfig { &self.versions } @@ -114,29 +119,35 @@ /// versions that should be enabled. This list should contain the initial /// version and be in order of preference, with more preferred versions /// before less preferred.
+ #[must_use] pub fn versions(mut self, initial: Version, all: Vec) -> Self { self.versions = VersionConfig::new(initial, all); self } + #[must_use] pub fn get_cc_algorithm(&self) -> CongestionControlAlgorithm { self.cc_algorithm } + #[must_use] pub fn cc_algorithm(mut self, v: CongestionControlAlgorithm) -> Self { self.cc_algorithm = v; self } + #[must_use] pub fn get_max_data(&self) -> u64 { self.max_data } + #[must_use] pub fn max_data(mut self, v: u64) -> Self { self.max_data = v; self } + #[must_use] pub fn get_max_streams(&self, stream_type: StreamType) -> u64 { match stream_type { StreamType::BiDi => self.max_streams_bidi, @@ -145,7 +156,9 @@ impl ConnectionParameters { } /// # Panics + /// /// If v > 2^60 (the maximum allowed by the protocol). + #[must_use] pub fn max_streams(mut self, stream_type: StreamType, v: u64) -> Self { assert!(v <= (1 << 60), "max_streams is too large"); match stream_type { @@ -160,8 +173,11 @@ impl ConnectionParameters { } /// Get the maximum stream data that we will accept on different types of streams. + /// /// # Panics + /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination. + #[must_use] pub fn get_max_stream_data(&self, stream_type: StreamType, remote: bool) -> u64 { match (stream_type, remote) { (StreamType::BiDi, false) => self.max_stream_data_bidi_local, @@ -174,9 +190,12 @@ impl ConnectionParameters { } /// Set the maximum stream data that we will accept on different types of streams. + /// /// # Panics + /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination /// or if v >= 62 (the maximum allowed by the protocol). + #[must_use] pub fn max_stream_data(mut self, stream_type: StreamType, remote: bool, v: u64) -> Self { assert!(v < (1 << 62), "max stream data is too large"); match (stream_type, remote) { @@ -197,71 +216,86 @@ impl ConnectionParameters { } /// Set a preferred address (which only has an effect for a server). 
+ #[must_use] pub fn preferred_address(mut self, preferred: PreferredAddress) -> Self { self.preferred_address = PreferredAddressConfig::Address(preferred); self } /// Disable the use of preferred addresses. + #[must_use] pub fn disable_preferred_address(mut self) -> Self { self.preferred_address = PreferredAddressConfig::Disabled; self } + #[must_use] pub fn get_preferred_address(&self) -> &PreferredAddressConfig { &self.preferred_address } + #[must_use] pub fn ack_ratio(mut self, ack_ratio: u8) -> Self { self.ack_ratio = ack_ratio; self } + #[must_use] pub fn get_ack_ratio(&self) -> u8 { self.ack_ratio } /// # Panics + /// /// If `timeout` is 2^62 milliseconds or more. + #[must_use] pub fn idle_timeout(mut self, timeout: Duration) -> Self { assert!(timeout.as_millis() < (1 << 62), "idle timeout is too long"); self.idle_timeout = timeout; self } + #[must_use] pub fn get_idle_timeout(&self) -> Duration { self.idle_timeout } + #[must_use] pub fn get_datagram_size(&self) -> u64 { self.datagram_size } + #[must_use] pub fn datagram_size(mut self, v: u64) -> Self { self.datagram_size = v; self } + #[must_use] pub fn get_outgoing_datagram_queue(&self) -> usize { self.outgoing_datagram_queue } + #[must_use] pub fn outgoing_datagram_queue(mut self, v: usize) -> Self { // The max queue length must be at least 1. self.outgoing_datagram_queue = max(v, 1); self } + #[must_use] pub fn get_incoming_datagram_queue(&self) -> usize { self.incoming_datagram_queue } + #[must_use] pub fn incoming_datagram_queue(mut self, v: usize) -> Self { // The max queue length must be at least 1. self.incoming_datagram_queue = max(v, 1); self } + #[must_use] pub fn get_fast_pto(&self) -> u8 { self.fast_pto } @@ -279,31 +313,41 @@ impl ConnectionParameters { /// congestion. /// /// # Panics + /// /// A value of 0 is invalid and will cause a panic. 
+ #[must_use] pub fn fast_pto(mut self, scale: u8) -> Self { assert_ne!(scale, 0); self.fast_pto = scale; self } - pub fn is_fuzzing(&self) -> bool { - self.fuzzing + #[must_use] + pub fn is_greasing(&self) -> bool { + self.grease } - pub fn fuzzing(mut self, enable: bool) -> Self { - self.fuzzing = enable; + #[must_use] + pub fn grease(mut self, grease: bool) -> Self { + self.grease = grease; self } - pub fn is_greasing(&self) -> bool { - self.grease + #[must_use] + pub fn pacing_enabled(&self) -> bool { + self.pacing } - pub fn grease(mut self, grease: bool) -> Self { - self.grease = grease; + #[must_use] + pub fn pacing(mut self, pacing: bool) -> Self { + self.pacing = pacing; self } + /// # Errors + /// When a connection ID cannot be obtained. + /// # Panics + /// Only when this code includes a transport parameter that is invalid. pub fn create_transport_parameter( &self, role: Role, diff --git a/neqo-transport/src/connection/saved.rs b/neqo-transport/src/connection/saved.rs index 368a859f5d..f5616c732a 100644 --- a/neqo-transport/src/connection/saved.rs +++ b/neqo-transport/src/connection/saved.rs @@ -4,12 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::mem; -use std::time::Instant; +use std::{mem, time::Instant}; -use crate::crypto::CryptoSpace; use neqo_common::{qdebug, qinfo, Datagram}; +use crate::crypto::CryptoSpace; + /// The number of datagrams that are saved during the handshake when /// keys to decrypt them are not yet available. const MAX_SAVED_DATAGRAMS: usize = 4; diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index a34c91865e..cc2f6e30d2 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -4,20 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + cmp::{min, Ordering}, + mem, + rc::Rc, + time::Instant, +}; + use neqo_common::Encoder; -use std::cmp::{min, Ordering}; -use std::mem; -use std::rc::Rc; -use std::time::Instant; -use crate::frame::{ - FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT, - FRAME_TYPE_HANDSHAKE_DONE, +use crate::{ + frame::{ + FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT, + FRAME_TYPE_HANDSHAKE_DONE, + }, + packet::PacketBuilder, + path::PathRef, + recovery::RecoveryToken, + ConnectionError, Error, }; -use crate::packet::PacketBuilder; -use crate::path::PathRef; -use crate::recovery::RecoveryToken; -use crate::{ConnectionError, Error, Res}; #[derive(Clone, Debug, PartialEq, Eq)] /// The state of the Connection. @@ -61,6 +66,7 @@ impl State { ) } + #[must_use] pub fn error(&self) -> Option<&ConnectionError> { if let Self::Closing { error, .. } | Self::Draining { error, .. } | Self::Closed(error) = self @@ -179,13 +185,13 @@ impl ClosingFrame { } } -/// `StateSignaling` manages whether we need to send HANDSHAKE_DONE and CONNECTION_CLOSE. +/// `StateSignaling` manages whether we need to send `HANDSHAKE_DONE` and `CONNECTION_CLOSE`. 
/// Valid state transitions are: -/// * Idle -> HandshakeDone: at the server when the handshake completes -/// * HandshakeDone -> Idle: when a HANDSHAKE_DONE frame is sent +/// * Idle -> `HandshakeDone`: at the server when the handshake completes +/// * `HandshakeDone` -> Idle: when a `HANDSHAKE_DONE` frame is sent /// * Idle/HandshakeDone -> Closing/Draining: when closing or draining -/// * Closing/Draining -> CloseSent: after sending CONNECTION_CLOSE -/// * CloseSent -> Closing: any time a new CONNECTION_CLOSE is needed +/// * Closing/Draining -> `CloseSent`: after sending `CONNECTION_CLOSE` +/// * `CloseSent` -> Closing: any time a new `CONNECTION_CLOSE` is needed /// * -> Reset: from any state in case of a stateless reset #[derive(Debug, Clone)] pub enum StateSignaling { @@ -203,22 +209,22 @@ pub enum StateSignaling { impl StateSignaling { pub fn handshake_done(&mut self) { if !matches!(self, Self::Idle) { - debug_assert!(false, "StateSignaling must be in Idle state."); + debug_assert!( + false, + "StateSignaling must be in Idle state but is in {self:?} state.", + ); return; } - *self = Self::HandshakeDone + *self = Self::HandshakeDone; } - pub fn write_done(&mut self, builder: &mut PacketBuilder) -> Res> { + pub fn write_done(&mut self, builder: &mut PacketBuilder) -> Option { if matches!(self, Self::HandshakeDone) && builder.remaining() >= 1 { *self = Self::Idle; builder.encode_varint(FRAME_TYPE_HANDSHAKE_DONE); - if builder.len() > builder.limit() { - return Err(Error::InternalError(14)); - } - Ok(Some(RecoveryToken::HandshakeDone)) + Some(RecoveryToken::HandshakeDone) } else { - Ok(None) + None } } diff --git a/neqo-transport/src/connection/tests/ackrate.rs b/neqo-transport/src/connection/tests/ackrate.rs index 5a0c7ae351..f0a1d17cd9 100644 --- a/neqo-transport/src/connection/tests/ackrate.rs +++ b/neqo-transport/src/connection/tests/ackrate.rs @@ -4,6 +4,10 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. +use std::{mem, time::Duration}; + +use test_fixture::{assertions, DEFAULT_ADDR_V4}; + use super::{ super::{ConnectionParameters, ACK_RATIO_SCALE}, ack_bytes, connect_rtt_idle, default_client, default_server, fill_cwnd, increase_cwnd, @@ -11,9 +15,6 @@ use super::{ }; use crate::stream_id::StreamType; -use std::{mem, time::Duration}; -use test_fixture::{addr_v4, assertions}; - /// With the default RTT here (100ms) and default ratio (4), endpoints won't send /// `ACK_FREQUENCY` as the ACK delay isn't different enough from the default. #[test] @@ -71,7 +72,7 @@ fn ack_rate_exit_slow_start() { // and to send ACK_FREQUENCY. now += DEFAULT_RTT / 2; assert_eq!(client.stats().frame_tx.ack_frequency, 0); - let af = client.process(Some(ack), now).dgram(); + let af = client.process(Some(&ack), now).dgram(); assert!(af.is_some()); assert_eq!(client.stats().frame_tx.ack_frequency, 1); } @@ -117,9 +118,14 @@ fn ack_rate_client_one_rtt() { // A single packet from the client will cause the server to engage its delayed // acknowledgment timer, which should now be equal to RTT. + // The first packet will elicit an immediate ACK however, so do this twice. let d = send_something(&mut client, now); now += RTT / 2; - let delay = server.process(Some(d), now).callback(); + let ack = server.process(Some(&d), now).dgram(); + assert!(ack.is_some()); + let d = send_something(&mut client, now); + now += RTT / 2; + let delay = server.process(Some(&d), now).callback(); assert_eq!(delay, RTT); assert_eq!(client.stats().frame_tx.ack_frequency, 1); @@ -133,9 +139,16 @@ fn ack_rate_server_half_rtt() { let mut server = new_server(ConnectionParameters::default().ack_ratio(ACK_RATIO_SCALE * 2)); let mut now = connect_rtt_idle(&mut client, &mut server, RTT); + // The server now sends something. 
+ let d = send_something(&mut server, now); + now += RTT / 2; + // The client now will acknowledge immediately because it has been more than + // an RTT since it last sent an acknowledgment. + let ack = client.process(Some(&d), now); + assert!(ack.as_dgram_ref().is_some()); let d = send_something(&mut server, now); now += RTT / 2; - let delay = client.process(Some(d), now).callback(); + let delay = client.process(Some(&d), now).callback(); assert_eq!(delay, RTT / 2); assert_eq!(server.stats().frame_tx.ack_frequency, 1); @@ -151,7 +164,7 @@ fn migrate_ack_delay() { let mut now = connect_rtt_idle(&mut client, &mut server, DEFAULT_RTT); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); let client1 = send_something(&mut client, now); @@ -159,7 +172,7 @@ fn migrate_ack_delay() { let client2 = send_something(&mut client, now); assertions::assert_v4_path(&client2, false); // Doesn't. Is dropped. now += DEFAULT_RTT / 2; - server.process_input(client1, now); + server.process_input(&client1, now); let stream = client.stream_create(StreamType::UniDi).unwrap(); let now = increase_cwnd(&mut client, &mut server, stream, now); @@ -175,7 +188,7 @@ fn migrate_ack_delay() { // After noticing this new loss, the client sends ACK_FREQUENCY. // It has sent a few before (as we dropped `client2`), so ignore those. let ad_before = client.stats().frame_tx.ack_frequency; - let af = client.process(Some(ack), now).dgram(); + let af = client.process(Some(&ack), now).dgram(); assert!(af.is_some()); assert_eq!(client.stats().frame_tx.ack_frequency, ad_before + 1); } diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index 26e4dbd014..b708bc421d 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -4,23 +4,23 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use super::super::Output; -use super::{ - ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, default_client, - default_server, fill_cwnd, induce_persistent_congestion, send_something, DEFAULT_RTT, - FORCE_IDLE_CLIENT_1RTT_PACKETS, POST_HANDSHAKE_CWND, -}; -use crate::cc::MAX_DATAGRAM_SIZE; -use crate::packet::PacketNumber; -use crate::recovery::{ACK_ONLY_SIZE_LIMIT, PACKET_THRESHOLD}; -use crate::sender::PACING_BURST_SIZE; -use crate::stream_id::StreamType; -use crate::tracking::DEFAULT_ACK_PACKET_TOLERANCE; +use std::{mem, time::Duration}; use neqo_common::{qdebug, qinfo, Datagram}; -use std::convert::TryFrom; -use std::mem; -use std::time::Duration; + +use super::{ + super::Output, ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, + default_client, default_server, fill_cwnd, induce_persistent_congestion, send_something, + CLIENT_HANDSHAKE_1RTT_PACKETS, DEFAULT_RTT, POST_HANDSHAKE_CWND, +}; +use crate::{ + cc::MAX_DATAGRAM_SIZE, + packet::PacketNumber, + recovery::{ACK_ONLY_SIZE_LIMIT, PACKET_THRESHOLD}, + sender::PACING_BURST_SIZE, + stream_id::StreamType, + tracking::DEFAULT_ACK_PACKET_TOLERANCE, +}; #[test] /// Verify initial CWND is honored. @@ -54,7 +54,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { // We have already sent packets in `connect_rtt_idle`, // so include a fudge factor. 
let flight1_largest = - PacketNumber::try_from(c_tx_dgrams.len() + FORCE_IDLE_CLIENT_1RTT_PACKETS).unwrap(); + PacketNumber::try_from(c_tx_dgrams.len() + CLIENT_HANDSHAKE_1RTT_PACKETS).unwrap(); // Server: Receive and generate ack now += DEFAULT_RTT / 2; @@ -66,11 +66,12 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { // Client: Process ack now += DEFAULT_RTT / 2; - client.process_input(s_ack, now); + client.process_input(&s_ack, now); assert_eq!( client.stats().frame_rx.largest_acknowledged, flight1_largest ); + let cwnd_before_cong = cwnd(&client); // Client: send more let (mut c_tx_dgrams, mut now) = fill_cwnd(&mut client, stream_id, now); @@ -88,11 +89,12 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { // Client: Process ack now += DEFAULT_RTT / 2; - client.process_input(s_ack, now); + client.process_input(&s_ack, now); assert_eq!( client.stats().frame_rx.largest_acknowledged, flight2_largest ); + assert!(cwnd(&client) < cwnd_before_cong); } #[test] @@ -118,7 +120,7 @@ fn cc_cong_avoidance_recovery_period_unchanged() { // Server: Receive and generate ack let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams, now); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); let cwnd1 = cwnd(&client); @@ -126,7 +128,7 @@ fn cc_cong_avoidance_recovery_period_unchanged() { let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams2, now); // ACK more packets but they were sent before end of recovery period - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // cwnd should not have changed since ACKed packets were sent before // recovery period expired @@ -156,12 +158,12 @@ fn single_packet_on_recovery() { // Acknowledge just one packet and cause one packet to be declared lost. // The length is the amount of credit the client should have. 
- let ack = server.process(Some(delivered), now).dgram(); + let ack = server.process(Some(&delivered), now).dgram(); assert!(ack.is_some()); // The client should see the loss and enter recovery. // As there are many outstanding packets, there should be no available cwnd. - client.process_input(ack.unwrap(), now); + client.process_input(&ack.unwrap(), now); assert_eq!(cwnd_avail(&client), 0); // The client should send one packet, ignoring the cwnd. @@ -193,7 +195,7 @@ fn cc_cong_avoidance_recovery_period_to_cong_avoidance() { // Client: Process ack now += DEFAULT_RTT / 2; - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // Should be in CARP now. now += DEFAULT_RTT / 2; @@ -227,7 +229,7 @@ fn cc_cong_avoidance_recovery_period_to_cong_avoidance() { let most = c_tx_dgrams.len() - usize::try_from(DEFAULT_ACK_PACKET_TOLERANCE).unwrap() - 1; let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams.drain(..most), now); assert_eq!(cwnd(&client), expected_cwnd); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // make sure to fill cwnd again. let (mut new_pkts, next_now) = fill_cwnd(&mut client, stream_id, now); now = next_now; @@ -235,7 +237,7 @@ fn cc_cong_avoidance_recovery_period_to_cong_avoidance() { let s_ack = ack_bytes(&mut server, stream_id, c_tx_dgrams, now); assert_eq!(cwnd(&client), expected_cwnd); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // make sure to fill cwnd again. let (mut new_pkts, next_now) = fill_cwnd(&mut client, stream_id, now); now = next_now; @@ -287,7 +289,7 @@ fn cc_slow_start_to_persistent_congestion_some_acks() { let s_ack = ack_bytes(&mut server, stream, c_tx_dgrams, now); now += Duration::from_millis(100); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // send bytes that will be lost let (_, next_now) = fill_cwnd(&mut client, stream, now); @@ -333,7 +335,7 @@ fn cc_persistent_congestion_to_slow_start() { // No longer in CARP. 
(pkts acked from after start of CARP) // Should be in slow start now. - client.process_input(s_ack, now); + client.process_input(&s_ack, now); // ACKing 2 packets should let client send 4. let (c_tx_dgrams, _) = fill_cwnd(&mut client, stream, now); @@ -371,11 +373,11 @@ fn ack_are_not_cc() { // The client can ack the server packet even if cc windows is full. qdebug!([client], "Process ack-eliciting"); - let ack_pkt = client.process(ack_eliciting_packet, now).dgram(); + let ack_pkt = client.process(ack_eliciting_packet.as_ref(), now).dgram(); assert!(ack_pkt.is_some()); qdebug!([server], "Handle ACK"); let prev_ack_count = server.stats().frame_rx.ack; - server.process_input(ack_pkt.unwrap(), now); + server.process_input(&ack_pkt.unwrap(), now); assert_eq!(server.stats().frame_rx.ack, prev_ack_count + 1); } diff --git a/neqo-transport/src/connection/tests/close.rs b/neqo-transport/src/connection/tests/close.rs index a9f1fafa25..5351dd0d5c 100644 --- a/neqo-transport/src/connection/tests/close.rs +++ b/neqo-transport/src/connection/tests/close.rs @@ -4,14 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{Connection, Output, State}; -use super::{connect, connect_force_idle, default_client, default_server, send_something}; -use crate::tparams::{self, TransportParameter}; -use crate::{AppError, ConnectionError, Error, ERROR_APPLICATION_CLOSE}; - -use neqo_common::Datagram; use std::time::Duration; -use test_fixture::{self, addr, now}; + +use test_fixture::{datagram, now}; + +use super::{ + super::{Connection, Output, State}, + connect, connect_force_idle, default_client, default_server, send_something, +}; +use crate::{ + tparams::{self, TransportParameter}, + AppError, ConnectionError, Error, ERROR_APPLICATION_CLOSE, +}; fn assert_draining(c: &Connection, expected: &Error) { assert!(c.state().closed()); @@ -38,7 +42,7 @@ fn connection_close() { let out = client.process(None, now); - server.process_input(out.dgram().unwrap(), now); + server.process_input(&out.dgram().unwrap(), now); assert_draining(&server, &Error::PeerApplicationError(42)); } @@ -55,7 +59,7 @@ fn connection_close_with_long_reason_string() { let out = client.process(None, now); - server.process_input(out.dgram().unwrap(), now); + server.process_input(&out.dgram().unwrap(), now); assert_draining(&server, &Error::PeerApplicationError(42)); } @@ -68,7 +72,7 @@ fn early_application_close() { // One flight each. 
let dgram = client.process(None, now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); server.close(now(), 77, String::new()); @@ -76,7 +80,7 @@ fn early_application_close() { let dgram = server.process(None, now()).dgram(); assert!(dgram.is_some()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert_draining(&client, &Error::PeerError(ERROR_APPLICATION_CLOSE)); } @@ -93,13 +97,13 @@ fn bad_tls_version() { let dgram = client.process(None, now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert_eq!( *server.state(), State::Closed(ConnectionError::Transport(Error::ProtocolViolation)) ); assert!(dgram.is_some()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert_draining(&client, &Error::PeerError(Error::ProtocolViolation.code())); } @@ -116,11 +120,11 @@ fn closing_timers_interation() { // We're going to induce time-based loss recovery so that timer is set. let _p1 = send_something(&mut client, now); let p2 = send_something(&mut client, now); - let ack = server.process(Some(p2), now).dgram(); + let ack = server.process(Some(&p2), now).dgram(); assert!(ack.is_some()); // This is an ACK. // After processing the ACK, we should be on the loss recovery timer. - let cb = client.process(ack, now).callback(); + let cb = client.process(ack.as_ref(), now).callback(); assert_ne!(cb, Duration::from_secs(0)); now += cb; @@ -153,7 +157,7 @@ fn closing_and_draining() { // The client will spit out the same packet in response to anything it receives. 
let p3 = send_something(&mut server, now()); - let client_close2 = client.process(Some(p3), now()).dgram(); + let client_close2 = client.process(Some(&p3), now()).dgram(); assert_eq!( client_close.as_ref().unwrap().len(), client_close2.as_ref().unwrap().len() @@ -168,14 +172,14 @@ fn closing_and_draining() { ); // When the server receives the close, it too should generate CONNECTION_CLOSE. - let server_close = server.process(client_close, now()).dgram(); + let server_close = server.process(client_close.as_ref(), now()).dgram(); assert!(server.state().closed()); assert!(server_close.is_some()); // .. but it ignores any further close packets. - let server_close_timer = server.process(client_close2, now()).callback(); + let server_close_timer = server.process(client_close2.as_ref(), now()).callback(); assert_ne!(server_close_timer, Duration::from_secs(0)); // Even a legitimate packet without a close in it. - let server_close_timer2 = server.process(Some(p1), now()).callback(); + let server_close_timer2 = server.process(Some(&p1), now()).callback(); assert_eq!(server_close_timer, server_close_timer2); let end = server.process(None, now() + server_close_timer); @@ -201,6 +205,6 @@ fn stateless_reset_client() { .unwrap(); connect_force_idle(&mut client, &mut server); - client.process_input(Datagram::new(addr(), addr(), vec![77; 21]), now()); + client.process_input(&datagram(vec![77; 21]), now()); assert_draining(&client, &Error::StatelessReset); } diff --git a/neqo-transport/src/connection/tests/datagram.rs b/neqo-transport/src/connection/tests/datagram.rs index f81f52ee98..ade8c753be 100644 --- a/neqo-transport/src/connection/tests/datagram.rs +++ b/neqo-transport/src/connection/tests/datagram.rs @@ -4,20 +4,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, rc::Rc}; + +use neqo_common::event::Provider; +use test_fixture::now; + use super::{ assert_error, connect_force_idle, default_client, default_server, new_client, new_server, AT_LEAST_PTO, }; -use crate::events::{ConnectionEvent, OutgoingDatagramOutcome}; -use crate::frame::FRAME_TYPE_DATAGRAM; -use crate::packet::PacketBuilder; -use crate::quic_datagrams::MAX_QUIC_DATAGRAM; -use crate::{Connection, ConnectionError, ConnectionParameters, Error}; -use neqo_common::event::Provider; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::rc::Rc; -use test_fixture::now; +use crate::{ + events::{ConnectionEvent, OutgoingDatagramOutcome}, + frame::FRAME_TYPE_DATAGRAM, + packet::PacketBuilder, + quic_datagrams::MAX_QUIC_DATAGRAM, + send_stream::{RetransmissionPriority, TransmissionPriority}, + Connection, ConnectionError, ConnectionParameters, Error, StreamType, +}; const DATAGRAM_LEN_MTU: u64 = 1310; const DATA_MTU: &[u8] = &[1; 1310]; @@ -79,7 +82,7 @@ fn datagram_enabled_on_client() { let out = server.process_output(now()).dgram().unwrap(); assert_eq!(server.stats().frame_tx.datagram, dgram_sent + 1); - client.process_input(out, now()); + client.process_input(&out, now()); assert!(matches!( client.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU @@ -107,7 +110,7 @@ fn datagram_enabled_on_server() { let out = client.process_output(now()).dgram().unwrap(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); - server.process_input(out, now()); + server.process_input(&out, now()); assert!(matches!( server.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU @@ -204,7 +207,7 @@ fn datagram_acked() { assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); let dgram_received = server.stats().frame_rx.datagram; - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); assert_eq!(server.stats().frame_rx.datagram, 
dgram_received + 1); let now = now() + AT_LEAST_PTO; // Ack should be sent @@ -217,13 +220,96 @@ fn datagram_acked() { ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU )); - client.process_input(out.unwrap(), now); + client.process_input(&out.unwrap(), now); assert!(matches!( client.next_event().unwrap(), ConnectionEvent::OutgoingDatagramOutcome { id, outcome } if id == 1 && outcome == OutgoingDatagramOutcome::Acked )); } +fn send_packet_and_get_server_event( + client: &mut Connection, + server: &mut Connection, +) -> ConnectionEvent { + let out = client.process_output(now()).dgram(); + server.process_input(&out.unwrap(), now()); + let mut events: Vec<_> = server + .events() + .filter_map(|evt| match evt { + ConnectionEvent::RecvStreamReadable { .. } | ConnectionEvent::Datagram { .. } => { + Some(evt) + } + _ => None, + }) + .collect(); + // We should only get one event - either RecvStreamReadable or Datagram. + assert_eq!(events.len(), 1); + events.remove(0) +} + +/// Write a datagram that is big enough to fill a packet, but then see that +/// normal priority stream data is sent first. +#[test] +fn datagram_after_stream_data() { + let (mut client, mut server) = connect_datagram(); + + // Write a datagram first. + let dgram_sent = client.stats().frame_tx.datagram; + assert_eq!(client.send_datagram(DATA_MTU, Some(1)), Ok(())); + + // Create a stream with normal priority and send some data. 
+ let stream_id = client.stream_create(StreamType::BiDi).unwrap(); + client.stream_send(stream_id, &[6; 1200]).unwrap(); + + assert!( + matches!(send_packet_and_get_server_event(&mut client, &mut server), ConnectionEvent::RecvStreamReadable { stream_id: s } if s == stream_id) + ); + assert_eq!(client.stats().frame_tx.datagram, dgram_sent); + + if let ConnectionEvent::Datagram(data) = + &send_packet_and_get_server_event(&mut client, &mut server) + { + assert_eq!(data, DATA_MTU); + } else { + panic!(); + } + assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); +} + +#[test] +fn datagram_before_stream_data() { + let (mut client, mut server) = connect_datagram(); + + // Create a stream with low priority and send some data before datagram. + let stream_id = client.stream_create(StreamType::BiDi).unwrap(); + client + .stream_priority( + stream_id, + TransmissionPriority::Low, + RetransmissionPriority::default(), + ) + .unwrap(); + client.stream_send(stream_id, &[6; 1200]).unwrap(); + + // Write a datagram. + let dgram_sent = client.stats().frame_tx.datagram; + assert_eq!(client.send_datagram(DATA_MTU, Some(1)), Ok(())); + + if let ConnectionEvent::Datagram(data) = + &send_packet_and_get_server_event(&mut client, &mut server) + { + assert_eq!(data, DATA_MTU); + } else { + panic!(); + } + assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); + + assert!( + matches!(send_packet_and_get_server_event(&mut client, &mut server), ConnectionEvent::RecvStreamReadable { stream_id: s } if s == stream_id) + ); + assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); +} + #[test] fn datagram_lost() { let (mut client, _) = connect_datagram(); @@ -239,7 +325,7 @@ fn datagram_lost() { let pings_sent = client.stats().frame_tx.ping; let dgram_lost = client.stats().datagram_tx.lost; let out = client.process_output(now).dgram(); - assert!(out.is_some()); //PING probing + assert!(out.is_some()); // PING probing // Datagram is not sent again. 
assert_eq!(client.stats().frame_tx.ping, pings_sent + 1); assert_eq!(client.stats().frame_tx.datagram, dgram_sent2); @@ -274,7 +360,7 @@ fn dgram_no_allowed() { let out = server.process_output(now()).dgram().unwrap(); server.test_frame_writer = None; - client.process_input(out, now()); + client.process_input(&out, now()); assert_error( &client, @@ -295,7 +381,7 @@ fn dgram_too_big() { let out = server.process_output(now()).dgram().unwrap(); server.test_frame_writer = None; - client.process_input(out, now()); + client.process_input(&out, now()); assert_error( &client, @@ -330,7 +416,7 @@ fn outgoing_datagram_queue_full() { // Send DATA_SMALLER_THAN_MTU_2 datagram let out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); assert!(matches!( server.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_SMALLER_THAN_MTU_2 @@ -340,7 +426,7 @@ fn outgoing_datagram_queue_full() { let dgram_sent2 = client.stats().frame_tx.datagram; let out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent2 + 1); - server.process_input(out.unwrap(), now()); + server.process_input(&out.unwrap(), now()); assert!(matches!( server.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == DATA_MTU @@ -354,7 +440,7 @@ fn send_datagram(sender: &mut Connection, receiver: &mut Connection, data: &[u8] assert_eq!(sender.stats().frame_tx.datagram, dgram_sent + 1); let dgram_received = receiver.stats().frame_rx.datagram; - receiver.process_input(out, now()); + receiver.process_input(&out, now()); assert_eq!(receiver.stats().frame_rx.datagram, dgram_received + 1); } @@ -468,7 +554,7 @@ fn multiple_quic_datagrams_in_one_packet() { let out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 2); - server.process_input(out.unwrap(), now()); + 
server.process_input(&out.unwrap(), now()); let datagram = |e: &_| matches!(e, ConnectionEvent::Datagram(..)); assert_eq!(server.events().filter(datagram).count(), 2); } diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 76e0c0a898..f2103523ec 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -4,34 +4,40 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::super::{Connection, Output, State}; +use std::{ + cell::RefCell, + mem, + net::{IpAddr, Ipv6Addr, SocketAddr}, + rc::Rc, + time::Duration, +}; + +use neqo_common::{event::Provider, qdebug, Datagram}; +use neqo_crypto::{ + constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, +}; +#[cfg(not(feature = "disable-encryption"))] +use test_fixture::datagram; +use test_fixture::{ + assertions, assertions::assert_coalesced_0rtt, fixture_init, now, split_datagram, DEFAULT_ADDR, +}; + use super::{ + super::{Connection, Output, State}, assert_error, connect, connect_force_idle, connect_with_rtt, default_client, default_server, get_tokens, handshake, maybe_authenticate, resumed_server, send_something, CountingConnectionIdGenerator, AT_LEAST_PTO, DEFAULT_RTT, DEFAULT_STREAM_DATA, }; -use crate::connection::AddressValidation; -use crate::events::ConnectionEvent; -use crate::path::PATH_MTU_V6; -use crate::server::ValidateAddress; -use crate::tparams::{TransportParameter, MIN_ACK_DELAY}; -use crate::tracking::DEFAULT_ACK_DELAY; use crate::{ + connection::AddressValidation, + events::ConnectionEvent, + path::PATH_MTU_V6, + server::ValidateAddress, + tparams::{TransportParameter, MIN_ACK_DELAY}, + tracking::DEFAULT_ACK_DELAY, ConnectionError, ConnectionParameters, EmptyConnectionIdGenerator, Error, StreamType, Version, }; -use neqo_common::{event::Provider, qdebug, Datagram}; -use neqo_crypto::{ - 
constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, -}; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::mem; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::rc::Rc; -use std::time::Duration; -use test_fixture::{self, addr, assertions, fixture_init, now, split_datagram}; - const ECH_CONFIG_ID: u8 = 7; const ECH_PUBLIC_NAME: &str = "public.example"; @@ -45,31 +51,31 @@ fn full_handshake() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_eq!(out.as_dgram_ref().unwrap().len(), PATH_MTU_V6); qdebug!("---- client: cert verification"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); assert!(maybe_authenticate(&mut client)); qdebug!("---- client: SH..FIN -> FIN"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_eq!(*client.state(), State::Connected); qdebug!("---- server: FIN -> ACKS"); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_eq!(*server.state(), State::Confirmed); qdebug!("---- client: ACKS -> 0"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); assert_eq!(*client.state(), State::Confirmed); } @@ -83,14 +89,14 @@ fn handshake_failed_authentication() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = 
server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); qdebug!("---- client: cert verification"); - let out = client.process(out.dgram(), now()); + let out = client.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); let authentication_needed = |e| matches!(e, ConnectionEvent::AuthenticationNeeded); @@ -103,7 +109,7 @@ fn handshake_failed_authentication() { assert!(out.as_dgram_ref().is_some()); qdebug!("---- server: Alert(certificate_revoked)"); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); assert_error(&client, &ConnectionError::Transport(Error::CryptoAlert(44))); assert_error(&server, &ConnectionError::Transport(Error::PeerError(300))); @@ -116,8 +122,8 @@ fn no_alpn() { "example.com", &["bad-alpn"], Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, ConnectionParameters::default(), now(), ) @@ -127,7 +133,7 @@ fn no_alpn() { handshake(&mut client, &mut server, now(), Duration::new(0, 0)); // TODO (mt): errors are immediate, which means that we never send CONNECTION_CLOSE // and the client never sees the server's rejection of its handshake. 
- //assert_error(&client, ConnectionError::Transport(Error::CryptoAlert(120))); + // assert_error(&client, ConnectionError::Transport(Error::CryptoAlert(120))); assert_error( &server, &ConnectionError::Transport(Error::CryptoAlert(120)), @@ -145,16 +151,16 @@ fn dup_server_flight1() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let out_to_rep = server.process(out.dgram(), now()); + let out_to_rep = server.process(out.as_dgram_ref(), now()); assert!(out_to_rep.as_dgram_ref().is_some()); qdebug!("Output={:0x?}", out_to_rep.as_dgram_ref()); qdebug!("---- client: cert verification"); - let out = client.process(Some(out_to_rep.as_dgram_ref().unwrap().clone()), now()); + let out = client.process(Some(out_to_rep.as_dgram_ref().unwrap()), now()); assert!(out.as_dgram_ref().is_some()); qdebug!("Output={:0x?}", out.as_dgram_ref()); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); assert!(maybe_authenticate(&mut client)); @@ -169,7 +175,7 @@ fn dup_server_flight1() { assert_eq!(1, client.stats().dropped_rx); qdebug!("---- Dup, ignored"); - let out = client.process(out_to_rep.dgram(), now()); + let out = client.process(out_to_rep.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_none()); qdebug!("Output={:0x?}", out.as_dgram_ref()); @@ -199,12 +205,12 @@ fn crypto_frame_split() { // The entire server flight doesn't fit in a single packet because the // certificate is large, therefore the server will produce 2 packets. - let server1 = server.process(client1.dgram(), now()); + let server1 = server.process(client1.as_dgram_ref(), now()); assert!(server1.as_dgram_ref().is_some()); let server2 = server.process(None, now()); assert!(server2.as_dgram_ref().is_some()); - let client2 = client.process(server1.dgram(), now()); + let client2 = client.process(server1.as_dgram_ref(), now()); // This is an ack. 
assert!(client2.as_dgram_ref().is_some()); // The client might have the certificate now, so we can't guarantee that @@ -213,11 +219,11 @@ fn crypto_frame_split() { assert_eq!(*client.state(), State::Handshaking); // let server process the ack for the first packet. - let server3 = server.process(client2.dgram(), now()); + let server3 = server.process(client2.as_dgram_ref(), now()); assert!(server3.as_dgram_ref().is_none()); // Consume the second packet from the server. - let client3 = client.process(server2.dgram(), now()); + let client3 = client.process(server2.as_dgram_ref(), now()); // Check authentication. let auth2 = maybe_authenticate(&mut client); @@ -225,13 +231,13 @@ fn crypto_frame_split() { // Now client has all data to finish handshake. assert_eq!(*client.state(), State::Connected); - let client4 = client.process(server3.dgram(), now()); + let client4 = client.process(server3.as_dgram_ref(), now()); // One of these will contain data depending on whether Authentication was completed // after the first or second server packet. 
assert!(client3.as_dgram_ref().is_some() ^ client4.as_dgram_ref().is_some()); - mem::drop(server.process(client3.dgram(), now())); - mem::drop(server.process(client4.dgram(), now())); + mem::drop(server.process(client3.as_dgram_ref(), now())); + mem::drop(server.process(client4.as_dgram_ref(), now())); assert_eq!(*client.state(), State::Connected); assert_eq!(*server.state(), State::Confirmed); @@ -245,8 +251,8 @@ fn chacha20poly1305() { test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, ConnectionParameters::default(), now(), ) @@ -263,19 +269,19 @@ fn send_05rtt() { let c1 = client.process(None, now()).dgram(); assert!(c1.is_some()); - let s1 = server.process(c1, now()).dgram().unwrap(); + let s1 = server.process(c1.as_ref(), now()).dgram().unwrap(); assert_eq!(s1.len(), PATH_MTU_V6); // The server should accept writes at this point. let s2 = send_something(&mut server, now()); // Complete the handshake at the client. - client.process_input(s1, now()); + client.process_input(&s1, now()); maybe_authenticate(&mut client); assert_eq!(*client.state(), State::Connected); // The client should receive the 0.5-RTT data now. - client.process_input(s2, now()); + client.process_input(&s2, now()); let mut buf = vec![0; DEFAULT_STREAM_DATA.len() + 1]; let stream_id = client .events() @@ -300,19 +306,19 @@ fn reorder_05rtt() { let c1 = client.process(None, now()).dgram(); assert!(c1.is_some()); - let s1 = server.process(c1, now()).dgram().unwrap(); + let s1 = server.process(c1.as_ref(), now()).dgram().unwrap(); // The server should accept writes at this point. let s2 = send_something(&mut server, now()); // We can't use the standard facility to complete the handshake, so // drive it as aggressively as possible. 
- client.process_input(s2, now()); + client.process_input(&s2, now()); assert_eq!(client.stats().saved_datagrams, 1); // After processing the first packet, the client should go back and // process the 0.5-RTT packet data, which should make data available. - client.process_input(s1, now()); + client.process_input(&s1, now()); // We can't use `maybe_authenticate` here as that consumes events. client.authenticated(AuthenticationStatus::Ok, now()); assert_eq!(*client.state(), State::Connected); @@ -341,7 +347,7 @@ fn reorder_05rtt_with_0rtt() { let mut server = default_server(); let validation = AddressValidation::new(now(), ValidateAddress::NoToken).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); let mut now = connect_with_rtt(&mut client, &mut server, now(), RTT); // Include RTT in sending the ticket or the ticket age reported by the @@ -350,7 +356,7 @@ fn reorder_05rtt_with_0rtt() { server.send_ticket(now, &[]).unwrap(); let ticket = server.process_output(now).dgram().unwrap(); now += RTT / 2; - client.process_input(ticket, now); + client.process_input(&ticket, now); let token = get_tokens(&mut client).pop().unwrap(); let mut client = default_client(); @@ -367,35 +373,35 @@ fn reorder_05rtt_with_0rtt() { // Handle the first packet and send 0.5-RTT in response. Drop the response. now += RTT / 2; - mem::drop(server.process(Some(c1), now).dgram().unwrap()); + mem::drop(server.process(Some(&c1), now).dgram().unwrap()); // The gap in 0-RTT will result in this 0.5 RTT containing an ACK. - server.process_input(c2, now); + server.process_input(&c2, now); let s2 = send_something(&mut server, now); // Save the 0.5 RTT. now += RTT / 2; - client.process_input(s2, now); + client.process_input(&s2, now); assert_eq!(client.stats().saved_datagrams, 1); // Now PTO at the client and cause the server to re-send handshake packets. 
now += AT_LEAST_PTO; let c3 = client.process(None, now).dgram(); + assert_coalesced_0rtt(c3.as_ref().unwrap()); now += RTT / 2; - let s3 = server.process(c3, now).dgram().unwrap(); - assertions::assert_no_1rtt(&s3[..]); + let s3 = server.process(c3.as_ref(), now).dgram().unwrap(); // The client should be able to process the 0.5 RTT now. // This should contain an ACK, so we are processing an ACK from the past. now += RTT / 2; - client.process_input(s3, now); + client.process_input(&s3, now); maybe_authenticate(&mut client); let c4 = client.process(None, now).dgram(); assert_eq!(*client.state(), State::Connected); assert_eq!(client.paths.rtt(), RTT); now += RTT / 2; - server.process_input(c4.unwrap(), now); + server.process_input(&c4.unwrap(), now); assert_eq!(*server.state(), State::Confirmed); // Don't check server RTT as it will be massively inflated by a // poor initial estimate received when the server dropped the @@ -416,7 +422,7 @@ fn coalesce_05rtt() { let c1 = client.process(None, now).dgram(); assert!(c1.is_some()); now += RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); assert!(s1.is_some()); // Drop the server flight. Then send some data. @@ -431,7 +437,7 @@ fn coalesce_05rtt() { let c2 = client.process(None, now).dgram(); assert!(c2.is_some()); now += RTT / 2; - let s2 = server.process(c2, now).dgram(); + let s2 = server.process(c2.as_ref(), now).dgram(); // Even though there is a 1-RTT packet at the end of the datagram, the // flight should be padded to full size. assert_eq!(s2.as_ref().unwrap().len(), PATH_MTU_V6); @@ -440,7 +446,7 @@ fn coalesce_05rtt() { // packet until authentication completes though. So it saves it. now += RTT / 2; assert_eq!(client.stats().dropped_rx, 0); - mem::drop(client.process(s2, now).dgram()); + mem::drop(client.process(s2.as_ref(), now).dgram()); // This packet will contain an ACK, but we can ignore it. 
assert_eq!(client.stats().dropped_rx, 0); assert_eq!(client.stats().packets_rx, 3); @@ -453,15 +459,15 @@ fn coalesce_05rtt() { assert_eq!(client.stats().dropped_rx, 0); // No Initial padding. assert_eq!(client.stats().packets_rx, 4); assert_eq!(client.stats().saved_datagrams, 1); - assert_eq!(client.stats().frame_rx.padding, 1); // Padding uses frames. + assert!(client.stats().frame_rx.padding > 0); // Padding uses frames. // Allow the handshake to complete. now += RTT / 2; - let s3 = server.process(c3, now).dgram(); + let s3 = server.process(c3.as_ref(), now).dgram(); assert!(s3.is_some()); assert_eq!(*server.state(), State::Confirmed); now += RTT / 2; - mem::drop(client.process(s3, now).dgram()); + mem::drop(client.process(s3.as_ref(), now).dgram()); assert_eq!(*client.state(), State::Confirmed); assert_eq!(client.stats().dropped_rx, 0); // No dropped packets. @@ -478,7 +484,7 @@ fn reorder_handshake() { assert!(c1.is_some()); now += RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); assert!(s1.is_some()); // Drop the Initial packet from this. @@ -488,7 +494,7 @@ fn reorder_handshake() { // Pass just the handshake packet in and the client can't handle it yet. // It can only send another Initial packet. now += RTT / 2; - let dgram = client.process(s_hs, now).dgram(); + let dgram = client.process(s_hs.as_ref(), now).dgram(); assertions::assert_initial(dgram.as_ref().unwrap(), false); assert_eq!(client.stats().saved_datagrams, 1); assert_eq!(client.stats().packets_rx, 1); @@ -499,7 +505,7 @@ fn reorder_handshake() { now += AT_LEAST_PTO; let c2 = client.process(None, now).dgram(); now += RTT / 2; - let s2 = server.process(c2, now).dgram(); + let s2 = server.process(c2.as_ref(), now).dgram(); assert!(s2.is_some()); let (s_init, s_hs) = split_datagram(&s2.unwrap()); @@ -507,11 +513,11 @@ fn reorder_handshake() { // Processing the Handshake packet first should save it. 
now += RTT / 2; - client.process_input(s_hs.unwrap(), now); + client.process_input(&s_hs.unwrap(), now); assert_eq!(client.stats().saved_datagrams, 2); assert_eq!(client.stats().packets_rx, 2); - client.process_input(s_init, now); + client.process_input(&s_init, now); // Each saved packet should now be "received" again. assert_eq!(client.stats().packets_rx, 7); maybe_authenticate(&mut client); @@ -521,14 +527,14 @@ fn reorder_handshake() { // Note that though packets were saved and processed very late, // they don't cause the RTT to change. now += RTT / 2; - let s3 = server.process(c3, now).dgram(); + let s3 = server.process(c3.as_ref(), now).dgram(); assert_eq!(*server.state(), State::Confirmed); // Don't check server RTT estimate as it will be inflated due to // it making a guess based on retransmissions when it dropped // the Initial packet number space. now += RTT / 2; - client.process_input(s3.unwrap(), now); + client.process_input(&s3.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(client.paths.rtt(), RTT); } @@ -545,11 +551,11 @@ fn reorder_1rtt() { assert!(c1.is_some()); now += RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); assert!(s1.is_some()); now += RTT / 2; - client.process_input(s1.unwrap(), now); + client.process_input(&s1.unwrap(), now); maybe_authenticate(&mut client); let c2 = client.process(None, now).dgram(); assert!(c2.is_some()); @@ -558,7 +564,7 @@ fn reorder_1rtt() { // Give them to the server before giving it `c2`. for _ in 0..PACKETS { let d = send_something(&mut client, now); - server.process_input(d, now + RTT / 2); + server.process_input(&d, now + RTT / 2); } // The server has now received those packets, and saved them. // The two extra received are Initial + the junk we use for padding. 
@@ -567,7 +573,7 @@ fn reorder_1rtt() { assert_eq!(server.stats().dropped_rx, 1); now += RTT / 2; - let s2 = server.process(c2, now).dgram(); + let s2 = server.process(c2.as_ref(), now).dgram(); // The server has now received those packets, and saved them. // The two additional are a Handshake and a 1-RTT (w/ NEW_CONNECTION_ID). assert_eq!(server.stats().packets_rx, PACKETS * 2 + 4); @@ -577,7 +583,7 @@ fn reorder_1rtt() { assert_eq!(server.paths.rtt(), RTT); now += RTT / 2; - client.process_input(s2.unwrap(), now); + client.process_input(&s2.unwrap(), now); assert_eq!(client.paths.rtt(), RTT); // All the stream data that was sent should now be available. @@ -600,7 +606,7 @@ fn reorder_1rtt() { } } -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn corrupted_initial() { let mut client = default_client(); @@ -615,8 +621,8 @@ fn corrupted_initial() { .find(|(_, &v)| v != 0) .unwrap(); corrupted[idx] ^= 0x76; - let dgram = Datagram::new(d.source(), d.destination(), corrupted); - server.process_input(dgram, now()); + let dgram = Datagram::new(d.source(), d.destination(), d.tos(), d.ttl(), corrupted); + server.process_input(&dgram, now()); // The server should have received two packets, // the first should be dropped, the second saved. assert_eq!(server.stats().packets_rx, 2); @@ -654,7 +660,7 @@ fn extra_initial_hs() { let c_init = client.process(None, now).dgram(); assert!(c_init.is_some()); now += DEFAULT_RTT / 2; - let s_init = server.process(c_init, now).dgram(); + let s_init = server.process(c_init.as_ref(), now).dgram(); assert!(s_init.is_some()); now += DEFAULT_RTT / 2; @@ -666,13 +672,13 @@ fn extra_initial_hs() { // Do that EXTRA_INITIALS times and each time the client will emit // another Initial packet. 
for _ in 0..=super::super::EXTRA_INITIALS { - let c_init = client.process(undecryptable.clone(), now).dgram(); + let c_init = client.process(undecryptable.as_ref(), now).dgram(); assertions::assert_initial(c_init.as_ref().unwrap(), false); now += DEFAULT_RTT / 10; } // After EXTRA_INITIALS, the client stops sending Initial packets. - let nothing = client.process(undecryptable, now).dgram(); + let nothing = client.process(undecryptable.as_ref(), now).dgram(); assert!(nothing.is_none()); // Until PTO, where another Initial can be used to complete the handshake. @@ -680,14 +686,14 @@ fn extra_initial_hs() { let c_init = client.process(None, now).dgram(); assertions::assert_initial(c_init.as_ref().unwrap(), false); now += DEFAULT_RTT / 2; - let s_init = server.process(c_init, now).dgram(); + let s_init = server.process(c_init.as_ref(), now).dgram(); now += DEFAULT_RTT / 2; - client.process_input(s_init.unwrap(), now); + client.process_input(&s_init.unwrap(), now); maybe_authenticate(&mut client); let c_fin = client.process_output(now).dgram(); assert_eq!(*client.state(), State::Connected); now += DEFAULT_RTT / 2; - server.process_input(c_fin.unwrap(), now); + server.process_input(&c_fin.unwrap(), now); assert_eq!(*server.state(), State::Confirmed); } @@ -700,7 +706,7 @@ fn extra_initial_invalid_cid() { let c_init = client.process(None, now).dgram(); assert!(c_init.is_some()); now += DEFAULT_RTT / 2; - let s_init = server.process(c_init, now).dgram(); + let s_init = server.process(c_init.as_ref(), now).dgram(); assert!(s_init.is_some()); now += DEFAULT_RTT / 2; @@ -711,8 +717,8 @@ fn extra_initial_invalid_cid() { let mut copy = hs.to_vec(); assert_ne!(copy[5], 0); // The DCID should be non-zero length. 
copy[6] ^= 0xc4; - let dgram_copy = Datagram::new(hs.destination(), hs.source(), copy); - let nothing = client.process(Some(dgram_copy), now).dgram(); + let dgram_copy = Datagram::new(hs.destination(), hs.source(), hs.tos(), hs.ttl(), copy); + let nothing = client.process(Some(&dgram_copy), now).dgram(); assert!(nothing.is_none()); } @@ -724,8 +730,8 @@ fn connect_one_version() { test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, ConnectionParameters::default().versions(version, vec![version]), now(), ) @@ -761,7 +767,7 @@ fn anti_amplification() { let c_init = client.process_output(now).dgram(); now += DEFAULT_RTT / 2; - let s_init1 = server.process(c_init, now).dgram().unwrap(); + let s_init1 = server.process(c_init.as_ref(), now).dgram().unwrap(); assert_eq!(s_init1.len(), PATH_MTU_V6); let s_init2 = server.process_output(now).dgram().unwrap(); assert_eq!(s_init2.len(), PATH_MTU_V6); @@ -777,11 +783,11 @@ fn anti_amplification() { assert_ne!(cb, Duration::new(0, 0)); now += DEFAULT_RTT / 2; - client.process_input(s_init1, now); - client.process_input(s_init2, now); + client.process_input(&s_init1, now); + client.process_input(&s_init2, now); let ack_count = client.stats().frame_tx.ack; let frame_count = client.stats().frame_tx.all; - let ack = client.process(Some(s_init3), now).dgram().unwrap(); + let ack = client.process(Some(&s_init3), now).dgram().unwrap(); assert!(!maybe_authenticate(&mut client)); // No need yet. // The client sends a padded datagram, with just ACK for Handshake. @@ -790,20 +796,20 @@ fn anti_amplification() { assert_ne!(ack.len(), PATH_MTU_V6); // Not padded (it includes Handshake). 
now += DEFAULT_RTT / 2; - let remainder = server.process(Some(ack), now).dgram(); + let remainder = server.process(Some(&ack), now).dgram(); now += DEFAULT_RTT / 2; - client.process_input(remainder.unwrap(), now); + client.process_input(&remainder.unwrap(), now); assert!(maybe_authenticate(&mut client)); // OK, we have all of it. let fin = client.process_output(now).dgram(); assert_eq!(*client.state(), State::Connected); now += DEFAULT_RTT / 2; - server.process_input(fin.unwrap(), now); + server.process_input(&fin.unwrap(), now); assert_eq!(*server.state(), State::Confirmed); } -#[cfg(not(feature = "fuzzing"))] +#[cfg(not(feature = "disable-encryption"))] #[test] fn garbage_initial() { let mut client = default_client(); @@ -814,8 +820,8 @@ fn garbage_initial() { let mut corrupted = Vec::from(&initial[..initial.len() - 1]); corrupted.push(initial[initial.len() - 1] ^ 0xb7); corrupted.extend_from_slice(rest.as_ref().map_or(&[], |r| &r[..])); - let garbage = Datagram::new(addr(), addr(), corrupted); - assert_eq!(Output::None, server.process(Some(garbage), now())); + let garbage = datagram(corrupted); + assert_eq!(Output::None, server.process(Some(&garbage), now())); } #[test] @@ -825,17 +831,19 @@ fn drop_initial_packet_from_wrong_address() { assert!(out.as_dgram_ref().is_some()); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); let p = out.dgram().unwrap(); let dgram = Datagram::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2)), 443), p.destination(), + p.tos(), + p.ttl(), &p[..], ); - let out = client.process(Some(dgram), now()); + let out = client.process(Some(&dgram), now()); assert!(out.as_dgram_ref().is_none()); } @@ -846,22 +854,24 @@ fn drop_handshake_packet_from_wrong_address() { assert!(out.as_dgram_ref().is_some()); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let 
out = server.process(out.as_dgram_ref(), now()); assert!(out.as_dgram_ref().is_some()); let (s_in, s_hs) = split_datagram(&out.dgram().unwrap()); // Pass the initial packet. - mem::drop(client.process(Some(s_in), now()).dgram()); + mem::drop(client.process(Some(&s_in), now()).dgram()); let p = s_hs.unwrap(); let dgram = Datagram::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2)), 443), p.destination(), + p.tos(), + p.ttl(), &p[..], ); - let out = client.process(Some(dgram), now()); + let out = client.process(Some(&dgram), now()); assert!(out.as_dgram_ref().is_none()); } @@ -910,8 +920,8 @@ fn ech_retry() { .unwrap(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); let auth_event = ConnectionEvent::EchFallbackAuthenticationNeeded { public_name: String::from(ECH_PUBLIC_NAME), }; @@ -921,18 +931,19 @@ fn ech_retry() { // Tell the server about the error. 
let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server.state().error(), Some(&ConnectionError::Transport(Error::PeerError(0x100 + 121))) ); - let Some(ConnectionError::Transport(Error::EchRetry(updated_config))) = client.state().error() else { - panic!( - "Client state should be failed with EchRetry, is {:?}", - client.state() - ); - }; + let Some(ConnectionError::Transport(Error::EchRetry(updated_config))) = client.state().error() + else { + panic!( + "Client state should be failed with EchRetry, is {:?}", + client.state() + ); + }; let mut server = default_server(); server @@ -964,8 +975,8 @@ fn ech_retry_fallback_rejected() { .unwrap(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); let auth_event = ConnectionEvent::EchFallbackAuthenticationNeeded { public_name: String::from(ECH_PUBLIC_NAME), }; @@ -979,7 +990,7 @@ fn ech_retry_fallback_rejected() { // Pass the error on. 
let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server.state().error(), Some(&ConnectionError::Transport(Error::PeerError(298))) @@ -998,13 +1009,13 @@ fn bad_min_ack_delay() { let mut client = default_client(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); client.authenticated(AuthenticationStatus::Ok, now()); assert_eq!(client.state().error(), Some(&EXPECTED_ERROR)); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server.state().error(), Some(&ConnectionError::Transport(Error::PeerError( @@ -1024,7 +1035,7 @@ fn only_server_initial() { let client_dgram = client.process_output(now).dgram(); // Now fetch two flights of messages from the server. - let server_dgram1 = server.process(client_dgram, now).dgram(); + let server_dgram1 = server.process(client_dgram.as_ref(), now).dgram(); let server_dgram2 = server.process_output(now + AT_LEAST_PTO).dgram(); // Only pass on the Initial from the first. We should get a Handshake in return. @@ -1034,7 +1045,7 @@ fn only_server_initial() { // The client will not acknowledge the Initial as it discards keys. // It sends a Handshake probe instead, containing just a PING frame. 
assert_eq!(client.stats().frame_tx.ping, 0); - let probe = client.process(Some(initial), now).dgram(); + let probe = client.process(Some(&initial), now).dgram(); assertions::assert_handshake(&probe.unwrap()); assert_eq!(client.stats().dropped_rx, 0); assert_eq!(client.stats().frame_tx.ping, 1); @@ -1046,17 +1057,17 @@ fn only_server_initial() { now += AT_LEAST_PTO; assert_eq!(client.stats().frame_tx.ping, 1); let discarded = client.stats().dropped_rx; - let probe = client.process(Some(initial), now).dgram(); + let probe = client.process(Some(&initial), now).dgram(); assertions::assert_handshake(&probe.unwrap()); assert_eq!(client.stats().frame_tx.ping, 2); assert_eq!(client.stats().dropped_rx, discarded + 1); // Pass the Handshake packet and complete the handshake. - client.process_input(handshake.unwrap(), now); + client.process_input(&handshake.unwrap(), now); maybe_authenticate(&mut client); let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); @@ -1082,25 +1093,25 @@ fn no_extra_probes_after_confirmed() { // Finally, run the handshake. now += AT_LEAST_PTO * 2; let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); // The server should have dropped the Initial keys now, so passing in the Initial // should elicit a retransmit rather than having it completely ignored. 
- let spare_handshake = server.process(Some(replay_initial), now).dgram(); + let spare_handshake = server.process(Some(&replay_initial), now).dgram(); assert!(spare_handshake.is_some()); - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); - let probe = server.process(spare_initial, now).dgram(); + let probe = server.process(spare_initial.as_ref(), now).dgram(); assert!(probe.is_none()); - let probe = client.process(spare_handshake, now).dgram(); + let probe = client.process(spare_handshake.as_ref(), now).dgram(); assert!(probe.is_none()); } @@ -1113,14 +1124,65 @@ fn implicit_rtt_server() { let dgram = client.process_output(now).dgram(); now += RTT / 2; - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); now += RTT / 2; - let dgram = client.process(dgram, now).dgram(); + let dgram = client.process(dgram.as_ref(), now).dgram(); assertions::assert_handshake(dgram.as_ref().unwrap()); now += RTT / 2; - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); // The server doesn't receive any acknowledgments, but it can infer // an RTT estimate from having discarded the Initial packet number space. 
assert_eq!(server.stats().rtt, RTT); } + +#[test] +fn emit_authentication_needed_once() { + let mut client = default_client(); + + let mut server = Connection::new_server( + test_fixture::LONG_CERT_KEYS, + test_fixture::DEFAULT_ALPN, + Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), + ConnectionParameters::default(), + ) + .expect("create a server"); + + let client1 = client.process(None, now()); + assert!(client1.as_dgram_ref().is_some()); + + // The entire server flight doesn't fit in a single packet because the + // certificate is large, therefore the server will produce 2 packets. + let server1 = server.process(client1.as_dgram_ref(), now()); + assert!(server1.as_dgram_ref().is_some()); + let server2 = server.process(None, now()); + assert!(server2.as_dgram_ref().is_some()); + + let authentication_needed_count = |client: &mut Connection| { + client + .events() + .filter(|e| matches!(e, ConnectionEvent::AuthenticationNeeded)) + .count() + }; + + // Upon receiving the first packet, the client has the server certificate, + // but not yet all required handshake data. It moves to + // `HandshakeState::AuthenticationPending` and emits a + // `ConnectionEvent::AuthenticationNeeded` event. + // + // Note that this is a tiny bit fragile in that it depends on having a certificate + // that is within a fairly narrow range of sizes. It has to fit in a single + // packet, but be large enough that the CertificateVerify message does not + // also fit in the same packet. Our default test setup achieves this, but + // changes to the setup might invalidate this test. + let _ = client.process(server1.as_dgram_ref(), now()); + assert_eq!(1, authentication_needed_count(&mut client)); + assert!(client.peer_certificate().is_some()); + + // The `AuthenticationNeeded` event is still pending a call to + // `Connection::authenticated`. On receiving the second packet from the + // server, the client must not emit a another + // `ConnectionEvent::AuthenticationNeeded`. 
+ let _ = client.process(server2.as_dgram_ref(), now()); + assert_eq!(0, authentication_needed_count(&mut client)); +} diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index 947a800244..5d01131541 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -4,6 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + mem, + time::{Duration, Instant}, +}; + +use neqo_common::{qtrace, Encoder}; +use test_fixture::{now, split_datagram}; + use super::{ super::{Connection, ConnectionParameters, IdleTimeout, Output, State}, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, default_client, @@ -18,13 +26,6 @@ use crate::{ tracking::PacketNumberSpace, }; -use neqo_common::{qtrace, Encoder}; -use std::{ - mem, - time::{Duration, Instant}, -}; -use test_fixture::{self, now, split_datagram}; - fn default_timeout() -> Duration { ConnectionParameters::default().get_idle_timeout() } @@ -107,15 +108,18 @@ fn asymmetric_idle_timeout() { connect(&mut client, &mut server); let c1 = send_something(&mut client, now()); let c2 = send_something(&mut client, now()); - server.process_input(c2, now()); - server.process_input(c1, now()); + server.process_input(&c2, now()); + server.process_input(&c1, now()); let s1 = send_something(&mut server, now()); let s2 = send_something(&mut server, now()); - client.process_input(s2, now()); - let ack = client.process(Some(s1), now()).dgram(); + client.process_input(&s2, now()); + let ack = client.process(Some(&s1), now()).dgram(); assert!(ack.is_some()); // Now both should have received ACK frames so should be idle. 
- assert_eq!(server.process(ack, now()), Output::Callback(LOWER_TIMEOUT)); + assert_eq!( + server.process(ack.as_ref(), now()), + Output::Callback(LOWER_TIMEOUT) + ); assert_eq!(client.process(None, now()), Output::Callback(LOWER_TIMEOUT)); } @@ -144,13 +148,13 @@ fn tiny_idle_timeout() { let c1 = send_something(&mut client, now); let c2 = send_something(&mut client, now); now += RTT / 2; - server.process_input(c2, now); - server.process_input(c1, now); + server.process_input(&c2, now); + server.process_input(&c1, now); let s1 = send_something(&mut server, now); let s2 = send_something(&mut server, now); now += RTT / 2; - client.process_input(s2, now); - let ack = client.process(Some(s1), now).dgram(); + client.process_input(&s2, now); + let ack = client.process(Some(&s1), now).dgram(); assert!(ack.is_some()); // The client should be idle now, but with a different timer. @@ -162,7 +166,7 @@ fn tiny_idle_timeout() { // The server should go idle after the ACK, but again with a larger timeout. now += RTT / 2; - if let Output::Callback(t) = client.process(ack, now) { + if let Output::Callback(t) = client.process(ack.as_ref(), now) { assert!(t > LOWER_TIMEOUT); } else { panic!("Client not idle"); @@ -183,7 +187,7 @@ fn idle_send_packet1() { now += Duration::from_secs(10); let dgram = send_and_receive(&mut client, &mut server, now); - assert!(dgram.is_none()); + assert!(dgram.is_some()); // the server will want to ACK, we can drop that. // Still connected after 39 seconds because idle timer reset by the // outgoing packet. 
@@ -237,11 +241,13 @@ fn idle_send_packet2() { #[test] fn idle_recv_packet() { + const FUDGE: Duration = Duration::from_millis(10); + let mut client = default_client(); let mut server = default_server(); connect_force_idle(&mut client, &mut server); - let now = now(); + let mut now = now(); let res = client.process(None, now); assert_eq!(res, Output::Callback(default_timeout())); @@ -250,23 +256,25 @@ fn idle_recv_packet() { assert_eq!(stream, 0); assert_eq!(client.stream_send(stream, b"hello").unwrap(), 5); - // Respond with another packet - let out = client.process(None, now + Duration::from_secs(10)); - server.process_input(out.dgram().unwrap(), now + Duration::from_secs(10)); + // Respond with another packet. + // Note that it is important that this not result in the RTT increasing above 0. + // Otherwise, the eventual timeout will be extended (and we're not testing that). + now += Duration::from_secs(10); + let out = client.process(None, now); + server.process_input(&out.dgram().unwrap(), now); assert_eq!(server.stream_send(stream, b"world").unwrap(), 5); - let out = server.process_output(now + Duration::from_secs(10)); + let out = server.process_output(now); assert_ne!(out.as_dgram_ref(), None); - - mem::drop(client.process(out.dgram(), now + Duration::from_secs(20))); + mem::drop(client.process(out.as_dgram_ref(), now)); assert!(matches!(client.state(), State::Confirmed)); - // Still connected after 49 seconds because idle timer reset by received - // packet - mem::drop(client.process(None, now + default_timeout() + Duration::from_secs(19))); + // Add a little less than the idle timeout and we're still connected. + now += default_timeout() - FUDGE; + mem::drop(client.process(None, now)); assert!(matches!(client.state(), State::Confirmed)); - // Not connected after 50 seconds. 
- mem::drop(client.process(None, now + default_timeout() + Duration::from_secs(20))); + now += FUDGE; + mem::drop(client.process(None, now)); assert!(matches!(client.state(), State::Closed(_))); } @@ -284,9 +292,9 @@ fn idle_caching() { // Perform the first round trip, but drop the Initial from the server. // The client then caches the Handshake packet. let dgram = client.process_output(start).dgram(); - let dgram = server.process(dgram, start).dgram(); + let dgram = server.process(dgram.as_ref(), start).dgram(); let (_, handshake) = split_datagram(&dgram.unwrap()); - client.process_input(handshake.unwrap(), start); + client.process_input(&handshake.unwrap(), start); // Perform an exchange and keep the connection alive. // Only allow a packet containing a PING to pass. @@ -299,31 +307,23 @@ fn idle_caching() { // Now let the server process the client PING. This causes the server // to send CRYPTO frames again, so manually extract and discard those. let ping_before_s = server.stats().frame_rx.ping; - server.process_input(dgram.unwrap(), middle); + server.process_input(&dgram.unwrap(), middle); assert_eq!(server.stats().frame_rx.ping, ping_before_s + 1); let mut tokens = Vec::new(); - server - .crypto - .streams - .write_frame( - PacketNumberSpace::Initial, - &mut builder, - &mut tokens, - &mut FrameStats::default(), - ) - .unwrap(); + server.crypto.streams.write_frame( + PacketNumberSpace::Initial, + &mut builder, + &mut tokens, + &mut FrameStats::default(), + ); assert_eq!(tokens.len(), 1); tokens.clear(); - server - .crypto - .streams - .write_frame( - PacketNumberSpace::Initial, - &mut builder, - &mut tokens, - &mut FrameStats::default(), - ) - .unwrap(); + server.crypto.streams.write_frame( + PacketNumberSpace::Initial, + &mut builder, + &mut tokens, + &mut FrameStats::default(), + ); assert!(tokens.is_empty()); let dgram = server.process_output(middle).dgram(); @@ -332,7 +332,7 @@ fn idle_caching() { let (initial, _) = split_datagram(&dgram.unwrap()); let 
ping_before_c = client.stats().frame_rx.ping; let ack_before = client.stats().frame_rx.ack; - client.process_input(initial, middle); + client.process_input(&initial, middle); assert_eq!(client.stats().frame_rx.ping, ping_before_c + 1); assert_eq!(client.stats().frame_rx.ack, ack_before + 1); @@ -341,11 +341,11 @@ fn idle_caching() { let dgram = server.process_output(end).dgram(); let (initial, _) = split_datagram(&dgram.unwrap()); neqo_common::qwarn!("client ingests initial, finally"); - mem::drop(client.process(Some(initial), end)); + mem::drop(client.process(Some(&initial), end)); maybe_authenticate(&mut client); let dgram = client.process_output(end).dgram(); - let dgram = server.process(dgram, end).dgram(); - client.process_input(dgram.unwrap(), end); + let dgram = server.process(dgram.as_ref(), end).dgram(); + client.process_input(&dgram.unwrap(), end); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); } @@ -374,7 +374,7 @@ fn create_stream_idle_rtt( _ = initiator.stream_send(stream, DEFAULT_STREAM_DATA).unwrap(); let req = initiator.process_output(now).dgram(); now += rtt / 2; - responder.process_input(req.unwrap(), now); + responder.process_input(&req.unwrap(), now); // Reordering two packets from the responder forces the initiator to be idle. _ = responder.stream_send(stream, DEFAULT_STREAM_DATA).unwrap(); @@ -383,15 +383,15 @@ fn create_stream_idle_rtt( let resp2 = responder.process_output(now).dgram(); now += rtt / 2; - initiator.process_input(resp2.unwrap(), now); - initiator.process_input(resp1.unwrap(), now); + initiator.process_input(&resp2.unwrap(), now); + initiator.process_input(&resp1.unwrap(), now); let ack = initiator.process_output(now).dgram(); assert!(ack.is_some()); check_idle(initiator, now); // Receiving the ACK should return the responder to idle too. 
now += rtt / 2; - responder.process_input(ack.unwrap(), now); + responder.process_input(&ack.unwrap(), now); check_idle(responder, now); (now, stream) @@ -427,9 +427,9 @@ fn keep_alive_initiator() { assert_eq!(server.stats().frame_tx.ping, pings_before + 1); // Exchange ack for the PING. - let out = client.process(ping, now).dgram(); - let out = server.process(out, now).dgram(); - assert!(client.process(out, now).dgram().is_none()); + let out = client.process(ping.as_ref(), now).dgram(); + let out = server.process(out.as_ref(), now).dgram(); + assert!(client.process(out.as_ref(), now).dgram().is_none()); // Check that there will be next keep-alive ping after default_timeout() / 2. assert_idle(&mut server, now, default_timeout() / 2); @@ -469,12 +469,12 @@ fn keep_alive_lost() { assert_eq!(server.stats().frame_tx.ping, pings_before2 + 1); // Exchange ack for the PING. - let out = client.process(ping, now).dgram(); + let out = client.process(ping.as_ref(), now).dgram(); now += Duration::from_millis(20); - let out = server.process(out, now).dgram(); + let out = server.process(out.as_ref(), now).dgram(); - assert!(client.process(out, now).dgram().is_none()); + assert!(client.process(out.as_ref(), now).dgram().is_none()); // TODO: if we run server.process with current value of now, the server will // return some small timeout for the recovry although it does not have @@ -527,10 +527,10 @@ fn keep_alive_unmark() { fn transfer_force_idle(sender: &mut Connection, receiver: &mut Connection) { let dgram = sender.process_output(now()).dgram(); let chaff = send_something(sender, now()); - receiver.process_input(chaff, now()); - receiver.process_input(dgram.unwrap(), now()); + receiver.process_input(&chaff, now()); + receiver.process_input(&dgram.unwrap(), now()); let ack = receiver.process_output(now()).dgram(); - sender.process_input(ack.unwrap(), now()); + sender.process_input(&ack.unwrap(), now()); } /// Receiving the end of the stream stops keep-alives for that stream. 
@@ -598,7 +598,7 @@ fn keep_alive_stop_sending() { // The server will have sent RESET_STREAM, which the client will // want to acknowledge, so force that out. let junk = send_something(&mut server, now()); - let ack = client.process(Some(junk), now()).dgram(); + let ack = client.process(Some(&junk), now()).dgram(); assert!(ack.is_some()); // Now the client should be idle. @@ -661,7 +661,7 @@ fn keep_alive_uni() { _ = client.stream_send(stream, DEFAULT_STREAM_DATA).unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); server.stream_keep_alive(stream, true).unwrap(); } @@ -671,11 +671,14 @@ fn keep_alive_with_ack_eliciting_packet_lost() { const RTT: Duration = Duration::from_millis(500); // PTO will be ~1.1125s - // The idle time out will be set to ~ 5 * PTO. (IDLE_TIMEOUT/2 > pto and IDLE_TIMEOUT/2 < pto + 2pto) - // After handshake all packets will be lost. The following steps will happen after the handshake: + // The idle time out will be set to ~ 5 * PTO. (IDLE_TIMEOUT/2 > pto and IDLE_TIMEOUT/2 < pto + // + 2pto) After handshake all packets will be lost. The following steps will happen after + // the handshake: // - data will be sent on a stream that is marked for keep-alive, (at start time) - // - PTO timer will trigger first, and the data will be retransmited toghether with a PING, (at the start time + pto) - // - keep-alive timer will trigger and a keep-alive PING will be sent, (at the start time + IDLE_TIMEOUT / 2) + // - PTO timer will trigger first, and the data will be retransmitted together with a PING, (at + // the start time + pto) + // - keep-alive timer will trigger and a keep-alive PING will be sent, (at the start time + + // IDLE_TIMEOUT / 2) // - PTO timer will trigger again.
(at the start time + pto + 2*pto) // - Idle time out will trigger (at the timeout + IDLE_TIMEOUT) const IDLE_TIMEOUT: Duration = Duration::from_millis(6000); diff --git a/neqo-transport/src/connection/tests/keys.rs b/neqo-transport/src/connection/tests/keys.rs index 26a3768b7b..847b253284 100644 --- a/neqo-transport/src/connection/tests/keys.rs +++ b/neqo-transport/src/connection/tests/keys.rs @@ -4,23 +4,28 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::super::super::{ConnectionError, ERROR_AEAD_LIMIT_REACHED}; -use super::super::{Connection, ConnectionParameters, Error, Output, State, StreamType}; +use std::mem; + +use neqo_common::{qdebug, Datagram}; +use test_fixture::now; + use super::{ + super::{ + super::{ConnectionError, ERROR_AEAD_LIMIT_REACHED}, + Connection, ConnectionParameters, Error, Output, State, StreamType, + }, connect, connect_force_idle, default_client, default_server, maybe_authenticate, send_and_receive, send_something, AT_LEAST_PTO, }; -use crate::crypto::{OVERWRITE_INVOCATIONS, UPDATE_WRITE_KEYS_AT}; -use crate::packet::PacketNumber; -use crate::path::PATH_MTU_V6; - -use neqo_common::{qdebug, Datagram}; -use std::mem; -use test_fixture::{self, now}; +use crate::{ + crypto::{OVERWRITE_INVOCATIONS, UPDATE_WRITE_KEYS_AT}, + packet::PacketNumber, + path::PATH_MTU_V6, +}; fn check_discarded( peer: &mut Connection, - pkt: Datagram, + pkt: &Datagram, response: bool, dropped: usize, dups: usize, @@ -59,11 +64,11 @@ fn discarded_initial_keys() { qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let init_pkt_s = server.process(init_pkt_c.clone(), now()).dgram(); + let init_pkt_s = server.process(init_pkt_c.as_ref(), now()).dgram(); assert!(init_pkt_s.is_some()); qdebug!("---- client: cert verification"); - let out = client.process(init_pkt_s.clone(), now()).dgram(); + let out = client.process(init_pkt_s.as_ref(), now()).dgram(); 
assert!(out.is_some()); // The client has received a handshake packet. It will remove the Initial keys. @@ -71,7 +76,7 @@ fn discarded_initial_keys() { // The initial packet should be dropped. The packet contains a Handshake packet as well, which // will be marked as dup. And it will contain padding, which will be "dropped". // The client will generate a Handshake packet here to avoid stalling. - check_discarded(&mut client, init_pkt_s.unwrap(), true, 2, 1); + check_discarded(&mut client, &init_pkt_s.unwrap(), true, 2, 1); assert!(maybe_authenticate(&mut client)); @@ -79,7 +84,7 @@ fn discarded_initial_keys() { // packet from the client. // We will check this by processing init_pkt_c a second time. // The dropped packet is padding. The Initial packet has been mark dup. - check_discarded(&mut server, init_pkt_c.clone().unwrap(), false, 1, 1); + check_discarded(&mut server, &init_pkt_c.clone().unwrap(), false, 1, 1); qdebug!("---- client: SH..FIN -> FIN"); let out = client.process(None, now()).dgram(); @@ -87,14 +92,14 @@ fn discarded_initial_keys() { // The server will process the first Handshake packet. // After this the Initial keys will be dropped. - let out = server.process(out, now()).dgram(); + let out = server.process(out.as_ref(), now()).dgram(); assert!(out.is_some()); // Check that the Initial keys are dropped at the server // We will check this by processing init_pkt_c a third time. // The Initial packet has been dropped and padding that follows it. // There is no dups, everything has been dropped. - check_discarded(&mut server, init_pkt_c.unwrap(), false, 1, 0); + check_discarded(&mut server, &init_pkt_c.unwrap(), false, 1, 0); } #[test] @@ -116,7 +121,8 @@ fn key_update_client() { assert_eq!(client.get_epochs(), (Some(4), Some(3))); // Send something to propagate the update. - assert!(send_and_receive(&mut client, &mut server, now).is_none()); + // Note that the server will acknowledge immediately when RTT is zero. 
+ assert!(send_and_receive(&mut client, &mut server, now).is_some()); // The server should now be waiting to discharge read keys. assert_eq!(server.get_epochs(), (Some(4), Some(3))); @@ -150,7 +156,7 @@ fn key_update_client() { // The previous PTO packet (see above) was dropped, so we should get an ACK here. let dgram = send_and_receive(&mut client, &mut server, now); assert!(dgram.is_some()); - let res = client.process(dgram, now); + let res = client.process(dgram.as_ref(), now); // This is the first packet that the client has received from the server // with new keys, so its read timer just started. if let Output::Callback(t) = res { @@ -189,7 +195,7 @@ fn key_update_consecutive() { assert_eq!(client.get_epochs(), (Some(4), Some(3))); // Have the server process the ACK. - if let Output::Callback(_) = server.process(dgram, now) { + if let Output::Callback(_) = server.process(dgram.as_ref(), now) { assert_eq!(server.get_epochs(), (Some(4), Some(3))); // Now move the server temporarily into the future so that it // rotates the keys. The client stays in the present. @@ -207,7 +213,7 @@ fn key_update_consecutive() { // However, as the server didn't wait long enough to update again, the // client hasn't rotated its keys, so the packet gets dropped. - check_discarded(&mut client, dgram, false, 1, 0); + check_discarded(&mut client, &dgram, false, 1, 0); } // Key updates can't be initiated too early. 
@@ -224,12 +230,12 @@ fn key_update_before_confirmed() { assert_update_blocked(&mut client); // Server Initial + Handshake - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); assert_update_blocked(&mut server); // Client Handshake - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert_update_blocked(&mut client); assert!(maybe_authenticate(&mut client)); @@ -240,12 +246,12 @@ fn key_update_before_confirmed() { assert_update_blocked(&mut client); // Server HANDSHAKE_DONE - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); assert!(server.initiate_key_update().is_ok()); // Client receives HANDSHAKE_DONE - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_none()); assert!(client.initiate_key_update().is_ok()); } @@ -276,13 +282,13 @@ fn exhaust_read_keys() { let dgram = send_something(&mut client, now()); overwrite_invocations(0); - let dgram = server.process(Some(dgram), now()).dgram(); + let dgram = server.process(Some(&dgram), now()).dgram(); assert!(matches!( server.state(), State::Closed(ConnectionError::Transport(Error::KeysExhausted)) )); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert!(matches!( client.state(), State::Draining { diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 9e6a2ba90b..405ae161a4 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -4,32 +4,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{Connection, Output, State, StreamType}; +use std::{ + cell::RefCell, + mem, + net::{IpAddr, Ipv6Addr, SocketAddr}, + rc::Rc, + time::{Duration, Instant}, +}; + +use neqo_common::{Datagram, Decoder}; +use test_fixture::{ + assertions::{assert_v4_path, assert_v6_path}, + fixture_init, new_neqo_qlog, now, DEFAULT_ADDR, DEFAULT_ADDR_V4, +}; + use super::{ + super::{Connection, Output, State, StreamType}, connect_fail, connect_force_idle, connect_rtt_idle, default_client, default_server, maybe_authenticate, new_client, new_server, send_something, CountingConnectionIdGenerator, }; -use crate::cid::LOCAL_ACTIVE_CID_LIMIT; -use crate::frame::FRAME_TYPE_NEW_CONNECTION_ID; -use crate::packet::PacketBuilder; -use crate::path::{PATH_MTU_V4, PATH_MTU_V6}; -use crate::tparams::{self, PreferredAddress, TransportParameter}; use crate::{ + cid::LOCAL_ACTIVE_CID_LIMIT, + connection::tests::send_something_paced, + frame::FRAME_TYPE_NEW_CONNECTION_ID, + packet::PacketBuilder, + path::{PATH_MTU_V4, PATH_MTU_V6}, + tparams::{self, PreferredAddress, TransportParameter}, ConnectionError, ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, EmptyConnectionIdGenerator, Error, }; -use neqo_common::{Datagram, Decoder}; -use std::cell::RefCell; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::rc::Rc; -use std::time::{Duration, Instant}; -use test_fixture::{ - self, addr, addr_v4, - assertions::{assert_v4_path, assert_v6_path}, - fixture_init, now, -}; - /// This should be a valid-seeming transport parameter. /// And it should have different values to `addr` and `addr_v4`. 
const SAMPLE_PREFERRED_ADDRESS: &[u8] = &[ @@ -49,7 +53,7 @@ fn loopback() -> SocketAddr { } fn change_path(d: &Datagram, a: SocketAddr) -> Datagram { - Datagram::new(a, a, &d[..]) + Datagram::new(a, a, d.tos(), d.ttl(), &d[..]) } fn new_port(a: SocketAddr) -> SocketAddr { @@ -58,7 +62,13 @@ fn new_port(a: SocketAddr) -> SocketAddr { } fn change_source_port(d: &Datagram) -> Datagram { - Datagram::new(new_port(d.source()), d.destination(), &d[..]) + Datagram::new( + new_port(d.source()), + d.destination(), + d.tos(), + d.ttl(), + &d[..], + ) } /// As these tests use a new path, that path often has a non-zero RTT. @@ -78,14 +88,14 @@ fn rebinding_port() { let dgram = send_something(&mut client, now()); let dgram = change_source_port(&dgram); - server.process_input(dgram, now()); + server.process_input(&dgram, now()); // Have the server send something so that it generates a packet. let stream_id = server.stream_create(StreamType::UniDi).unwrap(); server.stream_close_send(stream_id).unwrap(); let dgram = server.process_output(now()).dgram(); let dgram = dgram.unwrap(); - assert_eq!(dgram.source(), addr()); - assert_eq!(dgram.destination(), new_port(addr())); + assert_eq!(dgram.source(), DEFAULT_ADDR); + assert_eq!(dgram.destination(), new_port(DEFAULT_ADDR)); } /// This simulates an attack where a valid packet is forwarded on @@ -99,8 +109,8 @@ fn path_forwarding_attack() { let mut now = now(); let dgram = send_something(&mut client, now); - let dgram = change_path(&dgram, addr_v4()); - server.process_input(dgram, now); + let dgram = change_path(&dgram, DEFAULT_ADDR_V4); + server.process_input(&dgram, now); // The server now probes the new (primary) path. let new_probe = server.process_output(now).dgram().unwrap(); @@ -120,14 +130,14 @@ fn path_forwarding_attack() { // The client should respond to the challenge on the new path. // The server couldn't pad, so the client is also amplification limited. 
- let new_resp = client.process(Some(new_probe), now).dgram().unwrap(); + let new_resp = client.process(Some(&new_probe), now).dgram().unwrap(); assert_eq!(client.stats().frame_rx.path_challenge, 1); assert_eq!(client.stats().frame_tx.path_challenge, 1); assert_eq!(client.stats().frame_tx.path_response, 1); assert_v4_path(&new_resp, false); // The client also responds to probes on the old path. - let old_resp = client.process(Some(old_probe), now).dgram().unwrap(); + let old_resp = client.process(Some(&old_probe), now).dgram().unwrap(); assert_eq!(client.stats().frame_rx.path_challenge, 2); assert_eq!(client.stats().frame_tx.path_challenge, 1); assert_eq!(client.stats().frame_tx.path_response, 2); @@ -140,12 +150,12 @@ fn path_forwarding_attack() { // Receiving the PATH_RESPONSE from the client opens the amplification // limit enough for the server to respond. // This is padded because it includes PATH_CHALLENGE. - let server_data1 = server.process(Some(new_resp), now).dgram().unwrap(); + let server_data1 = server.process(Some(&new_resp), now).dgram().unwrap(); assert_v4_path(&server_data1, true); assert_eq!(server.stats().frame_tx.path_challenge, 3); // The client responds to this probe on the new path. - client.process_input(server_data1, now); + client.process_input(&server_data1, now); let stream_before = client.stats().frame_tx.stream; let padded_resp = send_something(&mut client, now); assert_eq!(stream_before, client.stats().frame_tx.stream); @@ -161,7 +171,7 @@ fn path_forwarding_attack() { assert_v4_path(&server_data2, false); // Until new data is received from the client on the old path. - server.process_input(client_data2, now); + server.process_input(&client_data2, now); // The server sends a probe on the "old" path. 
let server_data3 = send_something(&mut server, now); assert_v4_path(&server_data3, true); @@ -175,10 +185,10 @@ fn migrate_immediate() { let mut client = default_client(); let mut server = default_server(); connect_force_idle(&mut client, &mut server); - let mut now = now(); + let now = now(); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); let client1 = send_something(&mut client, now); @@ -189,7 +199,7 @@ fn migrate_immediate() { let server_delayed = send_something(&mut server, now); // The server accepts the first packet and migrates (but probes). - let server1 = server.process(Some(client1), now).dgram().unwrap(); + let server1 = server.process(Some(&client1), now).dgram().unwrap(); assert_v4_path(&server1, true); let server2 = server.process_output(now).dgram().unwrap(); assert_v6_path(&server2, true); @@ -197,15 +207,16 @@ fn migrate_immediate() { // The second packet has no real effect, it just elicits an ACK. let all_before = server.stats().frame_tx.all; let ack_before = server.stats().frame_tx.ack; - let server3 = server.process(Some(client2), now).dgram(); + let server3 = server.process(Some(&client2), now).dgram(); assert!(server3.is_some()); assert_eq!(server.stats().frame_tx.all, all_before + 1); assert_eq!(server.stats().frame_tx.ack, ack_before + 1); // Receiving a packet sent by the server before migration doesn't change path. - client.process_input(server_delayed, now); - now = skip_pacing(&mut client, now); - let client3 = send_something(&mut client, now); + client.process_input(&server_delayed, now); + // The client has sent two unpaced packets and this new path has no RTT estimate + // so this might be paced. 
+ let (client3, _t) = send_something_paced(&mut client, now, true); assert_v4_path(&client3, false); } @@ -218,7 +229,7 @@ fn migrate_rtt() { let now = connect_rtt_idle(&mut client, &mut server, RTT); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); // The RTT might be increased for the new path, so allow a little flexibility. let rtt = client.paths.rtt(); @@ -234,7 +245,7 @@ fn migrate_immediate_fail() { let mut now = now(); client - .migrate(Some(addr_v4()), Some(addr_v4()), true, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); @@ -282,20 +293,20 @@ fn migrate_same() { let now = now(); client - .migrate(Some(addr()), Some(addr()), true, now) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), true, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); assert_v6_path(&probe, true); // Contains PATH_CHALLENGE. assert_eq!(client.stats().frame_tx.path_challenge, 1); - let resp = server.process(Some(probe), now).dgram().unwrap(); + let resp = server.process(Some(&probe), now).dgram().unwrap(); assert_v6_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 0); // Everything continues happily. 
- client.process_input(resp, now); + client.process_input(&resp, now); let contd = send_something(&mut client, now); assert_v6_path(&contd, false); } @@ -309,7 +320,7 @@ fn migrate_same_fail() { let mut now = now(); client - .migrate(Some(addr()), Some(addr()), true, now) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), true, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); @@ -364,15 +375,15 @@ fn migration(mut client: Connection) { let now = now(); client - .migrate(Some(addr_v4()), Some(addr_v4()), false, now) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now) .unwrap(); let probe = client.process_output(now).dgram().unwrap(); assert_v4_path(&probe, true); // Contains PATH_CHALLENGE. assert_eq!(client.stats().frame_tx.path_challenge, 1); - let probe_cid = ConnectionId::from(&get_cid(&probe)); + let probe_cid = ConnectionId::from(get_cid(&probe)); - let resp = server.process(Some(probe), now).dgram().unwrap(); + let resp = server.process(Some(&probe), now).dgram().unwrap(); assert_v4_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 1); @@ -381,12 +392,12 @@ fn migration(mut client: Connection) { let client_data = send_something(&mut client, now); assert_ne!(get_cid(&client_data), probe_cid); assert_v6_path(&client_data, false); - server.process_input(client_data, now); + server.process_input(&client_data, now); let server_data = send_something(&mut server, now); assert_v6_path(&server_data, false); // Once the client receives the probe response, it migrates to the new path. - client.process_input(resp, now); + client.process_input(&resp, now); assert_eq!(client.stats().frame_rx.path_challenge, 1); let migrate_client = send_something(&mut client, now); assert_v4_path(&migrate_client, true); // Responds to server probe. 
@@ -395,7 +406,7 @@ fn migration(mut client: Connection) { // However, it will probe the old path again, even though it has just // received a response to its last probe, because it needs to verify // that the migration is genuine. - server.process_input(migrate_client, now); + server.process_input(&migrate_client, now); let stream_before = server.stats().frame_tx.stream; let probe_old_server = send_something(&mut server, now); // This is just the double-check probe; no STREAM frames. @@ -410,8 +421,8 @@ fn migration(mut client: Connection) { assert_eq!(server.stats().frame_tx.stream, stream_before + 1); // The client receives these checks and responds to the probe, but uses the new path. - client.process_input(migrate_server, now); - client.process_input(probe_old_server, now); + client.process_input(&migrate_server, now); + client.process_input(&probe_old_server, now); let old_probe_resp = send_something(&mut client, now); assert_v6_path(&old_probe_resp, true); let client_confirmation = client.process_output(now).dgram().unwrap(); @@ -438,8 +449,8 @@ fn migration_client_empty_cid() { test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, ConnectionParameters::default(), now(), ) @@ -451,11 +462,11 @@ fn migration_client_empty_cid() { /// Returns the packet containing `HANDSHAKE_DONE` from the server. 
fn fast_handshake(client: &mut Connection, server: &mut Connection) -> Option { let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); assert!(maybe_authenticate(client)); let dgram = client.process_output(now()).dgram(); - server.process(dgram, now()).dgram() + server.process(dgram.as_ref(), now()).dgram() } fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: SocketAddr) { @@ -494,6 +505,7 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So }; fixture_init(); + let (log, _contents) = new_neqo_qlog(); let mut client = Connection::new_client( test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, @@ -504,10 +516,10 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So now(), ) .unwrap(); - let spa = if preferred.ip().is_ipv6() { - PreferredAddress::new(None, Some(preferred)) - } else { - PreferredAddress::new(Some(preferred), None) + client.set_qlog(log); + let spa = match preferred { + SocketAddr::V6(v6) => PreferredAddress::new(None, Some(v6)), + SocketAddr::V4(v4) => PreferredAddress::new(Some(v4), None), }; let mut server = new_server(ConnectionParameters::default().preferred_address(spa)); @@ -515,7 +527,7 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So // The client is about to process HANDSHAKE_DONE. // It should start probing toward the server's preferred address. 
- let probe = client.process(dgram, now()).dgram().unwrap(); + let probe = client.process(dgram.as_ref(), now()).dgram().unwrap(); assert_toward_spa(&probe, true); assert_eq!(client.stats().frame_tx.path_challenge, 1); assert_ne!(client.process_output(now()).callback(), Duration::new(0, 0)); @@ -525,26 +537,26 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So assert_orig_path(&data, false); // The server responds to the probe. - let resp = server.process(Some(probe), now()).dgram().unwrap(); + let resp = server.process(Some(&probe), now()).dgram().unwrap(); assert_from_spa(&resp, true); assert_eq!(server.stats().frame_tx.path_challenge, 1); assert_eq!(server.stats().frame_tx.path_response, 1); // Data continues on the main path for the server. - server.process_input(data, now()); + server.process_input(&data, now()); let data = send_something(&mut server, now()); assert_orig_path(&data, false); // Client gets the probe response back and it migrates. - client.process_input(resp, now()); - client.process_input(data, now()); + client.process_input(&resp, now()); + client.process_input(&data, now()); let data = send_something(&mut client, now()); assert_toward_spa(&data, true); assert_eq!(client.stats().frame_tx.stream, 2); assert_eq!(client.stats().frame_tx.path_response, 1); // The server sees the migration and probes the old path. - let probe = server.process(Some(data), now()).dgram().unwrap(); + let probe = server.process(Some(&data), now()).dgram().unwrap(); assert_orig_path(&probe, true); assert_eq!(server.stats().frame_tx.path_challenge, 2); @@ -556,22 +568,22 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So /// Migration works for a new port number. #[test] fn preferred_address_new_port() { - let a = addr(); + let a = DEFAULT_ADDR; preferred_address(a, a, new_port(a)); } /// Migration works for a new address too. 
#[test] fn preferred_address_new_address() { - let mut preferred = addr(); + let mut preferred = DEFAULT_ADDR; preferred.set_ip(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2))); - preferred_address(addr(), addr(), preferred); + preferred_address(DEFAULT_ADDR, DEFAULT_ADDR, preferred); } /// Migration works for IPv4 addresses. #[test] fn preferred_address_new_port_v4() { - let a = addr_v4(); + let a = DEFAULT_ADDR_V4; preferred_address(a, a, new_port(a)); } @@ -586,7 +598,7 @@ fn expect_no_migration(client: &mut Connection, server: &mut Connection) { let dgram = fast_handshake(client, server); // The client won't probe now, though it could; it remains idle. - let out = client.process(dgram, now()); + let out = client.process(dgram.as_ref(), now()); assert_ne!(out.callback(), Duration::new(0, 0)); // Data continues on the main path for the client. @@ -605,13 +617,13 @@ fn preferred_address_ignored(spa: PreferredAddress) { /// Using a loopback address in the preferred address is ignored. #[test] fn preferred_address_ignore_loopback() { - preferred_address_ignored(PreferredAddress::new(None, Some(loopback()))); + preferred_address_ignored(PreferredAddress::new_any(None, Some(loopback()))); } /// A preferred address in the wrong address family is ignored. 
#[test] fn preferred_address_ignore_different_family() { - preferred_address_ignored(PreferredAddress::new(Some(addr_v4()), None)); + preferred_address_ignored(PreferredAddress::new_any(Some(DEFAULT_ADDR_V4), None)); } /// Disabling preferred addresses at the client means that it ignores a perfectly @@ -619,9 +631,9 @@ fn preferred_address_ignore_different_family() { #[test] fn preferred_address_disabled_client() { let mut client = new_client(ConnectionParameters::default().disable_preferred_address()); - let mut preferred = addr(); + let mut preferred = DEFAULT_ADDR; preferred.set_ip(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2))); - let spa = PreferredAddress::new(None, Some(preferred)); + let spa = PreferredAddress::new_any(None, Some(preferred)); let mut server = new_server(ConnectionParameters::default().preferred_address(spa)); expect_no_migration(&mut client, &mut server); @@ -631,7 +643,7 @@ fn preferred_address_disabled_client() { fn preferred_address_empty_cid() { fixture_init(); - let spa = PreferredAddress::new(None, Some(new_port(addr()))); + let spa = PreferredAddress::new_any(None, Some(new_port(DEFAULT_ADDR))); let res = Connection::new_server( test_fixture::DEFAULT_KEYS, test_fixture::DEFAULT_ALPN, @@ -694,33 +706,33 @@ fn preferred_address_client() { fn migration_invalid_state() { let mut client = default_client(); assert!(client - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); let mut server = default_server(); assert!(server - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); connect_force_idle(&mut client, &mut server); assert!(server - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); client.close(now(), 0, "closing"); assert!(client - .migrate(Some(addr()), Some(addr()), false, now()) + 
.migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); let close = client.process(None, now()).dgram(); - let dgram = server.process(close, now()).dgram(); + let dgram = server.process(close.as_ref(), now()).dgram(); assert!(server - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert!(client - .migrate(Some(addr()), Some(addr()), false, now()) + .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now()) .is_err()); } @@ -741,32 +753,32 @@ fn migration_invalid_address() { cant_migrate(None, None); // Providing a zero port number isn't valid. - let mut zero_port = addr(); + let mut zero_port = DEFAULT_ADDR; zero_port.set_port(0); cant_migrate(None, Some(zero_port)); cant_migrate(Some(zero_port), None); // An unspecified remote address is bad. - let mut remote_unspecified = addr(); + let mut remote_unspecified = DEFAULT_ADDR; remote_unspecified.set_ip(IpAddr::V6(Ipv6Addr::from(0))); cant_migrate(None, Some(remote_unspecified)); // Mixed address families is bad. - cant_migrate(Some(addr()), Some(addr_v4())); - cant_migrate(Some(addr_v4()), Some(addr())); + cant_migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR_V4)); + cant_migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR)); // Loopback to non-loopback is bad. 
- cant_migrate(Some(addr()), Some(loopback())); - cant_migrate(Some(loopback()), Some(addr())); + cant_migrate(Some(DEFAULT_ADDR), Some(loopback())); + cant_migrate(Some(loopback()), Some(DEFAULT_ADDR)); assert_eq!( client - .migrate(Some(addr()), Some(loopback()), true, now()) + .migrate(Some(DEFAULT_ADDR), Some(loopback()), true, now()) .unwrap_err(), Error::InvalidMigration ); assert_eq!( client - .migrate(Some(loopback()), Some(addr()), true, now()) + .migrate(Some(loopback()), Some(DEFAULT_ADDR), true, now()) .unwrap_err(), Error::InvalidMigration ); @@ -811,7 +823,7 @@ fn retire_all() { .unwrap(); connect_force_idle(&mut client, &mut server); - let original_cid = ConnectionId::from(&get_cid(&send_something(&mut client, now()))); + let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); server.test_frame_writer = Some(Box::new(RetireAll { cid_gen })); let ncid = send_something(&mut server, now()); @@ -819,7 +831,7 @@ fn retire_all() { let new_cid_before = client.stats().frame_rx.new_connection_id; let retire_cid_before = client.stats().frame_tx.retire_connection_id; - client.process_input(ncid, now()); + client.process_input(&ncid, now()); let retire = send_something(&mut client, now()); assert_eq!( client.stats().frame_rx.new_connection_id, @@ -849,17 +861,17 @@ fn retire_prior_to_migration_failure() { .unwrap(); connect_force_idle(&mut client, &mut server); - let original_cid = ConnectionId::from(&get_cid(&send_something(&mut client, now()))); + let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); client - .migrate(Some(addr_v4()), Some(addr_v4()), false, now()) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now()) .unwrap(); // The client now probes the new path. 
let probe = client.process_output(now()).dgram().unwrap(); assert_v4_path(&probe, true); assert_eq!(client.stats().frame_tx.path_challenge, 1); - let probe_cid = ConnectionId::from(&get_cid(&probe)); + let probe_cid = ConnectionId::from(get_cid(&probe)); assert_ne!(original_cid, probe_cid); // Have the server receive the probe, but separately have it decide to @@ -868,17 +880,17 @@ fn retire_prior_to_migration_failure() { let retire_all = send_something(&mut server, now()); server.test_frame_writer = None; - let resp = server.process(Some(probe), now()).dgram().unwrap(); + let resp = server.process(Some(&probe), now()).dgram().unwrap(); assert_v4_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 1); // Have the client receive the NEW_CONNECTION_ID with Retire Prior To. - client.process_input(retire_all, now()); + client.process_input(&retire_all, now()); // This packet contains the probe response, which should be fine, but it // also includes PATH_CHALLENGE for the new path, and the client can't // respond without a connection ID. We treat this as a connection error. - client.process_input(resp, now()); + client.process_input(&resp, now()); assert!(matches!( client.state(), State::Closing { @@ -904,17 +916,17 @@ fn retire_prior_to_migration_success() { .unwrap(); connect_force_idle(&mut client, &mut server); - let original_cid = ConnectionId::from(&get_cid(&send_something(&mut client, now()))); + let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now()))); client - .migrate(Some(addr_v4()), Some(addr_v4()), false, now()) + .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now()) .unwrap(); // The client now probes the new path. 
let probe = client.process_output(now()).dgram().unwrap(); assert_v4_path(&probe, true); assert_eq!(client.stats().frame_tx.path_challenge, 1); - let probe_cid = ConnectionId::from(&get_cid(&probe)); + let probe_cid = ConnectionId::from(get_cid(&probe)); assert_ne!(original_cid, probe_cid); // Have the server receive the probe, but separately have it decide to @@ -923,15 +935,15 @@ fn retire_prior_to_migration_success() { let retire_all = send_something(&mut server, now()); server.test_frame_writer = None; - let resp = server.process(Some(probe), now()).dgram().unwrap(); + let resp = server.process(Some(&probe), now()).dgram().unwrap(); assert_v4_path(&resp, true); assert_eq!(server.stats().frame_tx.path_response, 1); assert_eq!(server.stats().frame_tx.path_challenge, 1); // Have the client receive the NEW_CONNECTION_ID with Retire Prior To second. // As this occurs in a very specific order, migration succeeds. - client.process_input(resp, now()); - client.process_input(retire_all, now()); + client.process_input(&resp, now()); + client.process_input(&retire_all, now()); // Migration succeeds and the new path gets the last connection ID. let dgram = send_something(&mut client, now()); @@ -939,3 +951,39 @@ fn retire_prior_to_migration_success() { assert_ne!(get_cid(&dgram), original_cid); assert_ne!(get_cid(&dgram), probe_cid); } + +struct GarbageWriter {} + +impl crate::connection::test_internal::FrameWriter for GarbageWriter { + fn write_frames(&mut self, builder: &mut PacketBuilder) { + // Not a valid frame type. + builder.encode_varint(u32::MAX); + } +} + +/// Test the case that we run out of connection ID and receive an invalid frame +/// from a new path. 
+#[test] +#[should_panic(expected = "attempting to close with a temporary path")] +fn error_on_new_path_with_no_connection_id() { + let mut client = default_client(); + let mut server = default_server(); + connect_force_idle(&mut client, &mut server); + + let cid_gen: Rc> = + Rc::new(RefCell::new(CountingConnectionIdGenerator::default())); + server.test_frame_writer = Some(Box::new(RetireAll { cid_gen })); + let retire_all = send_something(&mut server, now()); + + client.process_input(&retire_all, now()); + + server.test_frame_writer = Some(Box::new(GarbageWriter {})); + let garbage = send_something(&mut server, now()); + + let dgram = change_path(&garbage, DEFAULT_ADDR_V4); + client.process_input(&dgram, now()); + + // See issue #1697. We had a crash when the client had a temporary path and + // process_output is called. + mem::drop(client.process_output(now())); +} diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index c12f3576fb..c8c87a0df0 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -4,7 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![deny(clippy::pedantic)] +use std::{ + cell::RefCell, + cmp::min, + mem, + rc::Rc, + time::{Duration, Instant}, +}; + +use enum_map::enum_map; +use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; +use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; +use test_fixture::{fixture_init, new_neqo_qlog, now, DEFAULT_ADDR}; use super::{Connection, ConnectionError, ConnectionId, Output, State}; use crate::{ @@ -12,6 +23,8 @@ use crate::{ cc::{CWND_INITIAL_PKTS, CWND_MIN}, cid::ConnectionIdRef, events::ConnectionEvent, + frame::FRAME_TYPE_PING, + packet::PacketBuilder, path::PATH_MTU_V6, recovery::ACK_ONLY_SIZE_LIMIT, stats::{FrameStats, Stats, MAX_PTO_COUNTS}, @@ -19,29 +32,16 @@ use crate::{ Version, }; -use std::{ - cell::RefCell, - cmp::min, - convert::TryFrom, - mem, - rc::Rc, - time::{Duration, Instant}, -}; - -use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; -use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; -use test_fixture::{self, addr, fixture_init, now}; - // All the tests. mod ackrate; mod cc; mod close; mod datagram; -mod fuzzing; mod handshake; mod idle; mod keys; mod migration; +mod null; mod priority; mod recovery; mod resumption; @@ -53,7 +53,7 @@ const DEFAULT_RTT: Duration = Duration::from_millis(100); const AT_LEAST_PTO: Duration = Duration::from_secs(1); const DEFAULT_STREAM_DATA: &[u8] = b"message"; /// The number of 1-RTT packets sent in `force_idle` by a client. -const FORCE_IDLE_CLIENT_1RTT_PACKETS: usize = 3; +const CLIENT_HANDSHAKE_1RTT_PACKETS: usize = 1; /// WARNING! In this module, this version of the generator needs to be used. 
/// This copies the implementation from @@ -76,7 +76,7 @@ impl ConnectionIdDecoder for CountingConnectionIdGenerator { impl ConnectionIdGenerator for CountingConnectionIdGenerator { fn generate_cid(&mut self) -> Option { - let mut r = random(20); + let mut r = random::<20>(); r[0] = 8; r[1] = u8::try_from(self.counter >> 24).unwrap(); r[2] = u8::try_from((self.counter >> 16) & 0xff).unwrap(); @@ -99,24 +99,28 @@ impl ConnectionIdGenerator for CountingConnectionIdGenerator { // These are a direct copy of those functions. pub fn new_client(params: ConnectionParameters) -> Connection { fixture_init(); - Connection::new_client( + let (log, _contents) = new_neqo_qlog(); + let mut client = Connection::new_client( test_fixture::DEFAULT_SERVER_NAME, test_fixture::DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, params, now(), ) - .expect("create a default client") + .expect("create a default client"); + client.set_qlog(log); + client } + pub fn default_client() -> Connection { new_client(ConnectionParameters::default()) } pub fn new_server(params: ConnectionParameters) -> Connection { fixture_init(); - + let (log, _contents) = new_neqo_qlog(); let mut c = Connection::new_server( test_fixture::DEFAULT_KEYS, test_fixture::DEFAULT_ALPN, @@ -124,6 +128,7 @@ pub fn new_server(params: ConnectionParameters) -> Connection { params, ) .expect("create a default server"); + c.set_qlog(log); c.server_enable_0rtt(&test_fixture::anti_replay(), AllowZeroRtt {}) .expect("enable 0-RTT"); c @@ -146,12 +151,36 @@ pub fn maybe_authenticate(conn: &mut Connection) -> bool { false } +/// Compute the RTT variance after `n` ACKs or other RTT updates. +pub fn rttvar_after_n_updates(n: usize, rtt: Duration) -> Duration { + assert!(n > 0); + let mut rttvar = rtt / 2; + for _ in 1..n { + rttvar = rttvar * 3 / 4; + } + rttvar +} + +/// This inserts a PING frame into packets. 
+struct PingWriter {} + +impl crate::connection::test_internal::FrameWriter for PingWriter { + fn write_frames(&mut self, builder: &mut PacketBuilder) { + builder.encode_varint(FRAME_TYPE_PING); + } +} + +trait DatagramModifier: FnMut(Datagram) -> Option {} + +impl DatagramModifier for T where T: FnMut(Datagram) -> Option {} + /// Drive the handshake between the client and server. -fn handshake( +fn handshake_with_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + mut modifier: impl DatagramModifier, ) -> Instant { let mut a = client; let mut b = server; @@ -165,22 +194,53 @@ fn handshake( ) }; + let mut did_ping = enum_map! {_ => false}; while !is_done(a) { _ = maybe_authenticate(a); let had_input = input.is_some(); - let output = a.process(input, now).dgram(); + // Insert a PING frame into the first application data packet an endpoint sends, + // in order to force the peer to ACK it. For the server, this is depending on the + // client's connection state, which is accessible during the tests. + // + // We're doing this to prevent packet loss from delaying ACKs, which would cause + // cwnd to shrink, and also to prevent the delayed ACK timer from being armed after + // the handshake, which is not something the tests are written to account for. 
+ let should_ping = !did_ping[a.role()] + && (a.role() == Role::Client && *a.state() == State::Connected + || (a.role() == Role::Server && *b.state() == State::Connected)); + if should_ping { + a.test_frame_writer = Some(Box::new(PingWriter {})); + } + let output = a.process(input.as_ref(), now).dgram(); + if should_ping { + a.test_frame_writer = None; + did_ping[a.role()] = true; + } assert!(had_input || output.is_some()); - input = output; + if let Some(d) = output { + input = modifier(d); + } else { + input = output; + } qtrace!("handshake: t += {:?}", rtt / 2); now += rtt / 2; mem::swap(&mut a, &mut b); } if let Some(d) = input { - a.process_input(d, now); + a.process_input(&d, now); } now } +fn handshake( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + handshake_with_modifier(client, server, now, rtt, Some) +} + fn connect_fail( client: &mut Connection, server: &mut Connection, @@ -192,19 +252,20 @@ fn connect_fail( assert_error(server, &ConnectionError::Transport(server_error)); } -fn connect_with_rtt( +fn connect_with_rtt_and_modifier( client: &mut Connection, server: &mut Connection, now: Instant, rtt: Duration, + modifier: impl DatagramModifier, ) -> Instant { fn check_rtt(stats: &Stats, rtt: Duration) { assert_eq!(stats.rtt, rtt); - // Confirmation takes 2 round trips, - // so rttvar is reduced by 1/4 (from rtt/2). - assert_eq!(stats.rttvar, rtt * 3 / 8); + // Validate that rttvar has been computed correctly based on the number of RTT updates. 
+ let n = stats.frame_rx.ack + usize::from(stats.rtt_init_guess); + assert_eq!(stats.rttvar, rttvar_after_n_updates(n, rtt)); } - let now = handshake(client, server, now, rtt); + let now = handshake_with_modifier(client, server, now, rtt, modifier); assert_eq!(*client.state(), State::Confirmed); assert_eq!(*server.state(), State::Confirmed); @@ -213,6 +274,15 @@ fn connect_with_rtt( now } +fn connect_with_rtt( + client: &mut Connection, + server: &mut Connection, + now: Instant, + rtt: Duration, +) -> Instant { + connect_with_rtt_and_modifier(client, server, now, rtt, Some) +} + fn connect(client: &mut Connection, server: &mut Connection) { connect_with_rtt(client, server, now(), Duration::new(0, 0)); } @@ -233,57 +303,40 @@ fn exchange_ticket( ) -> ResumptionToken { let validation = AddressValidation::new(now, ValidateAddress::NoToken).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); server.send_ticket(now, &[]).expect("can send ticket"); let ticket = server.process_output(now).dgram(); assert!(ticket.is_some()); - client.process_input(ticket.unwrap(), now); + client.process_input(&ticket.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); get_tokens(client).pop().expect("should have token") } -/// Getting the client and server to reach an idle state is surprisingly hard. -/// The server sends `HANDSHAKE_DONE` at the end of the handshake, and the client -/// doesn't immediately acknowledge it. Reordering packets does the trick. -fn force_idle( - client: &mut Connection, - server: &mut Connection, - rtt: Duration, - mut now: Instant, -) -> Instant { - // The client has sent NEW_CONNECTION_ID, so ensure that the server generates - // an acknowledgment by sending some reordered packets. 
- qtrace!("force_idle: send reordered client packets"); - let c1 = send_something(client, now); - let c2 = send_something(client, now); - now += rtt / 2; - server.process_input(c2, now); - server.process_input(c1, now); - - // Now do the same for the server. (The ACK is in the first one.) - qtrace!("force_idle: send reordered server packets"); - let s1 = send_something(server, now); - let s2 = send_something(server, now); - now += rtt / 2; - // Delivering s2 first at the client causes it to want to ACK. - client.process_input(s2, now); - // Delivering s1 should not have the client change its mind about the ACK. - let ack = client.process(Some(s1), now).dgram(); - assert!(ack.is_some()); +/// The `handshake` method inserts PING frames into the first application data packets, +/// which forces each peer to ACK them. As a side effect, that causes both sides of the +/// connection to be idle aftwerwards. This method simply verifies that this is the case. +fn assert_idle(client: &mut Connection, server: &mut Connection, rtt: Duration, now: Instant) { let idle_timeout = min( client.conn_params.get_idle_timeout(), server.conn_params.get_idle_timeout(), ); - assert_eq!(client.process_output(now), Output::Callback(idle_timeout)); - now += rtt / 2; - assert_eq!(server.process(ack, now), Output::Callback(idle_timeout)); - now + // Client started its idle period half an RTT before now. + assert_eq!( + client.process_output(now), + Output::Callback(idle_timeout - rtt / 2) + ); + assert_eq!(server.process_output(now), Output::Callback(idle_timeout)); } /// Connect with an RTT and then force both peers to be idle. 
-fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { - let now = connect_with_rtt(client, server, now(), rtt); - let now = force_idle(client, server, rtt, now); +fn connect_rtt_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + rtt: Duration, + modifier: impl DatagramModifier, +) -> Instant { + let now = connect_with_rtt_and_modifier(client, server, now(), rtt, modifier); + assert_idle(client, server, rtt, now); // Drain events from both as well. _ = client.events().count(); _ = server.events().count(); @@ -291,8 +344,20 @@ fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Durat now } +fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { + connect_rtt_idle_with_modifier(client, server, rtt, Some) +} + +fn connect_force_idle_with_modifier( + client: &mut Connection, + server: &mut Connection, + modifier: impl DatagramModifier, +) { + connect_rtt_idle_with_modifier(client, server, Duration::new(0, 0), modifier); +} + fn connect_force_idle(client: &mut Connection, server: &mut Connection) { - connect_rtt_idle(client, server, Duration::new(0, 0)); + connect_force_idle_with_modifier(client, server, Some); } fn fill_stream(c: &mut Connection, stream: StreamId) { @@ -359,7 +424,7 @@ fn increase_cwnd( let pkt = sender.process_output(now); match pkt { Output::Datagram(dgram) => { - receiver.process_input(dgram, now + DEFAULT_RTT / 2); + receiver.process_input(&dgram, now + DEFAULT_RTT / 2); } Output::Callback(t) => { if t < DEFAULT_RTT { @@ -376,12 +441,14 @@ fn increase_cwnd( now += DEFAULT_RTT / 2; let ack = receiver.process_output(now).dgram(); now += DEFAULT_RTT / 2; - sender.process_input(ack.unwrap(), now); + sender.process_input(&ack.unwrap(), now); now } /// Receive multiple packets and generate an ack-only packet. 
+/// /// # Panics +/// /// The caller is responsible for ensuring that `dest` has received /// enough data that it wants to generate an ACK. This panics if /// no ACK frame is generated. @@ -395,7 +462,7 @@ where let in_dgrams = in_dgrams.into_iter(); qdebug!([dest], "ack_bytes {} datagrams", in_dgrams.len()); for dgram in in_dgrams { - dest.process_input(dgram, now); + dest.process_input(&dgram, now); } loop { @@ -461,7 +528,7 @@ fn induce_persistent_congestion( // An ACK for the third PTO causes persistent congestion. let s_ack = ack_bytes(server, stream, c_tx_dgrams, now); - client.process_input(s_ack, now); + client.process_input(&s_ack, now); assert_eq!(cwnd(client), CWND_MIN); now } @@ -501,16 +568,55 @@ fn assert_full_cwnd(packets: &[Datagram], cwnd: usize) { assert_eq!(last.len(), last_packet(cwnd)); } -/// Send something on a stream from `sender` to `receiver`. -/// Return the resulting datagram. +/// Send something on a stream from `sender` to `receiver`, maybe allowing for pacing. +/// Takes a modifier function that can be used to modify the datagram before it is sent. +/// Return the resulting datagram and the new time. 
#[must_use] -fn send_something(sender: &mut Connection, now: Instant) -> Datagram { +fn send_something_paced_with_modifier( + sender: &mut Connection, + mut now: Instant, + allow_pacing: bool, + mut modifier: impl DatagramModifier, +) -> (Datagram, Instant) { let stream_id = sender.stream_create(StreamType::UniDi).unwrap(); assert!(sender.stream_send(stream_id, DEFAULT_STREAM_DATA).is_ok()); assert!(sender.stream_close_send(stream_id).is_ok()); qdebug!([sender], "send_something on {}", stream_id); - let dgram = sender.process(None, now).dgram(); - dgram.expect("should have something to send") + let dgram = match sender.process_output(now) { + Output::Callback(t) => { + assert!(allow_pacing, "send_something: unexpected delay"); + now += t; + sender + .process_output(now) + .dgram() + .expect("send_something: should have something to send") + } + Output::Datagram(d) => modifier(d).unwrap(), + Output::None => panic!("send_something: got Output::None"), + }; + (dgram, now) +} + +fn send_something_paced( + sender: &mut Connection, + now: Instant, + allow_pacing: bool, +) -> (Datagram, Instant) { + send_something_paced_with_modifier(sender, now, allow_pacing, Some) +} + +fn send_something_with_modifier( + sender: &mut Connection, + now: Instant, + modifier: impl DatagramModifier, +) -> Datagram { + send_something_paced_with_modifier(sender, now, false, modifier).0 +} + +/// Send something on a stream from `sender` to `receiver`. +/// Return the resulting datagram. +fn send_something(sender: &mut Connection, now: Instant) -> Datagram { + send_something_with_modifier(sender, now, Some) } /// Send something on a stream from `sender` to `receiver`. 
@@ -521,7 +627,7 @@ fn send_and_receive( now: Instant, ) -> Option { let dgram = send_something(sender, now); - receiver.process(Some(dgram), now).dgram() + receiver.process(Some(&dgram), now).dgram() } fn get_tokens(client: &mut Connection) -> Vec { diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/null.rs similarity index 75% rename from neqo-transport/src/connection/tests/fuzzing.rs rename to neqo-transport/src/connection/tests/null.rs index 24201eff26..e4d60445c6 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/null.rs @@ -4,14 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] -#![cfg(feature = "fuzzing")] +#![cfg(feature = "disable-encryption")] + +use neqo_crypto::aead_null::AEAD_NULL_TAG; +use test_fixture::now; use super::{connect_force_idle, default_client, default_server}; use crate::StreamType; -use neqo_crypto::FIXED_TAG_FUZZING; -use test_fixture::now; #[test] fn no_encryption() { @@ -25,18 +24,18 @@ fn no_encryption() { client.stream_send(stream_id, DATA_CLIENT).unwrap(); let client_pkt = client.process_output(now()).dgram().unwrap(); - assert!(client_pkt[..client_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_CLIENT)); + assert!(client_pkt[..client_pkt.len() - AEAD_NULL_TAG.len()].ends_with(DATA_CLIENT)); - server.process_input(client_pkt, now()); + server.process_input(&client_pkt, now()); let mut buf = vec![0; 100]; let (len, _) = server.stream_recv(stream_id, &mut buf).unwrap(); assert_eq!(len, DATA_CLIENT.len()); assert_eq!(&buf[..len], DATA_CLIENT); server.stream_send(stream_id, DATA_SERVER).unwrap(); let server_pkt = server.process_output(now()).dgram().unwrap(); - assert!(server_pkt[..server_pkt.len() - FIXED_TAG_FUZZING.len()].ends_with(DATA_SERVER)); + assert!(server_pkt[..server_pkt.len() - 
AEAD_NULL_TAG.len()].ends_with(DATA_SERVER)); - client.process_input(server_pkt, now()); + client.process_input(&server_pkt, now()); let (len, _) = client.stream_recv(stream_id, &mut buf).unwrap(); assert_eq!(len, DATA_SERVER.len()); assert_eq!(&buf[..len], DATA_SERVER); diff --git a/neqo-transport/src/connection/tests/priority.rs b/neqo-transport/src/connection/tests/priority.rs index 2b0b5ecdc2..079ba93b9f 100644 --- a/neqo-transport/src/connection/tests/priority.rs +++ b/neqo-transport/src/connection/tests/priority.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{cell::RefCell, mem, rc::Rc}; + +use neqo_common::event::Provider; +use test_fixture::now; + use super::{ super::{Connection, Error, Output}, connect, default_client, default_server, fill_cwnd, maybe_authenticate, @@ -14,10 +19,6 @@ use crate::{ ConnectionEvent, StreamId, StreamType, }; -use neqo_common::event::Provider; -use std::{cell::RefCell, mem, rc::Rc}; -use test_fixture::{self, now}; - const BLOCK_SIZE: usize = 4_096; fn fill_stream(c: &mut Connection, id: StreamId) { @@ -40,7 +41,7 @@ fn receive_stream() { assert_eq!(MESSAGE.len(), client.stream_send(id, MESSAGE).unwrap()); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); assert_eq!( server .stream_priority( @@ -82,7 +83,7 @@ fn relative() { .unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); // The "id_normal" stream will get a `NewStream` event, but no data. for e in server.events() { @@ -113,7 +114,7 @@ fn reprioritize() { .unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); // The "id_normal" stream will get a `NewStream` event, but no data. 
for e in server.events() { @@ -132,7 +133,7 @@ fn reprioritize() { ) .unwrap(); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); for e in server.events() { if let ConnectionEvent::RecvStreamReadable { stream_id } = e { @@ -163,7 +164,7 @@ fn repairing_loss() { let _lost = client.process_output(now).dgram(); for _ in 0..5 { match client.process_output(now) { - Output::Datagram(d) => server.process_input(d, now), + Output::Datagram(d) => server.process_input(&d, now), Output::Callback(delay) => now += delay, Output::None => unreachable!(), } @@ -176,9 +177,9 @@ fn repairing_loss() { let id_normal = client.stream_create(StreamType::UniDi).unwrap(); fill_stream(&mut client, id_normal); - let dgram = client.process(ack, now).dgram(); + let dgram = client.process(ack.as_ref(), now).dgram(); assert_eq!(client.stats().lost, 1); // Client should have noticed the loss. - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); // Only the low priority stream has data as the retransmission of the data from // the lost packet is now more important than new data from the high priority stream. @@ -194,7 +195,7 @@ fn repairing_loss() { // the retransmitted data into a second packet, it will also contain data from the // normal priority stream. let dgram = client.process_output(now).dgram(); - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); assert!(server.events().any( |e| matches!(e, ConnectionEvent::RecvStreamReadable { stream_id } if stream_id == id_normal), )); @@ -209,8 +210,8 @@ fn critical() { // Rather than connect, send stream data in 0.5-RTT. // That allows this to test that critical streams pre-empt most frame types. 
let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let id = server.stream_create(StreamType::UniDi).unwrap(); @@ -237,8 +238,8 @@ fn critical() { assert_eq!(stats_after.handshake_done, 0); // Complete the handshake. - let dgram = client.process(dgram, now).dgram(); - server.process_input(dgram.unwrap(), now); + let dgram = client.process(dgram.as_ref(), now).dgram(); + server.process_input(&dgram.unwrap(), now); // Critical beats everything but HANDSHAKE_DONE. let stats_before = server.stats().frame_tx; @@ -260,8 +261,8 @@ fn important() { // Rather than connect, send stream data in 0.5-RTT. // That allows this to test that important streams pre-empt most frame types. let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let id = server.stream_create(StreamType::UniDi).unwrap(); @@ -289,8 +290,8 @@ fn important() { assert_eq!(stats_after.stream, stats_before.stream + 1); // Complete the handshake. - let dgram = client.process(dgram, now).dgram(); - server.process_input(dgram.unwrap(), now); + let dgram = client.process(dgram.as_ref(), now).dgram(); + server.process_input(&dgram.unwrap(), now); // Important beats everything but flow control. let stats_before = server.stats().frame_tx; @@ -313,8 +314,8 @@ fn high_normal() { // Rather than connect, send stream data in 0.5-RTT. // That allows this to test that important streams pre-empt most frame types. 
let dgram = client.process_output(now).dgram(); - let dgram = server.process(dgram, now).dgram(); - client.process_input(dgram.unwrap(), now); + let dgram = server.process(dgram.as_ref(), now).dgram(); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let id = server.stream_create(StreamType::UniDi).unwrap(); @@ -342,8 +343,8 @@ fn high_normal() { assert_eq!(stats_after.stream, stats_before.stream + 1); // Complete the handshake. - let dgram = client.process(dgram, now).dgram(); - server.process_input(dgram.unwrap(), now); + let dgram = client.process(dgram.as_ref(), now).dgram(); + server.process_input(&dgram.unwrap(), now); // High or Normal doesn't beat NEW_CONNECTION_ID, // but they beat CRYPTO/NEW_TOKEN. @@ -369,7 +370,7 @@ fn low() { let validation = Rc::new(RefCell::new( AddressValidation::new(now, ValidateAddress::Never).unwrap(), )); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); connect(&mut client, &mut server); let id = server.stream_create(StreamType::UniDi).unwrap(); diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 910d7470c7..0f12d03107 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -4,28 +4,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{Connection, ConnectionParameters, Output, State}; +use std::{ + mem, + time::{Duration, Instant}, +}; + +use neqo_common::qdebug; +use neqo_crypto::AuthenticationStatus; +use test_fixture::{ + assertions::{assert_handshake, assert_initial}, + now, split_datagram, +}; + use super::{ + super::{Connection, ConnectionParameters, Output, State}, assert_full_cwnd, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, cwnd, default_client, default_server, fill_cwnd, maybe_authenticate, new_client, send_and_receive, send_something, AT_LEAST_PTO, DEFAULT_RTT, DEFAULT_STREAM_DATA, POST_HANDSHAKE_CWND, }; -use crate::cc::CWND_MIN; -use crate::path::PATH_MTU_V6; -use crate::recovery::{ - FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MIN_OUTSTANDING_UNACK, PTO_PACKET_COUNT, +use crate::{ + cc::CWND_MIN, + path::PATH_MTU_V6, + recovery::{ + FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MAX_PTO_PACKET_COUNT, MIN_OUTSTANDING_UNACK, + }, + rtt::GRANULARITY, + stats::MAX_PTO_COUNTS, + tparams::TransportParameter, + tracking::DEFAULT_ACK_DELAY, + StreamType, }; -use crate::rtt::GRANULARITY; -use crate::stats::MAX_PTO_COUNTS; -use crate::tparams::TransportParameter; -use crate::tracking::DEFAULT_ACK_DELAY; -use crate::StreamType; - -use neqo_common::qdebug; -use neqo_crypto::AuthenticationStatus; -use std::mem; -use std::time::{Duration, Instant}; -use test_fixture::{self, now, split_datagram}; #[test] fn pto_works_basic() { @@ -61,7 +69,7 @@ fn pto_works_basic() { let out = client.process(None, now); let stream_before = server.stats().frame_rx.stream; - server.process_input(out.dgram().unwrap(), now); + server.process_input(&out.dgram().unwrap(), now); assert_eq!(server.stats().frame_rx.stream, stream_before + 2); } @@ -86,7 +94,7 @@ fn pto_works_full_cwnd() { // Both datagrams contain one or more STREAM frames. 
for d in dgrams { let stream_before = server.stats().frame_rx.stream; - server.process_input(d, now); + server.process_input(&d, now); assert!(server.stats().frame_rx.stream > stream_before); } } @@ -96,15 +104,7 @@ fn pto_works_ping() { let mut client = default_client(); let mut server = default_server(); connect_force_idle(&mut client, &mut server); - let mut now = now(); - - let res = client.process(None, now); - assert_eq!( - res, - Output::Callback(ConnectionParameters::default().get_idle_timeout()) - ); - - now += Duration::from_secs(10); + let mut now = now() + Duration::from_secs(10); // Send a few packets from the client. let pkt0 = send_something(&mut client, now); @@ -120,32 +120,32 @@ fn pto_works_ping() { assert_eq!(cb, GRANULARITY * 2); // Process these by server, skipping pkt0 - let srv0 = server.process(Some(pkt1), now).dgram(); + let srv0 = server.process(Some(&pkt1), now).dgram(); assert!(srv0.is_some()); // ooo, ack client pkt1 now += Duration::from_millis(20); - // process pkt2 (no ack yet) - let srv1 = server.process(Some(pkt2), now).dgram(); - assert!(srv1.is_none()); + // process pkt2 (immediate ack because last ack was more than an RTT ago; RTT=0) + let srv1 = server.process(Some(&pkt2), now).dgram(); + assert!(srv1.is_some()); // this is now dropped - // process pkt3 (acked) - let srv2 = server.process(Some(pkt3), now).dgram(); + now += Duration::from_millis(20); + // process pkt3 (acked for same reason) + let srv2 = server.process(Some(&pkt3), now).dgram(); // ack client pkt 2 & 3 assert!(srv2.is_some()); - now += Duration::from_millis(20); // client processes ack - let pkt4 = client.process(srv2, now).dgram(); + let pkt4 = client.process(srv2.as_ref(), now).dgram(); // client resends data from pkt0 assert!(pkt4.is_some()); - // server sees ooo pkt0 and generates ack - let srv3 = server.process(Some(pkt0), now).dgram(); + // server sees ooo pkt0 and generates immediate ack + let srv3 = server.process(Some(&pkt0), now).dgram(); 
assert!(srv3.is_some()); // Accept the acknowledgment. - let pkt5 = client.process(srv3, now).dgram(); + let pkt5 = client.process(srv3.as_ref(), now).dgram(); assert!(pkt5.is_none()); now += Duration::from_millis(70); @@ -155,7 +155,7 @@ fn pto_works_ping() { assert_eq!(client.stats().frame_tx.ping, client_pings + 1); let server_pings = server.stats().frame_rx.ping; - server.process_input(pkt6.unwrap(), now); + server.process_input(&pkt6.unwrap(), now); assert_eq!(server.stats().frame_rx.ping, server_pings + 1); } @@ -179,24 +179,20 @@ fn pto_initial() { assert!(pkt2.is_some()); assert_eq!(pkt2.unwrap().len(), PATH_MTU_V6); - let pkt3 = client.process(None, now).dgram(); - assert!(pkt3.is_some()); - assert_eq!(pkt3.unwrap().len(), PATH_MTU_V6); - let delay = client.process(None, now).callback(); // PTO has doubled. assert_eq!(delay, INITIAL_PTO * 2); // Server process the first initial pkt. let mut server = default_server(); - let out = server.process(pkt1, now).dgram(); + let out = server.process(pkt1.as_ref(), now).dgram(); assert!(out.is_some()); // Client receives ack for the first initial packet as well a Handshake packet. // After the handshake packet the initial keys and the crypto stream for the initial // packet number space will be discarded. // Here only an ack for the Handshake packet will be sent. 
- let out = client.process(out, now).dgram(); + let out = client.process(out.as_ref(), now).dgram(); assert!(out.is_some()); // We do not have PTO for the resent initial packet any more, but @@ -218,14 +214,17 @@ fn pto_handshake_complete() { let mut server = default_server(); let pkt = client.process(None, now).dgram(); + assert_initial(pkt.as_ref().unwrap(), false); let cb = client.process(None, now).callback(); assert_eq!(cb, Duration::from_millis(300)); now += HALF_RTT; - let pkt = server.process(pkt, now).dgram(); + let pkt = server.process(pkt.as_ref(), now).dgram(); + assert_initial(pkt.as_ref().unwrap(), false); now += HALF_RTT; - let pkt = client.process(pkt, now).dgram(); + let pkt = client.process(pkt.as_ref(), now).dgram(); + assert_handshake(pkt.as_ref().unwrap()); let cb = client.process(None, now).callback(); // The client now has a single RTT estimate (20ms), so @@ -233,7 +232,7 @@ fn pto_handshake_complete() { assert_eq!(cb, HALF_RTT * 6); now += HALF_RTT; - let pkt = server.process(pkt, now).dgram(); + let pkt = server.process(pkt.as_ref(), now).dgram(); assert!(pkt.is_none()); now += HALF_RTT; @@ -241,7 +240,7 @@ fn pto_handshake_complete() { qdebug!("---- client: SH..FIN -> FIN"); let pkt1 = client.process(None, now).dgram(); - assert!(pkt1.is_some()); + assert_handshake(pkt1.as_ref().unwrap()); assert_eq!(*client.state(), State::Connected); let cb = client.process(None, now).callback(); @@ -255,6 +254,7 @@ fn pto_handshake_complete() { qdebug!("---- client: PTO"); now += HALF_RTT * 6; let pkt2 = client.process(None, now).dgram(); + assert_handshake(pkt2.as_ref().unwrap()); pto_counts[0] = 1; assert_eq!(client.stats.borrow().pto_counts, pto_counts); @@ -265,7 +265,10 @@ fn pto_handshake_complete() { let stream_id = client.stream_create(StreamType::UniDi).unwrap(); client.stream_close_send(stream_id).unwrap(); let pkt3 = client.process(None, now).dgram(); + assert_handshake(pkt3.as_ref().unwrap()); let (pkt3_hs, pkt3_1rtt) = 
split_datagram(&pkt3.unwrap()); + assert_handshake(&pkt3_hs); + assert!(pkt3_1rtt.is_some()); // PTO has been doubled. let cb = client.process(None, now).callback(); @@ -282,8 +285,8 @@ fn pto_handshake_complete() { // This should remove the 1-RTT PTO from messing this test up. let server_acks = server.stats().frame_tx.ack; let server_done = server.stats().frame_tx.handshake_done; - server.process_input(pkt3_1rtt.unwrap(), now); - let ack = server.process(pkt1, now).dgram(); + server.process_input(&pkt3_1rtt.unwrap(), now); + let ack = server.process(pkt1.as_ref(), now).dgram(); assert!(ack.is_some()); assert_eq!(server.stats().frame_tx.ack, server_acks + 2); assert_eq!(server.stats().frame_tx.handshake_done, server_done + 1); @@ -291,22 +294,27 @@ fn pto_handshake_complete() { // Check that the other packets (pkt2, pkt3) are Handshake packets. // The server discarded the Handshake keys already, therefore they are dropped. // Note that these don't include 1-RTT packets, because 1-RTT isn't send on PTO. + let (pkt2_hs, pkt2_1rtt) = split_datagram(&pkt2.unwrap()); + assert_handshake(&pkt2_hs); + assert!(pkt2_1rtt.is_some()); let dropped_before1 = server.stats().dropped_rx; let server_frames = server.stats().frame_rx.all; - server.process_input(pkt2.unwrap(), now); + server.process_input(&pkt2_hs, now); assert_eq!(1, server.stats().dropped_rx - dropped_before1); assert_eq!(server.stats().frame_rx.all, server_frames); + server.process_input(&pkt2_1rtt.unwrap(), now); + let server_frames2 = server.stats().frame_rx.all; let dropped_before2 = server.stats().dropped_rx; - server.process_input(pkt3_hs, now); + server.process_input(&pkt3_hs, now); assert_eq!(1, server.stats().dropped_rx - dropped_before2); - assert_eq!(server.stats().frame_rx.all, server_frames); + assert_eq!(server.stats().frame_rx.all, server_frames2); now += HALF_RTT; // Let the client receive the ACK. // It should now be wait to acknowledge the HANDSHAKE_DONE. 
- let cb = client.process(ack, now).callback(); + let cb = client.process(ack.as_ref(), now).callback(); // The default ack delay is the RTT divided by the default ACK ratio of 4. let expected_ack_delay = HALF_RTT * 2 / 4; assert_eq!(cb, expected_ack_delay); @@ -315,13 +323,6 @@ fn pto_handshake_complete() { now += cb; let out = client.process(None, now).dgram(); assert!(out.is_some()); - let cb = client.process(None, now).callback(); - // The handshake keys are discarded, but now we're back to the idle timeout. - // We don't send another PING because the handshake space is done and there - // is nothing to probe for. - - let idle_timeout = ConnectionParameters::default().get_idle_timeout(); - assert_eq!(cb, idle_timeout - expected_ack_delay); } /// Test that PTO in the Handshake space contains the right frames. @@ -335,14 +336,14 @@ fn pto_handshake_frames() { now += Duration::from_millis(10); qdebug!("---- server: CH -> SH, EE, CERT, CV, FIN"); let mut server = default_server(); - let pkt = server.process(pkt.dgram(), now); + let pkt = server.process(pkt.as_dgram_ref(), now); now += Duration::from_millis(10); qdebug!("---- client: cert verification"); - let pkt = client.process(pkt.dgram(), now); + let pkt = client.process(pkt.as_dgram_ref(), now); now += Duration::from_millis(10); - mem::drop(server.process(pkt.dgram(), now)); + mem::drop(server.process(pkt.as_dgram_ref(), now)); now += Duration::from_millis(10); client.authenticated(AuthenticationStatus::Ok, now); @@ -365,7 +366,7 @@ fn pto_handshake_frames() { now += Duration::from_millis(10); let crypto_before = server.stats().frame_rx.crypto; - server.process_input(pkt2.unwrap(), now); + server.process_input(&pkt2.unwrap(), now); assert_eq!(server.stats().frame_rx.crypto, crypto_before + 1); } @@ -387,7 +388,7 @@ fn handshake_ack_pto() { let c1 = client.process(None, now).dgram(); now += RTT / 2; - let s1 = server.process(c1, now).dgram(); + let s1 = server.process(c1.as_ref(), now).dgram(); 
assert!(s1.is_some()); let s2 = server.process(None, now).dgram(); assert!(s1.is_some()); @@ -395,8 +396,8 @@ fn handshake_ack_pto() { // Now let the client have the Initial, but drop the first coalesced Handshake packet. now += RTT / 2; let (initial, _) = split_datagram(&s1.unwrap()); - client.process_input(initial, now); - let c2 = client.process(s2, now).dgram(); + client.process_input(&initial, now); + let c2 = client.process(s2.as_ref(), now).dgram(); assert!(c2.is_some()); // This is an ACK. Drop it. let delay = client.process(None, now).callback(); assert_eq!(delay, RTT * 3); @@ -411,7 +412,7 @@ fn handshake_ack_pto() { now += RTT / 2; let ping_before = server.stats().frame_rx.ping; - server.process_input(c3.unwrap(), now); + server.process_input(&c3.unwrap(), now); assert_eq!(server.stats().frame_rx.ping, ping_before + 1); pto_counts[0] = 1; @@ -419,13 +420,13 @@ fn handshake_ack_pto() { // Now complete the handshake as cheaply as possible. let dgram = server.process(None, now).dgram(); - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); maybe_authenticate(&mut client); let dgram = client.process(None, now).dgram(); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); assert_eq!(*server.state(), State::Confirmed); - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); assert_eq!(*client.state(), State::Confirmed); assert_eq!(client.stats.borrow().pto_counts, pto_counts); @@ -446,7 +447,7 @@ fn loss_recovery_crash() { assert!(ack.is_some()); // Have the server process the ACK. - let cb = server.process(ack, now).callback(); + let cb = server.process(ack.as_ref(), now).callback(); assert!(cb > Duration::from_secs(0)); // Now we leap into the future. The server should regard the first @@ -474,7 +475,8 @@ fn ack_after_pto() { // Jump forward to the PTO and drain the PTO packets. 
now += AT_LEAST_PTO; - for _ in 0..PTO_PACKET_COUNT { + // We can use MAX_PTO_PACKET_COUNT, because we know the handshake is over. + for _ in 0..MAX_PTO_PACKET_COUNT { let dgram = client.process(None, now).dgram(); assert!(dgram.is_some()); } @@ -490,13 +492,13 @@ fn ack_after_pto() { // The client is now after a PTO, but if it receives something // that demands acknowledgment, it will send just the ACK. - let ack = client.process(Some(dgram), now).dgram(); + let ack = client.process(Some(&dgram), now).dgram(); assert!(ack.is_some()); // Make sure that the packet only contained an ACK frame. let all_frames_before = server.stats().frame_rx.all; let ack_before = server.stats().frame_rx.ack; - server.process_input(ack.unwrap(), now); + server.process_input(&ack.unwrap(), now); assert_eq!(server.stats().frame_rx.all, all_frames_before + 1); assert_eq!(server.stats().frame_rx.ack, ack_before + 1); } @@ -517,7 +519,7 @@ fn lost_but_kept_and_lr_timer() { // At t=RTT/2 the server receives the packet and ACKs it. now += RTT / 2; - let ack = server.process(Some(p2), now).dgram(); + let ack = server.process(Some(&p2), now).dgram(); assert!(ack.is_some()); // The client also sends another two packets (p3, p4), again losing the first. let _p3 = send_something(&mut client, now); @@ -526,14 +528,14 @@ fn lost_but_kept_and_lr_timer() { // At t=RTT the client receives the ACK and goes into timed loss recovery. // The client doesn't call p1 lost at this stage, but it will soon. now += RTT / 2; - let res = client.process(ack, now); + let res = client.process(ack.as_ref(), now); // The client should be on a loss recovery timer as p1 is missing. let lr_timer = res.callback(); // Loss recovery timer should be RTT/8, but only check for 0 or >=RTT/2. assert_ne!(lr_timer, Duration::from_secs(0)); assert!(lr_timer < (RTT / 2)); // The server also receives and acknowledges p4, again sending an ACK. 
- let ack = server.process(Some(p4), now).dgram(); + let ack = server.process(Some(&p4), now).dgram(); assert!(ack.is_some()); // At t=RTT*3/2 the client should declare p1 to be lost. @@ -543,7 +545,7 @@ fn lost_but_kept_and_lr_timer() { assert!(res.dgram().is_some()); // When the client processes the ACK, it should engage the // loss recovery timer for p3, not p1 (even though it still tracks p1). - let res = client.process(ack, now); + let res = client.process(ack.as_ref(), now); let lr_timer2 = res.callback(); assert_eq!(lr_timer, lr_timer2); } @@ -566,7 +568,7 @@ fn loss_time_past_largest_acked() { // Start the handshake. let c_in = client.process(None, now).dgram(); now += RTT / 2; - let s_hs1 = server.process(c_in, now).dgram(); + let s_hs1 = server.process(c_in.as_ref(), now).dgram(); // Get some spare server handshake packets for the client to ACK. // This involves a time machine, so be a little cautious. @@ -589,7 +591,7 @@ fn loss_time_past_largest_acked() { // to generate an ack-eliciting packet. For that, we use the Finished message. // Reordering delivery ensures that the later packet is also acknowledged. now += RTT / 2; - let c_hs1 = client.process(s_hs1, now).dgram(); + let c_hs1 = client.process(s_hs1.as_ref(), now).dgram(); assert!(c_hs1.is_some()); // This comes first, so it's useless. maybe_authenticate(&mut client); let c_hs2 = client.process(None, now).dgram(); @@ -598,17 +600,17 @@ fn loss_time_past_largest_acked() { // The we need the outstanding packet to be sent after the // application data packet, so space these out a tiny bit. let _p1 = send_something(&mut client, now + INCR); - let c_hs3 = client.process(s_hs2, now + (INCR * 2)).dgram(); + let c_hs3 = client.process(s_hs2.as_ref(), now + (INCR * 2)).dgram(); assert!(c_hs3.is_some()); // This will be left outstanding. 
- let c_hs4 = client.process(s_hs3, now + (INCR * 3)).dgram(); + let c_hs4 = client.process(s_hs3.as_ref(), now + (INCR * 3)).dgram(); assert!(c_hs4.is_some()); // This will be acknowledged. // Process c_hs2 and c_hs4, but skip c_hs3. // Then get an ACK for the client. now += RTT / 2; // Deliver c_hs4 first, but don't generate a packet. - server.process_input(c_hs4.unwrap(), now); - let s_ack = server.process(c_hs2, now).dgram(); + server.process_input(&c_hs4.unwrap(), now); + let s_ack = server.process(c_hs2.as_ref(), now).dgram(); assert!(s_ack.is_some()); // This includes an ACK, but it also includes HANDSHAKE_DONE, // which we need to remove because that will cause the Handshake loss @@ -617,20 +619,12 @@ fn loss_time_past_largest_acked() { // Now the client should start its loss recovery timer based on the ACK. now += RTT / 2; - let c_ack = client.process(Some(s_hs_ack), now).dgram(); + let c_ack = client.process(Some(&s_hs_ack), now).dgram(); assert!(c_ack.is_none()); // The client should now have the loss recovery timer active. let lr_time = client.process(None, now).callback(); assert_ne!(lr_time, Duration::from_secs(0)); assert!(lr_time < (RTT / 2)); - - // Skipping forward by the loss recovery timer should cause the client to - // mark packets as lost and retransmit, after which we should be on the PTO - // timer. - now += lr_time; - let delay = client.process(None, now).callback(); - assert_ne!(delay, Duration::from_secs(0)); - assert!(delay > lr_time); } /// `sender` sends a little, `receiver` acknowledges it. 
@@ -642,12 +636,12 @@ fn trickle(sender: &mut Connection, receiver: &mut Connection, mut count: usize, while count > 0 { qdebug!("trickle: remaining={}", count); assert_eq!(sender.stream_send(id, &[9]).unwrap(), 1); - let dgram = sender.process(maybe_ack, now).dgram(); + let dgram = sender.process(maybe_ack.as_ref(), now).dgram(); - maybe_ack = receiver.process(dgram, now).dgram(); + maybe_ack = receiver.process(dgram.as_ref(), now).dgram(); count -= usize::from(maybe_ack.is_some()); } - sender.process_input(maybe_ack.unwrap(), now); + sender.process_input(&maybe_ack.unwrap(), now); } /// Ensure that a PING frame is sent with ACK sometimes. @@ -763,7 +757,7 @@ fn fast_pto() { let dgram = client.process(None, now).dgram(); let stream_before = server.stats().frame_rx.stream; - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); assert_eq!(server.stats().frame_rx.stream, stream_before + 1); } @@ -803,8 +797,8 @@ fn fast_pto_persistent_congestion() { // Now acknowledge the tail packet and enter persistent congestion. now += DEFAULT_RTT / 2; - let ack = server.process(Some(dgram), now).dgram(); + let ack = server.process(Some(&dgram), now).dgram(); now += DEFAULT_RTT / 2; - client.process_input(ack.unwrap(), now); + client.process_input(&ack.unwrap(), now); assert_eq!(cwnd(&client), CWND_MIN); } diff --git a/neqo-transport/src/connection/tests/resumption.rs b/neqo-transport/src/connection/tests/resumption.rs index 0c34f3448d..7410e76ef8 100644 --- a/neqo-transport/src/connection/tests/resumption.rs +++ b/neqo-transport/src/connection/tests/resumption.rs @@ -4,18 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, mem, rc::Rc, time::Duration}; + +use test_fixture::{assertions, now}; + use super::{ connect, connect_with_rtt, default_client, default_server, exchange_ticket, get_tokens, new_client, resumed_server, send_something, AT_LEAST_PTO, }; -use crate::addr_valid::{AddressValidation, ValidateAddress}; -use crate::{ConnectionParameters, Error, Version}; - -use std::cell::RefCell; -use std::mem; -use std::rc::Rc; -use std::time::Duration; -use test_fixture::{self, assertions, now}; +use crate::{ + addr_valid::{AddressValidation, ValidateAddress}, + ConnectionParameters, Error, Version, +}; #[test] fn resume() { @@ -50,12 +50,12 @@ fn remember_smoothed_rtt() { // wants to acknowledge; so the ticket will include an ACK frame too. let validation = AddressValidation::new(now, ValidateAddress::NoToken).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); server.send_ticket(now, &[]).expect("can send ticket"); let ticket = server.process_output(now).dgram(); assert!(ticket.is_some()); now += RTT1 / 2; - client.process_input(ticket.unwrap(), now); + client.process_input(&ticket.unwrap(), now); let token = get_tokens(&mut client).pop().unwrap(); let mut client = default_client(); @@ -84,7 +84,7 @@ fn address_validation_token_resume() { let mut server = default_server(); let validation = AddressValidation::new(now(), ValidateAddress::Always).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); let mut now = connect_with_rtt(&mut client, &mut server, now(), RTT); let token = exchange_ticket(&mut client, &mut server, now); @@ -122,7 +122,7 @@ fn two_tickets_on_timer() { let pkt = send_something(&mut server, now()); // process() will return an ack first - assert!(client.process(Some(pkt), now()).dgram().is_some()); + assert!(client.process(Some(&pkt), 
now()).dgram().is_some()); // We do not have a ResumptionToken event yet, because NEW_TOKEN was not sent. assert_eq!(get_tokens(&mut client).len(), 0); @@ -155,7 +155,7 @@ fn two_tickets_with_new_token() { let mut server = default_server(); let validation = AddressValidation::new(now(), ValidateAddress::Always).unwrap(); let validation = Rc::new(RefCell::new(validation)); - server.set_validation(Rc::clone(&validation)); + server.set_validation(&validation); connect(&mut client, &mut server); // Send two tickets with tokens and then bundle those into a packet. @@ -163,7 +163,7 @@ fn two_tickets_with_new_token() { server.send_ticket(now(), &[]).expect("send ticket2"); let pkt = send_something(&mut server, now()); - client.process_input(pkt, now()); + client.process_input(&pkt, now()); let mut all_tokens = get_tokens(&mut client); assert_eq!(all_tokens.len(), 2); let token1 = all_tokens.pop().unwrap(); @@ -184,7 +184,7 @@ fn take_token() { server.send_ticket(now(), &[]).unwrap(); let dgram = server.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); // There should be no ResumptionToken event here. let tokens = get_tokens(&mut client); diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index 036a3adff9..66d3bf32f3 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cmp::max, collections::HashMap, mem}; + +use neqo_common::{event::Provider, qdebug}; +use test_fixture::now; + use super::{ super::State, assert_error, connect, connect_force_idle, default_client, default_server, maybe_authenticate, new_client, new_server, send_something, DEFAULT_STREAM_DATA, @@ -11,18 +16,17 @@ use super::{ use crate::{ events::ConnectionEvent, recv_stream::RECV_BUFFER_SIZE, - send_stream::OrderGroup, - send_stream::{SendStreamState, SEND_BUFFER_SIZE}, + send_stream::{OrderGroup, SendStreamState, SEND_BUFFER_SIZE}, streams::{SendOrder, StreamOrder}, tparams::{self, TransportParameter}, - tracking::DEFAULT_ACK_PACKET_TOLERANCE, - Connection, ConnectionError, ConnectionParameters, Error, StreamId, StreamType, + // tracking::DEFAULT_ACK_PACKET_TOLERANCE, + Connection, + ConnectionError, + ConnectionParameters, + Error, + StreamId, + StreamType, }; -use std::collections::HashMap; - -use neqo_common::{event::Provider, qdebug}; -use std::{cmp::max, convert::TryFrom, mem}; -use test_fixture::now; #[test] fn stream_create() { @@ -30,10 +34,10 @@ fn stream_create() { let out = client.process(None, now()); let mut server = default_server(); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.process(out.as_dgram_ref(), now())); assert!(maybe_authenticate(&mut client)); let out = client.process(None, now()); @@ -43,7 +47,7 @@ fn stream_create() { assert_eq!(client.stream_create(StreamType::BiDi).unwrap(), 0); assert_eq!(client.stream_create(StreamType::BiDi).unwrap(), 4); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); // server now in State::Connected assert_eq!(server.stream_create(StreamType::UniDi).unwrap(), 3); 
assert_eq!(server.stream_create(StreamType::UniDi).unwrap(), 7); @@ -81,12 +85,10 @@ fn transfer() { assert_eq!(*client.state(), State::Confirmed); qdebug!("---- server receives"); - for (d_num, d) in datagrams.into_iter().enumerate() { - let out = server.process(Some(d), now()); - assert_eq!( - out.as_dgram_ref().is_some(), - (d_num + 1) % usize::try_from(DEFAULT_ACK_PACKET_TOLERANCE + 1).unwrap() == 0 - ); + for d in datagrams { + let out = server.process(Some(&d), now()); + // With an RTT of zero, the server will acknowledge every packet immediately. + assert!(out.as_dgram_ref().is_some()); qdebug!("Output={:0x?}", out.as_dgram_ref()); } assert_eq!(*server.state(), State::Confirmed); @@ -114,12 +116,6 @@ fn transfer() { assert!(fin3); } -#[derive(PartialEq, Eq, PartialOrd, Ord)] -struct IdEntry { - sendorder: StreamOrder, - stream_id: StreamId, -} - // tests stream sendorder priorization fn sendorder_test(order_of_sendorder: &[Option]) { let mut client = default_client(); @@ -154,8 +150,8 @@ fn sendorder_test(order_of_sendorder: &[Option]) { assert_eq!(*client.state(), State::Confirmed); qdebug!("---- server receives"); - for (_, d) in datagrams.into_iter().enumerate() { - let out = server.process(Some(d), now()); + for d in datagrams { + let out = server.process(Some(&d), now()); qdebug!("Output={:0x?}", out.as_dgram_ref()); } assert_eq!(*server.state(), State::Confirmed); @@ -322,11 +318,11 @@ fn report_fin_when_stream_closed_wo_data() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); server.stream_close_send(stream_id).unwrap(); let out = server.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. 
}); assert!(client.events().any(stream_readable)); } @@ -334,9 +330,9 @@ fn report_fin_when_stream_closed_wo_data() { fn exchange_data(client: &mut Connection, server: &mut Connection) { let mut input = None; loop { - let out = client.process(input, now()).dgram(); + let out = client.process(input.as_ref(), now()).dgram(); let c_done = out.is_none(); - let out = server.process(out, now()).dgram(); + let out = server.process(out.as_ref(), now()).dgram(); if out.is_none() && c_done { break; } @@ -378,7 +374,7 @@ fn sending_max_data() { assert!(!fin); let out = server.process(None, now()).dgram(); - client.process_input(out.unwrap(), now()); + client.process_input(&out.unwrap(), now()); assert_eq!( client @@ -519,7 +515,7 @@ fn do_not_accept_data_after_stop_sending() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. }); assert!(server.events().any(stream_readable)); @@ -536,10 +532,10 @@ fn do_not_accept_data_after_stop_sending() { // Receive the second data frame. The frame should be ignored and // DataReadable events shouldn't be posted. 
- let out = server.process(out_second_data_frame.dgram(), now()); + let out = server.process(out_second_data_frame.as_dgram_ref(), now()); assert!(!server.events().any(stream_readable)); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); assert_eq!( Err(Error::FinalSizeError), client.stream_send(stream_id, &[0x00]) @@ -557,7 +553,7 @@ fn simultaneous_stop_sending_and_reset() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - let ack = server.process(out.dgram(), now()).dgram(); + let ack = server.process(out.as_dgram_ref(), now()).dgram(); let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { stream_id: id } if id == stream_id); @@ -566,23 +562,23 @@ fn simultaneous_stop_sending_and_reset() { // The client resets the stream. The packet with reset should arrive after the server // has already requested stop_sending. client.stream_reset_send(stream_id, 0).unwrap(); - let out_reset_frame = client.process(ack, now()).dgram(); + let out_reset_frame = client.process(ack.as_ref(), now()).dgram(); // Send something out of order to force the server to generate an // acknowledgment at the next opportunity. let force_ack = send_something(&mut client, now()); - server.process_input(force_ack, now()); + server.process_input(&force_ack, now()); // Call stop sending. server.stream_stop_sending(stream_id, 0).unwrap(); // Receive the second data frame. The frame should be ignored and // DataReadable events shouldn't be posted. - let ack = server.process(out_reset_frame, now()).dgram(); + let ack = server.process(out_reset_frame.as_ref(), now()).dgram(); assert!(ack.is_some()); assert!(!server.events().any(stream_readable)); // The client gets the STOP_SENDING frame. 
- client.process_input(ack.unwrap(), now()); + client.process_input(&ack.unwrap(), now()); assert_eq!( Err(Error::InvalidStreamId), client.stream_send(stream_id, &[0x00]) @@ -598,13 +594,13 @@ fn client_fin_reorder() { let client_hs = client.process(None, now()); assert!(client_hs.as_dgram_ref().is_some()); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // ServerHello, etc... - let client_ack = client.process(server_hs.dgram(), now()); + let client_ack = client.process(server_hs.as_dgram_ref(), now()); assert!(client_ack.as_dgram_ref().is_some()); - let server_out = server.process(client_ack.dgram(), now()); + let server_out = server.process(client_ack.as_dgram_ref(), now()); assert!(server_out.as_dgram_ref().is_none()); assert!(maybe_authenticate(&mut client)); @@ -619,11 +615,11 @@ fn client_fin_reorder() { assert!(client_stream_data.as_dgram_ref().is_some()); // Now stream data gets before client_fin - let server_out = server.process(client_stream_data.dgram(), now()); + let server_out = server.process(client_stream_data.as_dgram_ref(), now()); assert!(server_out.as_dgram_ref().is_none()); // the packet will be discarded assert_eq!(*server.state(), State::Handshaking); - let server_out = server.process(client_fin.dgram(), now()); + let server_out = server.process(client_fin.as_dgram_ref(), now()); assert!(server_out.as_dgram_ref().is_some()); } @@ -639,7 +635,7 @@ fn after_fin_is_read_conn_events_for_stream_should_be_removed() { let out = server.process(None, now()).dgram(); assert!(out.is_some()); - mem::drop(client.process(out, now())); + mem::drop(client.process(out.as_ref(), now())); // read from the stream before checking connection events. 
let mut buf = vec![0; 4000]; @@ -664,7 +660,7 @@ fn after_stream_stop_sending_is_called_conn_events_for_stream_should_be_removed( let out = server.process(None, now()).dgram(); assert!(out.is_some()); - mem::drop(client.process(out, now())); + mem::drop(client.process(out.as_ref(), now())); // send stop seending. client @@ -693,7 +689,7 @@ fn stream_data_blocked_generates_max_stream_data() { assert!(dgram.is_some()); // Consume the data. - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); let mut buf = [0; 10]; let (count, end) = client.stream_recv(stream_id, &mut buf[..]).unwrap(); assert_eq!(count, DEFAULT_STREAM_DATA.len()); @@ -710,14 +706,14 @@ fn stream_data_blocked_generates_max_stream_data() { assert!(dgram.is_some()); let sdb_before = client.stats().frame_rx.stream_data_blocked; - let dgram = client.process(dgram, now).dgram(); + let dgram = client.process(dgram.as_ref(), now).dgram(); assert_eq!(client.stats().frame_rx.stream_data_blocked, sdb_before + 1); assert!(dgram.is_some()); // Client should have sent a MAX_STREAM_DATA frame with just a small increase // on the default window size. let msd_before = server.stats().frame_rx.max_stream_data; - server.process_input(dgram.unwrap(), now); + server.process_input(&dgram.unwrap(), now); assert_eq!(server.stats().frame_rx.max_stream_data, msd_before + 1); // Test that the entirety of the receive buffer is available now. @@ -752,19 +748,19 @@ fn max_streams_after_bidi_closed() { let dgram = client.process(None, now()).dgram(); // Now handle the stream and send an incomplete response. - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); server.stream_send(stream_id, RESPONSE).unwrap(); let dgram = server.process_output(now()).dgram(); // The server shouldn't have released more stream credit. 
- client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let e = client.stream_create(StreamType::BiDi).unwrap_err(); assert!(matches!(e, Error::StreamLimitError)); // Closing the stream isn't enough. server.stream_close_send(stream_id).unwrap(); let dgram = server.process_output(now()).dgram(); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); assert!(client.stream_create(StreamType::BiDi).is_err()); // The server needs to see an acknowledgment from the client for its @@ -778,12 +774,12 @@ fn max_streams_after_bidi_closed() { // We need an ACK from the client now, but that isn't guaranteed, // so give the client one more packet just in case. let dgram = send_something(&mut server, now()); - client.process_input(dgram, now()); + client.process_input(&dgram, now()); // Now get the client to send the ACK and have the server handle that. let dgram = send_something(&mut client, now()); - let dgram = server.process(Some(dgram), now()).dgram(); - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(Some(&dgram), now()).dgram(); + client.process_input(&dgram.unwrap(), now()); assert!(client.stream_create(StreamType::BiDi).is_ok()); assert!(client.stream_create(StreamType::BiDi).is_err()); } @@ -798,7 +794,7 @@ fn no_dupdata_readable_events() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); // We have a data_readable event. let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. }); @@ -808,7 +804,7 @@ fn no_dupdata_readable_events() { // therefore there should not be a new DataReadable event. 
client.stream_send(stream_id, &[0x00]).unwrap(); let out_second_data_frame = client.process(None, now()); - mem::drop(server.process(out_second_data_frame.dgram(), now())); + mem::drop(server.process(out_second_data_frame.as_dgram_ref(), now())); assert!(!server.events().any(stream_readable)); // One more frame with a fin will not produce a new DataReadable event, because the @@ -816,7 +812,7 @@ fn no_dupdata_readable_events() { client.stream_send(stream_id, &[0x00]).unwrap(); client.stream_close_send(stream_id).unwrap(); let out_third_data_frame = client.process(None, now()); - mem::drop(server.process(out_third_data_frame.dgram(), now())); + mem::drop(server.process(out_third_data_frame.as_dgram_ref(), now())); assert!(!server.events().any(stream_readable)); } @@ -830,7 +826,7 @@ fn no_dupdata_readable_events_empty_last_frame() { let stream_id = client.stream_create(StreamType::BiDi).unwrap(); client.stream_send(stream_id, &[0x00]).unwrap(); let out = client.process(None, now()); - mem::drop(server.process(out.dgram(), now())); + mem::drop(server.process(out.as_dgram_ref(), now())); // We have a data_readable event. let stream_readable = |e| matches!(e, ConnectionEvent::RecvStreamReadable { .. }); @@ -840,7 +836,7 @@ fn no_dupdata_readable_events_empty_last_frame() { // the previous stream data has not been read yet. client.stream_close_send(stream_id).unwrap(); let out_second_data_frame = client.process(None, now()); - mem::drop(server.process(out_second_data_frame.dgram(), now())); + mem::drop(server.process(out_second_data_frame.as_dgram_ref(), now())); assert!(!server.events().any(stream_readable)); } @@ -862,14 +858,14 @@ fn change_flow_control(stream_type: StreamType, new_fc: u64) { // Send the stream to the client. let out = server.process(None, now()); - mem::drop(client.process(out.dgram(), now())); + mem::drop(client.process(out.as_dgram_ref(), now())); // change max_stream_data for stream_id. 
client.set_stream_max_data(stream_id, new_fc).unwrap(); // server should receive a MAX_SREAM_DATA frame if the flow control window is updated. let out2 = client.process(None, now()); - let out3 = server.process(out2.dgram(), now()); + let out3 = server.process(out2.as_dgram_ref(), now()); let expected = usize::from(RECV_BUFFER_START < new_fc); assert_eq!(server.stats().frame_rx.max_stream_data, expected); @@ -882,9 +878,9 @@ fn change_flow_control(stream_type: StreamType, new_fc: u64) { } // Exchange packets so that client gets all data. - let out4 = client.process(out3.dgram(), now()); - let out5 = server.process(out4.dgram(), now()); - mem::drop(client.process(out5.dgram(), now())); + let out4 = client.process(out3.as_dgram_ref(), now()); + let out5 = server.process(out4.as_dgram_ref(), now()); + mem::drop(client.process(out5.as_dgram_ref(), now())); // read all data by client let mut buf = [0x0; 10000]; @@ -892,7 +888,7 @@ fn change_flow_control(stream_type: StreamType, new_fc: u64) { assert_eq!(u64::try_from(read).unwrap(), max(RECV_BUFFER_START, new_fc)); let out4 = client.process(None, now()); - mem::drop(server.process(out4.dgram(), now())); + mem::drop(server.process(out4.as_dgram_ref(), now())); let written3 = server.stream_send(stream_id, &[0x0; 10000]).unwrap(); assert_eq!(u64::try_from(written3).unwrap(), new_fc); @@ -947,12 +943,12 @@ fn session_flow_control_stop_sending_state_recv() { // The server sends STOP_SENDING -> the client sends RESET -> the server // sends MAX_DATA. let out = server.process(None, now()).dgram(); - let out = client.process(out, now()).dgram(); + let out = client.process(out.as_ref(), now()).dgram(); // the client is still limited. 
let stream_id2 = client.stream_create(StreamType::UniDi).unwrap(); assert_eq!(client.stream_avail_send_space(stream_id2).unwrap(), 0); - let out = server.process(out, now()).dgram(); - client.process_input(out.unwrap(), now()); + let out = server.process(out.as_ref(), now()).dgram(); + client.process_input(&out.unwrap(), now()); assert_eq!( client.stream_avail_send_space(stream_id2).unwrap(), SMALL_MAX_DATA @@ -989,7 +985,7 @@ fn session_flow_control_stop_sending_state_size_known() { client.stream_close_send(stream_id).unwrap(); let out2 = client.process(None, now()).dgram(); - server.process_input(out2.unwrap(), now()); + server.process_input(&out2.unwrap(), now()); server .stream_stop_sending(stream_id, Error::NoError.code()) @@ -998,8 +994,8 @@ fn session_flow_control_stop_sending_state_size_known() { // In this case the final size is known when stream_stop_sending is called // and the server releases flow control immediately and sends STOP_SENDING and // MAX_DATA in the same packet. - let out = server.process(out1, now()).dgram(); - client.process_input(out.unwrap(), now()); + let out = server.process(out1.as_ref(), now()).dgram(); + client.process_input(&out.unwrap(), now()); // The flow control should have been updated and the client can again send // SMALL_MAX_DATA. 
@@ -1121,10 +1117,10 @@ fn connect_w_different_limit(bidi_limit: u64, unidi_limit: u64) { .max_streams(StreamType::BiDi, bidi_limit) .max_streams(StreamType::UniDi, unidi_limit), ); - let out = server.process(out.dgram(), now()); + let out = server.process(out.as_dgram_ref(), now()); - let out = client.process(out.dgram(), now()); - mem::drop(server.process(out.dgram(), now())); + let out = client.process(out.as_dgram_ref(), now()); + mem::drop(server.process(out.as_dgram_ref(), now())); assert!(maybe_authenticate(&mut client)); @@ -1140,7 +1136,7 @@ fn connect_w_different_limit(bidi_limit: u64, unidi_limit: u64) { unidi_events += 1; } } - ConnectionEvent::StateChange(state) if state == State::Connected => { + ConnectionEvent::StateChange(State::Connected) => { connected_events += 1; } _ => {} diff --git a/neqo-transport/src/connection/tests/vn.rs b/neqo-transport/src/connection/tests/vn.rs index 416128f74e..93872a94f4 100644 --- a/neqo-transport/src/connection/tests/vn.rs +++ b/neqo-transport/src/connection/tests/vn.rs @@ -4,19 +4,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState}; +use std::{mem, time::Duration}; + +use neqo_common::{event::Provider, Decoder, Encoder}; +use test_fixture::{assertions, datagram, now}; + use super::{ + super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState}, connect, connect_fail, default_client, default_server, exchange_ticket, new_client, new_server, send_something, }; -use crate::packet::PACKET_BIT_LONG; -use crate::tparams::{self, TransportParameter}; -use crate::{ConnectionParameters, Error, Version}; - -use neqo_common::{event::Provider, Datagram, Decoder, Encoder}; -use std::mem; -use std::time::Duration; -use test_fixture::{self, addr, assertions, now}; +use crate::{ + packet::PACKET_BIT_LONG, + tparams::{self, TransportParameter}, + ConnectionParameters, Error, Version, +}; // The expected PTO duration after the first Initial is sent. const INITIAL_PTO: Duration = Duration::from_millis(300); @@ -29,10 +31,7 @@ fn unknown_version() { let mut unknown_version_packet = vec![0x80, 0x1a, 0x1a, 0x1a, 0x1a]; unknown_version_packet.resize(1200, 0x0); - mem::drop(client.process( - Some(Datagram::new(addr(), addr(), unknown_version_packet)), - now(), - )); + mem::drop(client.process(Some(&datagram(unknown_version_packet)), now())); assert_eq!(1, client.stats().dropped_rx); } @@ -44,10 +43,7 @@ fn server_receive_unknown_first_packet() { unknown_version_packet.resize(1200, 0x0); assert_eq!( - server.process( - Some(Datagram::new(addr(), addr(), unknown_version_packet,)), - now(), - ), + server.process(Some(&datagram(unknown_version_packet,)), now(),), Output::None ); @@ -86,8 +82,8 @@ fn version_negotiation_current_version() { &[0x1a1a_1a1a, Version::default().wire_version()], ); - let dgram = Datagram::new(addr(), addr(), vn); - let delay = client.process(Some(dgram), now()).callback(); + let dgram = datagram(vn); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); 
assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -105,8 +101,8 @@ fn version_negotiation_version0() { let vn = create_vn(&initial_pkt, &[0, 0x1a1a_1a1a]); - let dgram = Datagram::new(addr(), addr(), vn); - let delay = client.process(Some(dgram), now()).callback(); + let dgram = datagram(vn); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -124,8 +120,8 @@ fn version_negotiation_only_reserved() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a]); - let dgram = Datagram::new(addr(), addr(), vn); - assert_eq!(client.process(Some(dgram), now()), Output::None); + let dgram = datagram(vn); + assert_eq!(client.process(Some(&dgram), now()), Output::None); match client.state() { State::Closed(err) => { assert_eq!(*err, ConnectionError::Transport(Error::VersionNegotiation)); @@ -146,8 +142,8 @@ fn version_negotiation_corrupted() { let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a]); - let dgram = Datagram::new(addr(), addr(), &vn[..vn.len() - 1]); - let delay = client.process(Some(dgram), now()).callback(); + let dgram = datagram(vn[..vn.len() - 1].to_vec()); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -165,8 +161,8 @@ fn version_negotiation_empty() { let vn = create_vn(&initial_pkt, &[]); - let dgram = Datagram::new(addr(), addr(), vn); - let delay = client.process(Some(dgram), now()).callback(); + let dgram = datagram(vn); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -183,8 +179,8 @@ fn version_negotiation_not_supported() { .to_vec(); let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 
0x2a2a_2a2a, 0xff00_0001]); - let dgram = Datagram::new(addr(), addr(), vn); - assert_eq!(client.process(Some(dgram), now()), Output::None); + let dgram = datagram(vn); + assert_eq!(client.process(Some(&dgram), now()), Output::None); match client.state() { State::Closed(err) => { assert_eq!(*err, ConnectionError::Transport(Error::VersionNegotiation)); @@ -206,8 +202,8 @@ fn version_negotiation_bad_cid() { initial_pkt[6] ^= 0xc4; let vn = create_vn(&initial_pkt, &[0x1a1a_1a1a, 0x2a2a_2a2a, 0xff00_0001]); - let dgram = Datagram::new(addr(), addr(), vn); - let delay = client.process(Some(dgram), now()).callback(); + let dgram = datagram(vn); + let delay = client.process(Some(&dgram), now()).callback(); assert_eq!(delay, INITIAL_PTO); assert_eq!(*client.state(), State::WaitInitial); assert_eq!(1, client.stats().dropped_rx); @@ -223,8 +219,8 @@ fn compatible_upgrade() { assert_eq!(server.version(), Version::Version2); } -/// When the first packet from the client is gigantic, the server might generate acknowledgment packets in -/// version 1. Both client and server need to handle that gracefully. +/// When the first packet from the client is gigantic, the server might generate acknowledgment +/// packets in version 1. Both client and server need to handle that gracefully. #[test] fn compatible_upgrade_large_initial() { let params = ConnectionParameters::default().versions( @@ -244,11 +240,11 @@ fn compatible_upgrade_large_initial() { // Each should elicit a Version 1 ACK from the server. let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); // The following uses the Version from *outside* this crate. 
assertions::assert_version(dgram.as_ref().unwrap(), Version::Version1.wire_version()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); connect(&mut client, &mut server); assert_eq!(client.version(), Version::Version2); @@ -311,8 +307,8 @@ fn version_negotiation_downgrade() { // Start the handshake and spoof a VN packet. let initial = client.process_output(now()).dgram().unwrap(); let vn = create_vn(&initial, &[DOWNGRADE.wire_version()]); - let dgram = Datagram::new(addr(), addr(), vn); - client.process_input(dgram, now()); + let dgram = datagram(vn); + client.process_input(&dgram, now()); connect_fail( &mut client, @@ -332,7 +328,7 @@ fn invalid_server_version() { new_server(ConnectionParameters::default().versions(Version::Version2, Version::all())); let dgram = client.process_output(now()).dgram(); - server.process_input(dgram.unwrap(), now()); + server.process_input(&dgram.unwrap(), now()); // One packet received. assert_eq!(server.stats().packets_rx, 1); @@ -358,7 +354,7 @@ fn invalid_current_version_client() { assert_ne!(OTHER_VERSION, client.version()); client .set_local_tparam( - tparams::VERSION_NEGOTIATION, + tparams::VERSION_INFORMATION, TransportParameter::Versions { current: OTHER_VERSION.wire_version(), other: Version::all() @@ -394,7 +390,7 @@ fn invalid_current_version_server() { assert!(!Version::default().is_compatible(OTHER_VERSION)); server .set_local_tparam( - tparams::VERSION_NEGOTIATION, + tparams::VERSION_INFORMATION, TransportParameter::Versions { current: OTHER_VERSION.wire_version(), other: vec![OTHER_VERSION.wire_version()], @@ -420,7 +416,7 @@ fn no_compatible_version() { assert_ne!(OTHER_VERSION, client.version()); client .set_local_tparam( - tparams::VERSION_NEGOTIATION, + tparams::VERSION_INFORMATION, TransportParameter::Versions { current: Version::default().wire_version(), other: vec![OTHER_VERSION.wire_version()], @@ -463,7 +459,7 @@ fn compatible_upgrade_0rtt_rejected() { let initial 
= send_something(&mut client, now()); assertions::assert_version(&initial, Version::Version1.wire_version()); assertions::assert_coalesced_0rtt(&initial); - server.process_input(initial, now()); + server.process_input(&initial, now()); assert!(!server .events() .any(|e| matches!(e, ConnectionEvent::NewStream { .. }))); @@ -471,9 +467,9 @@ fn compatible_upgrade_0rtt_rejected() { // Finalize the connection. Don't use connect() because it uses // maybe_authenticate() too liberally and that eats the events we want to check. let dgram = server.process_output(now()).dgram(); // ServerHello flight - let dgram = client.process(dgram, now()).dgram(); // Client Finished (note: no authentication) - let dgram = server.process(dgram, now()).dgram(); // HANDSHAKE_DONE - client.process_input(dgram.unwrap(), now()); + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Client Finished (note: no authentication) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // HANDSHAKE_DONE + client.process_input(&dgram.unwrap(), now()); assert!(matches!(client.state(), State::Confirmed)); assert!(matches!(server.state(), State::Confirmed)); diff --git a/neqo-transport/src/connection/tests/zerortt.rs b/neqo-transport/src/connection/tests/zerortt.rs index 8c8a980c0c..b5e5f0d758 100644 --- a/neqo-transport/src/connection/tests/zerortt.rs +++ b/neqo-transport/src/connection/tests/zerortt.rs @@ -4,19 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::Connection; -use super::{ - connect, default_client, default_server, exchange_ticket, new_server, resumed_server, - CountingConnectionIdGenerator, -}; -use crate::events::ConnectionEvent; -use crate::{ConnectionParameters, Error, StreamType, Version}; +use std::{cell::RefCell, rc::Rc}; use neqo_common::event::Provider; use neqo_crypto::{AllowZeroRtt, AntiReplay}; -use std::cell::RefCell; -use std::rc::Rc; -use test_fixture::{self, assertions, now}; +use test_fixture::{assertions, now}; + +use super::{ + super::Connection, connect, default_client, default_server, exchange_ticket, new_server, + resumed_server, CountingConnectionIdGenerator, +}; +use crate::{events::ConnectionEvent, ConnectionParameters, Error, StreamType, Version}; #[test] fn zero_rtt_negotiate() { @@ -62,12 +60,12 @@ fn zero_rtt_send_recv() { // 0-RTT packets on their own shouldn't be padded to 1200. assert!(client_0rtt.as_dgram_ref().unwrap().len() < 1200); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // ServerHello, etc... let all_frames = server.stats().frame_tx.all; let ack_frames = server.stats().frame_tx.ack; - let server_process_0rtt = server.process(client_0rtt.dgram(), now()); + let server_process_0rtt = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_process_0rtt.as_dgram_ref().is_some()); assert_eq!(server.stats().frame_tx.all, all_frames + 1); assert_eq!(server.stats().frame_tx.ack, ack_frames + 1); @@ -104,7 +102,7 @@ fn zero_rtt_send_coalesce() { assertions::assert_coalesced_0rtt(&client_0rtt.as_dgram_ref().unwrap()[..]); - let server_hs = server.process(client_0rtt.dgram(), now()); + let server_hs = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // Should produce ServerHello etc... 
let server_stream_id = server @@ -161,9 +159,9 @@ fn zero_rtt_send_reject() { let client_0rtt = client.process(None, now()); assert!(client_0rtt.as_dgram_ref().is_some()); - let server_hs = server.process(client_hs.dgram(), now()); + let server_hs = server.process(client_hs.as_dgram_ref(), now()); assert!(server_hs.as_dgram_ref().is_some()); // Should produce ServerHello etc... - let server_ignored = server.process(client_0rtt.dgram(), now()); + let server_ignored = server.process(client_0rtt.as_dgram_ref(), now()); assert!(server_ignored.as_dgram_ref().is_none()); // The server shouldn't receive that 0-RTT data. @@ -171,14 +169,14 @@ fn zero_rtt_send_reject() { assert!(!server.events().any(recvd_stream_evt)); // Client should get a rejection. - let client_fin = client.process(server_hs.dgram(), now()); + let client_fin = client.process(server_hs.as_dgram_ref(), now()); let recvd_0rtt_reject = |e| e == ConnectionEvent::ZeroRttRejected; assert!(client.events().any(recvd_0rtt_reject)); // Server consume client_fin - let server_ack = server.process(client_fin.dgram(), now()); + let server_ack = server.process(client_fin.as_dgram_ref(), now()); assert!(server_ack.as_dgram_ref().is_some()); - let client_out = client.process(server_ack.dgram(), now()); + let client_out = client.process(server_ack.as_dgram_ref(), now()); assert!(client_out.as_dgram_ref().is_none()); // ...and the client stream should be gone. @@ -194,7 +192,7 @@ fn zero_rtt_send_reject() { assert!(client_after_reject.is_some()); // The server should receive new stream - server.process_input(client_after_reject.unwrap(), now()); + server.process_input(&client_after_reject.unwrap(), now()); assert!(server.events().any(recvd_stream_evt)); } @@ -233,8 +231,8 @@ fn zero_rtt_update_flow_control() { assert!(!client.stream_send_atomic(bidi_stream, MESSAGE).unwrap()); // Now get the server transport parameters. 
- let server_hs = server.process(client_hs, now()).dgram(); - client.process_input(server_hs.unwrap(), now()); + let server_hs = server.process(client_hs.as_ref(), now()).dgram(); + client.process_input(&server_hs.unwrap(), now()); // The streams should report a writeable event. let mut uni_stream_event = false; diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index e97f8fc9b7..60d056f2d2 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -8,7 +8,6 @@ use std::{ cell::RefCell, cmp::{max, min}, collections::HashMap, - convert::TryFrom, mem, ops::{Index, IndexMut, Range}, rc::Rc, @@ -16,12 +15,12 @@ use std::{ }; use neqo_common::{hex, hex_snip_middle, qdebug, qinfo, qtrace, Encoder, Role}; - use neqo_crypto::{ hkdf, hp::HpKey, Aead, Agent, AntiReplay, Cipher, Epoch, Error as CryptoError, HandshakeState, PrivateKey, PublicKey, Record, RecordList, ResumptionToken, SymKey, ZeroRttChecker, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_CT_HANDSHAKE, TLS_EPOCH_APPLICATION_DATA, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL, TLS_EPOCH_ZERO_RTT, + TLS_GRP_EC_SECP256R1, TLS_GRP_EC_SECP384R1, TLS_GRP_EC_SECP521R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; @@ -70,7 +69,6 @@ impl Crypto { mut agent: Agent, protocols: Vec, tphandler: TpHandler, - fuzzing: bool, ) -> Res { agent.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?; agent.set_ciphers(&[ @@ -78,6 +76,13 @@ impl Crypto { TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, ])?; + agent.set_groups(&[ + TLS_GRP_EC_X25519, + TLS_GRP_EC_SECP256R1, + TLS_GRP_EC_SECP384R1, + TLS_GRP_EC_SECP521R1, + ])?; + agent.send_additional_key_shares(1)?; agent.set_alpn(&protocols)?; agent.disable_end_of_early_data()?; // Always enable 0-RTT on the client, but the server needs @@ -94,10 +99,9 @@ impl Crypto { version, protocols, tls: agent, - streams: Default::default(), + streams: CryptoStreams::default(), states: CryptoStates { - fuzzing, - 
..Default::default() + ..CryptoStates::default() }, }) } @@ -218,7 +222,7 @@ impl Crypto { self.tls.read_secret(TLS_EPOCH_ZERO_RTT), ), }; - let secret = secret.ok_or(Error::InternalError(1))?; + let secret = secret.ok_or(Error::InternalError)?; self.states .set_0rtt_keys(version, dir, &secret, cipher.unwrap()); Ok(true) @@ -232,34 +236,32 @@ impl Crypto { /// Returns true if new handshake keys were installed. pub fn install_keys(&mut self, role: Role) -> Res { - if !self.tls.state().is_final() { + if self.tls.state().is_final() { + Ok(false) + } else { let installed_hs = self.install_handshake_keys()?; if role == Role::Server { self.maybe_install_application_write_key(self.version)?; } Ok(installed_hs) - } else { - Ok(false) } } fn install_handshake_keys(&mut self) -> Res { qtrace!([self], "Attempt to install handshake keys"); - let write_secret = if let Some(secret) = self.tls.write_secret(TLS_EPOCH_HANDSHAKE) { - secret - } else { + let Some(write_secret) = self.tls.write_secret(TLS_EPOCH_HANDSHAKE) else { // No keys is fine. 
return Ok(false); }; let read_secret = self .tls .read_secret(TLS_EPOCH_HANDSHAKE) - .ok_or(Error::InternalError(2))?; + .ok_or(Error::InternalError)?; let cipher = match self.tls.info() { None => self.tls.preinfo()?.cipher_suite(), Some(info) => Some(info.cipher_suite()), } - .ok_or(Error::InternalError(3))?; + .ok_or(Error::InternalError)?; self.states .set_handshake_keys(self.version, &write_secret, &read_secret, cipher); qdebug!([self], "Handshake keys installed"); @@ -269,7 +271,7 @@ impl Crypto { fn maybe_install_application_write_key(&mut self, version: Version) -> Res<()> { qtrace!([self], "Attempt to install application write key"); if let Some(secret) = self.tls.write_secret(TLS_EPOCH_APPLICATION_DATA) { - self.states.set_application_write_key(version, secret)?; + self.states.set_application_write_key(version, &secret)?; qdebug!([self], "Application write key installed"); } Ok(()) @@ -283,9 +285,9 @@ impl Crypto { let read_secret = self .tls .read_secret(TLS_EPOCH_APPLICATION_DATA) - .ok_or(Error::InternalError(4))?; + .ok_or(Error::InternalError)?; self.states - .set_application_read_key(version, read_secret, expire_0rtt)?; + .set_application_read_key(version, &read_secret, expire_0rtt)?; qdebug!([self], "application read keys installed"); Ok(()) } @@ -308,12 +310,12 @@ impl Crypto { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { - self.streams.write_frame(space, builder, tokens, stats) + ) { + self.streams.write_frame(space, builder, tokens, stats); } pub fn acked(&mut self, token: &CryptoRecoveryToken) { - qinfo!( + qdebug!( "Acked crypto frame space={} offset={} length={}", token.space, token.offset, @@ -363,7 +365,7 @@ impl Crypto { }); enc.encode_vvec(new_token.unwrap_or(&[])); enc.encode(t.as_ref()); - qinfo!("resumption token {}", hex_snip_middle(enc.as_ref())); + qdebug!("resumption token {}", hex_snip_middle(enc.as_ref())); Some(ResumptionToken::new(enc.into(), t.expiration_time())) } else { None @@ 
-416,7 +418,6 @@ pub struct CryptoDxState { /// The total number of operations that are remaining before the keys /// become exhausted and can't be used any more. invocations: PacketNumber, - fuzzing: bool, } impl CryptoDxState { @@ -427,9 +428,8 @@ impl CryptoDxState { epoch: Epoch, secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Self { - qinfo!( + qdebug!( "Making {:?} {} CryptoDxState, v={:?} cipher={}", direction, epoch, @@ -441,19 +441,11 @@ impl CryptoDxState { version, direction, epoch: usize::from(epoch), - aead: Aead::new( - fuzzing, - TLS_VERSION_1_3, - cipher, - secret, - version.label_prefix(), - ) - .unwrap(), + aead: Aead::new(TLS_VERSION_1_3, cipher, secret, version.label_prefix()).unwrap(), hpkey: HpKey::extract(TLS_VERSION_1_3, cipher, secret, &hplabel).unwrap(), used_pn: 0..0, min_pn: 0, invocations: Self::limit(direction, cipher), - fuzzing, } } @@ -462,7 +454,6 @@ impl CryptoDxState { direction: CryptoDxDirection, label: &str, dcid: &[u8], - fuzzing: bool, ) -> Self { qtrace!("new_initial {:?} {}", version, ConnectionIdRef::from(dcid)); let salt = version.initial_salt(); @@ -478,14 +469,7 @@ impl CryptoDxState { let secret = hkdf::expand_label(TLS_VERSION_1_3, cipher, &initial_secret, &[], label).unwrap(); - Self::new( - version, - direction, - TLS_EPOCH_INITIAL, - &secret, - cipher, - fuzzing, - ) + Self::new(version, direction, TLS_EPOCH_INITIAL, &secret, cipher) } /// Determine the confidentiality and integrity limits for the cipher. @@ -545,7 +529,6 @@ impl CryptoDxState { direction: self.direction, epoch: self.epoch + 1, aead: Aead::new( - self.fuzzing, TLS_VERSION_1_3, cipher, next_secret, @@ -556,7 +539,6 @@ impl CryptoDxState { used_pn: pn..pn, min_pn: pn, invocations, - fuzzing: self.fuzzing, } } @@ -657,7 +639,7 @@ impl CryptoDxState { // The numbers in `Self::limit` assume a maximum packet size of 2^11. 
if body.len() > 2048 { debug_assert!(false); - return Err(Error::InternalError(12)); + return Err(Error::InternalError); } self.invoked()?; @@ -692,7 +674,7 @@ impl CryptoDxState { Ok(res.to_vec()) } - #[cfg(all(test, not(feature = "fuzzing")))] + #[cfg(all(test, not(feature = "disable-encryption")))] pub(crate) fn test_default() -> Self { // This matches the value in packet.rs const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; @@ -701,7 +683,6 @@ impl CryptoDxState { CryptoDxDirection::Write, "server in", CLIENT_CID, - false, ) } @@ -755,29 +736,19 @@ pub(crate) struct CryptoDxAppData { cipher: Cipher, // Not the secret used to create `self.dx`, but the one needed for the next iteration. next_secret: SymKey, - fuzzing: bool, } impl CryptoDxAppData { pub fn new( version: Version, dir: CryptoDxDirection, - secret: SymKey, + secret: &SymKey, cipher: Cipher, - fuzzing: bool, ) -> Res { Ok(Self { - dx: CryptoDxState::new( - version, - dir, - TLS_EPOCH_APPLICATION_DATA, - &secret, - cipher, - fuzzing, - ), + dx: CryptoDxState::new(version, dir, TLS_EPOCH_APPLICATION_DATA, secret, cipher), cipher, - next_secret: Self::update_secret(cipher, &secret)?, - fuzzing, + next_secret: Self::update_secret(cipher, secret)?, }) } @@ -787,7 +758,7 @@ impl CryptoDxAppData { } pub fn next(&self) -> Res { - if self.dx.epoch == usize::max_value() { + if self.dx.epoch == usize::MAX { // Guard against too many key updates. return Err(Error::KeysExhausted); } @@ -796,7 +767,6 @@ impl CryptoDxAppData { dx: self.dx.next(&self.next_secret, self.cipher), cipher: self.cipher, next_secret, - fuzzing: self.fuzzing, }) } @@ -830,7 +800,6 @@ pub struct CryptoStates { // If this is set, then we have noticed a genuine update. // Once this time passes, we should switch in new keys. 
read_update_time: Option, - fuzzing: bool, } impl CryptoStates { @@ -976,7 +945,7 @@ impl CryptoStates { }; for v in versions { - qinfo!( + qdebug!( [self], "Creating initial cipher state v={:?}, role={:?} dcid={}", v, @@ -985,20 +954,8 @@ impl CryptoStates { ); let mut initial = CryptoState { - tx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Write, - write, - dcid, - self.fuzzing, - ), - rx: CryptoDxState::new_initial( - *v, - CryptoDxDirection::Read, - read, - dcid, - self.fuzzing, - ), + tx: CryptoDxState::new_initial(*v, CryptoDxDirection::Write, write, dcid), + rx: CryptoDxState::new_initial(*v, CryptoDxDirection::Read, read, dcid), }; if let Some(prev) = self.initials.get(v) { qinfo!( @@ -1052,7 +1009,6 @@ impl CryptoStates { TLS_EPOCH_ZERO_RTT, secret, cipher, - self.fuzzing, )); } @@ -1093,7 +1049,6 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, write_secret, cipher, - self.fuzzing, ), rx: CryptoDxState::new( version, @@ -1101,21 +1056,14 @@ impl CryptoStates { TLS_EPOCH_HANDSHAKE, read_secret, cipher, - self.fuzzing, ), }); } - pub fn set_application_write_key(&mut self, version: Version, secret: SymKey) -> Res<()> { + pub fn set_application_write_key(&mut self, version: Version, secret: &SymKey) -> Res<()> { debug_assert!(self.app_write.is_none()); debug_assert_ne!(self.cipher, 0); - let mut app = CryptoDxAppData::new( - version, - CryptoDxDirection::Write, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Write, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Write { app.dx.continuation(z)?; @@ -1129,18 +1077,12 @@ impl CryptoStates { pub fn set_application_read_key( &mut self, version: Version, - secret: SymKey, + secret: &SymKey, expire_0rtt: Instant, ) -> Res<()> { debug_assert!(self.app_write.is_some(), "should have write keys installed"); debug_assert!(self.app_read.is_none()); - let mut app = CryptoDxAppData::new( - version, - 
CryptoDxDirection::Read, - secret, - self.cipher, - self.fuzzing, - )?; + let mut app = CryptoDxAppData::new(version, CryptoDxDirection::Read, secret, self.cipher)?; if let Some(z) = &self.zero_rtt { if z.direction == CryptoDxDirection::Read { app.dx.continuation(z)?; @@ -1282,7 +1224,7 @@ impl CryptoStates { } /// Make some state for removing protection in tests. - #[cfg(not(feature = "fuzzing"))] + #[cfg(not(feature = "disable-encryption"))] #[cfg(test)] pub(crate) fn test_default() -> Self { let read = |epoch| { @@ -1295,7 +1237,6 @@ impl CryptoStates { dx: read(epoch), cipher: TLS_AES_128_GCM_SHA256, next_secret: hkdf::import_key(TLS_VERSION_1_3, &[0xaa; 32]).unwrap(), - fuzzing: false, }; let mut initials = HashMap::new(); initials.insert( @@ -1315,11 +1256,10 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } - #[cfg(all(not(feature = "fuzzing"), test))] + #[cfg(all(not(feature = "disable-encryption"), test))] pub(crate) fn test_chacha() -> Self { const SECRET: &[u8] = &[ 0x9a, 0xc3, 0x12, 0xa7, 0xf8, 0x77, 0x46, 0x8e, 0xbe, 0x69, 0x42, 0x27, 0x48, 0xad, @@ -1333,7 +1273,6 @@ impl CryptoStates { direction: CryptoDxDirection::Read, epoch, aead: Aead::new( - false, TLS_VERSION_1_3, TLS_CHACHA20_POLY1305_SHA256, &secret, @@ -1350,11 +1289,9 @@ impl CryptoStates { used_pn: 0..645_971_972, min_pn: 0, invocations: 10, - fuzzing: false, }, cipher: TLS_CHACHA20_POLY1305_SHA256, next_secret: secret.clone(), - fuzzing: false, }; Self { initials: HashMap::new(), @@ -1365,7 +1302,6 @@ impl CryptoStates { app_read: Some(app_read(3)), app_read_next: Some(app_read(4)), read_update_time: None, - fuzzing: false, } } } @@ -1400,6 +1336,9 @@ pub enum CryptoStreams { } impl CryptoStreams { + /// Keep around 64k if a server wants to push excess data at us. 
+ const BUFFER_LIMIT: u64 = 65536; + pub fn discard(&mut self, space: PacketNumberSpace) { match space { PacketNumberSpace::Initial => { @@ -1434,8 +1373,14 @@ impl CryptoStreams { self.get_mut(space).unwrap().tx.send(data); } - pub fn inbound_frame(&mut self, space: PacketNumberSpace, offset: u64, data: &[u8]) { - self.get_mut(space).unwrap().rx.inbound_frame(offset, data); + pub fn inbound_frame(&mut self, space: PacketNumberSpace, offset: u64, data: &[u8]) -> Res<()> { + let rx = &mut self.get_mut(space).unwrap().rx; + rx.inbound_frame(offset, data); + if rx.received() - rx.retired() <= Self::BUFFER_LIMIT { + Ok(()) + } else { + Err(Error::CryptoBufferExceeded) + } } pub fn data_ready(&self, space: PacketNumberSpace) -> bool { @@ -1516,19 +1461,19 @@ impl CryptoStreams { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { let cs = self.get_mut(space).unwrap(); if let Some((offset, data)) = cs.tx.next_bytes() { let mut header_len = 1 + Encoder::varint_len(offset) + 1; // Don't bother if there isn't room for the header and some data. 
if builder.remaining() < header_len + 1 { - return Ok(()); + return; } // Calculate length of data based on the minimum of: // - available data - // - remaining space, less the header, which counts only one byte - // for the length at first to avoid underestimating length + // - remaining space, less the header, which counts only one byte for the length at + // first to avoid underestimating length let length = min(data.len(), builder.remaining() - header_len); header_len += Encoder::varint_len(u64::try_from(length).unwrap()) - 1; let length = min(data.len(), builder.remaining() - header_len); @@ -1536,9 +1481,6 @@ impl CryptoStreams { builder.encode_varint(crate::frame::FRAME_TYPE_CRYPTO); builder.encode_varint(offset); builder.encode_vvec(&data[..length]); - if builder.len() > builder.limit() { - return Err(Error::InternalError(15)); - } cs.tx.mark_as_sent(offset, length); @@ -1550,7 +1492,6 @@ impl CryptoStreams { })); stats.crypto += 1; } - Ok(()) } } diff --git a/neqo-transport/src/events.rs b/neqo-transport/src/events.rs index 65b376eb0b..a892e384b9 100644 --- a/neqo-transport/src/events.rs +++ b/neqo-transport/src/events.rs @@ -6,17 +6,18 @@ // Collecting a list of events relevant to whoever is using the Connection. 
-use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; - -use crate::connection::State; -use crate::quic_datagrams::DatagramTracking; -use crate::stream_id::{StreamId, StreamType}; -use crate::{AppError, Stats}; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + use neqo_common::event::Provider as EventProvider; use neqo_crypto::ResumptionToken; +use crate::{ + connection::State, + quic_datagrams::DatagramTracking, + stream_id::{StreamId, StreamType}, + AppError, Stats, +}; + #[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] pub enum OutgoingDatagramOutcome { DroppedTooBig, @@ -51,7 +52,7 @@ pub enum ConnectionEvent { stream_id: StreamId, app_error: AppError, }, - /// Peer has sent STOP_SENDING + /// Peer has sent `STOP_SENDING` SendStreamStopSending { stream_id: StreamId, app_error: AppError, @@ -60,7 +61,7 @@ pub enum ConnectionEvent { SendStreamComplete { stream_id: StreamId, }, - /// Peer increased MAX_STREAMS + /// Peer increased `MAX_STREAMS` SendStreamCreatable { stream_type: StreamType, }, @@ -235,7 +236,7 @@ impl ConnectionEvents { where F: Fn(&ConnectionEvent) -> bool, { - self.events.borrow_mut().retain(|evt| !f(evt)) + self.events.borrow_mut().retain(|evt| !f(evt)); } } @@ -253,8 +254,9 @@ impl EventProvider for ConnectionEvents { #[cfg(test)] mod tests { - use super::*; - use crate::{ConnectionError, Error}; + use neqo_common::event::Provider; + + use crate::{ConnectionError, ConnectionEvent, ConnectionEvents, Error, State, StreamId}; #[test] fn event_culling() { diff --git a/neqo-transport/src/fc.rs b/neqo-transport/src/fc.rs index 8ed6573521..5ddfce6463 100644 --- a/neqo-transport/src/fc.rs +++ b/neqo-transport/src/fc.rs @@ -7,6 +7,13 @@ // Tracks possibly-redundant flow control signals from other code and converts // into flow control frames needing to be sent to the remote. 
+use std::{ + fmt::Debug, + ops::{Deref, DerefMut, Index, IndexMut}, +}; + +use neqo_common::{qtrace, Role}; + use crate::{ frame::{ FRAME_TYPE_DATA_BLOCKED, FRAME_TYPE_MAX_DATA, FRAME_TYPE_MAX_STREAMS_BIDI, @@ -19,13 +26,6 @@ use crate::{ stream_id::{StreamId, StreamType}, Error, Res, }; -use neqo_common::{qtrace, Role}; - -use std::{ - convert::TryFrom, - fmt::Debug, - ops::{Deref, DerefMut, Index, IndexMut}, -}; #[derive(Debug)] pub struct SenderFlowControl @@ -248,7 +248,7 @@ where } } - /// This function is called when STREAM_DATA_BLOCKED frame is received. + /// This function is called when `STREAM_DATA_BLOCKED` frame is received. /// The flow control will try to send an update if possible. pub fn send_flowc_update(&mut self) { if self.retired + self.max_active > self.max_allowed { @@ -575,6 +575,8 @@ impl IndexMut for LocalStreamLimits { #[cfg(test)] mod test { + use neqo_common::{Encoder, Role}; + use super::{LocalStreamLimits, ReceiverFlowControl, RemoteStreamLimits, SenderFlowControl}; use crate::{ packet::PacketBuilder, @@ -582,7 +584,6 @@ mod test { stream_id::{StreamId, StreamType}, Error, }; - use neqo_common::{Encoder, Role}; #[test] fn blocked_at_zero() { @@ -858,7 +859,7 @@ mod test { remote_stream_limits(Role::Server, 0, 2); } - #[should_panic] + #[should_panic(expected = ".is_allowed")] #[test] fn remote_stream_limits_asserts_if_limit_exceeded() { let mut fc = RemoteStreamLimits::new(2, 1, Role::Client); diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 8d56fd3000..d84eb61ce8 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -6,20 +6,21 @@ // Directly relating to QUIC frames. 
+use std::ops::RangeInclusive; + use neqo_common::{qtrace, Decoder}; -use crate::cid::MAX_CONNECTION_ID_LEN; -use crate::packet::PacketType; -use crate::stream_id::{StreamId, StreamType}; -use crate::{AppError, ConnectionError, Error, Res, TransportError}; - -use std::convert::TryFrom; -use std::ops::RangeInclusive; +use crate::{ + cid::MAX_CONNECTION_ID_LEN, + packet::PacketType, + stream_id::{StreamId, StreamType}, + AppError, ConnectionError, Error, Res, TransportError, +}; #[allow(clippy::module_name_repetitions)] pub type FrameType = u64; -const FRAME_TYPE_PADDING: FrameType = 0x0; +pub const FRAME_TYPE_PADDING: FrameType = 0x0; pub const FRAME_TYPE_PING: FrameType = 0x1; pub const FRAME_TYPE_ACK: FrameType = 0x2; const FRAME_TYPE_ACK_ECN: FrameType = 0x3; @@ -77,6 +78,7 @@ impl CloseError { } } + #[must_use] pub fn code(&self) -> u64 { match self { Self::Transport(c) | Self::Application(c) => *c, @@ -93,6 +95,12 @@ impl From for CloseError { } } +impl From for Error { + fn from(_err: std::array::TryFromSliceError) -> Self { + Self::FrameEncodingError + } +} + #[derive(PartialEq, Eq, Debug, Default, Clone)] pub struct AckRange { pub(crate) gap: u64, @@ -101,7 +109,7 @@ pub struct AckRange { #[derive(PartialEq, Eq, Debug, Clone)] pub enum Frame<'a> { - Padding, + Padding(u16), Ping, Ack { largest_acknowledged: u64, @@ -211,9 +219,10 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn get_type(&self) -> FrameType { match self { - Self::Padding => FRAME_TYPE_PADDING, + Self::Padding { .. } => FRAME_TYPE_PADDING, Self::Ping => FRAME_TYPE_PING, Self::Ack { .. } => FRAME_TYPE_ACK, // We don't do ACK ECN. Self::ResetStream { .. 
} => FRAME_TYPE_RESET_STREAM, @@ -252,6 +261,7 @@ impl<'a> Frame<'a> { } } + #[must_use] pub fn is_stream(&self) -> bool { matches!( self, @@ -267,6 +277,7 @@ impl<'a> Frame<'a> { ) } + #[must_use] pub fn stream_type(fin: bool, nonzero_offset: bool, fill: bool) -> u64 { let mut t = FRAME_TYPE_STREAM; if fin { @@ -283,28 +294,34 @@ impl<'a> Frame<'a> { /// If the frame causes a recipient to generate an ACK within its /// advertised maximum acknowledgement delay. + #[must_use] pub fn ack_eliciting(&self) -> bool { !matches!( self, - Self::Ack { .. } | Self::Padding | Self::ConnectionClose { .. } + Self::Ack { .. } | Self::Padding { .. } | Self::ConnectionClose { .. } ) } /// If the frame can be sent in a path probe /// without initiating migration to that path. + #[must_use] pub fn path_probing(&self) -> bool { matches!( self, - Self::Padding + Self::Padding { .. } | Self::NewConnectionId { .. } | Self::PathChallenge { .. } | Self::PathResponse { .. } ) } - /// Converts AckRanges as encoded in a ACK frame (see -transport + /// Converts `AckRanges` as encoded in a ACK frame (see -transport /// 19.3.1) into ranges of acked packets (end, start), inclusive of /// start and end values. + /// + /// # Errors + /// + /// Returns an error if the ranges are invalid. 
pub fn decode_ack_frame( largest_acked: u64, first_ack_range: u64, @@ -345,36 +362,36 @@ impl<'a> Frame<'a> { Ok(acked_ranges) } - pub fn dump(&self) -> Option { + #[must_use] + pub fn dump(&self) -> String { match self { - Self::Crypto { offset, data } => Some(format!( - "Crypto {{ offset: {}, len: {} }}", - offset, - data.len() - )), + Self::Crypto { offset, data } => { + format!("Crypto {{ offset: {}, len: {} }}", offset, data.len()) + } Self::Stream { stream_id, offset, fill, data, fin, - } => Some(format!( + } => format!( "Stream {{ stream_id: {}, offset: {}, len: {}{}, fin: {} }}", stream_id.as_u64(), offset, if *fill { ">>" } else { "" }, data.len(), fin, - )), - Self::Padding => None, - Self::Datagram { data, .. } => Some(format!("Datagram {{ len: {} }}", data.len())), - _ => Some(format!("{:?}", self)), + ), + Self::Padding(length) => format!("Padding {{ len: {length} }}"), + Self::Datagram { data, .. } => format!("Datagram {{ len: {} }}", data.len()), + _ => format!("{self:?}"), } } + #[must_use] pub fn is_allowed(&self, pt: PacketType) -> bool { match self { - Self::Padding | Self::Ping => true, + Self::Padding { .. } | Self::Ping => true, Self::Crypto { .. } | Self::Ack { .. } | Self::ConnectionClose { @@ -386,7 +403,22 @@ impl<'a> Frame<'a> { } } + /// # Errors + /// + /// Returns an error if the frame cannot be decoded. + #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match statement. pub fn decode(dec: &mut Decoder<'a>) -> Res { + /// Maximum ACK Range Count in ACK Frame + /// + /// Given a max UDP datagram size of 64k bytes and a minimum ACK Range size of 2 + /// bytes (2 QUIC varints), a single datagram can at most contain 32k ACK + /// Ranges. + /// + /// Note that the maximum (jumbogram) Ethernet MTU of 9216 or on the + /// Internet the regular Ethernet MTU of 1518 are more realistically to + /// be the limiting factor. Though for simplicity the higher limit is chosen. 
+ const MAX_ACK_RANGE_COUNT: u64 = 32 * 1024; + fn d(v: Option) -> Res { v.ok_or(Error::NoMoreData) } @@ -395,13 +427,23 @@ impl<'a> Frame<'a> { } // TODO(ekr@rtfm.com): check for minimal encoding - let t = d(dec.decode_varint())?; + let t = dv(dec)?; match t { - FRAME_TYPE_PADDING => Ok(Self::Padding), + FRAME_TYPE_PADDING => { + let mut length: u16 = 1; + while let Some(b) = dec.peek_byte() { + if u64::from(b) != FRAME_TYPE_PADDING { + break; + } + length += 1; + dec.skip(1); + } + Ok(Self::Padding(length)) + } FRAME_TYPE_PING => Ok(Self::Ping), FRAME_TYPE_RESET_STREAM => Ok(Self::ResetStream { stream_id: StreamId::from(dv(dec)?), - application_error_code: d(dec.decode_varint())?, + application_error_code: dv(dec)?, final_size: match dec.decode_varint() { Some(v) => v, _ => return Err(Error::NoMoreData), @@ -410,9 +452,15 @@ impl<'a> Frame<'a> { FRAME_TYPE_ACK | FRAME_TYPE_ACK_ECN => { let la = dv(dec)?; let ad = dv(dec)?; - let nr = dv(dec)?; + let nr = dv(dec).and_then(|nr| { + if nr < MAX_ACK_RANGE_COUNT { + Ok(nr) + } else { + Err(Error::TooMuchData) + } + })?; let fa = dv(dec)?; - let mut arr: Vec = Vec::with_capacity(nr as usize); + let mut arr: Vec = Vec::with_capacity(usize::try_from(nr)?); for _ in 0..nr { let ar = AckRange { gap: dv(dec)?, @@ -437,12 +485,12 @@ impl<'a> Frame<'a> { } FRAME_TYPE_STOP_SENDING => Ok(Self::StopSending { stream_id: StreamId::from(dv(dec)?), - application_error_code: d(dec.decode_varint())?, + application_error_code: dv(dec)?, }), FRAME_TYPE_CRYPTO => { let offset = dv(dec)?; let data = d(dec.decode_vvec())?; - if offset + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if offset + u64::try_from(data.len())? > ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Crypto { offset, data }) @@ -469,7 +517,7 @@ impl<'a> Frame<'a> { qtrace!("STREAM frame, with length"); d(dec.decode_vvec())? }; - if o + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) { + if o + u64::try_from(data.len())? 
> ((1 << 62) - 1) { return Err(Error::FrameEncodingError); } Ok(Self::Stream { @@ -518,7 +566,7 @@ impl<'a> Frame<'a> { return Err(Error::DecodingFrame); } let srt = d(dec.decode(16))?; - let stateless_reset_token = <&[_; 16]>::try_from(srt).unwrap(); + let stateless_reset_token = <&[_; 16]>::try_from(srt)?; Ok(Self::NewConnectionId { sequence_number, @@ -543,7 +591,7 @@ impl<'a> Frame<'a> { Ok(Self::PathResponse { data: datav }) } FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT | FRAME_TYPE_CONNECTION_CLOSE_APPLICATION => { - let error_code = CloseError::from_type_bit(t, d(dec.decode_varint())?); + let error_code = CloseError::from_type_bit(t, dv(dec)?); let frame_type = if t == FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT { dv(dec)? } else { @@ -595,9 +643,14 @@ impl<'a> Frame<'a> { #[cfg(test)] mod tests { - use super::*; use neqo_common::{Decoder, Encoder}; + use crate::{ + cid::MAX_CONNECTION_ID_LEN, + frame::{AckRange, Frame, FRAME_TYPE_ACK}, + CloseError, Error, StreamId, StreamType, + }; + fn just_dec(f: &Frame, s: &str) { let encoded = Encoder::from_hex(s); let decoded = Frame::decode(&mut encoded.as_decoder()).unwrap(); @@ -606,8 +659,10 @@ mod tests { #[test] fn padding() { - let f = Frame::Padding; + let f = Frame::Padding(1); just_dec(&f, "00"); + let f = Frame::Padding(2); + just_dec(&f, "0000"); } #[test] @@ -658,7 +713,7 @@ mod tests { application_error_code: 0x77, }; - just_dec(&f, "053F4077") + just_dec(&f, "053F4077"); } #[test] @@ -863,8 +918,8 @@ mod tests { #[test] fn test_compare() { - let f1 = Frame::Padding; - let f2 = Frame::Padding; + let f1 = Frame::Padding(1); + let f2 = Frame::Padding(1); let f3 = Frame::Crypto { offset: 0, data: &[1, 2, 3], @@ -943,4 +998,16 @@ mod tests { }; just_dec(&f, "403103010203"); } + + #[test] + fn frame_decode_enforces_bound_on_ack_range() { + let mut e = Encoder::new(); + + e.encode_varint(FRAME_TYPE_ACK); + e.encode_varint(0u64); // largest acknowledged + e.encode_varint(0u64); // ACK delay + e.encode_varint(u32::MAX); // 
ACK range count = huge, but maybe available for allocation + + assert_eq!(Err(Error::TooMuchData), Frame::decode(&mut e.as_decoder())); + } } diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index daff7e73c2..5488472b58 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -4,10 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. -use neqo_common::qinfo; +use neqo_common::qwarn; use neqo_crypto::Error as CryptoError; mod ackrate; @@ -16,7 +15,6 @@ mod cc; mod cid; mod connection; mod crypto; -mod dump; mod events; mod fc; mod frame; @@ -26,8 +24,14 @@ mod path; mod qlog; mod quic_datagrams; mod recovery; +#[cfg(feature = "bench")] +pub mod recv_stream; +#[cfg(not(feature = "bench"))] mod recv_stream; mod rtt; +#[cfg(feature = "bench")] +pub mod send_stream; +#[cfg(not(feature = "bench"))] mod send_stream; mod sender; pub mod server; @@ -38,34 +42,37 @@ pub mod tparams; mod tracking; pub mod version; -pub use self::cc::CongestionControlAlgorithm; -pub use self::cid::{ - ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef, - EmptyConnectionIdGenerator, RandomConnectionIdGenerator, +pub use self::{ + cc::CongestionControlAlgorithm, + cid::{ + ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef, + EmptyConnectionIdGenerator, RandomConnectionIdGenerator, + }, + connection::{ + params::{ConnectionParameters, ACK_RATIO_SCALE}, + Connection, Output, State, ZeroRttState, + }, + events::{ConnectionEvent, ConnectionEvents}, + frame::CloseError, + quic_datagrams::DatagramTracking, + recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}, + send_stream::{SendStreamStats, SEND_BUFFER_SIZE}, + stats::Stats, + stream_id::{StreamId, StreamType}, + version::Version, }; -pub use 
self::connection::{ - params::ConnectionParameters, params::ACK_RATIO_SCALE, Connection, Output, State, ZeroRttState, -}; -pub use self::events::{ConnectionEvent, ConnectionEvents}; -pub use self::frame::CloseError; -pub use self::quic_datagrams::DatagramTracking; -pub use self::stats::Stats; -pub use self::stream_id::{StreamId, StreamType}; -pub use self::version::Version; - -pub use self::recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}; -pub use self::send_stream::{SendStreamStats, SEND_BUFFER_SIZE}; pub type TransportError = u64; const ERROR_APPLICATION_CLOSE: TransportError = 12; +const ERROR_CRYPTO_BUFFER_EXCEEDED: TransportError = 13; const ERROR_AEAD_LIMIT_REACHED: TransportError = 15; #[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)] pub enum Error { NoError, - // Each time tihe error is return a different parameter is supply. - // This will be use to distinguish each occurance of this error. - InternalError(u16), + // Each time this error is returned a different parameter is supplied. + // This will be used to distinguish each occurance of this error. + InternalError, ConnectionRefused, FlowControlError, StreamLimitError, @@ -76,6 +83,7 @@ pub enum Error { ProtocolViolation, InvalidToken, ApplicationError, + CryptoBufferExceeded, CryptoError(CryptoError), QlogError, CryptoAlert(u8), @@ -124,6 +132,7 @@ pub enum Error { } impl Error { + #[must_use] pub fn code(&self) -> TransportError { match self { Self::NoError @@ -142,6 +151,7 @@ impl Error { Self::KeysExhausted => ERROR_AEAD_LIMIT_REACHED, Self::ApplicationError => ERROR_APPLICATION_CLOSE, Self::NoAvailablePath => 16, + Self::CryptoBufferExceeded => ERROR_CRYPTO_BUFFER_EXCEEDED, Self::CryptoAlert(a) => 0x100 + u64::from(*a), // As we have a special error code for ECH fallbacks, we lose the alert. // Send the server "ech_required" directly. 
@@ -155,7 +165,7 @@ impl Error { impl From for Error { fn from(err: CryptoError) -> Self { - qinfo!("Crypto operation failed {:?}", err); + qwarn!("Crypto operation failed {:?}", err); match err { CryptoError::EchRetry(config) => Self::EchRetry(config), _ => Self::CryptoError(err), @@ -176,7 +186,7 @@ impl From for Error { } impl ::std::error::Error for Error { - fn source(&self) -> Option<&(dyn ::std::error::Error + 'static)> { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Self::CryptoError(e) => Some(e), _ => None, @@ -186,7 +196,7 @@ impl ::std::error::Error for Error { impl ::std::fmt::Display for Error { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "Transport error: {:?}", self) + write!(f, "Transport error: {self:?}") } } @@ -199,10 +209,11 @@ pub enum ConnectionError { } impl ConnectionError { + #[must_use] pub fn app_code(&self) -> Option { match self { Self::Application(e) => Some(*e), - _ => None, + Self::Transport(_) => None, } } } diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index 84a60bcd3e..5b88e5c0c4 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -5,14 +5,14 @@ // except according to those terms. // Pacer -#![deny(clippy::pedantic)] -use neqo_common::qtrace; +use std::{ + cmp::min, + fmt::{Debug, Display}, + time::{Duration, Instant}, +}; -use std::cmp::min; -use std::convert::TryFrom; -use std::fmt::{Debug, Display}; -use std::time::{Duration, Instant}; +use neqo_common::qtrace; /// This value determines how much faster the pacer operates than the /// congestion window. @@ -26,6 +26,8 @@ const PACER_SPEEDUP: usize = 2; /// A pacer that uses a leaky bucket. pub struct Pacer { + /// Whether pacing is enabled. + enabled: bool, /// The last update time. t: Instant, /// The maximum capacity, or burst size, in bytes. 
@@ -47,9 +49,15 @@ impl Pacer { /// The value of `p` is the packet size in bytes, which determines the minimum /// credit needed before a packet is sent. This should be a substantial /// fraction of the maximum packet size, if not the packet size. - pub fn new(now: Instant, m: usize, p: usize) -> Self { + pub fn new(enabled: bool, now: Instant, m: usize, p: usize) -> Self { assert!(m >= p, "maximum capacity has to be at least one packet"); - Self { t: now, m, c: m, p } + Self { + enabled, + t: now, + m, + c: m, + p, + } } /// Determine when the next packet will be available based on the provided RTT @@ -74,10 +82,15 @@ impl Pacer { } /// Spend credit. This cannot fail; users of this API are expected to call - /// next() to determine when to spend. This takes the current time (`now`), + /// `next()` to determine when to spend. This takes the current time (`now`), /// an estimate of the round trip time (`rtt`), the estimated congestion /// window (`cwnd`), and the number of bytes that were sent (`count`). 
pub fn spend(&mut self, now: Instant, rtt: Duration, cwnd: usize, count: usize) { + if !self.enabled { + self.t = now; + return; + } + qtrace!([self], "spend {} over {}, {:?}", count, cwnd, rtt); // Increase the capacity by: // `(now - self.t) * PACER_SPEEDUP * cwnd / rtt` @@ -108,31 +121,43 @@ impl Debug for Pacer { } } -#[cfg(tests)] +#[cfg(test)] mod tests { - use super::Pacer; + use std::time::Duration; + use test_fixture::now; + use super::Pacer; + const RTT: Duration = Duration::from_millis(1000); const PACKET: usize = 1000; const CWND: usize = PACKET * 10; #[test] fn even() { - let mut n = now(); - let p = Pacer::new(n, PACKET, PACKET); - assert_eq!(p.next(RTT, CWND), None); + let n = now(); + let mut p = Pacer::new(true, n, PACKET, PACKET); + assert_eq!(p.next(RTT, CWND), n); p.spend(n, RTT, CWND, PACKET); - assert_eq!(p.next(RTT, CWND), Some(n + (RTT / 10))); + assert_eq!(p.next(RTT, CWND), n + (RTT / 20)); } #[test] fn backwards_in_time() { - let mut n = now(); - let p = Pacer::new(n + RTT, PACKET, PACKET); - assert_eq!(p.next(RTT, CWND), None); + let n = now(); + let mut p = Pacer::new(true, n + RTT, PACKET, PACKET); + assert_eq!(p.next(RTT, CWND), n + RTT); // Now spend some credit in the past using a time machine. p.spend(n, RTT, CWND, PACKET); - assert_eq!(p.next(RTT, CWND), Some(n + (RTT / 10))); + assert_eq!(p.next(RTT, CWND), n + (RTT / 20)); + } + + #[test] + fn pacing_disabled() { + let n = now(); + let mut p = Pacer::new(false, n, PACKET, PACKET); + assert_eq!(p.next(RTT, CWND), n); + p.spend(n, RTT, CWND, PACKET); + assert_eq!(p.next(RTT, CWND), n); } } diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 631bf84795..ce611a9664 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -5,20 +5,23 @@ // except according to those terms. // Encoding and decoding packets off the wire. 
-use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}; -use crate::crypto::{CryptoDxState, CryptoSpace, CryptoStates}; -use crate::version::{Version, WireVersion}; -use crate::{Error, Res}; +use std::{ + cmp::min, + fmt, + ops::{Deref, DerefMut, Range}, + time::Instant, +}; use neqo_common::{hex, hex_with_len, qtrace, qwarn, Decoder, Encoder}; use neqo_crypto::random; -use std::cmp::min; -use std::convert::TryFrom; -use std::fmt; -use std::iter::ExactSizeIterator; -use std::ops::{Deref, DerefMut, Range}; -use std::time::Instant; +use crate::{ + cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, + crypto::{CryptoDxState, CryptoSpace, CryptoStates}, + frame::FRAME_TYPE_PADDING, + version::{Version, WireVersion}, + Error, Res, +}; pub const PACKET_BIT_LONG: u8 = 0x80; const PACKET_BIT_SHORT: u8 = 0x00; @@ -155,7 +158,7 @@ impl PacketBuilder { } Self { encoder, - pn: u64::max_value(), + pn: u64::MAX, header: header_start..header_start, offsets: PacketBuilderOffsets { first_byte_mask: PACKET_HP_MASK_SHORT, @@ -168,11 +171,12 @@ impl PacketBuilder { } /// Start building a long header packet. - /// For an Initial packet you will need to call initial_token(), + /// For an Initial packet you will need to call `initial_token()`, /// even if the token is empty. /// /// See `short()` for more on how to handle this in cases where there is no space. #[allow(clippy::reversed_empty_ranges)] // For initializing an empty range. + #[allow(clippy::similar_names)] // For dcid and scid, which are fine here. pub fn long( mut encoder: Encoder, pt: PacketType, @@ -197,7 +201,7 @@ impl PacketBuilder { Self { encoder, - pn: u64::max_value(), + pn: u64::MAX, header: header_start..header_start, offsets: PacketBuilderOffsets { first_byte_mask: PACKET_HP_MASK_LONG, @@ -241,7 +245,7 @@ impl PacketBuilder { /// Adjust the limit to ensure that no more data is added. 
pub fn mark_full(&mut self) { - self.limit = self.encoder.len() + self.limit = self.encoder.len(); } /// Mark the packet as needing padding (or not). @@ -252,9 +256,14 @@ impl PacketBuilder { /// Maybe pad with "PADDING" frames. /// Only does so if padding was needed and this is a short packet. /// Returns true if padding was added. + /// + /// # Panics + /// + /// Cannot happen. pub fn pad(&mut self) -> bool { if self.padding && !self.is_long() { - self.encoder.pad_to(self.limit, 0); + self.encoder + .pad_to(self.limit, FRAME_TYPE_PADDING.try_into().unwrap()); true } else { false @@ -267,7 +276,7 @@ impl PacketBuilder { let mask = if quic_bit { PACKET_BIT_FIXED_QUIC } else { 0 } | if self.is_long() { 0 } else { PACKET_BIT_SPIN }; let first = self.header.start; - self.encoder.as_mut()[first] ^= random(1)[0] & mask; + self.encoder.as_mut()[first] ^= random::<1>()[0] & mask; } /// For an Initial packet, encode the token. @@ -285,6 +294,10 @@ impl PacketBuilder { /// The length is filled in after calling `build`. /// Does nothing if there isn't 4 bytes available other than render this builder /// unusable; if `remaining()` returns 0 at any point, call `abort()`. + /// + /// # Panics + /// + /// This will panic if the packet number length is too large. pub fn pn(&mut self, pn: PacketNumber, pn_len: usize) { if self.remaining() < 4 { self.limit = 0; @@ -311,6 +324,7 @@ impl PacketBuilder { self.pn = pn; } + #[allow(clippy::cast_possible_truncation)] // Nope. fn write_len(&mut self, expansion: usize) { let len = self.encoder.len() - (self.offsets.len + 2) + expansion; self.encoder.as_mut()[self.offsets.len] = 0x40 | ((len >> 8) & 0x3f) as u8; @@ -348,11 +362,15 @@ impl PacketBuilder { } /// Build the packet and return the encoder. + /// + /// # Errors + /// + /// This will return an error if the packet is too large. 
pub fn build(mut self, crypto: &mut CryptoDxState) -> Res { if self.len() > self.limit { qwarn!("Packet contents are more than the limit"); debug_assert!(false); - return Err(Error::InternalError(5)); + return Err(Error::InternalError); } self.pad_for_crypto(crypto); @@ -372,7 +390,9 @@ impl PacketBuilder { // Calculate the mask. let offset = SAMPLE_OFFSET - self.offsets.pn.len(); - assert!(offset + SAMPLE_SIZE <= ciphertext.len()); + if offset + SAMPLE_SIZE > ciphertext.len() { + return Err(Error::InternalError); + } let sample = &ciphertext[offset..offset + SAMPLE_SIZE]; let mask = crypto.compute_mask(sample)?; @@ -405,7 +425,12 @@ impl PacketBuilder { /// Make a retry packet. /// As this is a simple packet, this is just an associated function. /// As Retry is odd (it has to be constructed with leading bytes), - /// this returns a Vec rather than building on an encoder. + /// this returns a [`Vec`] rather than building on an encoder. + /// + /// # Errors + /// + /// This will return an error if AEAD encrypt fails. + #[allow(clippy::similar_names)] // scid and dcid are fine here. pub fn retry( version: Version, dcid: &[u8], @@ -420,7 +445,7 @@ impl PacketBuilder { PACKET_BIT_LONG | PACKET_BIT_FIXED_QUIC | (PacketType::Retry.to_byte(version) << 4) - | (random(1)[0] & 0xf), + | (random::<1>()[0] & 0xf), ); encoder.encode_uint(4, version.wire_version()); encoder.encode_vec(1, dcid); @@ -437,6 +462,8 @@ impl PacketBuilder { } /// Make a Version Negotiation packet. + #[allow(clippy::similar_names)] // scid and dcid are fine here. + #[must_use] pub fn version_negotiation( dcid: &[u8], scid: &[u8], @@ -444,7 +471,7 @@ impl PacketBuilder { versions: &[Version], ) -> Vec { let mut encoder = Encoder::default(); - let mut grease = random(4); + let mut grease = random::<4>(); // This will not include the "QUIC bit" sometimes. Intentionally. encoder.encode_byte(PACKET_BIT_LONG | (grease[3] & 0x7f)); encoder.encode(&[0; 4]); // Zero version == VN. 
@@ -488,7 +515,7 @@ impl From for Encoder { } } -/// PublicPacket holds information from packets that is public only. This allows for +/// `PublicPacket` holds information from packets that is public only. This allows for /// processing of packets prior to decryption. pub struct PublicPacket<'a> { /// The packet type. @@ -497,8 +524,8 @@ pub struct PublicPacket<'a> { dcid: ConnectionIdRef<'a>, /// The source connection ID, if this is a long header packet. scid: Option>, - /// Any token that is included in the packet (Retry always has a token; Initial sometimes does). - /// This is empty when there is no token. + /// Any token that is included in the packet (Retry always has a token; Initial sometimes + /// does). This is empty when there is no token. token: &'a [u8], /// The size of the header, not including the packet number. header_len: usize, @@ -528,7 +555,10 @@ impl<'a> PublicPacket<'a> { if packet_type == PacketType::Retry { let header_len = decoder.offset(); let expansion = retry::expansion(version); - let token = Self::opt(decoder.decode(decoder.remaining() - expansion))?; + let token = decoder + .remaining() + .checked_sub(expansion) + .map_or(Err(Error::InvalidPacket), |v| Self::opt(decoder.decode(v)))?; if token.is_empty() { return Err(Error::InvalidPacket); } @@ -548,6 +578,11 @@ impl<'a> PublicPacket<'a> { /// Decode the common parts of a packet. This provides minimal parsing and validation. /// Returns a tuple of a `PublicPacket` and a slice with any remainder from the datagram. + /// + /// # Errors + /// + /// This will return an error if the packet could not be decoded. + #[allow(clippy::similar_names)] // For dcid and scid, which are fine. pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> { let mut decoder = Decoder::new(data); let first = Self::opt(decoder.decode_byte())?; @@ -578,7 +613,7 @@ impl<'a> PublicPacket<'a> { } // Generic long header. 
- let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?).unwrap(); + let version = WireVersion::try_from(Self::opt(decoder.decode_uint(4))?)?; let dcid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); let scid = ConnectionIdRef::from(Self::opt(decoder.decode_vec(1))?); @@ -599,9 +634,7 @@ impl<'a> PublicPacket<'a> { } // Check that this is a long header from a supported version. - let version = if let Ok(v) = Version::try_from(version) { - v - } else { + let Ok(version) = Version::try_from(version) else { return Ok(( Self { packet_type: PacketType::OtherVersion, @@ -640,11 +673,14 @@ impl<'a> PublicPacket<'a> { } /// Validate the given packet as though it were a retry. + #[must_use] pub fn is_valid_retry(&self, odcid: &ConnectionId) -> bool { if self.packet_type != PacketType::Retry { return false; } - let version = self.version().unwrap(); + let Some(version) = self.version() else { + return false; + }; let expansion = retry::expansion(version); if self.data.len() <= expansion { return false; @@ -660,6 +696,7 @@ impl<'a> PublicPacket<'a> { .unwrap_or(false) } + #[must_use] pub fn is_valid_initial(&self) -> bool { // Packet has to be an initial, with a DCID of 8 bytes, or a token. // Note: the Server class validates the token and checks the length. @@ -667,33 +704,42 @@ impl<'a> PublicPacket<'a> { && (self.dcid().len() >= 8 || !self.token.is_empty()) } + #[must_use] pub fn packet_type(&self) -> PacketType { self.packet_type } - pub fn dcid(&self) -> &ConnectionIdRef<'a> { - &self.dcid + #[must_use] + pub fn dcid(&self) -> ConnectionIdRef<'a> { + self.dcid } - pub fn scid(&self) -> &ConnectionIdRef<'a> { + /// # Panics + /// + /// This will panic if called for a short header packet. 
+ #[must_use] + pub fn scid(&self) -> ConnectionIdRef<'a> { self.scid - .as_ref() .expect("should only be called for long header packets") } + #[must_use] pub fn token(&self) -> &'a [u8] { self.token } + #[must_use] pub fn version(&self) -> Option { self.version.and_then(|v| Version::try_from(v).ok()) } + #[must_use] pub fn wire_version(&self) -> WireVersion { debug_assert!(self.version.is_some()); self.version.unwrap_or(0) } + #[must_use] pub fn len(&self) -> usize { self.data.len() } @@ -721,14 +767,10 @@ impl<'a> PublicPacket<'a> { assert_ne!(self.packet_type, PacketType::Retry); assert_ne!(self.packet_type, PacketType::VersionNegotiation); - qtrace!( - "unmask hdr={}", - hex(&self.data[..self.header_len + SAMPLE_OFFSET]) - ); - let sample_offset = self.header_len + SAMPLE_OFFSET; let mask = if let Some(sample) = self.data.get(sample_offset..(sample_offset + SAMPLE_SIZE)) { + qtrace!("unmask hdr={}", hex(&self.data[..sample_offset])); crypto.compute_mask(sample) } else { Err(Error::NoMoreData) @@ -772,6 +814,9 @@ impl<'a> PublicPacket<'a> { )) } + /// # Errors + /// + /// This will return an error if the packet cannot be decrypted. pub fn decrypt(&self, crypto: &mut CryptoStates, release_at: Instant) -> Res { let cspace: CryptoSpace = self.packet_type.into(); // When we don't have a version, the crypto code doesn't need a version @@ -786,7 +831,9 @@ impl<'a> PublicPacket<'a> { // too small (which is public information). let (key_phase, pn, header, body) = self.decrypt_header(rx)?; qtrace!([rx], "decoded header: {:?}", header); - let rx = crypto.rx(version, cspace, key_phase).unwrap(); + let Some(rx) = crypto.rx(version, cspace, key_phase) else { + return Err(Error::DecryptError); + }; let version = rx.version(); // Version fixup; see above. 
let d = rx.decrypt(pn, &header, body)?; // If this is the first packet ever successfully decrypted @@ -809,8 +856,14 @@ impl<'a> PublicPacket<'a> { } } + /// # Errors + /// + /// This will return an error if the packet is not a version negotiation packet + /// or if the versions cannot be decoded. pub fn supported_versions(&self) -> Res> { - assert_eq!(self.packet_type, PacketType::VersionNegotiation); + if self.packet_type != PacketType::VersionNegotiation { + return Err(Error::InvalidPacket); + } let mut decoder = Decoder::new(&self.data[self.header_len..]); let mut res = Vec::new(); while decoder.remaining() > 0 { @@ -841,14 +894,17 @@ pub struct DecryptedPacket { } impl DecryptedPacket { + #[must_use] pub fn version(&self) -> Version { self.version } + #[must_use] pub fn packet_type(&self) -> PacketType { self.pt } + #[must_use] pub fn pn(&self) -> PacketNumber { self.pn } @@ -862,14 +918,21 @@ impl Deref for DecryptedPacket { } } -#[cfg(all(test, not(feature = "fuzzing")))] +#[cfg(all(test, not(feature = "disable-encryption")))] mod tests { - use super::*; - use crate::crypto::{CryptoDxState, CryptoStates}; - use crate::{EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version}; use neqo_common::Encoder; use test_fixture::{fixture_init, now}; + use crate::{ + cid::MAX_CONNECTION_ID_LEN, + crypto::{CryptoDxState, CryptoStates}, + packet::{ + PacketBuilder, PacketType, PublicPacket, PACKET_BIT_FIXED_QUIC, PACKET_BIT_LONG, + PACKET_BIT_SPIN, + }, + ConnectionId, EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version, + }; + const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; const SERVER_CID: &[u8] = &[0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5]; @@ -1020,7 +1083,8 @@ mod tests { assert_eq!(&decrypted[..], SAMPLE_SHORT_PAYLOAD); } - /// By telling the decoder that the connection ID is shorter than it really is, we get a decryption error. 
+ /// By telling the decoder that the connection ID is shorter than it really is, we get a + /// decryption error. #[test] fn decode_short_bad_cid() { fixture_init(); @@ -1170,9 +1234,9 @@ mod tests { } const SAMPLE_RETRY_V2: &[u8] = &[ - 0xcf, 0x70, 0x9a, 0x50, 0xc4, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x1d, 0xc7, 0x11, 0x30, 0xcd, 0x1e, 0xd3, 0x9d, 0x6e, 0xfc, - 0xee, 0x5c, 0x85, 0x80, 0x65, 0x01, + 0xcf, 0x6b, 0x33, 0x43, 0xcf, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0xc8, 0x64, 0x6c, 0xe8, 0xbf, 0xe3, 0x39, 0x52, 0xd9, 0x55, + 0x54, 0x36, 0x65, 0xdc, 0xc7, 0xb6, ]; const SAMPLE_RETRY_V1: &[u8] = &[ @@ -1353,7 +1417,7 @@ mod tests { const SAMPLE_VN: &[u8] = &[ 0x80, 0x00, 0x00, 0x00, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5, 0x08, - 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x70, 0x9a, 0x50, 0xc4, 0x00, 0x00, 0x00, + 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x6b, 0x33, 0x43, 0xcf, 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x20, 0xff, 0x00, 0x00, 0x1f, 0xff, 0x00, 0x00, 0x1e, 0xff, 0x00, 0x00, 0x1d, 0x0a, 0x0a, 0x0a, 0x0a, ]; @@ -1361,8 +1425,12 @@ mod tests { #[test] fn build_vn() { fixture_init(); - let mut vn = - PacketBuilder::version_negotiation(SERVER_CID, CLIENT_CID, 0x0a0a0a0a, &Version::all()); + let mut vn = PacketBuilder::version_negotiation( + SERVER_CID, + CLIENT_CID, + 0x0a0a_0a0a, + &Version::all(), + ); // Erase randomness from greasing... 
assert_eq!(vn.len(), SAMPLE_VN.len()); vn[0] &= 0x80; @@ -1375,8 +1443,12 @@ mod tests { #[test] fn vn_do_not_repeat_client_grease() { fixture_init(); - let vn = - PacketBuilder::version_negotiation(SERVER_CID, CLIENT_CID, 0x0a0a0a0a, &Version::all()); + let vn = PacketBuilder::version_negotiation( + SERVER_CID, + CLIENT_CID, + 0x0a0a_0a0a, + &Version::all(), + ); assert_ne!(&vn[SAMPLE_VN.len() - 4..], &[0x0a, 0x0a, 0x0a, 0x0a]); } @@ -1449,4 +1521,21 @@ mod tests { assert_eq!(decrypted.pn(), 654_360_564); assert_eq!(&decrypted[..], &[0x01]); } + + #[test] + fn decode_empty() { + neqo_crypto::init().unwrap(); + let res = PublicPacket::decode(&[], &EmptyConnectionIdGenerator::default()); + assert!(res.is_err()); + } + + #[test] + fn decode_too_short() { + neqo_crypto::init().unwrap(); + let res = PublicPacket::decode( + &[179, 255, 0, 0, 32, 0, 0], + &EmptyConnectionIdGenerator::default(), + ); + assert!(res.is_err()); + } } diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index e9a7e90ab9..71193b9100 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -4,15 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(clippy::pedantic)] - -use crate::version::Version; -use crate::{Error, Res}; +use std::cell::RefCell; use neqo_common::qerror; use neqo_crypto::{hkdf, Aead, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use std::cell::RefCell; +use crate::{version::Version, Error, Res}; /// The AEAD used for Retry is fixed, so use thread local storage. 
fn make_aead(version: Version) -> Aead { @@ -21,7 +18,6 @@ fn make_aead(version: Version) -> Aead { let secret = hkdf::import_key(TLS_VERSION_1_3, version.retry_secret()).unwrap(); Aead::new( - false, TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, @@ -46,7 +42,7 @@ where .try_with(|aead| f(&aead.borrow())) .map_err(|e| { qerror!("Unable to access Retry AEAD: {:?}", e); - Error::InternalError(6) + Error::InternalError })? } diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 3a25a1bea9..50e458ff36 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -4,34 +4,34 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] -use std::cell::RefCell; -use std::convert::TryFrom; -use std::fmt::{self, Display}; -use std::mem; -use std::net::{IpAddr, SocketAddr}; -use std::rc::Rc; -use std::time::{Duration, Instant}; - -use crate::ackrate::{AckRate, PeerAckDelay}; -use crate::cc::CongestionControlAlgorithm; -use crate::cid::{ConnectionId, ConnectionIdRef, ConnectionIdStore, RemoteConnectionIdEntry}; -use crate::frame::{ - FRAME_TYPE_PATH_CHALLENGE, FRAME_TYPE_PATH_RESPONSE, FRAME_TYPE_RETIRE_CONNECTION_ID, +use std::{ + cell::RefCell, + fmt::{self, Display}, + mem, + net::{IpAddr, SocketAddr}, + rc::Rc, + time::{Duration, Instant}, }; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::rtt::RttEstimate; -use crate::sender::PacketSender; -use crate::stats::FrameStats; -use crate::tracking::{PacketNumberSpace, SentPacket}; -use crate::{Error, Res}; - -use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder}; + +use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; use neqo_crypto::random; +use crate::{ + ackrate::{AckRate, PeerAckDelay}, + cc::CongestionControlAlgorithm, + cid::{ConnectionId, ConnectionIdRef, 
ConnectionIdStore, RemoteConnectionIdEntry}, + frame::{FRAME_TYPE_PATH_CHALLENGE, FRAME_TYPE_PATH_RESPONSE, FRAME_TYPE_RETIRE_CONNECTION_ID}, + packet::PacketBuilder, + recovery::RecoveryToken, + rtt::RttEstimate, + sender::PacketSender, + stats::FrameStats, + tracking::{PacketNumberSpace, SentPacket}, + Stats, +}; + /// This is the MTU that we assume when using IPv6. /// We use this size for Initial packets, so we don't need to worry about probing for support. /// If the path doesn't support this MTU, we will assume that it doesn't support QUIC. @@ -56,6 +56,8 @@ pub type PathRef = Rc>; #[derive(Debug, Default)] pub struct Paths { /// All of the paths. All of these paths will be permanent. + #[allow(unknown_lints)] // available with Rust v1.75 + #[allow(clippy::struct_field_names)] paths: Vec, /// This is the primary path. This will only be `None` initially, so /// care needs to be taken regarding that only during the handshake. @@ -68,7 +70,7 @@ pub struct Paths { /// Connection IDs that need to be retired. to_retire: Vec, - /// QLog handler. + /// `QLog` handler. qlog: NeqoQlog, } @@ -80,6 +82,7 @@ impl Paths { local: SocketAddr, remote: SocketAddr, cc: CongestionControlAlgorithm, + pacing: bool, now: Instant, ) -> PathRef { self.paths @@ -92,7 +95,7 @@ impl Paths { } }) .unwrap_or_else(|| { - let mut p = Path::temporary(local, remote, cc, self.qlog.clone(), now); + let mut p = Path::temporary(local, remote, cc, pacing, self.qlog.clone(), now); if let Some(primary) = self.primary.as_ref() { p.prime_rtt(primary.borrow().rtt()); } @@ -109,6 +112,7 @@ impl Paths { local: SocketAddr, remote: SocketAddr, cc: CongestionControlAlgorithm, + pacing: bool, now: Instant, ) -> PathRef { self.paths @@ -134,6 +138,7 @@ impl Paths { local, remote, cc, + pacing, self.qlog.clone(), now, ))) @@ -149,7 +154,7 @@ impl Paths { /// Get a reference to the primary path. Use this prior to handshake completion. 
pub fn primary_fallible(&self) -> Option { - self.primary.as_ref().map(Rc::clone) + self.primary.clone() } /// Returns true if the path is not permanent. @@ -211,7 +216,7 @@ impl Paths { /// to a migration from a peer, in which case the old path needs to be probed. #[must_use] fn select_primary(&mut self, path: &PathRef) -> Option { - qinfo!([path.borrow()], "set as primary path"); + qdebug!([path.borrow()], "set as primary path"); let old_path = self.primary.replace(Rc::clone(path)).map(|old| { old.borrow_mut().set_primary(false); old @@ -334,7 +339,7 @@ impl Paths { None } }) - .or_else(|| self.primary.as_ref().map(Rc::clone)) + .or_else(|| self.primary.clone()) } /// A `PATH_RESPONSE` was received. @@ -408,7 +413,7 @@ impl Paths { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { while let Some(seqno) = self.to_retire.pop() { if builder.remaining() < 1 + Encoder::varint_len(seqno) { self.to_retire.push(seqno); @@ -416,9 +421,6 @@ impl Paths { } builder.encode_varint(FRAME_TYPE_RETIRE_CONNECTION_ID); builder.encode_varint(seqno); - if builder.len() > builder.limit() { - return Err(Error::InternalError(20)); - } tokens.push(RecoveryToken::RetireConnectionId(seqno)); stats.retire_connection_id += 1; } @@ -427,8 +429,6 @@ impl Paths { self.primary() .borrow_mut() .write_cc_frames(builder, tokens, stats); - - Ok(()) } pub fn lost_retire_cid(&mut self, lost: u64) { @@ -491,7 +491,7 @@ enum ProbeState { } impl ProbeState { - /// Determine whether the current state requires probing. + /// Determine whether the current state requires probing. fn probe_needed(&self) -> bool { matches!(self, Self::ProbeNeeded { .. }) } @@ -525,13 +525,17 @@ pub struct Path { /// For a path that is not validated, this is `None`. For a validated /// path, the time that the path was last valid. validated: Option, - /// A path challenge was received and PATH_RESPONSE has not been sent. 
+ /// A path challenge was received and `PATH_RESPONSE` has not been sent. challenge: Option<[u8; 8]>, /// The round trip time estimate for this path. rtt: RttEstimate, /// A packet sender for the path, which includes congestion control and a pacer. sender: PacketSender, + /// The DSCP/ECN marking to use for outgoing packets on this path. + tos: IpTos, + /// The IP TTL to use for outgoing packets on this path. + ttl: u8, /// The number of bytes received on this path. /// Note that this value might saturate on a long-lived connection, @@ -551,10 +555,11 @@ impl Path { local: SocketAddr, remote: SocketAddr, cc: CongestionControlAlgorithm, + pacing: bool, qlog: NeqoQlog, now: Instant, ) -> Self { - let mut sender = PacketSender::new(cc, Self::mtu_by_addr(remote.ip()), now); + let mut sender = PacketSender::new(cc, pacing, Self::mtu_by_addr(remote.ip()), now); sender.set_qlog(qlog.clone()); Self { local, @@ -567,6 +572,8 @@ impl Path { challenge: None, rtt: RttEstimate::default(), sender, + tos: IpTos::default(), // TODO: Default to Ect0 when ECN is supported. + ttl: 64, // This is the default TTL on many OSes. received_bytes: 0, sent_bytes: 0, qlog, @@ -658,7 +665,7 @@ impl Path { /// Set the remote connection ID based on the peer's choice. /// This is only valid during the handshake. - pub fn set_remote_cid(&mut self, cid: &ConnectionIdRef) { + pub fn set_remote_cid(&mut self, cid: ConnectionIdRef) { self.remote_cid .as_mut() .unwrap() @@ -689,7 +696,7 @@ impl Path { /// Make a datagram. pub fn datagram>>(&self, payload: V) -> Datagram { - Datagram::new(self.local, self.remote, payload) + Datagram::new(self.local, self.remote, self.tos, Some(self.ttl), payload) } /// Get local address as `SocketAddr` @@ -760,9 +767,9 @@ impl Path { stats: &mut FrameStats, mtu: bool, // Whether the packet we're writing into will be a full MTU. now: Instant, - ) -> Res { + ) -> bool { if builder.remaining() < 9 { - return Ok(false); + return false; } // Send PATH_RESPONSE. 
@@ -770,9 +777,6 @@ impl Path { qtrace!([self], "Responding to path challenge {}", hex(challenge)); builder.encode_varint(FRAME_TYPE_PATH_RESPONSE); builder.encode(&challenge[..]); - if builder.len() > builder.limit() { - return Err(Error::InternalError(21)); - } // These frames are not retransmitted in the usual fashion. // There is no token, therefore we need to count `all` specially. @@ -780,7 +784,7 @@ impl Path { stats.all += 1; if builder.remaining() < 9 { - return Ok(true); + return true; } true } else { @@ -790,12 +794,9 @@ impl Path { // Send PATH_CHALLENGE. if let ProbeState::ProbeNeeded { probe_count } = self.state { qtrace!([self], "Initiating path challenge {}", probe_count); - let data = <[u8; 8]>::try_from(&random(8)[..]).unwrap(); + let data = random::<8>(); builder.encode_varint(FRAME_TYPE_PATH_CHALLENGE); builder.encode(&data); - if builder.len() > builder.limit() { - return Err(Error::InternalError(22)); - } // As above, no recovery token. stats.path_challenge += 1; @@ -807,9 +808,9 @@ impl Path { mtu, sent: now, }; - Ok(true) + true } else { - Ok(resp_sent) + resp_sent } } @@ -932,7 +933,7 @@ impl Path { } /// Discard a packet that previously might have been in-flight. - pub fn discard_packet(&mut self, sent: &SentPacket, now: Instant) { + pub fn discard_packet(&mut self, sent: &SentPacket, now: Instant, stats: &mut Stats) { if self.rtt.first_sample_time().is_none() { // When discarding a packet there might not be a good RTT estimate. // But discards only occur after receiving something, so that means @@ -944,6 +945,7 @@ impl Path { "discarding a packet without an RTT estimate; guessing RTT={:?}", now - sent.time_sent ); + stats.rtt_init_guess = true; self.rtt.update( &mut self.qlog, now - sent.time_sent, @@ -959,8 +961,7 @@ impl Path { /// Record packets as acknowledged with the sender. 
pub fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], now: Instant) { debug_assert!(self.is_primary()); - self.sender - .on_packets_acked(acked_pkts, self.rtt.minimum(), now); + self.sender.on_packets_acked(acked_pkts, &self.rtt, now); } /// Record packets as lost with the sender. @@ -994,7 +995,8 @@ impl Path { .map_or(usize::MAX, |limit| { let budget = if limit == 0 { // If we have received absolutely nothing thus far, then this endpoint - // is the one initiating communication on this path. Allow enough space for probing. + // is the one initiating communication on this path. Allow enough space for + // probing. self.mtu() * 5 } else { limit diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index ad86ec2b2e..a8ad986d2a 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -6,118 +6,184 @@ // Functions that handle capturing QLOG traces. -use std::convert::TryFrom; -use std::ops::{Deref, RangeInclusive}; -use std::string::String; -use std::time::Duration; - -use qlog::{self, event::Event, PacketHeader, QuicFrame}; +use std::{ + ops::{Deref, RangeInclusive}, + time::Duration, +}; use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder}; +use qlog::events::{ + connectivity::{ConnectionStarted, ConnectionState, ConnectionStateUpdated}, + quic::{ + AckedRanges, ErrorSpace, MetricsUpdated, PacketDropped, PacketHeader, PacketLost, + PacketReceived, PacketSent, QuicFrame, StreamType, VersionInformation, + }, + EventData, RawInfo, +}; +use smallvec::SmallVec; -use crate::connection::State; -use crate::frame::{CloseError, Frame}; -use crate::packet::{DecryptedPacket, PacketNumber, PacketType, PublicPacket}; -use crate::path::PathRef; -use crate::stream_id::StreamType as NeqoStreamType; -use crate::tparams::{self, TransportParametersHandler}; -use crate::tracking::SentPacket; -use crate::Version; +use crate::{ + connection::State, + frame::{CloseError, Frame}, + packet::{DecryptedPacket, PacketNumber, PacketType, PublicPacket}, 
+ path::PathRef, + stream_id::StreamType as NeqoStreamType, + tparams::{self, TransportParametersHandler}, + tracking::SentPacket, + version::{Version, VersionConfig, WireVersion}, +}; pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHandler) { - qlog.add_event(|| { + qlog.add_event_data(|| { let remote = tph.remote(); - Some(Event::transport_parameters_set( - None, - None, - None, - None, - None, - None, - remote + #[allow(clippy::cast_possible_truncation)] // Nope. + let ev_data = EventData::TransportParametersSet( + qlog::events::quic::TransportParametersSet { + owner: None, + resumption_allowed: None, + early_data_enabled: None, + tls_cipher: None, + aead_tag_length: None, + original_destination_connection_id: remote .get_bytes(tparams::ORIGINAL_DESTINATION_CONNECTION_ID) .map(hex), - remote.get_bytes(tparams::STATELESS_RESET_TOKEN).map(hex), - if remote.get_empty(tparams::DISABLE_MIGRATION) { - Some(true) - } else { - None - }, - Some(remote.get_integer(tparams::IDLE_TIMEOUT)), - Some(remote.get_integer(tparams::MAX_UDP_PAYLOAD_SIZE)), - Some(remote.get_integer(tparams::ACK_DELAY_EXPONENT)), - Some(remote.get_integer(tparams::MAX_ACK_DELAY)), - // TODO(hawkinsw@obs.cr): We do not yet handle ACTIVE_CONNECTION_ID_LIMIT in tparams yet. - None, - Some(format!("{}", remote.get_integer(tparams::INITIAL_MAX_DATA))), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_LOCAL) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_UNI) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAMS_BIDI) - )), - Some(format!( - "{}", - remote.get_integer(tparams::INITIAL_MAX_STREAMS_UNI) - )), - // TODO(hawkinsw@obs.cr): We do not yet handle PREFERRED_ADDRESS in tparams yet. 
- None, - )) - }) + initial_source_connection_id: None, + retry_source_connection_id: None, + stateless_reset_token: remote.get_bytes(tparams::STATELESS_RESET_TOKEN).map(hex), + disable_active_migration: if remote.get_empty(tparams::DISABLE_MIGRATION) { + Some(true) + } else { + None + }, + max_idle_timeout: Some(remote.get_integer(tparams::IDLE_TIMEOUT)), + max_udp_payload_size: Some(remote.get_integer(tparams::MAX_UDP_PAYLOAD_SIZE) as u32), + ack_delay_exponent: Some(remote.get_integer(tparams::ACK_DELAY_EXPONENT) as u16), + max_ack_delay: Some(remote.get_integer(tparams::MAX_ACK_DELAY) as u16), + active_connection_id_limit: Some(remote.get_integer(tparams::ACTIVE_CONNECTION_ID_LIMIT) as u32), + initial_max_data: Some(remote.get_integer(tparams::INITIAL_MAX_DATA)), + initial_max_stream_data_bidi_local: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_LOCAL)), + initial_max_stream_data_bidi_remote: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE)), + initial_max_stream_data_uni: Some(remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_UNI)), + initial_max_streams_bidi: Some(remote.get_integer(tparams::INITIAL_MAX_STREAMS_BIDI)), + initial_max_streams_uni: Some(remote.get_integer(tparams::INITIAL_MAX_STREAMS_UNI)), + preferred_address: remote.get_preferred_address().and_then(|(paddr, cid)| { + Some(qlog::events::quic::PreferredAddress { + ip_v4: paddr.ipv4()?.ip().to_string(), + ip_v6: paddr.ipv6()?.ip().to_string(), + port_v4: paddr.ipv4()?.port(), + port_v6: paddr.ipv6()?.port(), + connection_id: cid.connection_id().to_string(), + stateless_reset_token: hex(cid.reset_token()), + }) + }), + }); + + Some(ev_data) + }); } pub fn server_connection_started(qlog: &mut NeqoQlog, path: &PathRef) { - connection_started(qlog, path) + connection_started(qlog, path); } pub fn client_connection_started(qlog: &mut NeqoQlog, path: &PathRef) { - connection_started(qlog, path) + connection_started(qlog, path); } fn connection_started(qlog: &mut 
NeqoQlog, path: &PathRef) { - qlog.add_event(|| { + qlog.add_event_data(|| { let p = path.deref().borrow(); - Some(Event::connection_started( - if p.local_address().ip().is_ipv4() { - "ipv4".into() + let ev_data = EventData::ConnectionStarted(ConnectionStarted { + ip_version: if p.local_address().ip().is_ipv4() { + Some("ipv4".into()) } else { - "ipv6".into() + Some("ipv6".into()) }, - format!("{}", p.local_address().ip()), - format!("{}", p.remote_address().ip()), - Some("QUIC".into()), - p.local_address().port().into(), - p.remote_address().port().into(), - Some(format!("{:x}", Version::default().wire_version())), - Some(format!("{}", p.local_cid())), - Some(format!("{}", p.remote_cid())), - )) - }) + src_ip: format!("{}", p.local_address().ip()), + dst_ip: format!("{}", p.remote_address().ip()), + protocol: Some("QUIC".into()), + src_port: p.local_address().port().into(), + dst_port: p.remote_address().port().into(), + src_cid: Some(format!("{}", p.local_cid())), + dst_cid: Some(format!("{}", p.remote_cid())), + }); + + Some(ev_data) + }); } pub fn connection_state_updated(qlog: &mut NeqoQlog, new: &State) { - qlog.add_event(|| { - Some(Event::connection_state_updated_min(match new { - State::Init => qlog::ConnectionState::Attempted, - State::WaitInitial => qlog::ConnectionState::Attempted, - State::WaitVersion | State::Handshaking => qlog::ConnectionState::Handshake, - State::Connected => qlog::ConnectionState::Active, - State::Confirmed => qlog::ConnectionState::Active, - State::Closing { .. } => qlog::ConnectionState::Draining, - State::Draining { .. } => qlog::ConnectionState::Draining, - State::Closed { .. 
} => qlog::ConnectionState::Closed, + qlog.add_event_data(|| { + let ev_data = EventData::ConnectionStateUpdated(ConnectionStateUpdated { + old: None, + new: match new { + State::Init | State::WaitInitial => ConnectionState::Attempted, + State::WaitVersion | State::Handshaking => ConnectionState::HandshakeStarted, + State::Connected => ConnectionState::HandshakeCompleted, + State::Confirmed => ConnectionState::HandshakeConfirmed, + State::Closing { .. } => ConnectionState::Closing, + State::Draining { .. } => ConnectionState::Draining, + State::Closed { .. } => ConnectionState::Closed, + }, + }); + + Some(ev_data) + }); +} + +pub fn client_version_information_initiated(qlog: &mut NeqoQlog, version_config: &VersionConfig) { + qlog.add_event_data(|| { + Some(EventData::VersionInformation(VersionInformation { + client_versions: Some( + version_config + .all() + .iter() + .map(|v| format!("{:02x}", v.wire_version())) + .collect(), + ), + server_versions: None, + chosen_version: Some(format!("{:02x}", version_config.initial().wire_version())), })) - }) + }); +} + +pub fn client_version_information_negotiated( + qlog: &mut NeqoQlog, + client: &[Version], + server: &[WireVersion], + chosen: Version, +) { + qlog.add_event_data(|| { + Some(EventData::VersionInformation(VersionInformation { + client_versions: Some( + client + .iter() + .map(|v| format!("{:02x}", v.wire_version())) + .collect(), + ), + server_versions: Some(server.iter().map(|v| format!("{v:02x}")).collect()), + chosen_version: Some(format!("{:02x}", chosen.wire_version())), + })) + }); +} + +pub fn server_version_information_failed( + qlog: &mut NeqoQlog, + server: &[Version], + client: WireVersion, +) { + qlog.add_event_data(|| { + Some(EventData::VersionInformation(VersionInformation { + client_versions: Some(vec![format!("{client:02x}")]), + server_versions: Some( + server + .iter() + .map(|v| format!("{:02x}", v.wire_version())) + .collect(), + ), + chosen_version: None, + })) + }); } pub fn packet_sent( 
@@ -129,59 +195,77 @@ pub fn packet_sent( ) { qlog.add_event_with_stream(|stream| { let mut d = Decoder::from(body); + let header = PacketHeader::with_type(pt.into(), Some(pn), None, None, None); + let raw = RawInfo { + length: Some(plen as u64), + payload_length: None, + data: None, + }; - stream.add_event(Event::packet_sent_min( - to_qlog_pkt_type(pt), - PacketHeader::new( - pn, - Some(u64::try_from(plen).unwrap()), - None, - None, - None, - None, - ), - Some(Vec::new()), - ))?; - + let mut frames = SmallVec::new(); while d.remaining() > 0 { - match Frame::decode(&mut d) { - Ok(f) => { - stream.add_frame(frame_to_qlogframe(&f), false)?; - } - Err(_) => { - qinfo!("qlog: invalid frame"); - break; - } + if let Ok(f) = Frame::decode(&mut d) { + frames.push(QuicFrame::from(&f)); + } else { + qinfo!("qlog: invalid frame"); + break; } } - stream.finish_frames() - }) + let ev_data = EventData::PacketSent(PacketSent { + header, + frames: Some(frames), + is_coalesced: None, + retry_token: None, + stateless_reset_token: None, + supported_versions: None, + raw: Some(raw), + datagram_id: None, + send_at_time: None, + trigger: None, + }); + + stream.add_event_data_now(ev_data) + }); } -pub fn packet_dropped(qlog: &mut NeqoQlog, payload: &PublicPacket) { - qlog.add_event(|| { - Some(Event::packet_dropped( - Some(to_qlog_pkt_type(payload.packet_type())), - Some(u64::try_from(payload.len()).unwrap()), - None, - )) - }) +pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) { + qlog.add_event_data(|| { + let header = + PacketHeader::with_type(public_packet.packet_type().into(), None, None, None, None); + let raw = RawInfo { + length: Some(public_packet.len() as u64), + payload_length: None, + data: None, + }; + + let ev_data = EventData::PacketDropped(PacketDropped { + header: Some(header), + raw: Some(raw), + datagram_id: None, + details: None, + trigger: None, + }); + + Some(ev_data) + }); } pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) { 
qlog.add_event_with_stream(|stream| { for pkt in pkts { - stream.add_event(Event::packet_lost_min( - to_qlog_pkt_type(pkt.pt), - pkt.pn.to_string(), - Vec::new(), - ))?; + let header = PacketHeader::with_type(pkt.pt.into(), Some(pkt.pn), None, None, None); - stream.finish_frames()?; + let ev_data = EventData::PacketLost(PacketLost { + header: Some(header), + trigger: None, + frames: None, + }); + + stream.add_event_data_now(ev_data)?; } Ok(()) - }) + }); } pub fn packet_received( @@ -192,34 +276,44 @@ pub fn packet_received( qlog.add_event_with_stream(|stream| { let mut d = Decoder::from(&payload[..]); - stream.add_event(Event::packet_received( - to_qlog_pkt_type(payload.packet_type()), - PacketHeader::new( - payload.pn(), - Some(u64::try_from(public_packet.len()).unwrap()), - None, - None, - None, - None, - ), - Some(Vec::new()), + let header = PacketHeader::with_type( + public_packet.packet_type().into(), + Some(payload.pn()), None, None, None, - ))?; + ); + let raw = RawInfo { + length: Some(public_packet.len() as u64), + payload_length: None, + data: None, + }; + + let mut frames = Vec::new(); while d.remaining() > 0 { - match Frame::decode(&mut d) { - Ok(f) => stream.add_frame(frame_to_qlogframe(&f), false)?, - Err(_) => { - qinfo!("qlog: invalid frame"); - break; - } + if let Ok(f) = Frame::decode(&mut d) { + frames.push(QuicFrame::from(&f)); + } else { + qinfo!("qlog: invalid frame"); + break; } } - stream.finish_frames() - }) + let ev_data = EventData::PacketReceived(PacketReceived { + header, + frames: Some(frames), + is_coalesced: None, + retry_token: None, + stateless_reset_token: None, + supported_versions: None, + raw: Some(raw), + datagram_id: None, + trigger: None, + }); + + stream.add_event_data_now(ev_data) + }); } #[allow(dead_code)] @@ -241,198 +335,232 @@ pub enum QlogMetric { pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) { debug_assert!(!updated_metrics.is_empty()); - qlog.add_event(|| { - let mut min_rtt: Option = 
None; - let mut smoothed_rtt: Option = None; - let mut latest_rtt: Option = None; - let mut rtt_variance: Option = None; - let mut max_ack_delay: Option = None; - let mut pto_count: Option = None; + qlog.add_event_data(|| { + let mut min_rtt: Option = None; + let mut smoothed_rtt: Option = None; + let mut latest_rtt: Option = None; + let mut rtt_variance: Option = None; + let mut pto_count: Option = None; let mut congestion_window: Option = None; let mut bytes_in_flight: Option = None; let mut ssthresh: Option = None; let mut packets_in_flight: Option = None; - let mut in_recovery: Option = None; let mut pacing_rate: Option = None; for metric in updated_metrics { + #[allow(clippy::cast_precision_loss)] // Nought to do here. match metric { - QlogMetric::MinRtt(v) => min_rtt = Some(u64::try_from(v.as_millis()).unwrap()), - QlogMetric::SmoothedRtt(v) => { - smoothed_rtt = Some(u64::try_from(v.as_millis()).unwrap()) - } - QlogMetric::LatestRtt(v) => { - latest_rtt = Some(u64::try_from(v.as_millis()).unwrap()) - } - QlogMetric::RttVariance(v) => rtt_variance = Some(*v), - QlogMetric::MaxAckDelay(v) => max_ack_delay = Some(*v), - QlogMetric::PtoCount(v) => pto_count = Some(u64::try_from(*v).unwrap()), + QlogMetric::MinRtt(v) => min_rtt = Some(v.as_secs_f32() * 1000.0), + QlogMetric::SmoothedRtt(v) => smoothed_rtt = Some(v.as_secs_f32() * 1000.0), + QlogMetric::LatestRtt(v) => latest_rtt = Some(v.as_secs_f32() * 1000.0), + QlogMetric::RttVariance(v) => rtt_variance = Some(*v as f32), + QlogMetric::PtoCount(v) => pto_count = Some(u16::try_from(*v).unwrap()), QlogMetric::CongestionWindow(v) => { - congestion_window = Some(u64::try_from(*v).unwrap()) + congestion_window = Some(u64::try_from(*v).unwrap()); } QlogMetric::BytesInFlight(v) => bytes_in_flight = Some(u64::try_from(*v).unwrap()), QlogMetric::SsThresh(v) => ssthresh = Some(u64::try_from(*v).unwrap()), QlogMetric::PacketsInFlight(v) => packets_in_flight = Some(*v), - QlogMetric::InRecovery(v) => in_recovery = 
Some(*v), QlogMetric::PacingRate(v) => pacing_rate = Some(*v), + _ => (), } } - Some(Event::metrics_updated( + let ev_data = EventData::MetricsUpdated(MetricsUpdated { min_rtt, smoothed_rtt, latest_rtt, rtt_variance, - max_ack_delay, pto_count, congestion_window, bytes_in_flight, ssthresh, packets_in_flight, - in_recovery, pacing_rate, - )) - }) + }); + + Some(ev_data) + }); } // Helper functions -fn frame_to_qlogframe(frame: &Frame) -> QuicFrame { - match frame { - Frame::Padding => QuicFrame::padding(), - Frame::Ping => QuicFrame::ping(), - Frame::Ack { - largest_acknowledged, - ack_delay, - first_ack_range, - ack_ranges, - } => { - let ranges = - Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges).ok(); - - QuicFrame::ack( - Some(ack_delay.to_string()), - ranges.map(|all| { - all.into_iter() - .map(RangeInclusive::into_inner) - .collect::>() - }), - None, - None, - None, - ) - } - Frame::ResetStream { - stream_id, - application_error_code, - final_size, - } => QuicFrame::reset_stream( - stream_id.as_u64().to_string(), - *application_error_code, - final_size.to_string(), - ), - Frame::StopSending { - stream_id, - application_error_code, - } => QuicFrame::stop_sending(stream_id.as_u64().to_string(), *application_error_code), - Frame::Crypto { offset, data } => { - QuicFrame::crypto(offset.to_string(), data.len().to_string()) - } - Frame::NewToken { token } => QuicFrame::new_token(token.len().to_string(), hex(token)), - Frame::Stream { - fin, - stream_id, - offset, - data, - .. 
- } => QuicFrame::stream( - stream_id.as_u64().to_string(), - offset.to_string(), - data.len().to_string(), - *fin, - None, - ), - Frame::MaxData { maximum_data } => QuicFrame::max_data(maximum_data.to_string()), - Frame::MaxStreamData { - stream_id, - maximum_stream_data, - } => QuicFrame::max_stream_data( - stream_id.as_u64().to_string(), - maximum_stream_data.to_string(), - ), - Frame::MaxStreams { - stream_type, - maximum_streams, - } => QuicFrame::max_streams( - match stream_type { - NeqoStreamType::BiDi => qlog::StreamType::Bidirectional, - NeqoStreamType::UniDi => qlog::StreamType::Unidirectional, +#[allow(clippy::too_many_lines)] // Yeah, but it's a nice match. +#[allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] // No choice here. +impl From<&Frame<'_>> for QuicFrame { + fn from(frame: &Frame) -> Self { + match frame { + // TODO: Add payload length to `QuicFrame::Padding` once + // https://github.com/cloudflare/quiche/pull/1745 is available via the qlog crate. + Frame::Padding { .. 
} => QuicFrame::Padding, + Frame::Ping => QuicFrame::Ping, + Frame::Ack { + largest_acknowledged, + ack_delay, + first_ack_range, + ack_ranges, + } => { + let ranges = + Frame::decode_ack_frame(*largest_acknowledged, *first_ack_range, ack_ranges) + .ok(); + + let acked_ranges = ranges.map(|all| { + AckedRanges::Double( + all.into_iter() + .map(RangeInclusive::into_inner) + .collect::>(), + ) + }); + + QuicFrame::Ack { + ack_delay: Some(*ack_delay as f32 / 1000.0), + acked_ranges, + ect1: None, + ect0: None, + ce: None, + } + } + Frame::ResetStream { + stream_id, + application_error_code, + final_size, + } => QuicFrame::ResetStream { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, + final_size: *final_size, }, - maximum_streams.to_string(), - ), - Frame::DataBlocked { data_limit } => QuicFrame::data_blocked(data_limit.to_string()), - Frame::StreamDataBlocked { - stream_id, - stream_data_limit, - } => QuicFrame::stream_data_blocked( - stream_id.as_u64().to_string(), - stream_data_limit.to_string(), - ), - Frame::StreamsBlocked { - stream_type, - stream_limit, - } => QuicFrame::streams_blocked( - match stream_type { - NeqoStreamType::BiDi => qlog::StreamType::Bidirectional, - NeqoStreamType::UniDi => qlog::StreamType::Unidirectional, + Frame::StopSending { + stream_id, + application_error_code, + } => QuicFrame::StopSending { + stream_id: stream_id.as_u64(), + error_code: *application_error_code, }, - stream_limit.to_string(), - ), - Frame::NewConnectionId { - sequence_number, - retire_prior, - connection_id, - stateless_reset_token, - } => QuicFrame::new_connection_id( - sequence_number.to_string(), - retire_prior.to_string(), - connection_id.len() as u64, - hex(connection_id), - hex(stateless_reset_token), - ), - Frame::RetireConnectionId { sequence_number } => { - QuicFrame::retire_connection_id(sequence_number.to_string()) - } - Frame::PathChallenge { data } => QuicFrame::path_challenge(Some(hex(data))), - Frame::PathResponse { data } => 
QuicFrame::path_response(Some(hex(data))), - Frame::ConnectionClose { - error_code, - frame_type, - reason_phrase, - } => QuicFrame::connection_close( - match error_code { - CloseError::Transport(_) => qlog::ErrorSpace::TransportError, - CloseError::Application(_) => qlog::ErrorSpace::ApplicationError, + Frame::Crypto { offset, data } => QuicFrame::Crypto { + offset: *offset, + length: data.len() as u64, + }, + Frame::NewToken { token } => QuicFrame::NewToken { + token: qlog::Token { + ty: Some(qlog::TokenType::Retry), + details: None, + raw: Some(RawInfo { + data: Some(hex(token)), + length: Some(token.len() as u64), + payload_length: None, + }), + }, + }, + Frame::Stream { + fin, + stream_id, + offset, + data, + .. + } => QuicFrame::Stream { + stream_id: stream_id.as_u64(), + offset: *offset, + length: data.len() as u64, + fin: Some(*fin), + raw: None, + }, + Frame::MaxData { maximum_data } => QuicFrame::MaxData { + maximum: *maximum_data, + }, + Frame::MaxStreamData { + stream_id, + maximum_stream_data, + } => QuicFrame::MaxStreamData { + stream_id: stream_id.as_u64(), + maximum: *maximum_stream_data, + }, + Frame::MaxStreams { + stream_type, + maximum_streams, + } => QuicFrame::MaxStreams { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, + }, + maximum: *maximum_streams, + }, + Frame::DataBlocked { data_limit } => QuicFrame::DataBlocked { limit: *data_limit }, + Frame::StreamDataBlocked { + stream_id, + stream_data_limit, + } => QuicFrame::StreamDataBlocked { + stream_id: stream_id.as_u64(), + limit: *stream_data_limit, + }, + Frame::StreamsBlocked { + stream_type, + stream_limit, + } => QuicFrame::StreamsBlocked { + stream_type: match stream_type { + NeqoStreamType::BiDi => StreamType::Bidirectional, + NeqoStreamType::UniDi => StreamType::Unidirectional, + }, + limit: *stream_limit, }, - error_code.code(), - 0, - String::from_utf8_lossy(reason_phrase).to_string(), - 
Some(frame_type.to_string()), - ), - Frame::HandshakeDone => QuicFrame::handshake_done(), - Frame::AckFrequency { .. } => QuicFrame::unknown(frame.get_type()), - Frame::Datagram { .. } => QuicFrame::unknown(frame.get_type()), + Frame::NewConnectionId { + sequence_number, + retire_prior, + connection_id, + stateless_reset_token, + } => QuicFrame::NewConnectionId { + sequence_number: *sequence_number as u32, + retire_prior_to: *retire_prior as u32, + connection_id_length: Some(connection_id.len() as u8), + connection_id: hex(connection_id), + stateless_reset_token: Some(hex(stateless_reset_token)), + }, + Frame::RetireConnectionId { sequence_number } => QuicFrame::RetireConnectionId { + sequence_number: *sequence_number as u32, + }, + Frame::PathChallenge { data } => QuicFrame::PathChallenge { + data: Some(hex(data)), + }, + Frame::PathResponse { data } => QuicFrame::PathResponse { + data: Some(hex(data)), + }, + Frame::ConnectionClose { + error_code, + frame_type, + reason_phrase, + } => QuicFrame::ConnectionClose { + error_space: match error_code { + CloseError::Transport(_) => Some(ErrorSpace::TransportError), + CloseError::Application(_) => Some(ErrorSpace::ApplicationError), + }, + error_code: Some(error_code.code()), + error_code_value: Some(0), + reason: Some(String::from_utf8_lossy(reason_phrase).to_string()), + trigger_frame_type: Some(*frame_type), + }, + Frame::HandshakeDone => QuicFrame::HandshakeDone, + Frame::AckFrequency { .. } => QuicFrame::Unknown { + frame_type_value: None, + raw_frame_type: frame.get_type(), + raw: None, + }, + Frame::Datagram { data, .. 
} => QuicFrame::Datagram { + length: data.len() as u64, + raw: None, + }, + } } } -fn to_qlog_pkt_type(ptype: PacketType) -> qlog::PacketType { - match ptype { - PacketType::Initial => qlog::PacketType::Initial, - PacketType::Handshake => qlog::PacketType::Handshake, - PacketType::ZeroRtt => qlog::PacketType::ZeroRtt, - PacketType::Short => qlog::PacketType::OneRtt, - PacketType::Retry => qlog::PacketType::Retry, - PacketType::VersionNegotiation => qlog::PacketType::VersionNegotiation, - PacketType::OtherVersion => qlog::PacketType::Unknown, +impl From for qlog::events::quic::PacketType { + fn from(value: PacketType) -> Self { + match value { + PacketType::Initial => qlog::events::quic::PacketType::Initial, + PacketType::Handshake => qlog::events::quic::PacketType::Handshake, + PacketType::ZeroRtt => qlog::events::quic::PacketType::ZeroRtt, + PacketType::Short => qlog::events::quic::PacketType::OneRtt, + PacketType::Retry => qlog::events::quic::PacketType::Retry, + PacketType::VersionNegotiation => qlog::events::quic::PacketType::VersionNegotiation, + PacketType::OtherVersion => qlog::events::quic::PacketType::Unknown, + } } } diff --git a/neqo-transport/src/quic_datagrams.rs b/neqo-transport/src/quic_datagrams.rs index e9c4497cde..d7c4769e31 100644 --- a/neqo-transport/src/quic_datagrams.rs +++ b/neqo-transport/src/quic_datagrams.rs @@ -6,14 +6,17 @@ // https://datatracker.ietf.org/doc/html/draft-ietf-quic-datagram -use crate::frame::{FRAME_TYPE_DATAGRAM, FRAME_TYPE_DATAGRAM_WITH_LEN}; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::{events::OutgoingDatagramOutcome, ConnectionEvents, Error, Res, Stats}; +use std::{cmp::min, collections::VecDeque}; + use neqo_common::Encoder; -use std::cmp::min; -use std::collections::VecDeque; -use std::convert::TryFrom; + +use crate::{ + events::OutgoingDatagramOutcome, + frame::{FRAME_TYPE_DATAGRAM, FRAME_TYPE_DATAGRAM_WITH_LEN}, + packet::PacketBuilder, + recovery::RecoveryToken, + 
ConnectionEvents, Error, Res, Stats, +}; pub const MAX_QUIC_DATAGRAM: u64 = 65535; @@ -100,7 +103,7 @@ impl QuicDatagrams { /// This function tries to write a datagram frame into a packet. /// If the frame does not fit into the packet, the datagram will - /// be dropped and a DatagramLost event will be posted. + /// be dropped and a `DatagramLost` event will be posted. pub fn write_frames( &mut self, builder: &mut PacketBuilder, @@ -140,7 +143,9 @@ impl QuicDatagrams { } /// Returns true if there was an unsent datagram that has been dismissed. + /// /// # Error + /// /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. The funcion does not check if the /// datagram can fit into a packet (i.e. MTU limit). This is checked during diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 9256a0727c..dbea3aaf57 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -6,39 +6,39 @@ // Tracking of sent packets and detecting their loss. 
-#![deny(clippy::pedantic)] - -use std::cmp::{max, min}; -use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::mem; -use std::ops::RangeInclusive; -use std::time::{Duration, Instant}; - -use smallvec::{smallvec, SmallVec}; +use std::{ + cmp::{max, min}, + collections::BTreeMap, + mem, + ops::RangeInclusive, + time::{Duration, Instant}, +}; use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn}; +use smallvec::{smallvec, SmallVec}; -use crate::ackrate::AckRate; -use crate::cid::ConnectionIdEntry; -use crate::crypto::CryptoRecoveryToken; -use crate::packet::PacketNumber; -use crate::path::{Path, PathRef}; -use crate::qlog::{self, QlogMetric}; -use crate::quic_datagrams::DatagramTracking; -use crate::rtt::RttEstimate; -use crate::send_stream::SendStreamRecoveryToken; -use crate::stats::{Stats, StatsCell}; -use crate::stream_id::{StreamId, StreamType}; -use crate::tracking::{AckToken, PacketNumberSpace, PacketNumberSpaceSet, SentPacket}; +use crate::{ + ackrate::AckRate, + cid::ConnectionIdEntry, + crypto::CryptoRecoveryToken, + packet::PacketNumber, + path::{Path, PathRef}, + qlog::{self, QlogMetric}, + quic_datagrams::DatagramTracking, + rtt::RttEstimate, + send_stream::SendStreamRecoveryToken, + stats::{Stats, StatsCell}, + stream_id::{StreamId, StreamType}, + tracking::{AckToken, PacketNumberSpace, PacketNumberSpaceSet, SentPacket}, +}; pub(crate) const PACKET_THRESHOLD: u64 = 3; /// `ACK_ONLY_SIZE_LIMIT` is the minimum size of the congestion window. /// If the congestion window is this small, we will only send ACK frames. pub(crate) const ACK_ONLY_SIZE_LIMIT: usize = 256; -/// The number of packets we send on a PTO. -/// And the number to declare lost when the PTO timer is hit. -pub const PTO_PACKET_COUNT: usize = 2; +/// The maximum number of packets we send on a PTO. +/// And the maximum number to declare lost when the PTO timer is hit. 
+pub const MAX_PTO_PACKET_COUNT: usize = 2; /// The preferred limit on the number of packets that are tracked. /// If we exceed this number, we start sending `PING` frames sooner to /// force the peer to acknowledge some of them. @@ -408,7 +408,7 @@ impl LossRecoverySpace { .sent_packets .iter_mut() // BTreeMap iterates in order of ascending PN - .take_while(|(&k, _)| Some(k) < largest_acked) + .take_while(|(&k, _)| k < largest_acked.unwrap_or(PacketNumber::MAX)) { // Packets sent before now - loss_delay are deemed lost. if packet.time_sent + loss_delay <= now { @@ -426,7 +426,9 @@ impl LossRecoverySpace { largest_acked ); } else { - self.first_ooo_time = Some(packet.time_sent); + if largest_acked.is_some() { + self.first_ooo_time = Some(packet.time_sent); + } // No more packets can be declared lost after this one. break; }; @@ -458,7 +460,9 @@ impl LossRecoverySpaces { /// Drop a packet number space and return all the packets that were /// outstanding, so that those can be marked as lost. + /// /// # Panics + /// /// If the space has already been removed. pub fn drop_space(&mut self, space: PacketNumberSpace) -> impl IntoIterator { let sp = match space { @@ -516,21 +520,34 @@ struct PtoState { } impl PtoState { - pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet) -> Self { + /// The number of packets we send on a PTO. + /// And the number to declare lost when the PTO timer is hit. + fn pto_packet_count(space: PacketNumberSpace, rx_count: usize) -> usize { + if space == PacketNumberSpace::Initial && rx_count == 0 { + // For the Initial space, we only send one packet on PTO if we have not received any + // packets from the peer yet. This avoids sending useless PING-only packets + // when the Client Initial is deemed lost. 
+ 1 + } else { + MAX_PTO_PACKET_COUNT + } + } + + pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) -> Self { debug_assert!(probe[space]); Self { space, count: 1, - packets: PTO_PACKET_COUNT, + packets: Self::pto_packet_count(space, rx_count), probe, } } - pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet) { + pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) { debug_assert!(probe[space]); self.space = space; self.count += 1; - self.packets = PTO_PACKET_COUNT; + self.packets = Self::pto_packet_count(space, rx_count); self.probe = probe; } @@ -605,7 +622,7 @@ impl LossRecovery { .collect::>(); let mut path = primary_path.borrow_mut(); for p in &mut dropped { - path.discard_packet(p, now); + path.discard_packet(p, now, &mut self.stats.borrow_mut()); } dropped } @@ -745,7 +762,7 @@ impl LossRecovery { .collect::>(); let mut path = primary_path.borrow_mut(); for p in &mut dropped { - path.discard_packet(p, now); + path.discard_packet(p, now, &mut self.stats.borrow_mut()); } dropped } @@ -778,7 +795,7 @@ impl LossRecovery { qdebug!([self], "Reset loss recovery state for {}", space); let mut path = primary_path.borrow_mut(); for p in self.spaces.drop_space(space) { - path.discard_packet(&p, now); + path.discard_packet(&p, now, &mut self.stats.borrow_mut()); } // We just made progress, so discard PTO count. 
@@ -806,7 +823,7 @@ impl LossRecovery { (Some(loss_time), Some(pto_time)) => Some(min(loss_time, pto_time)), (Some(loss_time), None) => Some(loss_time), (None, Some(pto_time)) => Some(pto_time), - _ => None, + (None, None) => None, } } @@ -831,11 +848,7 @@ impl LossRecovery { // where F = fast_pto / FAST_PTO_SCALE (== 1 by default) let pto_count = pto_state.map_or(0, |p| u32::try_from(p.count).unwrap_or(0)); rtt.pto(pn_space) - .checked_mul( - u32::from(fast_pto) - .checked_shl(pto_count) - .unwrap_or(u32::MAX), - ) + .checked_mul(u32::from(fast_pto) << min(pto_count, u32::BITS - u8::BITS)) .map_or(Duration::from_secs(3600), |p| p / u32::from(FAST_PTO_SCALE)) } @@ -873,10 +886,11 @@ impl LossRecovery { } fn fire_pto(&mut self, pn_space: PacketNumberSpace, allow_probes: PacketNumberSpaceSet) { + let rx_count = self.stats.borrow().packets_rx; if let Some(st) = &mut self.pto_state { - st.pto(pn_space, allow_probes); + st.pto(pn_space, allow_probes, rx_count); } else { - self.pto_state = Some(PtoState::new(pn_space, allow_probes)); + self.pto_state = Some(PtoState::new(pn_space, allow_probes, rx_count)); } self.pto_state @@ -906,7 +920,14 @@ impl LossRecovery { if t <= now { qdebug!([self], "PTO timer fired for {}", pn_space); let space = self.spaces.get_mut(*pn_space).unwrap(); - lost.extend(space.pto_packets(PTO_PACKET_COUNT).cloned()); + lost.extend( + space + .pto_packets(PtoState::pto_packet_count( + *pn_space, + self.stats.borrow().packets_rx, + )) + .cloned(), + ); pto_space = pto_space.or(Some(*pn_space)); } @@ -994,22 +1015,27 @@ impl ::std::fmt::Display for LossRecovery { #[cfg(test)] mod tests { + use std::{ + cell::RefCell, + ops::{Deref, DerefMut, RangeInclusive}, + rc::Rc, + time::{Duration, Instant}, + }; + + use neqo_common::qlog::NeqoQlog; + use test_fixture::{now, DEFAULT_ADDR}; + use super::{ LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket, FAST_PTO_SCALE, }; - use crate::cc::CongestionControlAlgorithm; - use 
crate::cid::{ConnectionId, ConnectionIdEntry}; - use crate::packet::PacketType; - use crate::path::{Path, PathRef}; - use crate::rtt::RttEstimate; - use crate::stats::{Stats, StatsCell}; - use neqo_common::qlog::NeqoQlog; - use std::cell::RefCell; - use std::convert::TryInto; - use std::ops::{Deref, DerefMut, RangeInclusive}; - use std::rc::Rc; - use std::time::{Duration, Instant}; - use test_fixture::{addr, now}; + use crate::{ + cc::CongestionControlAlgorithm, + cid::{ConnectionId, ConnectionIdEntry}, + packet::PacketType, + path::{Path, PathRef}, + rtt::RttEstimate, + stats::{Stats, StatsCell}, + }; // Shorthand for a time in milliseconds. const fn ms(t: u64) -> Duration { @@ -1075,7 +1101,14 @@ mod tests { impl Default for Fixture { fn default() -> Self { const CC: CongestionControlAlgorithm = CongestionControlAlgorithm::NewReno; - let mut path = Path::temporary(addr(), addr(), CC, NeqoQlog::default(), now()); + let mut path = Path::temporary( + DEFAULT_ADDR, + DEFAULT_ADDR, + CC, + true, + NeqoQlog::default(), + now(), + ); path.make_permanent( None, ConnectionIdEntry::new(0, ConnectionId::from(&[1, 2, 3]), [0; 16]), diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index fbd2fad7bb..5da80d6004 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -7,29 +7,33 @@ // Building a stream of ordered bytes to give the application from a series of // incoming STREAM frames. 
-use std::cmp::max; -use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::mem; -use std::rc::{Rc, Weak}; +use std::{ + cell::RefCell, + cmp::max, + collections::BTreeMap, + mem, + rc::{Rc, Weak}, +}; +use neqo_common::{qtrace, Role}; use smallvec::SmallVec; -use crate::events::ConnectionEvents; -use crate::fc::ReceiverFlowControl; -use crate::frame::FRAME_TYPE_STOP_SENDING; -use crate::packet::PacketBuilder; -use crate::recovery::{RecoveryToken, StreamRecoveryToken}; -use crate::send_stream::SendStreams; -use crate::stats::FrameStats; -use crate::stream_id::StreamId; -use crate::{AppError, Error, Res}; -use neqo_common::{qtrace, Role}; -use std::cell::RefCell; +use crate::{ + events::ConnectionEvents, + fc::ReceiverFlowControl, + frame::FRAME_TYPE_STOP_SENDING, + packet::PacketBuilder, + recovery::{RecoveryToken, StreamRecoveryToken}, + send_stream::SendStreams, + stats::FrameStats, + stream_id::StreamId, + AppError, Error, Res, +}; const RX_STREAM_DATA_WINDOW: u64 = 0x10_0000; // 1MiB // Export as usize for consistency with SEND_BUFFER_SIZE +#[allow(clippy::cast_possible_truncation)] // Yeah, nope. pub const RECV_BUFFER_SIZE: usize = RX_STREAM_DATA_WINDOW as usize; #[derive(Debug, Default)] @@ -126,6 +130,7 @@ pub struct RxStreamOrderer { } impl RxStreamOrderer { + #[must_use] pub fn new() -> Self { Self::default() } @@ -133,6 +138,9 @@ impl RxStreamOrderer { /// Process an incoming stream frame off the wire. This may result in data /// being available to upper layers if frame is not out of order (ooo) or /// if the frame fills a gap. + /// # Panics + /// Only when `u64` values cannot be converted to `usize`, which only + /// happens on 32-bit machines that hold far too much data at the same time. 
pub fn inbound_frame(&mut self, mut new_start: u64, mut new_data: &[u8]) { qtrace!("Inbound data offset={} len={}", new_start, new_data.len()); @@ -196,26 +204,49 @@ impl RxStreamOrderer { false }; - // Now handle possible overlap with next entries - let mut to_remove = SmallVec::<[_; 8]>::new(); let mut to_add = new_data; - - for (&next_start, next_data) in self.data_ranges.range_mut(new_start..) { - let next_end = next_start + u64::try_from(next_data.len()).unwrap(); - let overlap = new_end.saturating_sub(next_start); - if overlap == 0 { - break; - } else if next_end >= new_end { - qtrace!( - "New frame {}-{} overlaps with next frame by {}, truncating", - new_start, - new_end, - overlap - ); - let truncate_to = new_data.len() - usize::try_from(overlap).unwrap(); - to_add = &new_data[..truncate_to]; - break; - } else { + if self + .data_ranges + .last_entry() + .map_or(false, |e| *e.key() >= new_start) + { + // Is this at the end (common case)? If so, nothing to do in this block + // Common case: + // PPPPPP -> PPPPPP + // NNNNNNN NNNNNNN + // or + // PPPPPP -> PPPPPP + // NNNNNNN NNNNNNN + // + // Not the common case, handle possible overlap with next entries + // PPPPPP AAA -> PPPPPP + // NNNNNNN NNNNNNN + // or + // PPPPPP AAAA -> PPPPPP AAAA + // NNNNNNN NNNNN + // or (this is where to_remove is used) + // PPPPPP AA -> PPPPPP + // NNNNNNN NNNNNNN + + let mut to_remove = SmallVec::<[_; 8]>::new(); + + for (&next_start, next_data) in self.data_ranges.range_mut(new_start..) 
{ + let next_end = next_start + u64::try_from(next_data.len()).unwrap(); + let overlap = new_end.saturating_sub(next_start); + if overlap == 0 { + // Fills in the hole, exactly (probably common) + break; + } else if next_end >= new_end { + qtrace!( + "New frame {}-{} overlaps with next frame by {}, truncating", + new_start, + new_end, + overlap + ); + let truncate_to = new_data.len() - usize::try_from(overlap).unwrap(); + to_add = &new_data[..truncate_to]; + break; + } qtrace!( "New frame {}-{} spans entire next frame {}-{}, replacing", new_start, @@ -224,11 +255,12 @@ impl RxStreamOrderer { next_end ); to_remove.push(next_start); + // Continue, since we may have more overlaps } - } - for start in to_remove { - self.data_ranges.remove(&start); + for start in to_remove { + self.data_ranges.remove(&start); + } } if !to_add.is_empty() { @@ -247,6 +279,7 @@ impl RxStreamOrderer { } /// Are any bytes readable? + #[must_use] pub fn data_ready(&self) -> bool { self.data_ranges .keys() @@ -273,20 +306,24 @@ impl RxStreamOrderer { false } }) - .map(|(_, data_len)| data_len as usize) - .sum() + // Accumulate, but saturate at usize::MAX. + .fold(0, |acc: usize, (_, data_len)| { + acc.saturating_add(usize::try_from(data_len).unwrap_or(usize::MAX)) + }) } /// Bytes read by the application. - fn retired(&self) -> u64 { + #[must_use] + pub fn retired(&self) -> u64 { self.retired } - fn received(&self) -> u64 { + #[must_use] + pub fn received(&self) -> u64 { self.received } - /// Data bytes buffered. Could be more than bytes_readable if there are + /// Data bytes buffered. Could be more than `bytes_readable` if there are /// ranges missing. fn buffered(&self) -> u64 { self.data_ranges @@ -560,6 +597,7 @@ impl RecvStream { self.state = new_state; } + #[must_use] pub fn stats(&self) -> RecvStreamStats { match &self.state { RecvStreamState::Recv { recv_buf, .. } @@ -594,6 +632,11 @@ impl RecvStream { } } + /// # Errors + /// When the incoming data violates flow control limits. 
+ /// # Panics + /// Only when `u64` values are so big that they can't fit in a `usize`, which + /// only happens on a 32-bit machine that has far too much unread data. pub fn inbound_stream_frame(&mut self, fin: bool, offset: u64, data: &[u8]) -> Res<()> { // We should post a DataReadable event only once when we change from no-data-ready to // data-ready. Therefore remember the state before processing a new frame. @@ -652,17 +695,19 @@ impl RecvStream { | RecvStreamState::AbortReading { .. } | RecvStreamState::WaitForReset { .. } | RecvStreamState::ResetRecvd { .. } => { - qtrace!("data received when we are in state {}", self.state.name()) + qtrace!("data received when we are in state {}", self.state.name()); } } if !already_data_ready && (self.data_ready() || self.needs_to_inform_app_about_fin()) { - self.conn_events.recv_stream_readable(self.stream_id) + self.conn_events.recv_stream_readable(self.stream_id); } Ok(()) } + /// # Errors + /// When the reset occurs at an invalid point. pub fn reset(&mut self, application_error_code: AppError, final_size: u64) -> Res<()> { self.state.flow_control_consume_data(final_size, true)?; match &mut self.state { @@ -745,6 +790,7 @@ impl RecvStream { } } + #[must_use] pub fn is_terminal(&self) -> bool { matches!( self.state, @@ -765,6 +811,7 @@ impl RecvStream { /// # Errors /// `NoMoreData` if data and fin bit were previously read by the application. + #[allow(clippy::missing_panics_doc)] // with a >16 exabyte packet on a 128-bit machine, maybe pub fn read(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { let data_recvd_state = matches!(self.state, RecvStreamState::DataRecvd { .. }); match &mut self.state { @@ -837,7 +884,7 @@ impl RecvStream { err, final_received: received, final_read: read, - }) + }); } RecvStreamState::DataRecvd { fc, @@ -938,6 +985,7 @@ impl RecvStream { } #[cfg(test)] + #[must_use] pub fn has_frames_to_write(&self) -> bool { if let RecvStreamState::Recv { fc, .. 
} = &self.state { fc.frame_needed() @@ -947,6 +995,7 @@ impl RecvStream { } #[cfg(test)] + #[must_use] pub fn fc(&self) -> Option<&ReceiverFlowControl> { match &self.state { RecvStreamState::Recv { fc, .. } @@ -961,9 +1010,18 @@ impl RecvStream { #[cfg(test)] mod tests { - use super::*; - use neqo_common::Encoder; - use std::ops::Range; + use std::{cell::RefCell, ops::Range, rc::Rc}; + + use neqo_common::{qtrace, Encoder}; + + use super::RecvStream; + use crate::{ + fc::ReceiverFlowControl, + packet::PacketBuilder, + recv_stream::{RxStreamOrderer, RX_STREAM_DATA_WINDOW}, + stats::FrameStats, + ConnectionEvents, Error, StreamId, RECV_BUFFER_SIZE, + }; const SESSION_WINDOW: usize = 1024; @@ -991,6 +1049,7 @@ mod tests { } #[test] + #[allow(unknown_lints, clippy::single_range_in_vec_init)] // Because that lint makes no sense here. fn recv_noncontiguous() { // Non-contiguous with the start, no data available. recv_ranges(&[10..20], 0); @@ -1109,7 +1168,7 @@ mod tests { s.inbound_frame(offset, &[0; EXTRA_SIZE]); // Read, providing only enough space for the first. 
- let mut buf = vec![0; 100]; + let mut buf = [0; 100]; let count = s.read(&mut buf[..CHUNK_SIZE]); assert_eq!(count, CHUNK_SIZE); let count = s.read(&mut buf[..]); @@ -1412,8 +1471,8 @@ mod tests { let mut buf = vec![0u8; RECV_BUFFER_SIZE + 100]; // Make it overlarge assert!(!s.has_frames_to_write()); - s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE]) - .unwrap(); + let big_buf = vec![0; RECV_BUFFER_SIZE]; + s.inbound_stream_frame(false, 0, &big_buf).unwrap(); assert!(!s.has_frames_to_write()); assert_eq!(s.read(&mut buf).unwrap(), (RECV_BUFFER_SIZE, false)); assert!(!s.data_ready()); @@ -1444,8 +1503,8 @@ mod tests { fn stream_max_stream_data() { let mut s = create_stream(1024 * RX_STREAM_DATA_WINDOW); assert!(!s.has_frames_to_write()); - s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE]) - .unwrap(); + let big_buf = vec![0; RECV_BUFFER_SIZE]; + s.inbound_stream_frame(false, 0, &big_buf).unwrap(); s.inbound_stream_frame(false, RX_STREAM_DATA_WINDOW, &[1; 1]) .unwrap_err(); } @@ -1488,9 +1547,10 @@ mod tests { #[test] fn no_stream_flowc_event_after_exiting_recv() { let mut s = create_stream(1024 * RX_STREAM_DATA_WINDOW); - s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE]) - .unwrap(); - let mut buf = [0; RECV_BUFFER_SIZE]; + let mut buf = vec![0; RECV_BUFFER_SIZE]; + // Write from buf at first. + s.inbound_stream_frame(false, 0, &buf).unwrap(); + // Then read into it. 
s.read(&mut buf).unwrap(); assert!(s.has_frames_to_write()); s.inbound_stream_frame(true, RX_STREAM_DATA_WINDOW, &[]) @@ -1602,7 +1662,7 @@ mod tests { assert_eq!(fc.retired(), retired); } - /// Test consuming the flow control in RecvStreamState::Recv + /// Test consuming the flow control in `RecvStreamState::Recv` #[test] fn fc_state_recv_1() { const SW: u64 = 1024; @@ -1619,7 +1679,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 4, 0); } - /// Test consuming the flow control in RecvStreamState::Recv + /// Test consuming the flow control in `RecvStreamState::Recv` /// with multiple streams #[test] fn fc_state_recv_2() { @@ -1646,7 +1706,7 @@ mod tests { check_fc(s2.fc().unwrap(), SW / 4, 0); } - /// Test retiring the flow control in RecvStreamState::Recv + /// Test retiring the flow control in `RecvStreamState::Recv` /// with multiple streams #[test] fn fc_state_recv_3() { @@ -1698,7 +1758,7 @@ mod tests { check_fc(s2.fc().unwrap(), SW / 4, SW / 4); } - /// Test consuming the flow control in RecvStreamState::Recv - duplicate data + /// Test consuming the flow control in `RecvStreamState::Recv` - duplicate data #[test] fn fc_state_recv_4() { const SW: u64 = 1024; @@ -1721,7 +1781,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 4, 0); } - /// Test consuming the flow control in RecvStreamState::Recv - filling a gap in the + /// Test consuming the flow control in `RecvStreamState::Recv` - filling a gap in the /// data stream. #[test] fn fc_state_recv_5() { @@ -1742,7 +1802,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 4, 0); } - /// Test consuming the flow control in RecvStreamState::Recv - receiving frame past + /// Test consuming the flow control in `RecvStreamState::Recv` - receiving frame past /// the flow control will cause an error. 
#[test] fn fc_state_recv_6() { @@ -1827,7 +1887,7 @@ mod tests { assert_eq!(stats.max_stream_data, 1); } - /// Test flow control in RecvStreamState::SizeKnown + /// Test flow control in `RecvStreamState::SizeKnown` #[test] fn fc_state_size_known() { const SW: u64 = 1024; @@ -1884,7 +1944,7 @@ mod tests { assert!(s.fc().is_none()); } - /// Test flow control in RecvStreamState::DataRecvd + /// Test flow control in `RecvStreamState::DataRecvd` #[test] fn fc_state_data_recv() { const SW: u64 = 1024; @@ -1929,7 +1989,7 @@ mod tests { assert!(s.fc().is_none()); } - /// Test flow control in RecvStreamState::DataRead + /// Test flow control in `RecvStreamState::DataRead` #[test] fn fc_state_data_read() { const SW: u64 = 1024; @@ -1967,7 +2027,7 @@ mod tests { assert!(s.fc().is_none()); } - /// Test flow control in RecvStreamState::AbortReading and final size is known + /// Test flow control in `RecvStreamState::AbortReading` and final size is known #[test] fn fc_state_abort_reading_1() { const SW: u64 = 1024; @@ -2009,7 +2069,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 2, SW / 2); } - /// Test flow control in RecvStreamState::AbortReading and final size is unknown + /// Test flow control in `RecvStreamState::AbortReading` and final size is unknown #[test] fn fc_state_abort_reading_2() { const SW: u64 = 1024; @@ -2067,7 +2127,7 @@ mod tests { check_fc(s.fc().unwrap(), SW / 2 + 20, SW / 2 + 20); } - /// Test flow control in RecvStreamState::WaitForReset + /// Test flow control in `RecvStreamState::WaitForReset` #[test] fn fc_state_wait_for_reset() { const SW: u64 = 1024; diff --git a/neqo-transport/src/rtt.rs b/neqo-transport/src/rtt.rs index 3d6d0e70f8..3b2969f689 100644 --- a/neqo-transport/src/rtt.rs +++ b/neqo-transport/src/rtt.rs @@ -6,19 +6,21 @@ // Tracking of sent packets and detecting their loss. 
-#![deny(clippy::pedantic)] - -use std::cmp::{max, min}; -use std::time::{Duration, Instant}; +use std::{ + cmp::{max, min}, + time::{Duration, Instant}, +}; use neqo_common::{qlog::NeqoQlog, qtrace}; -use crate::ackrate::{AckRate, PeerAckDelay}; -use crate::packet::PacketBuilder; -use crate::qlog::{self, QlogMetric}; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::tracking::PacketNumberSpace; +use crate::{ + ackrate::{AckRate, PeerAckDelay}, + packet::PacketBuilder, + qlog::{self, QlogMetric}, + recovery::RecoveryToken, + stats::FrameStats, + tracking::PacketNumberSpace, +}; /// The smallest time that the system timer (via `sleep()`, `nanosleep()`, /// `select()`, or similar) can reliably deliver; see `neqo_common::hrtime`. @@ -47,6 +49,18 @@ impl RttEstimate { self.rttvar = rtt / 2; } + #[cfg(test)] + pub const fn from_duration(rtt: Duration) -> Self { + Self { + first_sample_time: None, + latest_rtt: rtt, + smoothed_rtt: rtt, + rttvar: Duration::from_millis(0), + min_rtt: rtt, + ack_delay: PeerAckDelay::Fixed(Duration::from_millis(25)), + } + } + pub fn set_initial(&mut self, rtt: Duration) { qtrace!("initial RTT={:?}", rtt); if rtt >= GRANULARITY { diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 4a2bf08002..8771ec7765 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -9,18 +9,16 @@ use std::{ cell::RefCell, cmp::{max, min, Ordering}, - collections::{BTreeMap, VecDeque}, - convert::TryFrom, + collections::{btree_map::Entry, BTreeMap, VecDeque}, + hash::{Hash, Hasher}, mem, ops::Add, rc::Rc, }; use indexmap::IndexMap; +use neqo_common::{qdebug, qerror, qtrace, Encoder, Role}; use smallvec::SmallVec; -use std::hash::{Hash, Hasher}; - -use neqo_common::{qdebug, qerror, qinfo, qtrace, Encoder, Role}; use crate::{ events::ConnectionEvents, @@ -112,7 +110,7 @@ impl Add for TransmissionPriority { /// If data is lost, this determines the priority that applies 
to retransmissions /// of that data. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub enum RetransmissionPriority { /// Prioritize retransmission at a fixed priority. /// With this, it is possible to prioritize retransmissions lower than transmissions. @@ -124,19 +122,14 @@ pub enum RetransmissionPriority { Same, /// Increase the priority of retransmissions (the default). /// Retransmissions of `Critical` or `Important` aren't elevated at all. + #[default] Higher, /// Increase the priority of retransmissions a lot. /// This is useful for streams that are particularly exposed to head-of-line blocking. MuchHigher, } -impl Default for RetransmissionPriority { - fn default() -> Self { - Self::Higher - } -} - -#[derive(Debug, PartialEq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] enum RangeState { Sent, Acked, @@ -145,174 +138,268 @@ enum RangeState { /// Track ranges in the stream as sent or acked. Acked implies sent. Not in a /// range implies needing-to-be-sent, either initially or as a retransmission. #[derive(Debug, Default, PartialEq)] -struct RangeTracker { - // offset, (len, RangeState). Use u64 for len because ranges can exceed 32bits. +pub struct RangeTracker { + /// The number of bytes that have been acknowledged starting from offset 0. + acked: u64, + /// A map that tracks the state of ranges. + /// Keys are the offset of the start of the range. + /// Values is a tuple of the range length and its state. used: BTreeMap, + /// This is a cache for the output of `first_unmarked_range`, which we check a lot. + first_unmarked: Option<(u64, Option)>, } impl RangeTracker { fn highest_offset(&self) -> u64 { self.used - .range(..) 
- .next_back() - .map_or(0, |(k, (v, _))| *k + *v) + .last_key_value() + .map_or(self.acked, |(&k, &(v, _))| k + v) } fn acked_from_zero(&self) -> u64 { - self.used - .get(&0) - .filter(|(_, state)| *state == RangeState::Acked) - .map_or(0, |(v, _)| *v) + self.acked } /// Find the first unmarked range. If all are contiguous, this will return - /// (highest_offset(), None). - fn first_unmarked_range(&self) -> (u64, Option) { - let mut prev_end = 0; + /// (`highest_offset()`, None). + fn first_unmarked_range(&mut self) -> (u64, Option) { + if let Some(first_unmarked) = self.first_unmarked { + return first_unmarked; + } + + let mut prev_end = self.acked; - for (cur_off, (cur_len, _)) in &self.used { - if prev_end == *cur_off { + for (&cur_off, &(cur_len, _)) in &self.used { + if prev_end == cur_off { prev_end = cur_off + cur_len; } else { - return (prev_end, Some(cur_off - prev_end)); + let res = (prev_end, Some(cur_off - prev_end)); + self.first_unmarked = Some(res); + return res; } } + self.first_unmarked = Some((prev_end, None)); (prev_end, None) } - /// Turn one range into a list of subranges that align with existing - /// ranges. - /// Check impermissible overlaps in subregions: Sent cannot overwrite Acked. - // - // e.g. given N is new and ABC are existing: - // NNNNNNNNNNNNNNNN - // AAAAA BBBCCCCC ...then we want 5 chunks: - // 1122222333444555 - // - // but also if we have this: - // NNNNNNNNNNNNNNNN - // AAAAAAAAAA BBBB ...then break existing A and B ranges up: - // - // 1111111122222233 - // aaAAAAAAAA BBbb - // - // Doing all this work up front should make handling each chunk much - // easier. 
- fn chunk_range_on_edges( - &mut self, - new_off: u64, - new_len: u64, - new_state: RangeState, - ) -> Vec<(u64, u64, RangeState)> { - let mut tmp_off = new_off; - let mut tmp_len = new_len; - let mut v = Vec::new(); - - // cut previous overlapping range if needed - let prev = self.used.range_mut(..tmp_off).next_back(); - if let Some((prev_off, (prev_len, prev_state))) = prev { - let prev_state = *prev_state; - let overlap = (*prev_off + *prev_len).saturating_sub(new_off); - *prev_len -= overlap; - if overlap > 0 { - self.used.insert(new_off, (overlap, prev_state)); + /// When the range of acknowledged bytes from zero increases, we need to drop any + /// ranges within that span AND maybe extend it to include any adjacent acknowledged ranges. + fn coalesce_acked(&mut self) { + while let Some(e) = self.used.first_entry() { + match self.acked.cmp(e.key()) { + Ordering::Greater => { + let (off, (len, state)) = e.remove_entry(); + let overflow = (off + len).saturating_sub(self.acked); + if overflow > 0 { + if state == RangeState::Acked { + self.acked += overflow; + } else { + self.used.insert(self.acked, (overflow, state)); + } + break; + } + } + Ordering::Equal => { + if e.get().1 == RangeState::Acked { + let (len, _) = e.remove(); + self.acked += len; + } + break; + } + Ordering::Less => break, } } + } - let mut last_existing_remaining = None; - for (off, (len, state)) in self.used.range(tmp_off..tmp_off + tmp_len) { - // Create chunk for "overhang" before an existing range - if tmp_off < *off { - let sub_len = off - tmp_off; - v.push((tmp_off, sub_len, new_state)); - tmp_off += sub_len; - tmp_len -= sub_len; - } + /// Mark a range as acknowledged. This is simpler than marking a range as sent + /// because an acknowledged range can never turn back into a sent range, so + /// this function can just override the entire range. + /// + /// The only tricky parts are making sure that we maintain `self.acked`, + /// which is the first acknowledged range. 
And making sure that we don't create + /// ranges of the same type that are adjacent; these need to be merged. + #[allow(clippy::missing_panics_doc)] // with a >16 exabyte packet on a 128-bit machine, maybe + pub fn mark_acked(&mut self, new_off: u64, new_len: usize) { + let end = new_off + u64::try_from(new_len).unwrap(); + let new_off = max(self.acked, new_off); + let mut new_len = end.saturating_sub(new_off); + if new_len == 0 { + return; + } - // Create chunk to match existing range - let sub_len = min(*len, tmp_len); - let remaining_len = len - sub_len; - if new_state == RangeState::Sent && *state == RangeState::Acked { - qinfo!( - "Attempted to downgrade overlapping range Acked range {}-{} with Sent {}-{}", - off, - len, - new_off, - new_len - ); - } else { - v.push((tmp_off, sub_len, new_state)); - } - tmp_off += sub_len; - tmp_len -= sub_len; + self.first_unmarked = None; + if new_off == self.acked { + self.acked += new_len; + self.coalesce_acked(); + return; + } + let mut new_end = new_off + new_len; - if remaining_len > 0 { - last_existing_remaining = Some((*off, sub_len, remaining_len, *state)); + // Get all existing ranges that start within this new range. + let mut covered = self + .used + .range(new_off..new_end) + .map(|(&k, _)| k) + .collect::>(); + + if let Entry::Occupied(next_entry) = self.used.entry(new_end) { + // Check if the very next entry is the same type as this. + if next_entry.get().1 == RangeState::Acked { + // If is is acked, drop it and extend this new range. + let (extra_len, _) = next_entry.remove(); + new_len += extra_len; + new_end += extra_len; + } + } else if let Some(last) = covered.pop() { + // Otherwise, the last of the existing ranges might overhang this one by some. + let (old_off, (old_len, old_state)) = self.used.remove_entry(&last).unwrap(); // can't fail + let remainder = (old_off + old_len).saturating_sub(new_end); + if remainder > 0 { + if old_state == RangeState::Acked { + // Just extend the current range. 
+ new_len += remainder; + new_end += remainder; + } else { + self.used.insert(new_end, (remainder, RangeState::Sent)); + } } } - - // Maybe break last existing range in two so that a final chunk will - // have the same length as an existing range entry - if let Some((off, sub_len, remaining_len, state)) = last_existing_remaining { - *self.used.get_mut(&off).expect("must be there") = (sub_len, state); - self.used.insert(off + sub_len, (remaining_len, state)); + // All covered ranges can just be trashed. + for k in covered { + self.used.remove(&k); } - // Create final chunk if anything remains of the new range - if tmp_len > 0 { - v.push((tmp_off, tmp_len, new_state)) + // Now either merge with a preceding acked range + // or cut a preceding sent range as needed. + let prev = self.used.range_mut(..new_off).next_back(); + if let Some((prev_off, (prev_len, prev_state))) = prev { + let prev_end = *prev_off + *prev_len; + if prev_end >= new_off { + if *prev_state == RangeState::Sent { + *prev_len = new_off - *prev_off; + if prev_end > new_end { + // There is some extra sent range after the new acked range. + self.used + .insert(new_end, (prev_end - new_end, RangeState::Sent)); + } + } else { + *prev_len = max(prev_end, new_end) - *prev_off; + return; + } + } + } + self.used.insert(new_off, (new_len, RangeState::Acked)); + } + + /// Turn a single sent range into a list of subranges that align with existing + /// acknowledged ranges. + /// + /// This is more complicated than adding acked ranges because any acked ranges + /// need to be kept in place, with sent ranges filling the gaps. 
+ /// + /// This means: + /// ```ignore + /// AAA S AAAS AAAAA + /// + SSSSSSSSSSSSS + /// = AAASSSAAASSAAAAA + /// ``` + /// + /// But we also have to ensure that: + /// ```ignore + /// SSSS + /// + SS + /// = SSSSSS + /// ``` + /// and + /// ```ignore + /// SSSSS + /// + SS + /// = SSSSSS + /// ``` + #[allow(clippy::missing_panics_doc)] // not possible + pub fn mark_sent(&mut self, mut new_off: u64, new_len: usize) { + let new_end = new_off + u64::try_from(new_len).unwrap(); + new_off = max(self.acked, new_off); + let mut new_len = new_end.saturating_sub(new_off); + if new_len == 0 { + return; } - v - } + self.first_unmarked = None; - /// Merge contiguous Acked ranges into the first entry (0). This range may - /// be dropped from the send buffer. - fn coalesce_acked_from_zero(&mut self) { - let acked_range_from_zero = self + // Get all existing ranges that start within this new range. + let covered = self .used - .get_mut(&0) - .filter(|(_, state)| *state == RangeState::Acked) - .map(|(len, _)| *len); - - if let Some(len_from_zero) = acked_range_from_zero { - let mut to_remove = SmallVec::<[_; 8]>::new(); - - let mut new_len_from_zero = len_from_zero; - - // See if there's another Acked range entry contiguous to this one - while let Some((next_len, _)) = self - .used - .get(&new_len_from_zero) - .filter(|(_, state)| *state == RangeState::Acked) - { - to_remove.push(new_len_from_zero); - new_len_from_zero += *next_len; + .range(new_off..(new_off + new_len)) + .map(|(&k, _)| k) + .collect::>(); + + if let Entry::Occupied(next_entry) = self.used.entry(new_end) { + if next_entry.get().1 == RangeState::Sent { + // Check if the very next entry is the same type as this, so it can be merged. 
+ let (extra_len, _) = next_entry.remove(); + new_len += extra_len; } + } - if len_from_zero != new_len_from_zero { - self.used.get_mut(&0).expect("must be there").0 = new_len_from_zero; - } + // Merge with any preceding sent range that might overlap, + // or cut the head of this if the preceding range is acked. + let prev = self.used.range(..new_off).next_back(); + if let Some((&prev_off, &(prev_len, prev_state))) = prev { + if prev_off + prev_len >= new_off { + let overlap = prev_off + prev_len - new_off; + new_len = new_len.saturating_sub(overlap); + if new_len == 0 { + // The previous range completely covers this one (no more to do). + return; + } - for val in to_remove { - self.used.remove(&val); + if prev_state == RangeState::Acked { + // The previous range is acked, so it cuts this one. + new_off += overlap; + } else { + // Extend the current range backwards. + new_off = prev_off; + new_len += prev_len; + // The previous range will be updated below. + // It might need to be cut because of a covered acked range. + } } } - } - - fn mark_range(&mut self, off: u64, len: usize, state: RangeState) { - if len == 0 { - qinfo!("mark 0-length range at {}", off); - return; - } - let subranges = self.chunk_range_on_edges(off, len as u64, state); - - for (sub_off, sub_len, sub_state) in subranges { - self.used.insert(sub_off, (sub_len, sub_state)); + // Now interleave new sent chunks with any existing acked chunks. + for old_off in covered { + let Entry::Occupied(e) = self.used.entry(old_off) else { + unreachable!(); + }; + let &(old_len, old_state) = e.get(); + if old_state == RangeState::Acked { + // Now we have to insert a chunk ahead of this acked chunk. 
+ let chunk_len = old_off - new_off; + if chunk_len > 0 { + self.used.insert(new_off, (chunk_len, RangeState::Sent)); + } + let included = chunk_len + old_len; + new_len = new_len.saturating_sub(included); + if new_len == 0 { + return; + } + new_off += included; + } else { + let overhang = (old_off + old_len).saturating_sub(new_off + new_len); + new_len += overhang; + if *e.key() != new_off { + // Retain a sent entry at `new_off`. + // This avoids the work of removing and re-creating an entry. + // The value will be overwritten when the next insert occurs, + // either when this loop hits an acked range (above) + // or for any remainder (below). + e.remove(); + } + } } - self.coalesce_acked_from_zero() + self.used.insert(new_off, (new_len, RangeState::Sent)); } fn unmark_range(&mut self, off: u64, len: usize) { @@ -321,6 +408,7 @@ impl RangeTracker { return; } + self.first_unmarked = None; let len = u64::try_from(len).unwrap(); let end_off = off + len; @@ -382,6 +470,9 @@ impl RangeTracker { } /// Unmark all sent ranges. + /// # Panics + /// On 32-bit machines where far too much is sent before calling this. + /// Note that this should not be called for handshakes, which should never exceed that limit. pub fn unmark_sent(&mut self) { self.unmark_range(0, usize::try_from(self.highest_offset()).unwrap()); } @@ -390,36 +481,37 @@ impl RangeTracker { /// Buffer to contain queued bytes and track their state. #[derive(Debug, Default, PartialEq)] pub struct TxBuffer { - retired: u64, // contig acked bytes, no longer in buffer send_buf: VecDeque, // buffer of not-acked bytes ranges: RangeTracker, // ranges in buffer that have been sent or acked } impl TxBuffer { + #[must_use] pub fn new() -> Self { Self::default() } - /// Attempt to add some or all of the passed-in buffer to the TxBuffer. + /// Attempt to add some or all of the passed-in buffer to the `TxBuffer`. 
pub fn send(&mut self, buf: &[u8]) -> usize { let can_buffer = min(SEND_BUFFER_SIZE - self.buffered(), buf.len()); if can_buffer > 0 { self.send_buf.extend(&buf[..can_buffer]); - assert!(self.send_buf.len() <= SEND_BUFFER_SIZE); + debug_assert!(self.send_buf.len() <= SEND_BUFFER_SIZE); } can_buffer } - pub fn next_bytes(&self) -> Option<(u64, &[u8])> { + #[allow(clippy::missing_panics_doc)] // These are not possible. + pub fn next_bytes(&mut self) -> Option<(u64, &[u8])> { let (start, maybe_len) = self.ranges.first_unmarked_range(); - if start == self.retired + u64::try_from(self.buffered()).unwrap() { + if start == self.retired() + u64::try_from(self.buffered()).unwrap() { return None; } // Convert from ranges-relative-to-zero to // ranges-relative-to-buffer-start - let buff_off = usize::try_from(start - self.retired).unwrap(); + let buff_off = usize::try_from(start - self.retired()).unwrap(); // Deque returns two slices. Create a subslice from whichever // one contains the first unmarked data. @@ -443,27 +535,26 @@ impl TxBuffer { } pub fn mark_as_sent(&mut self, offset: u64, len: usize) { - self.ranges.mark_range(offset, len, RangeState::Sent) + self.ranges.mark_sent(offset, len); } + #[allow(clippy::missing_panics_doc)] // Not possible here. pub fn mark_as_acked(&mut self, offset: u64, len: usize) { - self.ranges.mark_range(offset, len, RangeState::Acked); + let prev_retired = self.retired(); + self.ranges.mark_acked(offset, len); - // We can drop contig acked range from the buffer - let new_retirable = self.ranges.acked_from_zero() - self.retired; + // Any newly-retired bytes can be dropped from the buffer. 
+ let new_retirable = self.retired() - prev_retired; debug_assert!(new_retirable <= self.buffered() as u64); - let keep_len = - self.buffered() - usize::try_from(new_retirable).expect("should fit in usize"); + let keep = self.buffered() - usize::try_from(new_retirable).unwrap(); // Truncate front - self.send_buf.rotate_left(self.buffered() - keep_len); - self.send_buf.truncate(keep_len); - - self.retired += new_retirable; + self.send_buf.rotate_left(self.buffered() - keep); + self.send_buf.truncate(keep); } pub fn mark_as_lost(&mut self, offset: u64, len: usize) { - self.ranges.unmark_range(offset, len) + self.ranges.unmark_range(offset, len); } /// Forget about anything that was marked as sent. @@ -471,8 +562,9 @@ impl TxBuffer { self.ranges.unmark_sent(); } + #[must_use] pub fn retired(&self) -> u64 { - self.retired + self.ranges.acked_from_zero() } fn buffered(&self) -> usize { @@ -484,7 +576,7 @@ impl TxBuffer { } fn used(&self) -> u64 { - self.retired + u64::try_from(self.buffered()).unwrap() + self.retired() + u64::try_from(self.buffered()).unwrap() } } @@ -622,7 +714,7 @@ pub struct SendStream { impl Hash for SendStream { fn hash(&self, state: &mut H) { - self.stream_id.hash(state) + self.stream_id.hash(state); } } @@ -699,6 +791,7 @@ impl SendStream { self.fair = make_fair; } + #[must_use] pub fn is_fair(&self) -> bool { self.fair } @@ -712,6 +805,7 @@ impl SendStream { self.retransmission_priority = retransmission; } + #[must_use] pub fn sendorder(&self) -> Option { self.sendorder } @@ -721,6 +815,7 @@ impl SendStream { } /// If all data has been buffered or written, how much was sent. + #[must_use] pub fn final_size(&self) -> Option { match &self.state { SendStreamState::DataSent { send_buf, .. 
} => Some(send_buf.used()), @@ -729,10 +824,13 @@ impl SendStream { } } + #[must_use] pub fn stats(&self) -> SendStreamStats { SendStreamStats::new(self.bytes_written(), self.bytes_sent, self.bytes_acked()) } + #[must_use] + #[allow(clippy::missing_panics_doc)] // not possible pub fn bytes_written(&self) -> u64 { match &self.state { SendStreamState::Send { send_buf, .. } | SendStreamState::DataSent { send_buf, .. } => { @@ -751,10 +849,11 @@ impl SendStream { final_written, .. } => *final_retired + *final_written, - _ => 0, + SendStreamState::Ready { .. } => 0, } } + #[must_use] pub fn bytes_acked(&self) -> u64 { match &self.state { SendStreamState::Send { send_buf, .. } | SendStreamState::DataSent { send_buf, .. } => { @@ -763,7 +862,7 @@ impl SendStream { SendStreamState::DataRecvd { retired, .. } => *retired, SendStreamState::ResetSent { final_retired, .. } | SendStreamState::ResetRecvd { final_retired, .. } => *final_retired, - _ => 0, + SendStreamState::Ready { .. } => 0, } } @@ -772,11 +871,13 @@ impl SendStream { /// offset. fn next_bytes(&mut self, retransmission_only: bool) -> Option<(u64, &[u8])> { match self.state { - SendStreamState::Send { ref send_buf, .. } => { - send_buf.next_bytes().and_then(|(offset, slice)| { + SendStreamState::Send { + ref mut send_buf, .. + } => { + let result = send_buf.next_bytes(); + if let Some((offset, slice)) = result { if retransmission_only { qtrace!( - [self], "next_bytes apply retransmission limit at {}", self.retransmission_offset ); @@ -792,13 +893,16 @@ impl SendStream { } else { Some((offset, slice)) } - }) + } else { + None + } } SendStreamState::DataSent { - ref send_buf, + ref mut send_buf, fin_sent, .. } => { + let used = send_buf.used(); // immutable first let bytes = send_buf.next_bytes(); if bytes.is_some() { bytes @@ -806,7 +910,7 @@ impl SendStream { None } else { // Send empty stream frame with fin set - Some((send_buf.used(), &[])) + Some((used, &[])) } } SendStreamState::Ready { .. 
} @@ -839,6 +943,7 @@ impl SendStream { } /// Maybe write a `STREAM` frame. + #[allow(clippy::missing_panics_doc)] // not possible pub fn write_stream_frame( &mut self, priority: TransmissionPriority, @@ -909,7 +1014,7 @@ impl SendStream { | SendStreamState::Send { .. } | SendStreamState::DataSent { .. } | SendStreamState::DataRecvd { .. } => { - qtrace!([self], "Reset acked while in {} state?", self.state.name()) + qtrace!([self], "Reset acked while in {} state?", self.state.name()); } SendStreamState::ResetSent { final_retired, @@ -1001,6 +1106,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn mark_as_sent(&mut self, offset: u64, len: usize, fin: bool) { self.bytes_sent = max(self.bytes_sent, offset + u64::try_from(len).unwrap()); @@ -1016,6 +1122,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn mark_as_acked(&mut self, offset: u64, len: usize, fin: bool) { match self.state { SendStreamState::Send { @@ -1023,7 +1130,7 @@ impl SendStream { } => { send_buf.mark_as_acked(offset, len); if self.avail() > 0 { - self.conn_events.send_stream_writable(self.stream_id) + self.conn_events.send_stream_writable(self.stream_id); } } SendStreamState::DataSent { @@ -1053,6 +1160,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn mark_as_lost(&mut self, offset: u64, len: usize, fin: bool) { self.retransmission_offset = max( self.retransmission_offset, @@ -1081,6 +1189,7 @@ impl SendStream { /// Bytes sendable on stream. Constrained by stream credit available, /// connection credit available, and space in the tx buffer. + #[must_use] pub fn avail(&self) -> usize { if let SendStreamState::Ready { fc, conn_fc } | SendStreamState::Send { fc, conn_fc, .. 
} = &self.state @@ -1101,11 +1210,12 @@ impl SendStream { let stream_was_blocked = fc.available() == 0; fc.update(limit); if stream_was_blocked && self.avail() > 0 { - self.conn_events.send_stream_writable(self.stream_id) + self.conn_events.send_stream_writable(self.stream_id); } } } + #[must_use] pub fn is_terminal(&self) -> bool { matches!( self.state, @@ -1113,10 +1223,14 @@ impl SendStream { ) } + /// # Errors + /// When `buf` is empty or when the stream is already closed. pub fn send(&mut self, buf: &[u8]) -> Res { self.send_internal(buf, false) } + /// # Errors + /// When `buf` is empty or when the stream is already closed. pub fn send_atomic(&mut self, buf: &[u8]) -> Res { self.send_internal(buf, true) } @@ -1161,9 +1275,9 @@ impl SendStream { if atomic { self.send_blocked_if_space_needed(buf.len()); return Ok(0); - } else { - &buf[..self.avail()] } + + &buf[..self.avail()] } else { buf }; @@ -1208,6 +1322,7 @@ impl SendStream { } } + #[allow(clippy::missing_panics_doc)] // not possible pub fn reset(&mut self, err: AppError) { match &self.state { SendStreamState::Ready { fc, .. } => { @@ -1285,7 +1400,8 @@ pub struct OrderGroupIter<'a> { // We store the next position in the OrderGroup. // Otherwise we'd need an explicit "done iterating" call to be made, or implement Drop to // copy the value back. - // This is where next was when we iterated for the first time; when we get back to that we stop. + // This is where next was when we iterated for the first time; when we get back to that we + // stop. started_at: Option, } @@ -1301,6 +1417,7 @@ impl OrderGroup { } } + #[must_use] pub fn stream_ids(&self) -> &Vec { &self.vec } @@ -1324,20 +1441,24 @@ impl OrderGroup { next } + /// # Panics + /// If the stream ID is already present. 
pub fn insert(&mut self, stream_id: StreamId) { - match self.vec.binary_search(&stream_id) { - Ok(_) => panic!("Duplicate stream_id {}", stream_id), // element already in vector @ `pos` - Err(pos) => self.vec.insert(pos, stream_id), - } + let Err(pos) = self.vec.binary_search(&stream_id) else { + // element already in vector @ `pos` + panic!("Duplicate stream_id {stream_id}"); + }; + self.vec.insert(pos, stream_id); } + /// # Panics + /// If the stream ID is not present. pub fn remove(&mut self, stream_id: StreamId) { - match self.vec.binary_search(&stream_id) { - Ok(pos) => { - self.vec.remove(pos); - } - Err(_) => panic!("Missing stream_id {}", stream_id), // element already in vector @ `pos` - } + let Ok(pos) = self.vec.binary_search(&stream_id) else { + // element already in vector @ `pos` + panic!("Missing stream_id {stream_id}"); + }; + self.vec.remove(pos); } } @@ -1484,7 +1605,7 @@ impl SendStreams { pub fn reset_acked(&mut self, id: StreamId) { if let Some(ss) = self.map.get_mut(&id) { - ss.reset_acked() + ss.reset_acked(); } } @@ -1526,7 +1647,7 @@ impl SendStreams { match stream.sendorder() { None => regular.remove(*stream_id), Some(sendorder) => { - sendordered.get_mut(&sendorder).unwrap().remove(*stream_id) + sendordered.get_mut(&sendorder).unwrap().remove(*stream_id); } }; } @@ -1578,16 +1699,16 @@ impl SendStreams { // Iterate the map, but only those without fairness, then iterate // OrderGroups, then iterate each group - qdebug!("processing streams... unfair:"); + qtrace!("processing streams... 
unfair:"); for stream in self.map.values_mut() { if !stream.is_fair() { - qdebug!(" {}", stream); + qtrace!(" {}", stream); if !stream.write_frames_with_early_return(priority, builder, tokens, stats) { break; } } } - qdebug!("fair streams:"); + qtrace!("fair streams:"); let stream_ids = self.regular.iter().chain( self.sendordered .values_mut() @@ -1595,23 +1716,20 @@ impl SendStreams { .flat_map(|group| group.iter()), ); for stream_id in stream_ids { - match self.map.get_mut(&stream_id).unwrap().sendorder() { - Some(order) => qdebug!(" {} ({})", stream_id, order), - None => qdebug!(" None"), + let stream = self.map.get_mut(&stream_id).unwrap(); + if let Some(order) = stream.sendorder() { + qtrace!(" {} ({})", stream_id, order); + } else { + qtrace!(" None"); } - if !self - .map - .get_mut(&stream_id) - .unwrap() - .write_frames_with_early_return(priority, builder, tokens, stats) - { + if !stream.write_frames_with_early_return(priority, builder, tokens, stats) { break; } } } pub fn update_initial_limit(&mut self, remote: &TransportParameters) { - for (id, ss) in self.map.iter_mut() { + for (id, ss) in &mut self.map { let limit = if id.is_bidi() { assert!(!id.is_remote_initiated(Role::Client)); remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE) @@ -1642,55 +1760,391 @@ pub struct SendStreamRecoveryToken { #[cfg(test)] mod tests { - use super::*; - - use crate::events::ConnectionEvent; - use neqo_common::{event::Provider, hex_with_len, qtrace}; + use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + + use neqo_common::{event::Provider, hex_with_len, qtrace, Encoder}; + + use super::SendStreamRecoveryToken; + use crate::{ + connection::{RetransmissionPriority, TransmissionPriority}, + events::ConnectionEvent, + fc::SenderFlowControl, + packet::PacketBuilder, + recovery::{RecoveryToken, StreamRecoveryToken}, + send_stream::{ + RangeState, RangeTracker, SendStream, SendStreamState, SendStreams, TxBuffer, + }, + stats::FrameStats, + ConnectionEvents, 
StreamId, SEND_BUFFER_SIZE, + }; fn connection_fc(limit: u64) -> Rc>> { Rc::new(RefCell::new(SenderFlowControl::new((), limit))) } #[test] - fn test_mark_range() { + fn mark_acked_from_zero() { let mut rt = RangeTracker::default(); // ranges can go from nothing->Sent if queued for retrans and then // acks arrive - rt.mark_range(5, 5, RangeState::Acked); + rt.mark_acked(5, 5); assert_eq!(rt.highest_offset(), 10); assert_eq!(rt.acked_from_zero(), 0); - rt.mark_range(10, 4, RangeState::Acked); + rt.mark_acked(10, 4); assert_eq!(rt.highest_offset(), 14); assert_eq!(rt.acked_from_zero(), 0); - rt.mark_range(0, 5, RangeState::Sent); + rt.mark_sent(0, 5); assert_eq!(rt.highest_offset(), 14); assert_eq!(rt.acked_from_zero(), 0); - rt.mark_range(0, 5, RangeState::Acked); + rt.mark_acked(0, 5); assert_eq!(rt.highest_offset(), 14); assert_eq!(rt.acked_from_zero(), 14); - rt.mark_range(12, 20, RangeState::Acked); + rt.mark_acked(12, 20); assert_eq!(rt.highest_offset(), 32); assert_eq!(rt.acked_from_zero(), 32); // ack the lot - rt.mark_range(0, 400, RangeState::Acked); + rt.mark_acked(0, 400); assert_eq!(rt.highest_offset(), 400); assert_eq!(rt.acked_from_zero(), 400); // acked trumps sent - rt.mark_range(0, 200, RangeState::Sent); + rt.mark_sent(0, 200); assert_eq!(rt.highest_offset(), 400); assert_eq!(rt.acked_from_zero(), 400); } + /// Check that `marked_acked` correctly handles all paths. + /// ```ignore + /// SSS SSSAAASSS + /// + AAAAAAAAA + /// = SSSAAAAAAAAASS + /// ``` + #[test] + fn mark_acked_1() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 3); + rt.mark_sent(6, 3); + rt.mark_acked(9, 3); + rt.mark_sent(12, 3); + + rt.mark_acked(3, 10); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (3, RangeState::Sent)); + canon.used.insert(3, (10, RangeState::Acked)); + canon.used.insert(13, (2, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. 
+ /// ```ignore + /// SSS SSS AAA + /// + AAAAAAAAA + /// = SSAAAAAAAAAAAA + /// ``` + #[test] + fn mark_acked_2() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 3); + rt.mark_sent(6, 3); + rt.mark_acked(12, 3); + + rt.mark_acked(2, 10); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (2, RangeState::Sent)); + canon.used.insert(2, (13, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. + /// ```ignore + /// AASSS AAAA + /// + AAAAAAAAA + /// = AAAAAAAAAAAA + /// ``` + #[test] + fn mark_acked_3() { + let mut rt = RangeTracker::default(); + rt.mark_acked(1, 2); + rt.mark_sent(3, 3); + rt.mark_acked(8, 4); + + rt.mark_acked(0, 9); + + let canon = RangeTracker { + acked: 12, + ..RangeTracker::default() + }; + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. + /// ```ignore + /// SSS + /// + AAAA + /// = AAAASS + /// ``` + #[test] + fn mark_acked_4() { + let mut rt = RangeTracker::default(); + rt.mark_sent(3, 3); + + rt.mark_acked(0, 4); + + let mut canon = RangeTracker { + acked: 4, + ..Default::default() + }; + canon.used.insert(4, (2, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. + /// ```ignore + /// AAAAAASSS + /// + AAA + /// = AAAAAASSS + /// ``` + #[test] + fn mark_acked_5() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 6); + rt.mark_sent(6, 3); + + rt.mark_acked(3, 3); + + let mut canon = RangeTracker { + acked: 6, + ..RangeTracker::default() + }; + canon.used.insert(6, (3, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. 
+ /// ```ignore + /// AAA AAA AAA + /// + AAAAAAA + /// = AAAAAAAAAAAAA + /// ``` + #[test] + fn mark_acked_6() { + let mut rt = RangeTracker::default(); + rt.mark_acked(3, 3); + rt.mark_acked(8, 3); + rt.mark_acked(13, 3); + + rt.mark_acked(6, 7); + + let mut canon = RangeTracker::default(); + canon.used.insert(3, (13, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. + /// ```ignore + /// AAA AAA + /// + AAA + /// = AAAAAAAA + /// ``` + #[test] + fn mark_acked_7() { + let mut rt = RangeTracker::default(); + rt.mark_acked(3, 3); + rt.mark_acked(8, 3); + + rt.mark_acked(6, 3); + + let mut canon = RangeTracker::default(); + canon.used.insert(3, (8, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. + /// ```ignore + /// SSSSSSSS + /// + AAAA + /// = SSAAAASS + /// ``` + #[test] + fn mark_acked_8() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 8); + + rt.mark_acked(2, 4); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (2, RangeState::Sent)); + canon.used.insert(2, (4, RangeState::Acked)); + canon.used.insert(6, (2, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_acked` correctly handles all paths. + /// ```ignore + /// SSS + /// + AAA + /// = AAA SSS + /// ``` + #[test] + fn mark_acked_9() { + let mut rt = RangeTracker::default(); + rt.mark_sent(5, 3); + + rt.mark_acked(0, 3); + + let mut canon = RangeTracker { + acked: 3, + ..Default::default() + }; + canon.used.insert(5, (3, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_sent` correctly handles all paths. 
+ /// ```ignore + /// AAA AAA SSS + /// + SSSSSSSSSSSS + /// = AAASSSAAASSSSSS + /// ``` + #[test] + fn mark_sent_1() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 3); + rt.mark_acked(6, 3); + rt.mark_sent(12, 3); + + rt.mark_sent(0, 12); + + let mut canon = RangeTracker { + acked: 3, + ..RangeTracker::default() + }; + canon.used.insert(3, (3, RangeState::Sent)); + canon.used.insert(6, (3, RangeState::Acked)); + canon.used.insert(9, (6, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_sent` correctly handles all paths. + /// ```ignore + /// AAASS AAA S SSSS + /// + SSSSSSSSSSSSS + /// = AAASSSAAASSSSSSS + /// ``` + #[test] + fn mark_sent_2() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 3); + rt.mark_sent(3, 2); + rt.mark_acked(6, 3); + rt.mark_sent(10, 1); + rt.mark_sent(12, 4); + + rt.mark_sent(0, 13); + + let mut canon = RangeTracker { + acked: 3, + ..RangeTracker::default() + }; + canon.used.insert(3, (3, RangeState::Sent)); + canon.used.insert(6, (3, RangeState::Acked)); + canon.used.insert(9, (7, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_sent` correctly handles all paths. + /// ```ignore + /// AAA AAA + /// + SSSS + /// = AAASSAAA + /// ``` + #[test] + fn mark_sent_3() { + let mut rt = RangeTracker::default(); + rt.mark_acked(0, 3); + rt.mark_acked(5, 3); + + rt.mark_sent(2, 4); + + let mut canon = RangeTracker { + acked: 3, + ..RangeTracker::default() + }; + canon.used.insert(3, (2, RangeState::Sent)); + canon.used.insert(5, (3, RangeState::Acked)); + assert_eq!(rt, canon); + } + + /// Check that `marked_sent` correctly handles all paths. 
+ /// ```ignore + /// SSS AAA SS + /// + SSSSSSSS + /// = SSSSSAAASSSS + /// ``` + #[test] + fn mark_sent_4() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 3); + rt.mark_acked(5, 3); + rt.mark_sent(10, 2); + + rt.mark_sent(2, 8); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (5, RangeState::Sent)); + canon.used.insert(5, (3, RangeState::Acked)); + canon.used.insert(8, (4, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_sent` correctly handles all paths. + /// ```ignore + /// AAA + /// + SSSSSS + /// = AAASSS + /// ``` + #[test] + fn mark_sent_5() { + let mut rt = RangeTracker::default(); + rt.mark_acked(3, 3); + + rt.mark_sent(3, 6); + + let mut canon = RangeTracker::default(); + canon.used.insert(3, (3, RangeState::Acked)); + canon.used.insert(6, (3, RangeState::Sent)); + assert_eq!(rt, canon); + } + + /// Check that `marked_sent` correctly handles all paths. + /// ```ignore + /// SSSSS + /// + SSS + /// = SSSSS + /// ``` + #[test] + fn mark_sent_6() { + let mut rt = RangeTracker::default(); + rt.mark_sent(0, 5); + + rt.mark_sent(1, 3); + + let mut canon = RangeTracker::default(); + canon.used.insert(0, (5, RangeState::Sent)); + assert_eq!(rt, canon); + } + #[test] fn unmark_sent_start() { let mut rt = RangeTracker::default(); - rt.mark_range(0, 5, RangeState::Sent); + rt.mark_sent(0, 5); assert_eq!(rt.highest_offset(), 5); assert_eq!(rt.acked_from_zero(), 0); @@ -1704,13 +2158,13 @@ mod tests { fn unmark_sent_middle() { let mut rt = RangeTracker::default(); - rt.mark_range(0, 5, RangeState::Acked); + rt.mark_acked(0, 5); assert_eq!(rt.highest_offset(), 5); assert_eq!(rt.acked_from_zero(), 5); - rt.mark_range(5, 5, RangeState::Sent); + rt.mark_sent(5, 5); assert_eq!(rt.highest_offset(), 10); assert_eq!(rt.acked_from_zero(), 5); - rt.mark_range(10, 5, RangeState::Acked); + rt.mark_acked(10, 5); assert_eq!(rt.highest_offset(), 15); assert_eq!(rt.acked_from_zero(), 5); 
assert_eq!(rt.first_unmarked_range(), (15, None)); @@ -1725,10 +2179,10 @@ mod tests { fn unmark_sent_end() { let mut rt = RangeTracker::default(); - rt.mark_range(0, 5, RangeState::Acked); + rt.mark_acked(0, 5); assert_eq!(rt.highest_offset(), 5); assert_eq!(rt.acked_from_zero(), 5); - rt.mark_range(5, 5, RangeState::Sent); + rt.mark_sent(5, 5); assert_eq!(rt.highest_offset(), 10); assert_eq!(rt.acked_from_zero(), 5); assert_eq!(rt.first_unmarked_range(), (10, None)); @@ -1754,11 +2208,11 @@ mod tests { } #[test] - fn test_unmark_range() { + fn unmark_range() { let mut rt = RangeTracker::default(); - rt.mark_range(5, 5, RangeState::Acked); - rt.mark_range(10, 5, RangeState::Sent); + rt.mark_acked(5, 5); + rt.mark_sent(10, 5); // Should unmark sent but not acked range rt.unmark_range(7, 6); @@ -1774,11 +2228,11 @@ mod tests { (&13, &(2, RangeState::Sent)) ); assert!(rt.used.iter().nth(2).is_none()); - rt.mark_range(0, 5, RangeState::Sent); + rt.mark_sent(0, 5); let res = rt.first_unmarked_range(); assert_eq!(res, (10, Some(3))); - rt.mark_range(10, 3, RangeState::Sent); + rt.mark_sent(10, 3); let res = rt.first_unmarked_range(); assert_eq!(res, (15, None)); @@ -1792,14 +2246,15 @@ mod tests { assert_eq!(txb.avail(), SEND_BUFFER_SIZE); // Fill the buffer - assert_eq!(txb.send(&[1; SEND_BUFFER_SIZE * 2]), SEND_BUFFER_SIZE); + let big_buf = vec![1; SEND_BUFFER_SIZE * 2]; + assert_eq!(txb.send(&big_buf), SEND_BUFFER_SIZE); assert!(matches!(txb.next_bytes(), - Some((0, x)) if x.len()==SEND_BUFFER_SIZE + Some((0, x)) if x.len() == SEND_BUFFER_SIZE && x.iter().all(|ch| *ch == 1))); // Mark almost all as sent. Get what's left let one_byte_from_end = SEND_BUFFER_SIZE as u64 - 1; - txb.mark_as_sent(0, one_byte_from_end as usize); + txb.mark_as_sent(0, usize::try_from(one_byte_from_end).unwrap()); assert!(matches!(txb.next_bytes(), Some((start, x)) if x.len() == 1 && start == one_byte_from_end @@ -1807,7 +2262,7 @@ mod tests { // Mark all as sent. 
Get nothing txb.mark_as_sent(0, SEND_BUFFER_SIZE); - assert!(matches!(txb.next_bytes(), None)); + assert!(txb.next_bytes().is_none()); // Mark as lost. Get it again txb.mark_as_lost(one_byte_from_end, 1); @@ -1828,14 +2283,14 @@ mod tests { // Contig acked range at start means it can be removed from buffer // Impl of vecdeque should now result in a split buffer when more data // is sent - txb.mark_as_acked(0, five_bytes_from_end as usize); + txb.mark_as_acked(0, usize::try_from(five_bytes_from_end).unwrap()); assert_eq!(txb.send(&[2; 30]), 30); // Just get 5 even though there is more assert!(matches!(txb.next_bytes(), Some((start, x)) if x.len() == 5 && start == five_bytes_from_end && x.iter().all(|ch| *ch == 1))); - assert_eq!(txb.retired, five_bytes_from_end); + assert_eq!(txb.retired(), five_bytes_from_end); assert_eq!(txb.buffered(), 35); // Marking that bit as sent should let the last contig bit be returned @@ -1854,7 +2309,8 @@ mod tests { assert_eq!(txb.avail(), SEND_BUFFER_SIZE); // Fill the buffer - assert_eq!(txb.send(&[1; SEND_BUFFER_SIZE * 2]), SEND_BUFFER_SIZE); + let big_buf = vec![1; SEND_BUFFER_SIZE * 2]; + assert_eq!(txb.send(&big_buf), SEND_BUFFER_SIZE); assert!(matches!(txb.next_bytes(), Some((0, x)) if x.len()==SEND_BUFFER_SIZE && x.iter().all(|ch| *ch == 1))); @@ -1862,7 +2318,7 @@ mod tests { // As above let forty_bytes_from_end = SEND_BUFFER_SIZE as u64 - 40; - txb.mark_as_acked(0, forty_bytes_from_end as usize); + txb.mark_as_acked(0, usize::try_from(forty_bytes_from_end).unwrap()); assert!(matches!(txb.next_bytes(), Some((start, x)) if x.len() == 40 && start == forty_bytes_from_end @@ -1890,7 +2346,7 @@ mod tests { // Ack entire first slice and into second slice let ten_bytes_past_end = SEND_BUFFER_SIZE as u64 + 10; - txb.mark_as_acked(0, ten_bytes_past_end as usize); + txb.mark_as_acked(0, usize::try_from(ten_bytes_past_end).unwrap()); // Get up to marked range A assert!(matches!(txb.next_bytes(), @@ -1908,11 +2364,11 @@ mod tests { // No 
more bytes. txb.mark_as_sent(range_a_end, 60); - assert!(matches!(txb.next_bytes(), None)); + assert!(txb.next_bytes().is_none()); } #[test] - fn test_stream_tx() { + fn stream_tx() { let conn_fc = connection_fc(4096); let conn_events = ConnectionEvents::default(); @@ -1928,22 +2384,23 @@ mod tests { } // Should hit stream flow control limit before filling up send buffer - let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap(); + let big_buf = vec![4; SEND_BUFFER_SIZE + 100]; + let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap(); assert_eq!(res, 1024 - 100); // should do nothing, max stream data already 1024 s.set_max_stream_data(1024); - let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap(); + let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap(); assert_eq!(res, 0); // should now hit the conn flow control (4096) s.set_max_stream_data(1_048_576); - let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap(); + let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap(); assert_eq!(res, 3072); // should now hit the tx buffer size conn_fc.borrow_mut().update(SEND_BUFFER_SIZE as u64); - let res = s.send(&[4; SEND_BUFFER_SIZE + 100]).unwrap(); + let res = s.send(&big_buf).unwrap(); assert_eq!(res, SEND_BUFFER_SIZE - 4096); // TODO(agrover@mozilla.com): test ooo acks somehow @@ -2014,10 +2471,8 @@ mod tests { // tx buffer size. 
assert_eq!(s.avail(), SEND_BUFFER_SIZE - 4); - assert_eq!( - s.send(&[b'a'; SEND_BUFFER_SIZE]).unwrap(), - SEND_BUFFER_SIZE - 4 - ); + let big_buf = vec![b'a'; SEND_BUFFER_SIZE]; + assert_eq!(s.send(&big_buf).unwrap(), SEND_BUFFER_SIZE - 4); // No event because still blocked by tx buffer full s.set_max_stream_data(2_000_000_000); @@ -2397,8 +2852,7 @@ mod tests { ); let mut send_buf = TxBuffer::new(); - send_buf.retired = u64::try_from(offset).unwrap(); - send_buf.ranges.mark_range(0, offset, RangeState::Acked); + send_buf.ranges.mark_acked(0, offset); let mut fc = SenderFlowControl::new(StreamId::from(stream), MAX_VARINT); fc.consume(offset); let conn_fc = Rc::new(RefCell::new(SenderFlowControl::new((), MAX_VARINT))); diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 05cf9740bb..3a54851533 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -5,18 +5,22 @@ // except according to those terms. // Congestion control -#![deny(clippy::pedantic)] + #![allow(clippy::module_name_repetitions)] -use crate::cc::{ - ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno, +use std::{ + fmt::{self, Debug, Display}, + time::{Duration, Instant}, }; -use crate::pace::Pacer; -use crate::tracking::SentPacket; + use neqo_common::qlog::NeqoQlog; -use std::fmt::{self, Debug, Display}; -use std::time::{Duration, Instant}; +use crate::{ + cc::{ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno}, + pace::Pacer, + rtt::RttEstimate, + tracking::SentPacket, +}; /// The number of packets we allow to burst from the pacer. 
pub const PACING_BURST_SIZE: usize = 2; @@ -35,7 +39,12 @@ impl Display for PacketSender { impl PacketSender { #[must_use] - pub fn new(alg: CongestionControlAlgorithm, mtu: usize, now: Instant) -> Self { + pub fn new( + alg: CongestionControlAlgorithm, + pacing_enabled: bool, + mtu: usize, + now: Instant, + ) -> Self { Self { cc: match alg { CongestionControlAlgorithm::NewReno => { @@ -45,7 +54,7 @@ impl PacketSender { Box::new(ClassicCongestionControl::new(Cubic::default())) } }, - pacer: Pacer::new(now, mtu * PACING_BURST_SIZE, mtu), + pacer: Pacer::new(pacing_enabled, now, mtu * PACING_BURST_SIZE, mtu), } } @@ -63,8 +72,13 @@ impl PacketSender { self.cc.cwnd_avail() } - pub fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant) { - self.cc.on_packets_acked(acked_pkts, min_rtt, now); + pub fn on_packets_acked( + &mut self, + acked_pkts: &[SentPacket], + rtt_est: &RttEstimate, + now: Instant, + ) { + self.cc.on_packets_acked(acked_pkts, rtt_est, now); } /// Called when packets are lost. Returns true if the congestion window was reduced. diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 6db5e3e8c7..96a6244ef1 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -6,6 +6,18 @@ // This file implements a server that can handle multiple connections. 
+use std::{ + cell::RefCell, + collections::{HashMap, HashSet, VecDeque}, + fs::OpenOptions, + mem, + net::SocketAddr, + ops::{Deref, DerefMut}, + path::PathBuf, + rc::{Rc, Weak}, + time::{Duration, Instant}, +}; + use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, timer::Timer, Datagram, Decoder, Role, @@ -14,23 +26,16 @@ use neqo_crypto::{ encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult, ZeroRttChecker, }; +use qlog::streamer::QlogStreamer; pub use crate::addr_valid::ValidateAddress; -use crate::addr_valid::{AddressValidation, AddressValidationResult}; -use crate::cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}; -use crate::connection::{Connection, Output, State}; -use crate::packet::{PacketBuilder, PacketType, PublicPacket}; -use crate::{ConnectionParameters, Res, Version}; - -use std::cell::RefCell; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::fs::OpenOptions; -use std::mem; -use std::net::SocketAddr; -use std::ops::{Deref, DerefMut}; -use std::path::PathBuf; -use std::rc::{Rc, Weak}; -use std::time::{Duration, Instant}; +use crate::{ + addr_valid::{AddressValidation, AddressValidationResult}, + cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef}, + connection::{Connection, Output, State}, + packet::{PacketBuilder, PacketType, PublicPacket}, + ConnectionParameters, Res, Version, +}; pub enum InitialResult { Accept, @@ -38,7 +43,7 @@ pub enum InitialResult { Retry(Vec), } -/// MIN_INITIAL_PACKET_SIZE is the smallest packet that can be used to establish +/// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish /// a new connection across all QUIC versions this server supports. const MIN_INITIAL_PACKET_SIZE: usize = 1200; /// The size of timer buckets. 
This is higher than the actual timer granularity @@ -163,7 +168,7 @@ pub struct Server { /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap, - /// All connections, keyed by ConnectionId. + /// All connections, keyed by `ConnectionId`. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet, @@ -185,11 +190,13 @@ impl Server { /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. - /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This - /// will be passed the value of the `extra` argument that was passed to - /// `Connection::send_ticket` to see if it is OK. - /// * `cid_generator` is responsible for generating connection IDs and parsing them; - /// connection IDs produced by the manager cannot be zero-length. + /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This will be passed the + /// value of the `extra` argument that was passed to `Connection::send_ticket` to see if it is + /// OK. + /// * `cid_generator` is responsible for generating connection IDs and parsing them; connection + /// IDs produced by the manager cannot be zero-length. + /// # Errors + /// When address validation state cannot be created. pub fn new( now: Instant, certs: &[impl AsRef], @@ -235,6 +242,8 @@ impl Server { self.ciphers = Vec::from(ciphers.as_ref()); } + /// # Errors + /// When the configuration is invalid. 
pub fn enable_ech( &mut self, config: u8, @@ -246,6 +255,7 @@ impl Server { Ok(()) } + #[must_use] pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } @@ -257,8 +267,8 @@ impl Server { fn process_connection( &mut self, - c: StateRef, - dgram: Option, + c: &StateRef, + dgram: Option<&Datagram>, now: Instant, ) -> Option { qtrace!([self], "Process connection {:?}", c); @@ -266,24 +276,24 @@ impl Server { match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); - self.waiting.push_back(Rc::clone(&c)); + self.waiting.push_back(Rc::clone(c)); } Output::Callback(delay) => { let next = now + delay; if next != c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); - self.remove_timer(&c); + self.remove_timer(c); c.borrow_mut().last_timer = next; - self.timers.add(next, Rc::clone(&c)); + self.timers.add(next, Rc::clone(c)); } } - _ => { - self.remove_timer(&c); + Output::None => { + self.remove_timer(c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); - self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); + self.active.insert(ActiveConnectionRef { c: Rc::clone(c) }); } if *c.borrow().state() > State::Handshaking { @@ -297,19 +307,19 @@ impl Server { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() - .retain(|_, v| !Rc::ptr_eq(v, &c)); + .retain(|_, v| !Rc::ptr_eq(v, c)); } out.dgram() } - fn connection(&self, cid: &ConnectionIdRef) -> Option { - self.connections.borrow().get(&cid[..]).map(Rc::clone) + fn connection(&self, cid: ConnectionIdRef) -> Option { + self.connections.borrow().get(&cid[..]).cloned() } fn handle_initial( &mut self, initial: InitialDetails, - dgram: Datagram, + dgram: &Datagram, now: Instant, ) -> Option { qdebug!([self], "Handle initial"); @@ -331,9 +341,7 @@ impl Server { dgram.source(), now, ); - let token = if let Ok(t) = res { - t - } else { + let Ok(token) = res else { 
qerror!([self], "unable to generate token, dropping packet"); return None; }; @@ -346,7 +354,13 @@ impl Server { &initial.dst_cid, ); if let Ok(p) = packet { - let retry = Datagram::new(dgram.destination(), dgram.source(), p); + let retry = Datagram::new( + dgram.destination(), + dgram.source(), + dgram.tos(), + dgram.ttl(), + p, + ); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); @@ -363,7 +377,7 @@ impl Server { fn connection_attempt( &mut self, initial: InitialDetails, - dgram: Datagram, + dgram: &Datagram, orig_dcid: Option, now: Instant, ) -> Option { @@ -378,17 +392,17 @@ impl Server { attempt_key ); let c = Rc::clone(c); - self.process_connection(c, Some(dgram), now) + self.process_connection(&c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } - fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { + fn create_qlog_trace(&self, odcid: ConnectionIdRef<'_>) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { - let mut qlog_path = qlog_dir.to_path_buf(); + let mut qlog_path = qlog_dir.clone(); - qlog_path.push(format!("{}.qlog", attempt_key.odcid)); + qlog_path.push(format!("{odcid}.qlog")); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. @@ -400,13 +414,14 @@ impl Server { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); - let streamer = ::qlog::QlogStreamer::new( + let streamer = QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), + qlog::events::EventImportance::Base, Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); @@ -446,10 +461,10 @@ impl Server { } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. 
- c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); + c.set_retry_cids(&odcid, initial.src_cid, &initial.dst_cid); } - c.set_validation(Rc::clone(&self.address_validation)); - c.set_qlog(self.create_qlog_trace(attempt_key)); + c.set_validation(&self.address_validation); + c.set_qlog(self.create_qlog_trace(attempt_key.odcid.as_cid_ref())); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() @@ -463,7 +478,7 @@ impl Server { &mut self, attempt_key: AttemptKey, initial: InitialDetails, - dgram: Datagram, + dgram: &Datagram, orig_dcid: Option, now: Instant, ) -> Option { @@ -487,20 +502,30 @@ impl Server { params, ); - if let Ok(mut c) = sconn { - self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); - let c = Rc::new(RefCell::new(ServerConnectionState { - c, - last_timer: now, - active_attempt: Some(attempt_key.clone()), - })); - cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); - let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); - debug_assert!(previous_attempt.is_none()); - self.process_connection(c, Some(dgram), now) - } else { - qwarn!([self], "Unable to create connection"); - None + match sconn { + Ok(mut c) => { + self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); + let c = Rc::new(RefCell::new(ServerConnectionState { + c, + last_timer: now, + active_attempt: Some(attempt_key.clone()), + })); + cid_mgr.borrow_mut().set_connection(&c); + let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); + debug_assert!(previous_attempt.is_none()); + self.process_connection(&c, Some(dgram), now) + } + Err(e) => { + qwarn!([self], "Unable to create connection"); + if e == crate::Error::VersionNegotiation { + crate::qlog::server_version_information_failed( + &mut self.create_qlog_trace(attempt_key.odcid.as_cid_ref()), + self.conn_params.get_versions().all(), + initial.version.wire_version(), + ); + } + None + } } } @@ 
-509,7 +534,7 @@ impl Server { /// receives a connection ID from the server. fn handle_0rtt( &mut self, - dgram: Datagram, + dgram: &Datagram, dcid: ConnectionId, now: Instant, ) -> Option { @@ -524,30 +549,27 @@ impl Server { attempt_key ); let c = Rc::clone(c); - self.process_connection(c, Some(dgram), now) + self.process_connection(&c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } - fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option { + fn process_input(&mut self, dgram: &Datagram, now: Instant) -> Option { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. // All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); - let (packet, _remainder) = match res { - Ok(res) => res, - _ => { - qtrace!([self], "Discarding {:?}", dgram); - return None; - } + let Ok((packet, _remainder)) = res else { + qtrace!([self], "Discarding {:?}", dgram); + return None; }; // Finding an existing connection. Should be the most common case. 
if let Some(c) = self.connection(packet.dcid()) { - return self.process_connection(c, Some(dgram), now); + return self.process_connection(&c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { @@ -571,12 +593,25 @@ impl Server { qdebug!([self], "Unsupported version: {:x}", packet.wire_version()); let vn = PacketBuilder::version_negotiation( - packet.scid(), - packet.dcid(), + &packet.scid()[..], + &packet.dcid()[..], packet.wire_version(), self.conn_params.get_versions().all(), ); - return Some(Datagram::new(dgram.destination(), dgram.source(), vn)); + + crate::qlog::server_version_information_failed( + &mut self.create_qlog_trace(packet.dcid()), + self.conn_params.get_versions().all(), + packet.wire_version(), + ); + + return Some(Datagram::new( + dgram.destination(), + dgram.source(), + dgram.tos(), + dgram.ttl(), + vn, + )); } match packet.packet_type() { @@ -585,7 +620,8 @@ impl Server { qdebug!([self], "Drop initial: too short"); return None; } - // Copy values from `packet` because they are currently still borrowing from `dgram`. + // Copy values from `packet` because they are currently still borrowing from + // `dgram`. 
let initial = InitialDetails::new(&packet); self.handle_initial(initial, dgram, now) } @@ -606,13 +642,13 @@ impl Server { fn process_next_output(&mut self, now: Instant) -> Option { qtrace!([self], "No packet to send, look at waiting connections"); while let Some(c) = self.waiting.pop_front() { - if let Some(d) = self.process_connection(c, None, now) { + if let Some(d) = self.process_connection(&c, None, now) { return Some(d); } } qtrace!([self], "No packet to send still, run timers"); while let Some(c) = self.timers.take_next(now) { - if let Some(d) = self.process_connection(c, None, now) { + if let Some(d) = self.process_connection(&c, None, now) { return Some(d); } } @@ -627,29 +663,24 @@ impl Server { } } - pub fn process(&mut self, dgram: Option, now: Instant) -> Output { - let out = if let Some(d) = dgram { - self.process_input(d, now) - } else { - None - }; - let out = out.or_else(|| self.process_next_output(now)); - match out { - Some(d) => { + pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output { + dgram + .and_then(|d| self.process_input(d, now)) + .or_else(|| self.process_next_output(now)) + .map(|d| { qtrace!([self], "Send packet: {:?}", d); Output::Datagram(d) - } - _ => match self.next_time(now) { - Some(delay) => { + }) + .or_else(|| { + self.next_time(now).map(|delay| { qtrace!([self], "Wait: {:?}", delay); Output::Callback(delay) - } - _ => { - qtrace!([self], "Go dormant"); - Output::None - } - }, - } + }) + }) + .unwrap_or_else(|| { + qtrace!([self], "Go dormant"); + Output::None + }) } /// This lists the connections that have received new events @@ -658,7 +689,7 @@ impl Server { mem::take(&mut self.active).into_iter().collect() } - pub fn add_to_waiting(&mut self, c: ActiveConnectionRef) { + pub fn add_to_waiting(&mut self, c: &ActiveConnectionRef) { self.waiting.push_back(c.connection()); } } @@ -669,6 +700,7 @@ pub struct ActiveConnectionRef { } impl ActiveConnectionRef { + #[must_use] pub fn borrow(&self) -> impl 
Deref + '_ { std::cell::Ref::map(self.c.borrow(), |c| &c.c) } @@ -677,6 +709,7 @@ impl ActiveConnectionRef { std::cell::RefMut::map(self.c.borrow_mut(), |c| &mut c.c) } + #[must_use] pub fn connection(&self) -> StateRef { Rc::clone(&self.c) } @@ -685,7 +718,7 @@ impl ActiveConnectionRef { impl std::hash::Hash for ActiveConnectionRef { fn hash(&self, state: &mut H) { let ptr: *const _ = self.c.as_ref(); - ptr.hash(state) + ptr.hash(state); } } @@ -705,13 +738,13 @@ struct ServerConnectionIdGenerator { } impl ServerConnectionIdGenerator { - pub fn set_connection(&mut self, c: StateRef) { + pub fn set_connection(&mut self, c: &StateRef) { let saved = std::mem::replace(&mut self.saved_cids, Vec::with_capacity(0)); for cid in saved { qtrace!("ServerConnectionIdGenerator inserting saved cid {}", cid); - self.insert_cid(cid, Rc::clone(&c)); + self.insert_cid(cid, Rc::clone(c)); } - self.c = Rc::downgrade(&c); + self.c = Rc::downgrade(c); } fn insert_cid(&mut self, cid: ConnectionId, rc: StateRef) { diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index dd8f8d4db5..0a61097010 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -5,15 +5,18 @@ // except according to those terms. // Tracking of some useful statistics. 
-#![deny(clippy::pedantic)] + +use std::{ + cell::RefCell, + fmt::{self, Debug}, + ops::Deref, + rc::Rc, + time::Duration, +}; + +use neqo_common::qwarn; use crate::packet::PacketNumber; -use neqo_common::qinfo; -use std::cell::RefCell; -use std::fmt::{self, Debug}; -use std::ops::Deref; -use std::rc::Rc; -use std::time::Duration; pub(crate) const MAX_PTO_COUNTS: usize = 16; @@ -81,6 +84,7 @@ impl Debug for FrameStats { " blocked: stream {} data {} stream_data {}", self.streams_blocked, self.data_blocked, self.stream_data_blocked, )?; + writeln!(f, " datagram {}", self.datagram)?; writeln!( f, " ncid {} rcid {} pchallenge {} presponse {}", @@ -89,7 +93,7 @@ impl Debug for FrameStats { self.path_challenge, self.path_response, )?; - writeln!(f, " ack_frequency {} ", self.ack_frequency) + writeln!(f, " ack_frequency {}", self.ack_frequency) } } @@ -138,6 +142,8 @@ pub struct Stats { pub rtt: Duration, /// The current, estimated round-trip time variation on the primary path. pub rttvar: Duration, + /// Whether the first RTT sample was guessed from a discarded packet. + pub rtt_init_guess: bool, /// Count PTOs. Single PTOs, 2 PTOs in a row, 3 PTOs in row, etc. are counted /// separately. @@ -162,7 +168,7 @@ impl Stats { pub fn pkt_dropped(&mut self, reason: impl AsRef) { self.dropped_rx += 1; - qinfo!( + qwarn!( [self.info], "Dropped received packet: {}; Total: {}", reason.as_ref(), @@ -171,6 +177,7 @@ impl Stats { } /// # Panics + /// /// When preconditions are violated. 
pub fn add_pto_count(&mut self, count: usize) { debug_assert!(count > 0); @@ -199,7 +206,7 @@ impl Debug for Stats { " tx: {} lost {} lateack {} ptoack {}", self.packets_tx, self.lost, self.late_ack, self.pto_ack )?; - writeln!(f, " resumed: {} ", self.resumed)?; + writeln!(f, " resumed: {}", self.resumed)?; writeln!(f, " frames rx:")?; self.frame_rx.fmt(f)?; writeln!(f, " frames tx:")?; diff --git a/neqo-transport/src/stream_id.rs b/neqo-transport/src/stream_id.rs index 51df2ca9fb..8dbe2dcfbc 100644 --- a/neqo-transport/src/stream_id.rs +++ b/neqo-transport/src/stream_id.rs @@ -20,10 +20,12 @@ pub enum StreamType { pub struct StreamId(u64); impl StreamId { + #[must_use] pub const fn new(id: u64) -> Self { Self(id) } + #[must_use] pub fn init(stream_type: StreamType, role: Role) -> Self { let type_val = match stream_type { StreamType::BiDi => 0, @@ -32,18 +34,22 @@ impl StreamId { Self(type_val + Self::role_bit(role)) } + #[must_use] pub fn as_u64(self) -> u64 { self.0 } + #[must_use] pub fn is_bidi(self) -> bool { self.as_u64() & 0x02 == 0 } + #[must_use] pub fn is_uni(self) -> bool { !self.is_bidi() } + #[must_use] pub fn stream_type(self) -> StreamType { if self.is_bidi() { StreamType::BiDi @@ -52,14 +58,17 @@ impl StreamId { } } + #[must_use] pub fn is_client_initiated(self) -> bool { self.as_u64() & 0x01 == 0 } + #[must_use] pub fn is_server_initiated(self) -> bool { !self.is_client_initiated() } + #[must_use] pub fn role(self) -> Role { if self.is_client_initiated() { Role::Client @@ -68,6 +77,7 @@ impl StreamId { } } + #[must_use] pub fn is_self_initiated(self, my_role: Role) -> bool { match my_role { Role::Client if self.is_client_initiated() => true, @@ -76,14 +86,17 @@ impl StreamId { } } + #[must_use] pub fn is_remote_initiated(self, my_role: Role) -> bool { !self.is_self_initiated(my_role) } + #[must_use] pub fn is_send_only(self, my_role: Role) -> bool { self.is_uni() && self.is_self_initiated(my_role) } + #[must_use] pub fn is_recv_only(self, my_role: 
Role) -> bool { self.is_uni() && self.is_remote_initiated(my_role) } @@ -93,6 +106,7 @@ impl StreamId { } /// This returns a bit that is shared by all streams created by this role. + #[must_use] pub fn role_bit(role: Role) -> u64 { match role { Role::Server => 1, @@ -133,9 +147,10 @@ impl ::std::fmt::Display for StreamId { #[cfg(test)] mod test { - use super::StreamId; use neqo_common::Role; + use super::StreamId; + #[test] fn bidi_stream_properties() { let id1 = StreamId::from(16); diff --git a/neqo-transport/src/streams.rs b/neqo-transport/src/streams.rs index 735e602feb..d8662afa3b 100644 --- a/neqo-transport/src/streams.rs +++ b/neqo-transport/src/streams.rs @@ -5,6 +5,10 @@ // except according to those terms. // Stream management for a connection. +use std::{cell::RefCell, cmp::Ordering, rc::Rc}; + +use neqo_common::{qtrace, qwarn, Role}; + use crate::{ fc::{LocalStreamLimits, ReceiverFlowControl, RemoteStreamLimits, SenderFlowControl}, frame::Frame, @@ -17,9 +21,6 @@ use crate::{ tparams::{self, TransportParametersHandler}, ConnectionEvents, Error, Res, }; -use neqo_common::{qtrace, qwarn, Role}; -use std::cmp::Ordering; -use std::{cell::RefCell, rc::Rc}; pub type SendOrder = i64; @@ -94,6 +95,7 @@ impl Streams { } } + #[must_use] pub fn is_stream_id_allowed(&self, stream_id: StreamId) -> bool { self.remote_stream_limits[stream_id.stream_type()].is_allowed(stream_id) } @@ -117,7 +119,9 @@ impl Streams { self.local_stream_limits = LocalStreamLimits::new(self.role); } - pub fn input_frame(&mut self, frame: Frame, stats: &mut FrameStats) -> Res<()> { + /// # Errors + /// When the frame is invalid. + pub fn input_frame(&mut self, frame: &Frame, stats: &mut FrameStats) -> Res<()> { match frame { Frame::ResetStream { stream_id, @@ -125,8 +129,8 @@ impl Streams { final_size, } => { stats.reset_stream += 1; - if let (_, Some(rs)) = self.obtain_stream(stream_id)? 
{ - rs.reset(application_error_code, final_size)?; + if let (_, Some(rs)) = self.obtain_stream(*stream_id)? { + rs.reset(*application_error_code, *final_size)?; } } Frame::StopSending { @@ -135,9 +139,9 @@ impl Streams { } => { stats.stop_sending += 1; self.events - .send_stream_stop_sending(stream_id, application_error_code); - if let (Some(ss), _) = self.obtain_stream(stream_id)? { - ss.reset(application_error_code); + .send_stream_stop_sending(*stream_id, *application_error_code); + if let (Some(ss), _) = self.obtain_stream(*stream_id)? { + ss.reset(*application_error_code); } } Frame::Stream { @@ -148,13 +152,13 @@ impl Streams { .. } => { stats.stream += 1; - if let (_, Some(rs)) = self.obtain_stream(stream_id)? { - rs.inbound_stream_frame(fin, offset, data)?; + if let (_, Some(rs)) = self.obtain_stream(*stream_id)? { + rs.inbound_stream_frame(*fin, *offset, data)?; } } Frame::MaxData { maximum_data } => { stats.max_data += 1; - self.handle_max_data(maximum_data); + self.handle_max_data(*maximum_data); } Frame::MaxStreamData { stream_id, @@ -162,12 +166,12 @@ impl Streams { } => { qtrace!( "Stream {} Received MaxStreamData {}", - stream_id, - maximum_stream_data + *stream_id, + *maximum_stream_data ); stats.max_stream_data += 1; - if let (Some(ss), _) = self.obtain_stream(stream_id)? { - ss.set_max_stream_data(maximum_stream_data); + if let (Some(ss), _) = self.obtain_stream(*stream_id)? { + ss.set_max_stream_data(*maximum_stream_data); } } Frame::MaxStreams { @@ -175,7 +179,7 @@ impl Streams { maximum_streams, } => { stats.max_streams += 1; - self.handle_max_streams(stream_type, maximum_streams); + self.handle_max_streams(*stream_type, *maximum_streams); } Frame::DataBlocked { data_limit } => { // Should never happen since we set data limit to max @@ -192,7 +196,7 @@ impl Streams { return Err(Error::StreamStateError); } - if let (_, Some(rs)) = self.obtain_stream(stream_id)? { + if let (_, Some(rs)) = self.obtain_stream(*stream_id)? 
{ rs.send_flowc_update(); } } @@ -269,7 +273,7 @@ impl Streams { StreamRecoveryToken::Stream(st) => self.send.lost(st), StreamRecoveryToken::ResetStream { stream_id } => self.send.reset_lost(*stream_id), StreamRecoveryToken::StreamDataBlocked { stream_id, limit } => { - self.send.blocked_lost(*stream_id, *limit) + self.send.blocked_lost(*stream_id, *limit); } StreamRecoveryToken::MaxStreamData { stream_id, @@ -294,10 +298,10 @@ impl Streams { self.remote_stream_limits[*stream_type].frame_lost(*max_streams); } StreamRecoveryToken::DataBlocked(limit) => { - self.sender_fc.borrow_mut().frame_lost(*limit) + self.sender_fc.borrow_mut().frame_lost(*limit); } StreamRecoveryToken::MaxData(maximum_data) => { - self.receiver_fc.borrow_mut().frame_lost(*maximum_data) + self.receiver_fc.borrow_mut().frame_lost(*maximum_data); } } } @@ -400,6 +404,8 @@ impl Streams { /// Get or make a stream, and implicitly open additional streams as /// indicated by its stream id. + /// # Errors + /// When the stream cannot be created due to stream limits. pub fn obtain_stream( &mut self, stream_id: StreamId, @@ -411,14 +417,20 @@ impl Streams { )) } + /// # Errors + /// When the stream does not exist. pub fn set_sendorder(&mut self, stream_id: StreamId, sendorder: Option) -> Res<()> { self.send.set_sendorder(stream_id, sendorder) } + /// # Errors + /// When the stream does not exist. pub fn set_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { self.send.set_fairness(stream_id, fairness) } + /// # Errors + /// When a stream cannot be created, which might be temporary. pub fn stream_create(&mut self, st: StreamType) -> Res { match self.local_stream_limits.take_stream_id(st) { None => Err(Error::StreamLimitError), @@ -438,9 +450,10 @@ impl Streams { if st == StreamType::BiDi { // From the local perspective, this is a local- originated BiDi stream. From the - // remote perspective, this is a remote-originated BiDi stream. 
Therefore, look at - // the local transport parameters for the INITIAL_MAX_STREAM_DATA_BIDI_LOCAL value - // to decide how much this endpoint will allow its peer to send. + // remote perspective, this is a remote-originated BiDi stream. Therefore, look + // at the local transport parameters for the + // INITIAL_MAX_STREAM_DATA_BIDI_LOCAL value to decide how + // much this endpoint will allow its peer to send. let recv_initial_max_stream_data = self .tps .borrow() @@ -523,18 +536,26 @@ impl Streams { } } + /// # Errors + /// When the stream does not exist. pub fn get_send_stream_mut(&mut self, stream_id: StreamId) -> Res<&mut SendStream> { self.send.get_mut(stream_id) } + /// # Errors + /// When the stream does not exist. pub fn get_send_stream(&self, stream_id: StreamId) -> Res<&SendStream> { self.send.get(stream_id) } + /// # Errors + /// When the stream does not exist. pub fn get_recv_stream_mut(&mut self, stream_id: StreamId) -> Res<&mut RecvStream> { self.recv.get_mut(stream_id) } + /// # Errors + /// When the stream does not exist. pub fn keep_alive(&mut self, stream_id: StreamId, keep: bool) -> Res<()> { self.recv.keep_alive(stream_id, keep) } diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index e2150b0627..eada56cc4c 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -6,10 +6,11 @@ // Transport parameters. See -transport section 7.3. 
-use crate::{ - cid::{ConnectionId, ConnectionIdEntry, CONNECTION_ID_SEQNO_PREFERRED, MAX_CONNECTION_ID_LEN}, - version::{Version, VersionConfig, WireVersion}, - Error, Res, +use std::{ + cell::RefCell, + collections::HashMap, + net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, + rc::Rc, }; use neqo_common::{hex, qdebug, qinfo, qtrace, Decoder, Encoder, Role}; @@ -19,12 +20,10 @@ use neqo_crypto::{ random, HandshakeMessage, ZeroRttCheckResult, ZeroRttChecker, }; -use std::{ - cell::RefCell, - collections::HashMap, - convert::TryFrom, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - rc::Rc, +use crate::{ + cid::{ConnectionId, ConnectionIdEntry, CONNECTION_ID_SEQNO_PREFERRED, MAX_CONNECTION_ID_LEN}, + version::{Version, VersionConfig, WireVersion}, + Error, Res, }; pub type TransportParameterId = u64; @@ -55,51 +54,67 @@ tpids! { ACTIVE_CONNECTION_ID_LIMIT = 0x0e, INITIAL_SOURCE_CONNECTION_ID = 0x0f, RETRY_SOURCE_CONNECTION_ID = 0x10, + VERSION_INFORMATION = 0x11, GREASE_QUIC_BIT = 0x2ab2, MIN_ACK_DELAY = 0xff02_de1a, MAX_DATAGRAM_FRAME_SIZE = 0x0020, - VERSION_NEGOTIATION = 0xff73db, } -#[derive(Clone, Debug, Copy)] +#[derive(Clone, Debug)] pub struct PreferredAddress { - v4: Option, - v6: Option, + v4: Option, + v6: Option, } impl PreferredAddress { /// Make a new preferred address configuration. /// /// # Panics + /// /// If neither address is provided, or if either address is of the wrong type. 
#[must_use] - pub fn new(v4: Option, v6: Option) -> Self { + pub fn new(v4: Option, v6: Option) -> Self { assert!(v4.is_some() || v6.is_some()); if let Some(a) = v4 { - if let IpAddr::V4(addr) = a.ip() { - assert!(!addr.is_unspecified()); - } else { - panic!("invalid address type for v4 address"); - } + assert!(!a.ip().is_unspecified()); assert_ne!(a.port(), 0); } if let Some(a) = v6 { - if let IpAddr::V6(addr) = a.ip() { - assert!(!addr.is_unspecified()); - } else { - panic!("invalid address type for v6 address"); - } + assert!(!a.ip().is_unspecified()); assert_ne!(a.port(), 0); } Self { v4, v6 } } + /// A generic version of `new()` for testing. + /// # Panics + /// When the addresses are the wrong type. + #[must_use] + #[cfg(test)] + pub fn new_any(v4: Option, v6: Option) -> Self { + use std::net::SocketAddr; + + let v4 = v4.map(|v4| { + let SocketAddr::V4(v4) = v4 else { + panic!("not v4"); + }; + v4 + }); + let v6 = v6.map(|v6| { + let SocketAddr::V6(v6) = v6 else { + panic!("not v6"); + }; + v6 + }); + Self::new(v4, v6) + } + #[must_use] - pub fn ipv4(&self) -> Option { + pub fn ipv4(&self) -> Option { self.v4 } #[must_use] - pub fn ipv6(&self) -> Option { + pub fn ipv6(&self) -> Option { self.v6 } } @@ -110,8 +125,8 @@ pub enum TransportParameter { Integer(u64), Empty, PreferredAddress { - v4: Option, - v6: Option, + v4: Option, + v6: Option, cid: ConnectionId, srt: [u8; 16], }, @@ -140,23 +155,13 @@ impl TransportParameter { Self::PreferredAddress { v4, v6, cid, srt } => { enc.encode_vvec_with(|enc_inner| { if let Some(v4) = v4 { - debug_assert!(v4.is_ipv4()); - if let IpAddr::V4(a) = v4.ip() { - enc_inner.encode(&a.octets()[..]); - } else { - unreachable!(); - } + enc_inner.encode(&v4.ip().octets()[..]); enc_inner.encode_uint(2, v4.port()); } else { enc_inner.encode(&[0; 6]); } if let Some(v6) = v6 { - debug_assert!(v6.is_ipv6()); - if let IpAddr::V6(a) = v6.ip() { - enc_inner.encode(&a.octets()[..]); - } else { - unreachable!(); - } + 
enc_inner.encode(&v6.ip().octets()[..]); enc_inner.encode_uint(2, v6.port()); } else { enc_inner.encode(&[0; 18]); @@ -188,7 +193,7 @@ impl TransportParameter { let v4 = if v4port == 0 { None } else { - Some(SocketAddr::new(IpAddr::V4(v4ip), v4port)) + Some(SocketAddrV4::new(v4ip, v4port)) }; // IPv6 address (mostly the same as v4) @@ -201,7 +206,7 @@ impl TransportParameter { let v6 = if v6port == 0 { None } else { - Some(SocketAddr::new(IpAddr::V6(v6ip), v6port)) + Some(SocketAddrV6::new(v6ip, v6port, 0, 0)) }; // Need either v4 or v6 to be present. if v4.is_none() && v6.is_none() { @@ -227,7 +232,7 @@ impl TransportParameter { if v == 0 { Err(Error::TransportParameterError) } else { - Ok(v as WireVersion) + Ok(WireVersion::try_from(v)?) } } @@ -295,7 +300,7 @@ impl TransportParameter { _ => return Err(Error::TransportParameterError), }, - VERSION_NEGOTIATION => Self::decode_versions(&mut d)?, + VERSION_INFORMATION => Self::decode_versions(&mut d)?, // Skip. _ => return Ok(None), @@ -349,6 +354,9 @@ impl TransportParameters { } // Get an integer type or a default. + /// # Panics + /// When the transport parameter isn't recognized as being an integer. + #[must_use] pub fn get_integer(&self, tp: TransportParameterId) -> u64 { let default = match tp { IDLE_TIMEOUT @@ -374,6 +382,8 @@ impl TransportParameters { } // Set an integer type or a default. + /// # Panics + /// When the transport parameter isn't recognized as being an integer. pub fn set_integer(&mut self, tp: TransportParameterId, value: u64) { match tp { IDLE_TIMEOUT @@ -395,6 +405,9 @@ impl TransportParameters { } } + /// # Panics + /// When the transport parameter isn't recognized as containing bytes. + #[must_use] pub fn get_bytes(&self, tp: TransportParameterId) -> Option<&[u8]> { match tp { ORIGINAL_DESTINATION_CONNECTION_ID @@ -411,6 +424,8 @@ impl TransportParameters { } } + /// # Panics + /// When the transport parameter isn't recognized as containing bytes. 
pub fn set_bytes(&mut self, tp: TransportParameterId, value: Vec) { match tp { ORIGINAL_DESTINATION_CONNECTION_ID @@ -423,6 +438,8 @@ impl TransportParameters { } } + /// # Panics + /// When the transport parameter isn't recognized as being empty. pub fn set_empty(&mut self, tp: TransportParameterId) { match tp { DISABLE_MIGRATION | GREASE_QUIC_BIT => { @@ -433,11 +450,14 @@ impl TransportParameters { } /// Set version information. + /// # Panics + /// Never. But rust doesn't know that. pub fn set_versions(&mut self, role: Role, versions: &VersionConfig) { - let rbuf = random(4); + let rbuf = random::<4>(); let mut other = Vec::with_capacity(versions.all().len() + 1); let mut dec = Decoder::new(&rbuf); - let grease = (dec.decode_uint(4).unwrap() as u32) & 0xf0f0_f0f0 | 0x0a0a0a0a; + let grease = + (u32::try_from(dec.decode_uint(4).unwrap()).unwrap()) & 0xf0f0_f0f0 | 0x0a0a_0a0a; other.push(grease); for &v in versions.all() { if role == Role::Client && !versions.initial().is_compatible(v) { @@ -447,7 +467,7 @@ impl TransportParameters { } let current = versions.initial().wire_version(); self.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, TransportParameter::Versions { current, other }, ); } @@ -455,7 +475,7 @@ impl TransportParameters { fn compatible_upgrade(&mut self, v: Version) { if let Some(TransportParameter::Versions { ref mut current, .. - }) = self.params.get_mut(&VERSION_NEGOTIATION) + }) = self.params.get_mut(&VERSION_INFORMATION) { *current = v.wire_version(); } else { @@ -463,6 +483,10 @@ impl TransportParameters { } } + /// # Panics + /// When the indicated transport parameter is present but NOT empty. + /// This should not happen if the parsing code in `TransportParameter::decode` is correct. 
+ #[must_use] pub fn get_empty(&self, tipe: TransportParameterId) -> bool { match self.params.get(&tipe) { None => false, @@ -540,7 +564,7 @@ impl TransportParameters { #[must_use] pub fn get_versions(&self) -> Option<(WireVersion, &[WireVersion])> { if let Some(TransportParameter::Versions { current, other }) = - self.params.get(&VERSION_NEGOTIATION) + self.params.get(&VERSION_INFORMATION) { Some((*current, other)) } else { @@ -564,6 +588,7 @@ pub struct TransportParametersHandler { } impl TransportParametersHandler { + #[must_use] pub fn new(role: Role, versions: VersionConfig) -> Self { let mut local = TransportParameters::default(); local.set_versions(role, &versions); @@ -581,9 +606,13 @@ impl TransportParametersHandler { pub fn set_version(&mut self, version: Version) { debug_assert_eq!(self.role, Role::Client); self.versions.set_initial(version); - self.local.set_versions(self.role, &self.versions) + self.local.set_versions(self.role, &self.versions); } + /// # Panics + /// When this function is called before the peer has provided transport parameters. + /// Do not call this function if you are not also able to send data. + #[must_use] pub fn remote(&self) -> &TransportParameters { match (self.remote.as_ref(), self.remote_0rtt.as_ref()) { (Some(tp), _) | (_, Some(tp)) => tp, @@ -592,6 +621,7 @@ impl TransportParametersHandler { } /// Get the version as set (or as determined by a compatible upgrade). 
+ #[must_use] pub fn version(&self) -> Version { self.versions.initial() } @@ -723,16 +753,12 @@ where return ZeroRttCheckResult::Reject; } let mut dec = Decoder::from(token); - let tpslice = if let Some(v) = dec.decode_vvec() { - v - } else { + let Some(tpslice) = dec.decode_vvec() else { qinfo!("0-RTT: token code error"); return ZeroRttCheckResult::Fail; }; let mut dec_tp = Decoder::from(tpslice); - let remembered = if let Ok(v) = TransportParameters::decode(&mut dec_tp) { - v - } else { + let Ok(remembered) = TransportParameters::decode(&mut dec_tp) else { qinfo!("0-RTT: transport parameter decode error"); return ZeroRttCheckResult::Fail; }; @@ -749,8 +775,24 @@ where #[cfg(test)] #[allow(unused_variables)] mod tests { - use super::*; - use std::mem; + use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}; + + use neqo_common::{Decoder, Encoder}; + + use super::PreferredAddress; + use crate::{ + tparams::{ + TransportParameter, TransportParameterId, TransportParameters, + ACTIVE_CONNECTION_ID_LIMIT, IDLE_TIMEOUT, INITIAL_MAX_DATA, INITIAL_MAX_STREAMS_BIDI, + INITIAL_MAX_STREAMS_UNI, INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, + INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, INITIAL_MAX_STREAM_DATA_UNI, + INITIAL_SOURCE_CONNECTION_ID, MAX_ACK_DELAY, MAX_DATAGRAM_FRAME_SIZE, + MAX_UDP_PAYLOAD_SIZE, MIN_ACK_DELAY, ORIGINAL_DESTINATION_CONNECTION_ID, + PREFERRED_ADDRESS, RETRY_SOURCE_CONNECTION_ID, STATELESS_RESET_TOKEN, + VERSION_INFORMATION, + }, + ConnectionId, Error, Version, + }; #[test] fn basic_tps() { @@ -769,7 +811,7 @@ mod tests { let tps2 = TransportParameters::decode(&mut enc.as_decoder()).expect("Couldn't decode"); assert_eq!(tps, tps2); - println!("TPS = {:?}", tps); + println!("TPS = {tps:?}"); assert_eq!(tps2.get_integer(IDLE_TIMEOUT), 0); // Default assert_eq!(tps2.get_integer(MAX_ACK_DELAY), 25); // Default assert_eq!(tps2.get_integer(ACTIVE_CONNECTION_ID_LIMIT), 2); // Default @@ -791,13 +833,12 @@ mod tests { fn make_spa() -> TransportParameter { 
TransportParameter::PreferredAddress { - v4: Some(SocketAddr::new( - IpAddr::V4(Ipv4Addr::from(0xc000_0201)), - 443, - )), - v6: Some(SocketAddr::new( - IpAddr::V6(Ipv6Addr::from(0xfe80_0000_0000_0000_0000_0000_0000_0001)), + v4: Some(SocketAddrV4::new(Ipv4Addr::from(0xc000_0201), 443)), + v6: Some(SocketAddrV6::new( + Ipv6Addr::from(0xfe80_0000_0000_0000_0000_0000_0000_0001), 443, + 0, + 0, )), cid: ConnectionId::from(&[1, 2, 3, 4, 5]), srt: [3; 16], @@ -825,7 +866,7 @@ mod tests { fn mutate_spa(wrecker: F) -> TransportParameter where - F: FnOnce(&mut Option, &mut Option, &mut ConnectionId), + F: FnOnce(&mut Option, &mut Option, &mut ConnectionId), { let mut spa = make_spa(); if let TransportParameter::PreferredAddress { @@ -845,7 +886,7 @@ mod tests { /// This takes a `TransportParameter::PreferredAddress` that has been mutilated. /// It then encodes it, working from the knowledge that the `encode` function /// doesn't care about validity, and decodes it. The result should be failure. - fn assert_invalid_spa(spa: TransportParameter) { + fn assert_invalid_spa(spa: &TransportParameter) { let mut enc = Encoder::new(); spa.encode(&mut enc, PREFERRED_ADDRESS); assert_eq!( @@ -855,40 +896,40 @@ mod tests { } /// This is for those rare mutations that are acceptable. - fn assert_valid_spa(spa: TransportParameter) { + fn assert_valid_spa(spa: &TransportParameter) { let mut enc = Encoder::new(); spa.encode(&mut enc, PREFERRED_ADDRESS); let mut dec = enc.as_decoder(); let (id, decoded) = TransportParameter::decode(&mut dec).unwrap().unwrap(); assert_eq!(id, PREFERRED_ADDRESS); - assert_eq!(decoded, spa); + assert_eq!(&decoded, spa); } #[test] fn preferred_address_zero_address() { // Either port being zero is bad. 
- assert_invalid_spa(mutate_spa(|v4, _, _| { + assert_invalid_spa(&mutate_spa(|v4, _, _| { v4.as_mut().unwrap().set_port(0); })); - assert_invalid_spa(mutate_spa(|_, v6, _| { + assert_invalid_spa(&mutate_spa(|_, v6, _| { v6.as_mut().unwrap().set_port(0); })); // Either IP being zero is bad. - assert_invalid_spa(mutate_spa(|v4, _, _| { - v4.as_mut().unwrap().set_ip(IpAddr::V4(Ipv4Addr::from(0))); + assert_invalid_spa(&mutate_spa(|v4, _, _| { + v4.as_mut().unwrap().set_ip(Ipv4Addr::from(0)); })); - assert_invalid_spa(mutate_spa(|_, v6, _| { - v6.as_mut().unwrap().set_ip(IpAddr::V6(Ipv6Addr::from(0))); + assert_invalid_spa(&mutate_spa(|_, v6, _| { + v6.as_mut().unwrap().set_ip(Ipv6Addr::from(0)); })); // Either address being absent is OK. - assert_valid_spa(mutate_spa(|v4, _, _| { + assert_valid_spa(&mutate_spa(|v4, _, _| { *v4 = None; })); - assert_valid_spa(mutate_spa(|_, v6, _| { + assert_valid_spa(&mutate_spa(|_, v6, _| { *v6 = None; })); // Both addresses being absent is bad. - assert_invalid_spa(mutate_spa(|v4, v6, _| { + assert_invalid_spa(&mutate_spa(|v4, v6, _| { *v4 = None; *v6 = None; })); @@ -896,10 +937,10 @@ mod tests { #[test] fn preferred_address_bad_cid() { - assert_invalid_spa(mutate_spa(|_, _, cid| { + assert_invalid_spa(&mutate_spa(|_, _, cid| { *cid = ConnectionId::from(&[]); })); - assert_invalid_spa(mutate_spa(|_, _, cid| { + assert_invalid_spa(&mutate_spa(|_, _, cid| { *cid = ConnectionId::from(&[0x0c; 21]); })); } @@ -917,85 +958,36 @@ mod tests { } #[test] - #[should_panic] - fn preferred_address_wrong_family_v4() { - mutate_spa(|v4, _, _| { - v4.as_mut().unwrap().set_ip(IpAddr::V6(Ipv6Addr::from(0))); - }) - .encode(&mut Encoder::new(), PREFERRED_ADDRESS); - } - - #[test] - #[should_panic] - fn preferred_address_wrong_family_v6() { - mutate_spa(|_, v6, _| { - v6.as_mut().unwrap().set_ip(IpAddr::V4(Ipv4Addr::from(0))); - }) - .encode(&mut Encoder::new(), PREFERRED_ADDRESS); - } - - #[test] - #[should_panic] + #[should_panic(expected = 
"v4.is_some() || v6.is_some()")] fn preferred_address_neither() { - #[allow(clippy::drop_copy)] - mem::drop(PreferredAddress::new(None, None)); + _ = PreferredAddress::new(None, None); } #[test] - #[should_panic] + #[should_panic(expected = ".is_unspecified")] fn preferred_address_v4_unspecified() { - _ = PreferredAddress::new( - Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), 443)), - None, - ); + _ = PreferredAddress::new(Some(SocketAddrV4::new(Ipv4Addr::from(0), 443)), None); } #[test] - #[should_panic] + #[should_panic(expected = "left != right")] fn preferred_address_v4_zero_port() { _ = PreferredAddress::new( - Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0xc000_0201)), 0)), + Some(SocketAddrV4::new(Ipv4Addr::from(0xc000_0201), 0)), None, ); } #[test] - #[should_panic] + #[should_panic(expected = ".is_unspecified")] fn preferred_address_v6_unspecified() { - _ = PreferredAddress::new( - None, - Some(SocketAddr::new(IpAddr::V6(Ipv6Addr::from(0)), 443)), - ); + _ = PreferredAddress::new(None, Some(SocketAddrV6::new(Ipv6Addr::from(0), 443, 0, 0))); } #[test] - #[should_panic] + #[should_panic(expected = "left != right")] fn preferred_address_v6_zero_port() { - _ = PreferredAddress::new( - None, - Some(SocketAddr::new(IpAddr::V6(Ipv6Addr::from(1)), 0)), - ); - } - - #[test] - #[should_panic] - fn preferred_address_v4_is_v6() { - _ = PreferredAddress::new( - Some(SocketAddr::new(IpAddr::V6(Ipv6Addr::from(1)), 443)), - None, - ); - } - - #[test] - #[should_panic] - fn preferred_address_v6_is_v4() { - _ = PreferredAddress::new( - None, - Some(SocketAddr::new( - IpAddr::V4(Ipv4Addr::from(0xc000_0201)), - 443, - )), - ); + _ = PreferredAddress::new(None, Some(SocketAddrV6::new(Ipv6Addr::from(1), 0, 0, 0))); } #[test] @@ -1026,7 +1018,6 @@ mod tests { #[test] fn compatible_0rtt_integers() { - let mut tps_a = TransportParameters::default(); const INTEGER_KEYS: &[TransportParameterId] = &[ INITIAL_MAX_DATA, INITIAL_MAX_STREAM_DATA_BIDI_LOCAL, @@ -1038,6 +1029,8 @@ 
mod tests { MIN_ACK_DELAY, MAX_DATAGRAM_FRAME_SIZE, ]; + + let mut tps_a = TransportParameters::default(); for i in INTEGER_KEYS { tps_a.set(*i, TransportParameter::Integer(12)); } @@ -1075,7 +1068,8 @@ mod tests { fn active_connection_id_limit_min_2() { let mut tps = TransportParameters::default(); - // Intentionally set an invalid value for the ACTIVE_CONNECTION_ID_LIMIT transport parameter. + // Intentionally set an invalid value for the ACTIVE_CONNECTION_ID_LIMIT transport + // parameter. tps.params .insert(ACTIVE_CONNECTION_ID_LIMIT, TransportParameter::Integer(1)); @@ -1091,8 +1085,7 @@ mod tests { #[test] fn versions_encode_decode() { const ENCODED: &[u8] = &[ - 0x80, 0xff, 0x73, 0xdb, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x1a, 0x2a, 0x3a, 0x4a, 0x5a, - 0x6a, 0x7a, 0x8a, + 0x11, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x1a, 0x2a, 0x3a, 0x4a, 0x5a, 0x6a, 0x7a, 0x8a, ]; let vn = TransportParameter::Versions { current: Version::Version1.wire_version(), @@ -1100,12 +1093,12 @@ mod tests { }; let mut enc = Encoder::new(); - vn.encode(&mut enc, VERSION_NEGOTIATION); + vn.encode(&mut enc, VERSION_INFORMATION); assert_eq!(enc.as_ref(), ENCODED); let mut dec = enc.as_decoder(); let (id, decoded) = TransportParameter::decode(&mut dec).unwrap().unwrap(); - assert_eq!(id, VERSION_NEGOTIATION); + assert_eq!(id, VERSION_INFORMATION); assert_eq!(decoded, vn); } @@ -1124,10 +1117,8 @@ mod tests { #[test] fn versions_zero() { - const ZERO1: &[u8] = &[0x80, 0xff, 0x73, 0xdb, 0x04, 0x00, 0x00, 0x00, 0x00]; - const ZERO2: &[u8] = &[ - 0x80, 0xff, 0x73, 0xdb, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - ]; + const ZERO1: &[u8] = &[0x11, 0x04, 0x00, 0x00, 0x00, 0x00]; + const ZERO2: &[u8] = &[0x11, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00]; let mut dec = Decoder::from(&ZERO1); assert_eq!( @@ -1145,7 +1136,7 @@ mod tests { fn versions_equal_0rtt() { let mut current = TransportParameters::default(); current.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, 
TransportParameter::Versions { current: Version::Version1.wire_version(), other: vec![0x1a2a_3a4a], @@ -1160,7 +1151,7 @@ mod tests { // If the version matches, it's OK to use 0-RTT. remembered.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, TransportParameter::Versions { current: Version::Version1.wire_version(), other: vec![0x5a6a_7a8a, 0x9aaa_baca], @@ -1171,7 +1162,7 @@ mod tests { // An apparent "upgrade" is still cause to reject 0-RTT. remembered.set( - VERSION_NEGOTIATION, + VERSION_INFORMATION, TransportParameter::Versions { current: Version::Version1.wire_version() + 1, other: vec![], diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 0c3d25b9ed..bdd0f250c7 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -6,28 +6,23 @@ // Tracking of received packets and generating acks thereof. -#![deny(clippy::pedantic)] - use std::{ cmp::min, collections::VecDeque, - convert::TryFrom, ops::{Index, IndexMut}, time::{Duration, Instant}, }; use neqo_common::{qdebug, qinfo, qtrace, qwarn}; use neqo_crypto::{Epoch, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL}; +use smallvec::{smallvec, SmallVec}; use crate::{ packet::{PacketBuilder, PacketNumber, PacketType}, recovery::RecoveryToken, stats::FrameStats, - Error, Res, }; -use smallvec::{smallvec, SmallVec}; - // TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574 #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq)] pub enum PacketNumberSpace { @@ -366,6 +361,8 @@ pub struct RecvdPackets { largest_pn_time: Option, /// The time that we should be sending an ACK. ack_time: Option, + /// The time we last sent an ACK. + last_ack_time: Option, /// The current ACK frequency sequence number. 
ack_frequency_seqno: u64, /// The time to delay after receiving the first packet that is @@ -391,6 +388,7 @@ impl RecvdPackets { min_tracked: 0, largest_pn_time: None, ack_time: None, + last_ack_time: None, ack_frequency_seqno: 0, ack_delay: DEFAULT_ACK_DELAY, unacknowledged_count: 0, @@ -424,11 +422,13 @@ impl RecvdPackets { } /// Returns true if an ACK frame should be sent now. - fn ack_now(&self, now: Instant) -> bool { - match self.ack_time { - Some(t) => t <= now, - None => false, - } + fn ack_now(&self, now: Instant, rtt: Duration) -> bool { + // If ack_time is Some, then we have something to acknowledge. + // In that case, either ack because `now >= ack_time`, or + // because it is more than an RTT since the last time we sent an ack. + self.ack_time.map_or(false, |next| { + next <= now || self.last_ack_time.map_or(false, |last| last + rtt <= now) + }) } // A simple addition of a packet number to the tracked set. @@ -558,6 +558,7 @@ impl RecvdPackets { fn write_frame( &mut self, now: Instant, + rtt: Duration, builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, @@ -567,7 +568,7 @@ impl RecvdPackets { const LONGEST_ACK_HEADER: usize = 1 + 8 + 8 + 1 + 8; // Check that we aren't delaying ACKs. - if !self.ack_now(now) { + if !self.ack_now(now, rtt) { return; } @@ -618,6 +619,7 @@ impl RecvdPackets { // We've sent an ACK, reset the timer. 
self.ack_time = None; + self.last_ack_time = Some(now); self.unacknowledged_count = 0; tokens.push(RecoveryToken::Ack(AckToken { @@ -714,17 +716,14 @@ impl AckTracker { &mut self, pn_space: PacketNumberSpace, now: Instant, + rtt: Duration, builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { if let Some(space) = self.get_mut(pn_space) { - space.write_frame(now, builder, tokens, stats); - if builder.len() > builder.limit() { - return Err(Error::InternalError(24)); - } + space.write_frame(now, rtt, builder, tokens, stats); } - Ok(()) } } @@ -742,6 +741,11 @@ impl Default for AckTracker { #[cfg(test)] mod tests { + use std::collections::HashSet; + + use neqo_common::Encoder; + use test_fixture::now; + use super::{ AckTracker, Duration, Instant, PacketNumberSpace, PacketNumberSpaceSet, RecoveryToken, RecvdPackets, MAX_TRACKED_RANGES, @@ -751,20 +755,15 @@ mod tests { packet::{PacketBuilder, PacketNumber}, stats::FrameStats, }; - use lazy_static::lazy_static; - use neqo_common::Encoder; - use std::collections::HashSet; - lazy_static! { - static ref NOW: Instant = Instant::now(); - } + const RTT: Duration = Duration::from_millis(100); fn test_ack_range(pns: &[PacketNumber], nranges: usize) { let mut rp = RecvdPackets::new(PacketNumberSpace::Initial); // Any space will do. let mut packets = HashSet::new(); for pn in pns { - rp.set_received(*NOW, *pn, true); + rp.set_received(now(), *pn, true); packets.insert(*pn); } @@ -819,7 +818,7 @@ mod tests { // This will add one too many disjoint ranges. for i in 0..=MAX_TRACKED_RANGES { - rp.set_received(*NOW, (i * 2) as u64, true); + rp.set_received(now(), (i * 2) as u64, true); } assert_eq!(rp.ranges.len(), MAX_TRACKED_RANGES); @@ -838,22 +837,22 @@ mod tests { // Only application data packets are delayed. 
let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW)); + assert!(!rp.ack_now(now(), RTT)); rp.ack_freq(0, COUNT, DELAY, false); // Some packets won't cause an ACK to be needed. for i in 0..COUNT { - rp.set_received(*NOW, i, true); - assert_eq!(Some(*NOW + DELAY), rp.ack_time()); - assert!(!rp.ack_now(*NOW)); - assert!(rp.ack_now(*NOW + DELAY)); + rp.set_received(now(), i, true); + assert_eq!(Some(now() + DELAY), rp.ack_time()); + assert!(!rp.ack_now(now(), RTT)); + assert!(rp.ack_now(now() + DELAY, RTT)); } // Exceeding COUNT will move the ACK time to now. - rp.set_received(*NOW, COUNT, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + rp.set_received(now(), COUNT, true); + assert_eq!(Some(now()), rp.ack_time()); + assert!(rp.ack_now(now(), RTT)); } #[test] @@ -861,12 +860,12 @@ mod tests { for space in &[PacketNumberSpace::Initial, PacketNumberSpace::Handshake] { let mut rp = RecvdPackets::new(*space); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW)); + assert!(!rp.ack_now(now(), RTT)); // Any packet in these spaces is acknowledged straight away. - rp.set_received(*NOW, 0, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + rp.set_received(now(), 0, true); + assert_eq!(Some(now()), rp.ack_time()); + assert!(rp.ack_now(now(), RTT)); } } @@ -874,33 +873,55 @@ mod tests { fn ooo_no_ack_delay_new() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); assert!(rp.ack_time().is_none()); - assert!(!rp.ack_now(*NOW)); + assert!(!rp.ack_now(now(), RTT)); // Anything other than packet 0 is acknowledged immediately. 
- rp.set_received(*NOW, 1, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + rp.set_received(now(), 1, true); + assert_eq!(Some(now()), rp.ack_time()); + assert!(rp.ack_now(now(), RTT)); } - fn write_frame(rp: &mut RecvdPackets) { + fn write_frame_at(rp: &mut RecvdPackets, now: Instant) { let mut builder = PacketBuilder::short(Encoder::new(), false, []); let mut stats = FrameStats::default(); let mut tokens = Vec::new(); - rp.write_frame(*NOW, &mut builder, &mut tokens, &mut stats); + rp.write_frame(now, RTT, &mut builder, &mut tokens, &mut stats); assert!(!tokens.is_empty()); assert_eq!(stats.ack, 1); } + fn write_frame(rp: &mut RecvdPackets) { + write_frame_at(rp, now()); + } + #[test] fn ooo_no_ack_delay_fill() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); - rp.set_received(*NOW, 1, true); + rp.set_received(now(), 1, true); + write_frame(&mut rp); + + // Filling in behind the largest acknowledged causes immediate ACK. + rp.set_received(now(), 0, true); + write_frame(&mut rp); + + // Receiving the next packet won't elicit an ACK. + rp.set_received(now(), 2, true); + assert!(!rp.ack_now(now(), RTT)); + } + + #[test] + fn immediate_ack_after_rtt() { + let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); + rp.set_received(now(), 1, true); write_frame(&mut rp); // Filling in behind the largest acknowledged causes immediate ACK. - rp.set_received(*NOW, 0, true); - assert_eq!(Some(*NOW), rp.ack_time()); - assert!(rp.ack_now(*NOW)); + rp.set_received(now(), 0, true); + write_frame(&mut rp); + + // A new packet ordinarily doesn't result in an ACK, but this time it does. + rp.set_received(now() + RTT, 2, true); + write_frame_at(&mut rp, now() + RTT); } #[test] @@ -910,29 +931,29 @@ mod tests { // Set tolerance to 2 and then it takes three packets. 
rp.ack_freq(0, 2, Duration::from_millis(10), true); - rp.set_received(*NOW, 1, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 2, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 3, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 1, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 2, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_eq!(Some(now()), rp.ack_time()); } #[test] fn ooo_no_ack_delay_threshold_gap() { let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData); - rp.set_received(*NOW, 1, true); + rp.set_received(now(), 1, true); write_frame(&mut rp); // Set tolerance to 2 and then it takes three packets. rp.ack_freq(0, 2, Duration::from_millis(10), true); - rp.set_received(*NOW, 3, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 4, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 5, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 4, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 5, true); + assert_eq!(Some(now()), rp.ack_time()); } /// Test that an in-order packet that is not ack-eliciting doesn't @@ -943,13 +964,13 @@ mod tests { rp.ack_freq(0, 1, Duration::from_millis(10), true); // This should be ignored. - rp.set_received(*NOW, 0, false); - assert_ne!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 0, false); + assert_ne!(Some(now()), rp.ack_time()); // Skip 1 (it has no effect). 
- rp.set_received(*NOW, 2, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 3, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 2, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_eq!(Some(now()), rp.ack_time()); } /// If a packet that is not ack-eliciting is reordered, that's fine too. @@ -959,16 +980,16 @@ mod tests { rp.ack_freq(0, 1, Duration::from_millis(10), false); // These are out of order, but they are not ack-eliciting. - rp.set_received(*NOW, 1, false); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 0, false); - assert_ne!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 1, false); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 0, false); + assert_ne!(Some(now()), rp.ack_time()); // These are in order. - rp.set_received(*NOW, 2, true); - assert_ne!(Some(*NOW), rp.ack_time()); - rp.set_received(*NOW, 3, true); - assert_eq!(Some(*NOW), rp.ack_time()); + rp.set_received(now(), 2, true); + assert_ne!(Some(now()), rp.ack_time()); + rp.set_received(now(), 3, true); + assert_eq!(Some(now()), rp.ack_time()); } #[test] @@ -980,23 +1001,23 @@ mod tests { tracker .get_mut(PacketNumberSpace::Handshake) .unwrap() - .set_received(*NOW, 0, false); - assert_eq!(None, tracker.ack_time(*NOW)); + .set_received(now(), 0, false); + assert_eq!(None, tracker.ack_time(now())); // This should be delayed. tracker .get_mut(PacketNumberSpace::ApplicationData) .unwrap() - .set_received(*NOW, 0, true); - assert_eq!(Some(*NOW + DELAY), tracker.ack_time(*NOW)); + .set_received(now(), 0, true); + assert_eq!(Some(now() + DELAY), tracker.ack_time(now())); // This should move the time forward. 
- let later = *NOW + (DELAY / 2); + let later = now() + (DELAY / 2); tracker .get_mut(PacketNumberSpace::Initial) .unwrap() .set_received(later, 0, true); - assert_eq!(Some(later), tracker.ack_time(*NOW)); + assert_eq!(Some(later), tracker.ack_time(now())); } #[test] @@ -1020,32 +1041,31 @@ mod tests { tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 0, true); + .set_received(now(), 0, true); // The reference time for `ack_time` has to be in the past or we filter out the timer. assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_some()); let mut tokens = Vec::new(); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - &mut builder, - &mut tokens, - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + now(), + RTT, + &mut builder, + &mut tokens, + &mut stats, + ); assert_eq!(stats.ack, 1); // Mark another packet as received so we have cause to send another ACK in that space. tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 1, true); + .set_received(now(), 1, true); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_some()); // Now drop that space. 
@@ -1053,17 +1073,16 @@ mod tests { assert!(tracker.get_mut(PacketNumberSpace::Initial).is_none()); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_none()); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - &mut builder, - &mut tokens, - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + now(), + RTT, + &mut builder, + &mut tokens, + &mut stats, + ); assert_eq!(stats.ack, 1); if let RecoveryToken::Ack(tok) = &tokens[0] { tracker.acked(tok); // Should be a noop. @@ -1078,24 +1097,23 @@ mod tests { tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 0, true); + .set_received(now(), 0, true); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_some()); let mut builder = PacketBuilder::short(Encoder::new(), false, []); builder.set_limit(10); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - &mut builder, - &mut Vec::new(), - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + now(), + RTT, + &mut builder, + &mut Vec::new(), + &mut stats, + ); assert_eq!(stats.ack, 0); assert_eq!(builder.len(), 1); // Only the short packet header has been added. 
} @@ -1106,28 +1124,27 @@ mod tests { tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 0, true); + .set_received(now(), 0, true); tracker .get_mut(PacketNumberSpace::Initial) .unwrap() - .set_received(*NOW, 2, true); + .set_received(now(), 2, true); assert!(tracker - .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) + .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap()) .is_some()); let mut builder = PacketBuilder::short(Encoder::new(), false, []); builder.set_limit(32); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - &mut builder, - &mut Vec::new(), - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + now(), + RTT, + &mut builder, + &mut Vec::new(), + &mut stats, + ); assert_eq!(stats.ack, 1); let mut dec = builder.as_decoder(); @@ -1145,19 +1162,19 @@ mod tests { let mut tracker = AckTracker::default(); // While we have multiple PN spaces, we ignore ACK timers from the past. - // Send out of order to cause the delayed ack timer to be set to `*NOW`. + // Send out of order to cause the delayed ack timer to be set to `now()`. tracker .get_mut(PacketNumberSpace::ApplicationData) .unwrap() - .set_received(*NOW, 3, true); - assert!(tracker.ack_time(*NOW + Duration::from_millis(1)).is_none()); + .set_received(now(), 3, true); + assert!(tracker.ack_time(now() + Duration::from_millis(1)).is_none()); // When we are reduced to one space, that filter is off. tracker.drop_space(PacketNumberSpace::Initial); tracker.drop_space(PacketNumberSpace::Handshake); assert_eq!( - tracker.ack_time(*NOW + Duration::from_millis(1)), - Some(*NOW) + tracker.ack_time(now() + Duration::from_millis(1)), + Some(now()) ); } diff --git a/neqo-transport/src/version.rs b/neqo-transport/src/version.rs index 71a1d7a8e6..eee598fdd0 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -4,15 +4,16 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use crate::{Error, Res}; use neqo_common::qdebug; -use std::convert::TryFrom; + +use crate::{Error, Res}; pub type WireVersion = u32; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] pub enum Version { Version2, + #[default] Version1, Draft29, Draft30, @@ -21,9 +22,10 @@ pub enum Version { } impl Version { + #[must_use] pub const fn wire_version(self) -> WireVersion { match self { - Self::Version2 => 0x709a50c4, + Self::Version2 => 0x6b33_43cf, Self::Version1 => 1, Self::Draft29 => 0xff00_0000 + 29, Self::Draft30 => 0xff00_0000 + 30, @@ -34,8 +36,8 @@ impl Version { pub(crate) fn initial_salt(self) -> &'static [u8] { const INITIAL_SALT_V2: &[u8] = &[ - 0xa7, 0x07, 0xc2, 0x03, 0xa5, 0x9b, 0x47, 0x18, 0x4a, 0x1d, 0x62, 0xca, 0x57, 0x04, - 0x06, 0xea, 0x7a, 0xe3, 0xe5, 0xd3, + 0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb, 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, + 0x9d, 0xcb, 0xf9, 0xbd, 0x2e, 0xd9, ]; const INITIAL_SALT_V1: &[u8] = &[ 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, @@ -62,20 +64,20 @@ impl Version { } pub(crate) fn retry_secret(self) -> &'static [u8] { - const RETRY_SECRET_29: &[u8] = &[ - 0x8b, 0x0d, 0x37, 0xeb, 0x85, 0x35, 0x02, 0x2e, 0xbc, 0x8d, 0x76, 0xa2, 0x07, 0xd8, - 0x0d, 0xf2, 0x26, 0x46, 0xec, 0x06, 0xdc, 0x80, 0x96, 0x42, 0xc3, 0x0a, 0x8b, 0xaa, - 0x2b, 0xaa, 0xff, 0x4c, + const RETRY_SECRET_V2: &[u8] = &[ + 0xc4, 0xdd, 0x24, 0x84, 0xd6, 0x81, 0xae, 0xfa, 0x4f, 0xf4, 0xd6, 0x9c, 0x2c, 0x20, + 0x29, 0x99, 0x84, 0xa7, 0x65, 0xa5, 0xd3, 0xc3, 0x19, 0x82, 0xf3, 0x8f, 0xc7, 0x41, + 0x62, 0x15, 0x5e, 0x9f, ]; const RETRY_SECRET_V1: &[u8] = &[ 0xd9, 0xc9, 0x94, 0x3e, 0x61, 0x01, 0xfd, 0x20, 0x00, 0x21, 0x50, 0x6b, 0xcc, 0x02, 0x81, 0x4c, 0x73, 0x03, 0x0f, 0x25, 0xc7, 0x9d, 0x71, 0xce, 0x87, 0x6e, 0xca, 0x87, 0x6e, 0x6f, 0xca, 0x8e, ]; - const RETRY_SECRET_V2: 
&[u8] = &[ - 0x34, 0x25, 0xc2, 0x0c, 0xf8, 0x87, 0x79, 0xdf, 0x2f, 0xf7, 0x1e, 0x8a, 0xbf, 0xa7, - 0x82, 0x49, 0x89, 0x1e, 0x76, 0x3b, 0xbe, 0xd2, 0xf1, 0x3c, 0x04, 0x83, 0x43, 0xd3, - 0x48, 0xc0, 0x60, 0xe2, + const RETRY_SECRET_29: &[u8] = &[ + 0x8b, 0x0d, 0x37, 0xeb, 0x85, 0x35, 0x02, 0x2e, 0xbc, 0x8d, 0x76, 0xa2, 0x07, 0xd8, + 0x0d, 0xf2, 0x26, 0x46, 0xec, 0x06, 0xdc, 0x80, 0x96, 0x42, 0xc3, 0x0a, 0x8b, 0xaa, + 0x2b, 0xaa, 0xff, 0x4c, ]; match self { Self::Version2 => RETRY_SECRET_V2, @@ -92,6 +94,7 @@ impl Version { } /// Determine if `self` can be upgraded to `other` compatibly. + #[must_use] pub fn is_compatible(self, other: Self) -> bool { self == other || matches!( @@ -100,6 +103,7 @@ impl Version { ) } + #[must_use] pub fn all() -> Vec { vec![ Self::Version2, @@ -119,19 +123,13 @@ impl Version { } } -impl Default for Version { - fn default() -> Self { - Self::Version1 - } -} - impl TryFrom for Version { type Error = Error; fn try_from(wire: WireVersion) -> Res { if wire == 1 { Ok(Self::Version1) - } else if wire == 0x709a50c4 { + } else if wire == 0x6b33_43cf { Ok(Self::Version2) } else if wire == 0xff00_0000 + 29 { Ok(Self::Draft29) @@ -174,15 +172,20 @@ pub struct VersionConfig { } impl VersionConfig { + /// # Panics + /// When `all` does not include `initial`. + #[must_use] pub fn new(initial: Version, all: Vec) -> Self { assert!(all.contains(&initial)); Self { initial, all } } + #[must_use] pub fn initial(&self) -> Version { self.initial } + #[must_use] pub fn all(&self) -> &[Version] { &self.all } diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index 3bc97a0528..e36e66f753 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -4,10 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] #![allow(unused)] +use std::{cell::RefCell, mem, ops::Range, rc::Rc}; + use neqo_common::{event::Provider, hex_with_len, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, @@ -19,13 +19,7 @@ use neqo_transport::{ server::{ActiveConnectionRef, Server, ValidateAddress}, Connection, ConnectionEvent, ConnectionParameters, State, }; -use test_fixture::{self, default_client, now, CountingConnectionIdGenerator}; - -use std::cell::RefCell; -use std::convert::TryFrom; -use std::mem; -use std::ops::Range; -use std::rc::Rc; +use test_fixture::{default_client, now, CountingConnectionIdGenerator}; /// Create a server. This is different than the one in the fixture, which is a single connection. pub fn new_server(params: ConnectionParameters) -> Server { @@ -63,28 +57,28 @@ pub fn connect(client: &mut Connection, server: &mut Server) -> ActiveConnection server.set_validation(ValidateAddress::Never); assert_eq!(*client.state(), State::Init); - let dgram = client.process(None, now()).dgram(); // ClientHello - assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // ServerHello... - assert!(dgram.is_some()); + let out = client.process(None, now()); // ClientHello + assert!(out.as_dgram_ref().is_some()); + let out = server.process(out.as_dgram_ref(), now()); // ServerHello... + assert!(out.as_dgram_ref().is_some()); // Ingest the server Certificate. - let dgram = client.process(dgram, now()).dgram(); - assert!(dgram.is_some()); // This should just be an ACK. - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_none()); // So the server should have nothing to say. + let out = client.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // This should just be an ACK. 
+ let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_none()); // So the server should have nothing to say. // Now mark the server as authenticated. client.authenticated(AuthenticationStatus::Ok, now()); - let dgram = client.process(None, now()).dgram(); - assert!(dgram.is_some()); + let out = client.process(None, now()); + assert!(out.as_dgram_ref().is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_some()); // ACK + HANDSHAKE_DONE + NST + let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // ACK + HANDSHAKE_DONE + NST // Have the client process the HANDSHAKE_DONE. - let dgram = client.process(dgram, now()).dgram(); - assert!(dgram.is_none()); + let out = client.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_none()); assert_eq!(*client.state(), State::Confirmed); connected_server(server) @@ -152,14 +146,7 @@ pub fn initial_aead_and_hp(dcid: &[u8], role: Role) -> (Aead, HpKey) { ) .unwrap(); ( - Aead::new( - false, - TLS_VERSION_1_3, - TLS_AES_128_GCM_SHA256, - &secret, - "quic ", - ) - .unwrap(), + Aead::new(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic ").unwrap(), HpKey::extract(TLS_VERSION_1_3, TLS_AES_128_GCM_SHA256, &secret, "quic hp").unwrap(), ) } @@ -225,14 +212,14 @@ pub fn generate_ticket(server: &mut Server) -> ResumptionToken { let mut server_conn = connect(&mut client, server); server_conn.borrow_mut().send_ticket(now(), &[]).unwrap(); - let dgram = server.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); // Consume ticket, ignore output. + let out = server.process(None, now()); + client.process_input(out.as_dgram_ref().unwrap(), now()); // Consume ticket, ignore output. let ticket = find_ticket(&mut client); // Have the client close the connection and then let the server clean up. 
client.close(now(), 0, "got a ticket"); - let dgram = client.process_output(now()).dgram(); - mem::drop(server.process(dgram, now())); + let out = client.process_output(now()); + mem::drop(server.process(out.as_dgram_ref(), now())); // Calling active_connections clears the set of active connections. assert_eq!(server.active_connections().len(), 1); ticket diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index 83de136d91..86fe9d36fc 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -5,94 +5,92 @@ // except according to those terms. // Tests with the test vectors from the spec. -#![deny(clippy::pedantic)] -#![cfg(not(feature = "fuzzing"))] -use neqo_common::Datagram; +#![cfg(not(feature = "disable-encryption"))] + +use std::{cell::RefCell, rc::Rc}; + use neqo_transport::{ Connection, ConnectionParameters, RandomConnectionIdGenerator, State, Version, }; -use test_fixture::{self, addr, now}; - -use std::cell::RefCell; -use std::rc::Rc; +use test_fixture::{datagram, now}; const INITIAL_PACKET_V2: &[u8] = &[ - 0xdd, 0x70, 0x9a, 0x50, 0xc4, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00, - 0x44, 0x9e, 0x43, 0x91, 0xd8, 0x48, 0x23, 0xb8, 0xe6, 0x10, 0x58, 0x9c, 0x83, 0xc9, 0x2d, 0x0e, - 0x97, 0xeb, 0x7a, 0x6e, 0x50, 0x03, 0xf5, 0x77, 0x64, 0xc5, 0xc7, 0xf0, 0x09, 0x5b, 0xa5, 0x4b, - 0x90, 0x81, 0x8f, 0x1b, 0xfe, 0xec, 0xc1, 0xc9, 0x7c, 0x54, 0xfc, 0x73, 0x1e, 0xdb, 0xd2, 0xa2, - 0x44, 0xe3, 0xb1, 0xe6, 0x39, 0xa9, 0xbc, 0x75, 0xed, 0x54, 0x5b, 0x98, 0x64, 0x93, 0x43, 0xb2, - 0x53, 0x61, 0x5e, 0xc6, 0xb3, 0xe4, 0xdf, 0x0f, 0xd2, 0xe7, 0xfe, 0x9d, 0x69, 0x1a, 0x09, 0xe6, - 0xa1, 0x44, 0xb4, 0x36, 0xd8, 0xa2, 0xc0, 0x88, 0xa4, 0x04, 0x26, 0x23, 0x40, 0xdf, 0xd9, 0x95, - 0xec, 0x38, 0x65, 0x69, 0x4e, 0x30, 0x26, 0xec, 0xd8, 0xc6, 0xd2, 0x56, 0x1a, 0x5a, 0x36, 0x67, - 0x2a, 0x10, 0x05, 0x01, 0x81, 0x68, 0xc0, 0xf0, 0x81, 0xc1, 0x0e, 0x2b, 0xf1, 0x4d, 0x55, 0x0c, - 0x97, 0x7e, 
0x28, 0xbb, 0x9a, 0x75, 0x9c, 0x57, 0xd0, 0xf7, 0xff, 0xb1, 0xcd, 0xfb, 0x40, 0xbd, - 0x77, 0x4d, 0xec, 0x58, 0x96, 0x57, 0x54, 0x20, 0x47, 0xdf, 0xfe, 0xfa, 0x56, 0xfc, 0x80, 0x89, - 0xa4, 0xd1, 0xef, 0x37, 0x9c, 0x81, 0xba, 0x3d, 0xf7, 0x1a, 0x05, 0xdd, 0xc7, 0x92, 0x83, 0x40, - 0x77, 0x59, 0x10, 0xfe, 0xb3, 0xce, 0x4c, 0xbc, 0xfd, 0x8d, 0x25, 0x3e, 0xdd, 0x05, 0xf1, 0x61, - 0x45, 0x8f, 0x9d, 0xc4, 0x4b, 0xea, 0x01, 0x7c, 0x31, 0x17, 0xcc, 0xa7, 0x06, 0x5a, 0x31, 0x5d, - 0xed, 0xa9, 0x46, 0x4e, 0x67, 0x2e, 0xc8, 0x0c, 0x3f, 0x79, 0xac, 0x99, 0x34, 0x37, 0xb4, 0x41, - 0xef, 0x74, 0x22, 0x7e, 0xcc, 0x4d, 0xc9, 0xd5, 0x97, 0xf6, 0x6a, 0xb0, 0xab, 0x8d, 0x21, 0x4b, - 0x55, 0x84, 0x0c, 0x70, 0x34, 0x9d, 0x76, 0x16, 0xcb, 0xe3, 0x8e, 0x5e, 0x1d, 0x05, 0x2d, 0x07, - 0xf1, 0xfe, 0xdb, 0x3d, 0xd3, 0xc4, 0xd8, 0xce, 0x29, 0x57, 0x24, 0x94, 0x5e, 0x67, 0xed, 0x2e, - 0xef, 0xcd, 0x9f, 0xb5, 0x24, 0x72, 0x38, 0x7f, 0x31, 0x8e, 0x3d, 0x9d, 0x23, 0x3b, 0xe7, 0xdf, - 0xc7, 0x9d, 0x6b, 0xf6, 0x08, 0x0d, 0xcb, 0xbb, 0x41, 0xfe, 0xb1, 0x80, 0xd7, 0x85, 0x88, 0x49, - 0x7c, 0x3e, 0x43, 0x9d, 0x38, 0xc3, 0x34, 0x74, 0x8d, 0x2b, 0x56, 0xfd, 0x19, 0xab, 0x36, 0x4d, - 0x05, 0x7a, 0x9b, 0xd5, 0xa6, 0x99, 0xae, 0x14, 0x5d, 0x7f, 0xdb, 0xc8, 0xf5, 0x77, 0x75, 0x18, - 0x1b, 0x0a, 0x97, 0xc3, 0xbd, 0xed, 0xc9, 0x1a, 0x55, 0x5d, 0x6c, 0x9b, 0x86, 0x34, 0xe1, 0x06, - 0xd8, 0xc9, 0xca, 0x45, 0xa9, 0xd5, 0x45, 0x0a, 0x76, 0x79, 0xed, 0xc5, 0x45, 0xda, 0x91, 0x02, - 0x5b, 0xc9, 0x3a, 0x7c, 0xf9, 0xa0, 0x23, 0xa0, 0x66, 0xff, 0xad, 0xb9, 0x71, 0x7f, 0xfa, 0xf3, - 0x41, 0x4c, 0x3b, 0x64, 0x6b, 0x57, 0x38, 0xb3, 0xcc, 0x41, 0x16, 0x50, 0x2d, 0x18, 0xd7, 0x9d, - 0x82, 0x27, 0x43, 0x63, 0x06, 0xd9, 0xb2, 0xb3, 0xaf, 0xc6, 0xc7, 0x85, 0xce, 0x3c, 0x81, 0x7f, - 0xeb, 0x70, 0x3a, 0x42, 0xb9, 0xc8, 0x3b, 0x59, 0xf0, 0xdc, 0xef, 0x12, 0x45, 0xd0, 0xb3, 0xe4, - 0x02, 0x99, 0x82, 0x1e, 0xc1, 0x95, 0x49, 0xce, 0x48, 0x97, 0x14, 0xfe, 0x26, 0x11, 0xe7, 0x2c, - 0xd8, 0x82, 0xf4, 0xf7, 0x0d, 0xce, 0x7d, 0x36, 
0x71, 0x29, 0x6f, 0xc0, 0x45, 0xaf, 0x5c, 0x9f, - 0x63, 0x0d, 0x7b, 0x49, 0xa3, 0xeb, 0x82, 0x1b, 0xbc, 0xa6, 0x0f, 0x19, 0x84, 0xdc, 0xe6, 0x64, - 0x91, 0x71, 0x3b, 0xfe, 0x06, 0x00, 0x1a, 0x56, 0xf5, 0x1b, 0xb3, 0xab, 0xe9, 0x2f, 0x79, 0x60, - 0x54, 0x7c, 0x4d, 0x0a, 0x70, 0xf4, 0xa9, 0x62, 0xb3, 0xf0, 0x5d, 0xc2, 0x5a, 0x34, 0xbb, 0xe8, - 0x30, 0xa7, 0xea, 0x47, 0x36, 0xd3, 0xb0, 0x16, 0x17, 0x23, 0x50, 0x0d, 0x82, 0xbe, 0xda, 0x9b, - 0xe3, 0x32, 0x7a, 0xf2, 0xaa, 0x41, 0x38, 0x21, 0xff, 0x67, 0x8b, 0x2a, 0x87, 0x6e, 0xc4, 0xb0, - 0x0b, 0xb6, 0x05, 0xff, 0xcc, 0x39, 0x17, 0xff, 0xdc, 0x27, 0x9f, 0x18, 0x7d, 0xaa, 0x2f, 0xce, - 0x8c, 0xde, 0x12, 0x19, 0x80, 0xbb, 0xa8, 0xec, 0x8f, 0x44, 0xca, 0x56, 0x2b, 0x0f, 0x13, 0x19, - 0x14, 0xc9, 0x01, 0xcf, 0xbd, 0x84, 0x74, 0x08, 0xb7, 0x78, 0xe6, 0x73, 0x8c, 0x7b, 0xb5, 0xb1, - 0xb3, 0xf9, 0x7d, 0x01, 0xb0, 0xa2, 0x4d, 0xcc, 0xa4, 0x0e, 0x3b, 0xed, 0x29, 0x41, 0x1b, 0x1b, - 0xa8, 0xf6, 0x08, 0x43, 0xc4, 0xa2, 0x41, 0x02, 0x1b, 0x23, 0x13, 0x2b, 0x95, 0x00, 0x50, 0x9b, - 0x9a, 0x35, 0x16, 0xd4, 0xa9, 0xdd, 0x41, 0xd3, 0xba, 0xcb, 0xcd, 0x42, 0x6b, 0x45, 0x13, 0x93, - 0x52, 0x18, 0x28, 0xaf, 0xed, 0xcf, 0x20, 0xfa, 0x46, 0xac, 0x24, 0xf4, 0x4a, 0x8e, 0x29, 0x73, - 0x30, 0xb1, 0x67, 0x05, 0xd5, 0xd5, 0xf7, 0x98, 0xef, 0xf9, 0xe9, 0x13, 0x4a, 0x06, 0x59, 0x79, - 0x87, 0xa1, 0xdb, 0x46, 0x17, 0xca, 0xa2, 0xd9, 0x38, 0x37, 0x73, 0x08, 0x29, 0xd4, 0xd8, 0x9e, - 0x16, 0x41, 0x3b, 0xe4, 0xd8, 0xa8, 0xa3, 0x8a, 0x7e, 0x62, 0x26, 0x62, 0x3b, 0x64, 0xa8, 0x20, - 0x17, 0x8e, 0xc3, 0xa6, 0x69, 0x54, 0xe1, 0x07, 0x10, 0xe0, 0x43, 0xae, 0x73, 0xdd, 0x3f, 0xb2, - 0x71, 0x5a, 0x05, 0x25, 0xa4, 0x63, 0x43, 0xfb, 0x75, 0x90, 0xe5, 0xea, 0xc7, 0xee, 0x55, 0xfc, - 0x81, 0x0e, 0x0d, 0x8b, 0x4b, 0x8f, 0x7b, 0xe8, 0x2c, 0xd5, 0xa2, 0x14, 0x57, 0x5a, 0x1b, 0x99, - 0x62, 0x9d, 0x47, 0xa9, 0xb2, 0x81, 0xb6, 0x13, 0x48, 0xc8, 0x62, 0x7c, 0xab, 0x38, 0xe2, 0xa6, - 0x4d, 0xb6, 0x62, 0x6e, 0x97, 0xbb, 0x8f, 0x77, 0xbd, 0xcb, 0x0f, 0xee, 0x47, 0x6a, 
0xed, 0xd7, - 0xba, 0x8f, 0x54, 0x41, 0xac, 0xaa, 0xb0, 0x0f, 0x44, 0x32, 0xed, 0xab, 0x37, 0x91, 0x04, 0x7d, - 0x90, 0x91, 0xb2, 0xa7, 0x53, 0xf0, 0x35, 0x64, 0x84, 0x31, 0xf6, 0xd1, 0x2f, 0x7d, 0x6a, 0x68, - 0x1e, 0x64, 0xc8, 0x61, 0xf4, 0xac, 0x91, 0x1a, 0x0f, 0x7d, 0x6e, 0xc0, 0x49, 0x1a, 0x78, 0xc9, - 0xf1, 0x92, 0xf9, 0x6b, 0x3a, 0x5e, 0x75, 0x60, 0xa3, 0xf0, 0x56, 0xbc, 0x1c, 0xa8, 0x59, 0x83, - 0x67, 0xad, 0x6a, 0xcb, 0x6f, 0x2e, 0x03, 0x4c, 0x7f, 0x37, 0xbe, 0xeb, 0x9e, 0xd4, 0x70, 0xc4, - 0x30, 0x4a, 0xf0, 0x10, 0x7f, 0x0e, 0xb9, 0x19, 0xbe, 0x36, 0xa8, 0x6f, 0x68, 0xf3, 0x7f, 0xa6, - 0x1d, 0xae, 0x7a, 0xff, 0x14, 0xde, 0xcd, 0x67, 0xec, 0x31, 0x57, 0xa1, 0x14, 0x88, 0xa1, 0x4f, - 0xed, 0x01, 0x42, 0x82, 0x83, 0x48, 0xf5, 0xf6, 0x08, 0xb0, 0xfe, 0x03, 0xe1, 0xf3, 0xc0, 0xaf, - 0x3a, 0xcc, 0xa0, 0xce, 0x36, 0x85, 0x2e, 0xd4, 0x2e, 0x22, 0x0a, 0xe9, 0xab, 0xf8, 0xf8, 0x90, - 0x6f, 0x00, 0xf1, 0xb8, 0x6b, 0xff, 0x85, 0x04, 0xc8, 0xf1, 0x6c, 0x78, 0x4f, 0xd5, 0x2d, 0x25, - 0xe0, 0x13, 0xff, 0x4f, 0xda, 0x90, 0x3e, 0x9e, 0x1e, 0xb4, 0x53, 0xc1, 0x46, 0x4b, 0x11, 0x96, - 0x6d, 0xb9, 0xb2, 0x8e, 0x8f, 0x26, 0xa3, 0xfc, 0x41, 0x9e, 0x6a, 0x60, 0xa4, 0x8d, 0x4c, 0x72, - 0x14, 0xee, 0x9c, 0x6c, 0x6a, 0x12, 0xb6, 0x8a, 0x32, 0xca, 0xc8, 0xf6, 0x15, 0x80, 0xc6, 0x4f, - 0x29, 0xcb, 0x69, 0x22, 0x40, 0x87, 0x83, 0xc6, 0xd1, 0x2e, 0x72, 0x5b, 0x01, 0x4f, 0xe4, 0x85, - 0xcd, 0x17, 0xe4, 0x84, 0xc5, 0x95, 0x2b, 0xf9, 0x9b, 0xc9, 0x49, 0x41, 0xd4, 0xb1, 0x91, 0x9d, - 0x04, 0x31, 0x7b, 0x8a, 0xa1, 0xbd, 0x37, 0x54, 0xec, 0xba, 0xa1, 0x0e, 0xc2, 0x27, 0xde, 0x85, - 0x40, 0x69, 0x5b, 0xf2, 0xfb, 0x8e, 0xe5, 0x6f, 0x6d, 0xc5, 0x26, 0xef, 0x36, 0x66, 0x25, 0xb9, - 0x1a, 0xa4, 0x97, 0x0b, 0x6f, 0xfa, 0x5c, 0x82, 0x84, 0xb9, 0xb5, 0xab, 0x85, 0x2b, 0x90, 0x5f, - 0x9d, 0x83, 0xf5, 0x66, 0x9c, 0x05, 0x35, 0xbc, 0x37, 0x7b, 0xcc, 0x05, 0xad, 0x5e, 0x48, 0xe2, - 0x81, 0xec, 0x0e, 0x19, 0x17, 0xca, 0x3c, 0x6a, 0x47, 0x1f, 0x8d, 0xa0, 0x89, 0x4b, 0xc8, 0x2a, - 0xc2, 0xa8, 0x96, 0x54, 
0x05, 0xd6, 0xee, 0xf3, 0xb5, 0xe2, 0x93, 0xa8, 0x8f, 0xda, 0x20, 0x3f, - 0x09, 0xbd, 0xc7, 0x27, 0x57, 0xb1, 0x07, 0xab, 0x14, 0x88, 0x0e, 0xaa, 0x3e, 0xf7, 0x04, 0x5b, - 0x58, 0x0f, 0x48, 0x21, 0xce, 0x6d, 0xd3, 0x25, 0xb5, 0xa9, 0x06, 0x55, 0xd8, 0xc5, 0xb5, 0x5f, - 0x76, 0xfb, 0x84, 0x62, 0x79, 0xa9, 0xb5, 0x18, 0xc5, 0xe9, 0xb9, 0xa2, 0x11, 0x65, 0xc5, 0x09, - 0x3e, 0xd4, 0x9b, 0xaa, 0xac, 0xad, 0xf1, 0xf2, 0x18, 0x73, 0x26, 0x6c, 0x76, 0x7f, 0x67, 0x69, + 0xd7, 0x6b, 0x33, 0x43, 0xcf, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00, + 0x44, 0x9e, 0xa0, 0xc9, 0x5e, 0x82, 0xff, 0xe6, 0x7b, 0x6a, 0xbc, 0xdb, 0x42, 0x98, 0xb4, 0x85, + 0xdd, 0x04, 0xde, 0x80, 0x60, 0x71, 0xbf, 0x03, 0xdc, 0xee, 0xbf, 0xa1, 0x62, 0xe7, 0x5d, 0x6c, + 0x96, 0x05, 0x8b, 0xdb, 0xfb, 0x12, 0x7c, 0xdf, 0xcb, 0xf9, 0x03, 0x38, 0x8e, 0x99, 0xad, 0x04, + 0x9f, 0x9a, 0x3d, 0xd4, 0x42, 0x5a, 0xe4, 0xd0, 0x99, 0x2c, 0xff, 0xf1, 0x8e, 0xcf, 0x0f, 0xdb, + 0x5a, 0x84, 0x2d, 0x09, 0x74, 0x70, 0x52, 0xf1, 0x7a, 0xc2, 0x05, 0x3d, 0x21, 0xf5, 0x7c, 0x5d, + 0x25, 0x0f, 0x2c, 0x4f, 0x0e, 0x02, 0x02, 0xb7, 0x07, 0x85, 0xb7, 0x94, 0x6e, 0x99, 0x2e, 0x58, + 0xa5, 0x9a, 0xc5, 0x2d, 0xea, 0x67, 0x74, 0xd4, 0xf0, 0x3b, 0x55, 0x54, 0x52, 0x43, 0xcf, 0x1a, + 0x12, 0x83, 0x4e, 0x3f, 0x24, 0x9a, 0x78, 0xd3, 0x95, 0xe0, 0xd1, 0x8f, 0x4d, 0x76, 0x60, 0x04, + 0xf1, 0xa2, 0x67, 0x48, 0x02, 0xa7, 0x47, 0xea, 0xa9, 0x01, 0xc3, 0xf1, 0x0c, 0xda, 0x55, 0x00, + 0xcb, 0x91, 0x22, 0xfa, 0xa9, 0xf1, 0xdf, 0x66, 0xc3, 0x92, 0x07, 0x9a, 0x1b, 0x40, 0xf0, 0xde, + 0x1c, 0x60, 0x54, 0x19, 0x6a, 0x11, 0xcb, 0xea, 0x40, 0xaf, 0xb6, 0xef, 0x52, 0x53, 0xcd, 0x68, + 0x18, 0xf6, 0x62, 0x5e, 0xfc, 0xe3, 0xb6, 0xde, 0xf6, 0xba, 0x7e, 0x4b, 0x37, 0xa4, 0x0f, 0x77, + 0x32, 0xe0, 0x93, 0xda, 0xa7, 0xd5, 0x21, 0x90, 0x93, 0x5b, 0x8d, 0xa5, 0x89, 0x76, 0xff, 0x33, + 0x12, 0xae, 0x50, 0xb1, 0x87, 0xc1, 0x43, 0x3c, 0x0f, 0x02, 0x8e, 0xdc, 0xc4, 0xc2, 0x83, 0x8b, + 0x6a, 0x9b, 0xfc, 0x22, 0x6c, 0xa4, 0xb4, 0x53, 0x0e, 0x7a, 
0x4c, 0xce, 0xe1, 0xbf, 0xa2, 0xa3, + 0xd3, 0x96, 0xae, 0x5a, 0x3f, 0xb5, 0x12, 0x38, 0x4b, 0x2f, 0xdd, 0x85, 0x1f, 0x78, 0x4a, 0x65, + 0xe0, 0x3f, 0x2c, 0x4f, 0xbe, 0x11, 0xa5, 0x3c, 0x77, 0x77, 0xc0, 0x23, 0x46, 0x22, 0x39, 0xdd, + 0x6f, 0x75, 0x21, 0xa3, 0xf6, 0xc7, 0xd5, 0xdd, 0x3e, 0xc9, 0xb3, 0xf2, 0x33, 0x77, 0x3d, 0x4b, + 0x46, 0xd2, 0x3c, 0xc3, 0x75, 0xeb, 0x19, 0x8c, 0x63, 0x30, 0x1c, 0x21, 0x80, 0x1f, 0x65, 0x20, + 0xbc, 0xfb, 0x79, 0x66, 0xfc, 0x49, 0xb3, 0x93, 0xf0, 0x06, 0x1d, 0x97, 0x4a, 0x27, 0x06, 0xdf, + 0x8c, 0x4a, 0x94, 0x49, 0xf1, 0x1d, 0x7f, 0x3d, 0x2d, 0xcb, 0xb9, 0x0c, 0x6b, 0x87, 0x70, 0x45, + 0x63, 0x6e, 0x7c, 0x0c, 0x0f, 0xe4, 0xeb, 0x0f, 0x69, 0x75, 0x45, 0x46, 0x0c, 0x80, 0x69, 0x10, + 0xd2, 0xc3, 0x55, 0xf1, 0xd2, 0x53, 0xbc, 0x9d, 0x24, 0x52, 0xaa, 0xa5, 0x49, 0xe2, 0x7a, 0x1f, + 0xac, 0x7c, 0xf4, 0xed, 0x77, 0xf3, 0x22, 0xe8, 0xfa, 0x89, 0x4b, 0x6a, 0x83, 0x81, 0x0a, 0x34, + 0xb3, 0x61, 0x90, 0x17, 0x51, 0xa6, 0xf5, 0xeb, 0x65, 0xa0, 0x32, 0x6e, 0x07, 0xde, 0x7c, 0x12, + 0x16, 0xcc, 0xce, 0x2d, 0x01, 0x93, 0xf9, 0x58, 0xbb, 0x38, 0x50, 0xa8, 0x33, 0xf7, 0xae, 0x43, + 0x2b, 0x65, 0xbc, 0x5a, 0x53, 0x97, 0x5c, 0x15, 0x5a, 0xa4, 0xbc, 0xb4, 0xf7, 0xb2, 0xc4, 0xe5, + 0x4d, 0xf1, 0x6e, 0xfa, 0xf6, 0xdd, 0xea, 0x94, 0xe2, 0xc5, 0x0b, 0x4c, 0xd1, 0xdf, 0xe0, 0x60, + 0x17, 0xe0, 0xe9, 0xd0, 0x29, 0x00, 0xcf, 0xfe, 0x19, 0x35, 0xe0, 0x49, 0x1d, 0x77, 0xff, 0xb4, + 0xfd, 0xf8, 0x52, 0x90, 0xfd, 0xd8, 0x93, 0xd5, 0x77, 0xb1, 0x13, 0x1a, 0x61, 0x0e, 0xf6, 0xa5, + 0xc3, 0x2b, 0x2e, 0xe0, 0x29, 0x36, 0x17, 0xa3, 0x7c, 0xbb, 0x08, 0xb8, 0x47, 0x74, 0x1c, 0x3b, + 0x80, 0x17, 0xc2, 0x5c, 0xa9, 0x05, 0x2c, 0xa1, 0x07, 0x9d, 0x8b, 0x78, 0xae, 0xbd, 0x47, 0x87, + 0x6d, 0x33, 0x0a, 0x30, 0xf6, 0xa8, 0xc6, 0xd6, 0x1d, 0xd1, 0xab, 0x55, 0x89, 0x32, 0x9d, 0xe7, + 0x14, 0xd1, 0x9d, 0x61, 0x37, 0x0f, 0x81, 0x49, 0x74, 0x8c, 0x72, 0xf1, 0x32, 0xf0, 0xfc, 0x99, + 0xf3, 0x4d, 0x76, 0x6c, 0x69, 0x38, 0x59, 0x70, 0x40, 0xd8, 0xf9, 0xe2, 0xbb, 0x52, 0x2f, 0xf9, + 
0x9c, 0x63, 0xa3, 0x44, 0xd6, 0xa2, 0xae, 0x8a, 0xa8, 0xe5, 0x1b, 0x7b, 0x90, 0xa4, 0xa8, 0x06, + 0x10, 0x5f, 0xcb, 0xca, 0x31, 0x50, 0x6c, 0x44, 0x61, 0x51, 0xad, 0xfe, 0xce, 0xb5, 0x1b, 0x91, + 0xab, 0xfe, 0x43, 0x96, 0x09, 0x77, 0xc8, 0x74, 0x71, 0xcf, 0x9a, 0xd4, 0x07, 0x4d, 0x30, 0xe1, + 0x0d, 0x6a, 0x7f, 0x03, 0xc6, 0x3b, 0xd5, 0xd4, 0x31, 0x7f, 0x68, 0xff, 0x32, 0x5b, 0xa3, 0xbd, + 0x80, 0xbf, 0x4d, 0xc8, 0xb5, 0x2a, 0x0b, 0xa0, 0x31, 0x75, 0x80, 0x22, 0xeb, 0x02, 0x5c, 0xdd, + 0x77, 0x0b, 0x44, 0xd6, 0xd6, 0xcf, 0x06, 0x70, 0xf4, 0xe9, 0x90, 0xb2, 0x23, 0x47, 0xa7, 0xdb, + 0x84, 0x82, 0x65, 0xe3, 0xe5, 0xeb, 0x72, 0xdf, 0xe8, 0x29, 0x9a, 0xd7, 0x48, 0x1a, 0x40, 0x83, + 0x22, 0xca, 0xc5, 0x57, 0x86, 0xe5, 0x2f, 0x63, 0x3b, 0x2f, 0xb6, 0xb6, 0x14, 0xea, 0xed, 0x18, + 0xd7, 0x03, 0xdd, 0x84, 0x04, 0x5a, 0x27, 0x4a, 0xe8, 0xbf, 0xa7, 0x33, 0x79, 0x66, 0x13, 0x88, + 0xd6, 0x99, 0x1f, 0xe3, 0x9b, 0x0d, 0x93, 0xde, 0xbb, 0x41, 0x70, 0x0b, 0x41, 0xf9, 0x0a, 0x15, + 0xc4, 0xd5, 0x26, 0x25, 0x02, 0x35, 0xdd, 0xcd, 0x67, 0x76, 0xfc, 0x77, 0xbc, 0x97, 0xe7, 0xa4, + 0x17, 0xeb, 0xcb, 0x31, 0x60, 0x0d, 0x01, 0xe5, 0x7f, 0x32, 0x16, 0x2a, 0x85, 0x60, 0xca, 0xcc, + 0x7e, 0x27, 0xa0, 0x96, 0xd3, 0x7a, 0x1a, 0x86, 0x95, 0x2e, 0xc7, 0x1b, 0xd8, 0x9a, 0x3e, 0x9a, + 0x30, 0xa2, 0xa2, 0x61, 0x62, 0x98, 0x4d, 0x77, 0x40, 0xf8, 0x11, 0x93, 0xe8, 0x23, 0x8e, 0x61, + 0xf6, 0xb5, 0xb9, 0x84, 0xd4, 0xd3, 0xdf, 0xa0, 0x33, 0xc1, 0xbb, 0x7e, 0x4f, 0x00, 0x37, 0xfe, + 0xbf, 0x40, 0x6d, 0x91, 0xc0, 0xdc, 0xcf, 0x32, 0xac, 0xf4, 0x23, 0xcf, 0xa1, 0xe7, 0x07, 0x10, + 0x10, 0xd3, 0xf2, 0x70, 0x12, 0x1b, 0x49, 0x3c, 0xe8, 0x50, 0x54, 0xef, 0x58, 0xba, 0xda, 0x42, + 0x31, 0x01, 0x38, 0xfe, 0x08, 0x1a, 0xdb, 0x04, 0xe2, 0xbd, 0x90, 0x1f, 0x2f, 0x13, 0x45, 0x8b, + 0x3d, 0x67, 0x58, 0x15, 0x81, 0x97, 0x10, 0x7c, 0x14, 0xeb, 0xb1, 0x93, 0x23, 0x0c, 0xd1, 0x15, + 0x73, 0x80, 0xaa, 0x79, 0xca, 0xe1, 0x37, 0x4a, 0x7c, 0x1e, 0x5b, 0xbc, 0xb8, 0x0e, 0xe2, 0x3e, + 0x06, 0xeb, 0xfd, 0xe2, 0x06, 0xbf, 
0xb0, 0xfc, 0xbc, 0x0e, 0xdc, 0x4e, 0xbe, 0xc3, 0x09, 0x66, + 0x1b, 0xdd, 0x90, 0x8d, 0x53, 0x2e, 0xb0, 0xc6, 0xad, 0xc3, 0x8b, 0x7c, 0xa7, 0x33, 0x1d, 0xce, + 0x8d, 0xfc, 0xe3, 0x9a, 0xb7, 0x1e, 0x7c, 0x32, 0xd3, 0x18, 0xd1, 0x36, 0xb6, 0x10, 0x06, 0x71, + 0xa1, 0xae, 0x6a, 0x66, 0x00, 0xe3, 0x89, 0x9f, 0x31, 0xf0, 0xee, 0xd1, 0x9e, 0x34, 0x17, 0xd1, + 0x34, 0xb9, 0x0c, 0x90, 0x58, 0xf8, 0x63, 0x2c, 0x79, 0x8d, 0x44, 0x90, 0xda, 0x49, 0x87, 0x30, + 0x7c, 0xba, 0x92, 0x2d, 0x61, 0xc3, 0x98, 0x05, 0xd0, 0x72, 0xb5, 0x89, 0xbd, 0x52, 0xfd, 0xf1, + 0xe8, 0x62, 0x15, 0xc2, 0xd5, 0x4e, 0x66, 0x70, 0xe0, 0x73, 0x83, 0xa2, 0x7b, 0xbf, 0xfb, 0x5a, + 0xdd, 0xf4, 0x7d, 0x66, 0xaa, 0x85, 0xa0, 0xc6, 0xf9, 0xf3, 0x2e, 0x59, 0xd8, 0x5a, 0x44, 0xdd, + 0x5d, 0x3b, 0x22, 0xdc, 0x2b, 0xe8, 0x09, 0x19, 0xb4, 0x90, 0x43, 0x7a, 0xe4, 0xf3, 0x6a, 0x0a, + 0xe5, 0x5e, 0xdf, 0x1d, 0x0b, 0x5c, 0xb4, 0xe9, 0xa3, 0xec, 0xab, 0xee, 0x93, 0xdf, 0xc6, 0xe3, + 0x8d, 0x20, 0x9d, 0x0f, 0xa6, 0x53, 0x6d, 0x27, 0xa5, 0xd6, 0xfb, 0xb1, 0x76, 0x41, 0xcd, 0xe2, + 0x75, 0x25, 0xd6, 0x10, 0x93, 0xf1, 0xb2, 0x80, 0x72, 0xd1, 0x11, 0xb2, 0xb4, 0xae, 0x5f, 0x89, + 0xd5, 0x97, 0x4e, 0xe1, 0x2e, 0x5c, 0xf7, 0xd5, 0xda, 0x4d, 0x6a, 0x31, 0x12, 0x30, 0x41, 0xf3, + 0x3e, 0x61, 0x40, 0x7e, 0x76, 0xcf, 0xfc, 0xdc, 0xfd, 0x7e, 0x19, 0xba, 0x58, 0xcf, 0x4b, 0x53, + 0x6f, 0x4c, 0x49, 0x38, 0xae, 0x79, 0x32, 0x4d, 0xc4, 0x02, 0x89, 0x4b, 0x44, 0xfa, 0xf8, 0xaf, + 0xba, 0xb3, 0x52, 0x82, 0xab, 0x65, 0x9d, 0x13, 0xc9, 0x3f, 0x70, 0x41, 0x2e, 0x85, 0xcb, 0x19, + 0x9a, 0x37, 0xdd, 0xec, 0x60, 0x05, 0x45, 0x47, 0x3c, 0xfb, 0x5a, 0x05, 0xe0, 0x8d, 0x0b, 0x20, + 0x99, 0x73, 0xb2, 0x17, 0x2b, 0x4d, 0x21, 0xfb, 0x69, 0x74, 0x5a, 0x26, 0x2c, 0xcd, 0xe9, 0x6b, + 0xa1, 0x8b, 0x2f, 0xaa, 0x74, 0x5b, 0x6f, 0xe1, 0x89, 0xcf, 0x77, 0x2a, 0x9f, 0x84, 0xcb, 0xfc, ]; const INITIAL_PACKET_V1: &[u8] = &[ @@ -265,9 +263,9 @@ fn make_server(v: Version) -> Connection { fn process_client_initial(v: Version, packet: &[u8]) { let mut server = 
make_server(v); - let dgram = Datagram::new(addr(), addr(), packet); + let dgram = datagram(packet.to_vec()); assert_eq!(*server.state(), State::Init); - let out = server.process(Some(dgram), now()); + let out = server.process(Some(&dgram), now()); assert_eq!(*server.state(), State::Handshaking); assert!(out.dgram().is_some()); } diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 6dd3d263cd..b8877b946d 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -4,17 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::use_self)] - mod common; use common::{ apply_header_protection, decode_initial_header, initial_aead_and_hp, remove_header_protection, }; -use neqo_common::{Datagram, Decoder, Role}; -use neqo_transport::{ConnectionParameters, State, Version}; -use test_fixture::{self, default_client, default_server, new_client, now, split_datagram}; +use neqo_common::{Datagram, Decoder, Encoder, Role}; +use neqo_transport::{ConnectionError, ConnectionParameters, Error, State, Version}; +use test_fixture::{default_client, default_server, new_client, now, split_datagram}; #[test] fn connect() { @@ -26,33 +23,35 @@ fn truncate_long_packet() { let mut client = default_client(); let mut server = default_server(); - let dgram = client.process(None, now()).dgram(); - assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_some()); + let out = client.process(None, now()); + assert!(out.as_dgram_ref().is_some()); + let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // This will truncate the Handshake packet from the server. - let dupe = dgram.as_ref().unwrap().clone(); + let dupe = out.as_dgram_ref().unwrap().clone(); // Count the padding in the packet, plus 1. 
let tail = dupe.iter().rev().take_while(|b| **b == 0).count() + 1; let truncated = Datagram::new( dupe.source(), dupe.destination(), + dupe.tos(), + dupe.ttl(), &dupe[..(dupe.len() - tail)], ); - let hs_probe = client.process(Some(truncated), now()).dgram(); + let hs_probe = client.process(Some(&truncated), now()).dgram(); assert!(hs_probe.is_some()); // Now feed in the untruncated packet. - let dgram = client.process(dgram, now()).dgram(); - assert!(dgram.is_some()); // Throw this ACK away. + let out = client.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); // Throw this ACK away. assert!(test_fixture::maybe_authenticate(&mut client)); - let dgram = client.process(None, now()).dgram(); - assert!(dgram.is_some()); + let out = client.process(None, now()); + assert!(out.as_dgram_ref().is_some()); assert!(client.state().connected()); - let dgram = server.process(dgram, now()).dgram(); - assert!(dgram.is_some()); + let out = server.process(out.as_dgram_ref(), now()); + assert!(out.as_dgram_ref().is_some()); assert!(server.state().connected()); } @@ -67,12 +66,12 @@ fn reorder_server_initial() { ); let mut server = default_server(); - let client_initial = client.process_output(now()).dgram(); + let client_initial = client.process_output(now()); let (_, client_dcid, _, _) = - decode_initial_header(client_initial.as_ref().unwrap(), Role::Client); + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); let client_dcid = client_dcid.to_owned(); - let server_packet = server.process(client_initial, now()).dgram(); + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); let (server_initial, server_hs) = split_datagram(server_packet.as_ref().unwrap()); let (protected_header, _, _, payload) = decode_initial_header(&server_initial, Role::Server); @@ -107,21 +106,169 @@ fn reorder_server_initial() { let reordered = Datagram::new( server_initial.source(), server_initial.destination(), + 
server_initial.tos(), + server_initial.ttl(), packet, ); // Now a connection can be made successfully. // Though we modified the server's Initial packet, we get away with it. // TLS only authenticates the content of the CRYPTO frame, which was untouched. - client.process_input(reordered, now()); - client.process_input(server_hs.unwrap(), now()); + client.process_input(&reordered, now()); + client.process_input(&server_hs.unwrap(), now()); assert!(test_fixture::maybe_authenticate(&mut client)); - let finished = client.process_output(now()).dgram(); + let finished = client.process_output(now()); assert_eq!(*client.state(), State::Connected); - let done = server.process(finished, now()).dgram(); + let done = server.process(finished.as_dgram_ref(), now()); assert_eq!(*server.state(), State::Confirmed); - client.process_input(done.unwrap(), now()); + client.process_input(done.as_dgram_ref().unwrap(), now()); assert_eq!(*client.state(), State::Confirmed); } + +fn set_payload(server_packet: &Option, client_dcid: &[u8], payload: &[u8]) -> Datagram { + let (server_initial, _server_hs) = split_datagram(server_packet.as_ref().unwrap()); + let (protected_header, _, _, orig_payload) = + decode_initial_header(&server_initial, Role::Server); + + // Now decrypt the packet. + let (aead, hp) = initial_aead_and_hp(client_dcid, Role::Server); + let (mut header, pn) = remove_header_protection(&hp, protected_header, orig_payload); + assert_eq!(pn, 0); + // Re-encode the packet number as four bytes, so we have enough material for the header + // protection sample if payload is empty. + let pn_pos = header.len() - 2; + header[pn_pos] = u8::try_from(4 + aead.expansion()).unwrap(); + header.resize(header.len() + 3, 0); + header[0] |= 0b0000_0011; // Set the packet number length to 4. + + // And build a packet containing the given payload. 
+ let mut packet = header.clone(); + packet.resize(header.len() + payload.len() + aead.expansion(), 0); + aead.encrypt(pn, &header, payload, &mut packet[header.len()..]) + .unwrap(); + apply_header_protection(&hp, &mut packet, protected_header.len()..header.len()); + Datagram::new( + server_initial.source(), + server_initial.destination(), + server_initial.tos(), + server_initial.ttl(), + packet, + ) +} + +/// Test that the stack treats a packet without any frames as a protocol violation. +#[test] +fn packet_without_frames() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[]); + client.process_input(&modified, now()); + assert_eq!( + client.state(), + &State::Closed(ConnectionError::Transport(Error::ProtocolViolation)) + ); +} + +/// Test that the stack permits a packet containing only padding. +#[test] +fn packet_with_only_padding() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client); + + let server_packet = server.process(client_initial.as_dgram_ref(), now()).dgram(); + let modified = set_payload(&server_packet, client_dcid, &[0]); + client.process_input(&modified, now()); + assert_eq!(client.state(), &State::WaitInitial); +} + +/// Overflow the crypto buffer. +#[allow(clippy::similar_names)] // For ..._scid and ..._dcid, which are fine. 
+#[test] +fn overflow_crypto() { + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + + let client_initial = client.process_output(now()).dgram(); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_ref().unwrap(), Role::Client); + let client_dcid = client_dcid.to_owned(); + + let server_packet = server.process(client_initial.as_ref(), now()).dgram(); + let (server_initial, _) = split_datagram(server_packet.as_ref().unwrap()); + + // Now decrypt the server packet to get AEAD and HP instances. + // We won't be using the packet, but making new ones. + let (aead, hp) = initial_aead_and_hp(&client_dcid, Role::Server); + let (_, server_dcid, server_scid, _) = decode_initial_header(&server_initial, Role::Server); + + // Send in 100 packets, each with 1000 bytes of crypto frame data each, + // eventually this will overrun the buffer we keep for crypto data. + let mut payload = Encoder::with_capacity(1024); + for pn in 0..100_u64 { + payload.truncate(0); + payload + .encode_varint(0x06_u64) // CRYPTO frame type. + .encode_varint(pn * 1000 + 1) // offset + .encode_varint(1000_u64); // length + let plen = payload.len(); + payload.pad_to(plen + 1000, 44); + + let mut packet = Encoder::with_capacity(1200); + packet + .encode_byte(0xc1) // Initial with packet number length of 2. 
+ .encode_uint(4, Version::Version1.wire_version()) + .encode_vec(1, server_dcid) + .encode_vec(1, server_scid) + .encode_vvec(&[]) // token + .encode_varint(u64::try_from(2 + payload.len() + aead.expansion()).unwrap()); // length + let pn_offset = packet.len(); + packet.encode_uint(2, pn); + + let mut packet = Vec::from(packet); + let header = packet.clone(); + packet.resize(header.len() + payload.len() + aead.expansion(), 0); + aead.encrypt(pn, &header, payload.as_ref(), &mut packet[header.len()..]) + .unwrap(); + apply_header_protection(&hp, &mut packet, pn_offset..(pn_offset + 2)); + packet.resize(1200, 0); // Initial has to be 1200 bytes! + + let dgram = Datagram::new( + server_initial.source(), + server_initial.destination(), + server_initial.tos(), + server_initial.ttl(), + packet, + ); + client.process_input(&dgram, now()); + if let State::Closing { error, .. } = client.state() { + assert!( + matches!( + error, + ConnectionError::Transport(Error::CryptoBufferExceeded), + ), + "the connection need to abort on crypto buffer" + ); + assert!(pn > 64, "at least 64000 bytes of data is buffered"); + return; + } + } + panic!("Was not able to overflow the crypto buffer"); +} diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index 3f9d2240a0..27e5a83cd6 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -4,19 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] - -mod sim; +use std::{ops::Range, time::Duration}; use neqo_transport::{ConnectionError, ConnectionParameters, Error, State}; -use sim::{ - connection::{ConnectionNode, ReachState, ReceiveData, SendData}, - network::{Delay, Drop, TailDrop}, - Simulator, +use test_fixture::{ + boxed, + sim::{ + connection::{ConnectionNode, ReachState, ReceiveData, SendData}, + network::{Delay, Drop, TailDrop}, + Simulator, + }, + simulate, }; -use std::ops::Range; -use std::time::Duration; /// The amount of transfer. Much more than this takes a surprising amount of time. const TRANSFER_AMOUNT: usize = 1 << 20; // 1M @@ -32,26 +31,28 @@ const fn weeks(m: u32) -> Duration { simulate!( connect_direct, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), ] ); simulate!( idle_timeout, [ - ConnectionNode::default_client(boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ]), - ConnectionNode::default_server(boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ]), + ConnectionNode::default_client(boxed![ReachState::new(State::Closed( + ConnectionError::Transport(Error::IdleTimeout) + ))]), + ConnectionNode::default_server(boxed![ReachState::new(State::Closed( + ConnectionError::Transport(Error::IdleTimeout) + ))]), ] ); @@ -60,25 +61,21 @@ simulate!( [ ConnectionNode::new_client( ConnectionParameters::default().idle_timeout(weeks(1000)), - boxed![ - ReachState::new(State::Confirmed), - 
ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ] + boxed![ReachState::new(State::Confirmed),], + boxed![ReachState::new(State::Closed(ConnectionError::Transport( + Error::IdleTimeout + )))] ), - Delay::new(weeks(150)..weeks(150)), + Delay::new(weeks(6)..weeks(6)), Drop::percentage(10), ConnectionNode::new_server( ConnectionParameters::default().idle_timeout(weeks(1000)), - boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ] + boxed![ReachState::new(State::Confirmed),], + boxed![ReachState::new(State::Closed(ConnectionError::Transport( + Error::IdleTimeout + )))] ), - Delay::new(weeks(100)..weeks(100)), + Delay::new(weeks(8)..weeks(8)), Drop::percentage(10), ], ); @@ -94,9 +91,17 @@ simulate!( simulate!( connect_fixed_rtt, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), Delay::new(DELAY..DELAY), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), Delay::new(DELAY..DELAY), ], ); @@ -104,22 +109,38 @@ simulate!( simulate!( connect_taildrop_jitter, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - TailDrop::dsl_uplink(), - Delay::new(ZERO..JITTER), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), TailDrop::dsl_downlink(), Delay::new(ZERO..JITTER), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + TailDrop::dsl_uplink(), + Delay::new(ZERO..JITTER), ], ); simulate!( connect_taildrop, [ - 
ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - TailDrop::dsl_uplink(), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), TailDrop::dsl_downlink(), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + TailDrop::dsl_uplink(), ], ); @@ -139,9 +160,9 @@ simulate!( transfer_taildrop, [ ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_uplink(), - ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), TailDrop::dsl_downlink(), + ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), + TailDrop::dsl_uplink(), ], ); @@ -149,10 +170,10 @@ simulate!( transfer_taildrop_jitter, [ ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_uplink(), + TailDrop::dsl_downlink(), Delay::new(ZERO..JITTER), ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_downlink(), + TailDrop::dsl_uplink(), Delay::new(ZERO..JITTER), ], ); diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 51cc442ddd..36eff71e7b 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -4,12 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] -#![cfg(not(feature = "fuzzing"))] +#![cfg(not(feature = "disable-encryption"))] mod common; +use std::{ + mem, + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Duration, +}; + use common::{ apply_header_protection, connected_server, decode_initial_header, default_server, generate_ticket, initial_aead_and_hp, remove_header_protection, @@ -17,11 +21,7 @@ use common::{ use neqo_common::{hex_with_len, qdebug, qtrace, Datagram, Encoder, Role}; use neqo_crypto::AuthenticationStatus; use neqo_transport::{server::ValidateAddress, ConnectionError, Error, State, StreamType}; -use std::convert::TryFrom; -use std::mem; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::time::Duration; -use test_fixture::{self, addr, assertions, default_client, now, split_datagram}; +use test_fixture::{assertions, datagram, default_client, now, split_datagram}; #[test] fn retry_basic() { @@ -31,21 +31,21 @@ fn retry_basic() { let dgram = client.process(None, now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now()).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Initial w/token assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Initial, HS + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Initial, HS assert!(dgram.is_some()); - mem::drop(client.process(dgram, now()).dgram()); // Ingest, drop any ACK. + mem::drop(client.process(dgram.as_ref(), now()).dgram()); // Ingest, drop any ACK. 
client.authenticated(AuthenticationStatus::Ok, now()); let dgram = client.process(None, now()).dgram(); // Send Finished assert!(dgram.is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // (done) assert!(dgram.is_some()); // Note that this packet will be dropped... connected_server(&mut server); } @@ -62,10 +62,10 @@ fn implicit_rtt_retry() { let dgram = client.process(None, now).dgram(); now += RTT / 2; - let dgram = server.process(dgram, now).dgram(); + let dgram = server.process(dgram.as_ref(), now).dgram(); assertions::assert_retry(dgram.as_ref().unwrap()); now += RTT / 2; - client.process_input(dgram.unwrap(), now); + client.process_input(&dgram.unwrap(), now); assert_eq!(client.stats().rtt, RTT); } @@ -79,16 +79,16 @@ fn retry_expired() { let dgram = client.process(None, now).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now).dgram(); // Initial w/token assert!(dgram.is_some()); now += Duration::from_secs(60); // Too long for Retry. 
- let dgram = server.process(dgram, now).dgram(); // Initial, HS + let dgram = server.process(dgram.as_ref(), now).dgram(); // Initial, HS assert!(dgram.is_none()); } @@ -108,23 +108,23 @@ fn retry_0rtt() { let dgram = client.process(None, now()).dgram(); // Initial w/0-RTT assert!(dgram.is_some()); assertions::assert_coalesced_0rtt(dgram.as_ref().unwrap()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); // After retry, there should be a token and still coalesced 0-RTT. - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); assertions::assert_coalesced_0rtt(dgram.as_ref().unwrap()); - let dgram = server.process(dgram, now()).dgram(); // Initial, HS + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Initial, HS assert!(dgram.is_some()); - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Note: the client doesn't need to authenticate the server here // as there is no certificate; authentication is based on the ticket. 
assert!(dgram.is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // (done) assert!(dgram.is_some()); connected_server(&mut server); assert!(client.tls_info().unwrap().resumed()); @@ -136,22 +136,28 @@ fn retry_different_ip() { server.set_validation(ValidateAddress::Always); let mut client = default_client(); - let dgram = client.process(None, now()).dgram(); // Initial + let dgram = client.process(None.as_ref(), now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now()).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Initial w/token assert!(dgram.is_some()); // Change the source IP on the address from the client. let dgram = dgram.unwrap(); let other_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); let other_addr = SocketAddr::new(other_v4, 443); - let from_other = Datagram::new(other_addr, dgram.destination(), &dgram[..]); - let dgram = server.process(Some(from_other), now()).dgram(); + let from_other = Datagram::new( + other_addr, + dgram.destination(), + dgram.tos(), + dgram.ttl(), + &dgram[..], + ); + let dgram = server.process(Some(&from_other), now()).dgram(); assert!(dgram.is_none()); } @@ -171,8 +177,14 @@ fn new_token_different_ip() { // Now rewrite the source address. 
let d = dgram.unwrap(); let src = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), d.source().port()); - let dgram = Some(Datagram::new(src, d.destination(), &d[..])); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = Some(Datagram::new( + src, + d.destination(), + d.tos(), + d.ttl(), + &d[..], + )); + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); } @@ -196,8 +208,14 @@ fn new_token_expired() { let the_future = now() + Duration::from_secs(60 * 60 * 24 * 30); let d = dgram.unwrap(); let src = SocketAddr::new(d.source().ip(), d.source().port() + 1); - let dgram = Some(Datagram::new(src, d.destination(), &d[..])); - let dgram = server.process(dgram, the_future).dgram(); // Retry + let dgram = Some(Datagram::new( + src, + d.destination(), + d.tos(), + d.ttl(), + &d[..], + )); + let dgram = server.process(dgram.as_ref(), the_future).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); } @@ -211,32 +229,32 @@ fn retry_after_initial() { let cinit = client.process(None, now()).dgram(); // Initial assert!(cinit.is_some()); - let server_flight = server.process(cinit.clone(), now()).dgram(); // Initial + let server_flight = server.process(cinit.as_ref(), now()).dgram(); // Initial assert!(server_flight.is_some()); // We need to have the client just process the Initial. let (server_initial, _other) = split_datagram(server_flight.as_ref().unwrap()); - let dgram = client.process(Some(server_initial), now()).dgram(); + let dgram = client.process(Some(&server_initial), now()).dgram(); assert!(dgram.is_some()); assert!(*client.state() != State::Connected); - let retry = retry_server.process(cinit, now()).dgram(); // Retry! + let retry = retry_server.process(cinit.as_ref(), now()).dgram(); // Retry! 
assert!(retry.is_some()); assertions::assert_retry(retry.as_ref().unwrap()); // The client should ignore the retry. - let junk = client.process(retry, now()).dgram(); + let junk = client.process(retry.as_ref(), now()).dgram(); assert!(junk.is_none()); // Either way, the client should still be able to process the server flight and connect. - let dgram = client.process(server_flight, now()).dgram(); + let dgram = client.process(server_flight.as_ref(), now()).dgram(); assert!(dgram.is_some()); // Drop this one. assert!(test_fixture::maybe_authenticate(&mut client)); let dgram = client.process(None, now()).dgram(); assert!(dgram.is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) + let dgram = server.process(dgram.as_ref(), now()).dgram(); // (done) assert!(dgram.is_some()); connected_server(&mut server); } @@ -249,7 +267,7 @@ fn retry_bad_integrity() { let dgram = client.process(None, now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); let retry = &dgram.as_ref().unwrap(); @@ -257,10 +275,16 @@ fn retry_bad_integrity() { let mut tweaked = retry.to_vec(); tweaked[retry.len() - 1] ^= 0x45; // damage the auth tag - let tweaked_packet = Datagram::new(retry.source(), retry.destination(), tweaked); + let tweaked_packet = Datagram::new( + retry.source(), + retry.destination(), + retry.tos(), + retry.ttl(), + tweaked, + ); // The client should ignore this packet. - let dgram = client.process(Some(tweaked_packet), now()).dgram(); + let dgram = client.process(Some(&tweaked_packet), now()).dgram(); assert!(dgram.is_none()); } @@ -274,12 +298,14 @@ fn retry_bad_token() { // Send a retry to one server, then replay it to the other. 
let client_initial1 = client.process(None, now()).dgram(); assert!(client_initial1.is_some()); - let retry = retry_server.process(client_initial1, now()).dgram(); + let retry = retry_server + .process(client_initial1.as_ref(), now()) + .dgram(); assert!(retry.is_some()); - let client_initial2 = client.process(retry, now()).dgram(); + let client_initial2 = client.process(retry.as_ref(), now()).dgram(); assert!(client_initial2.is_some()); - let dgram = server.process(client_initial2, now()).dgram(); + let dgram = server.process(client_initial2.as_ref(), now()).dgram(); assert!(dgram.is_none()); } @@ -300,17 +326,15 @@ fn retry_after_pto() { // Let PTO fire on the client and then let it exhaust its PTO packets. now += Duration::from_secs(1); - let pto1 = client.process(None, now).dgram(); - assert!(pto1.unwrap().len() >= 1200); - let pto2 = client.process(None, now).dgram(); - assert!(pto2.unwrap().len() >= 1200); + let pto = client.process(None, now).dgram(); + assert!(pto.unwrap().len() >= 1200); let cb = client.process(None, now).callback(); assert_ne!(cb, Duration::new(0, 0)); - let retry = server.process(ci, now).dgram(); + let retry = server.process(ci.as_ref(), now).dgram(); assertions::assert_retry(retry.as_ref().unwrap()); - let ci2 = client.process(retry, now).dgram(); + let ci2 = client.process(retry.as_ref(), now).dgram(); assert!(ci2.unwrap().len() >= 1200); } @@ -322,12 +346,12 @@ fn vn_after_retry() { let dgram = client.process(None, now()).dgram(); // Initial assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_retry(dgram.as_ref().unwrap()); - let dgram = client.process(dgram, now()).dgram(); // Initial w/token + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Initial w/token assert!(dgram.is_some()); let mut encoder = Encoder::default(); @@ -336,10 +360,10 @@ fn vn_after_retry() { 
encoder.encode_vec(1, &client.odcid().unwrap()[..]); encoder.encode_vec(1, &[]); encoder.encode_uint(4, 0x5a5a_6a6a_u64); - let vn = Datagram::new(addr(), addr(), encoder); + let vn = datagram(encoder.into()); assert_ne!( - client.process(Some(vn), now()).callback(), + client.process(Some(&vn), now()).callback(), Duration::from_secs(0) ); } @@ -365,9 +389,11 @@ fn mitm_retry() { // Trigger initial and a second client Initial. let client_initial1 = client.process(None, now()).dgram(); assert!(client_initial1.is_some()); - let retry = retry_server.process(client_initial1, now()).dgram(); + let retry = retry_server + .process(client_initial1.as_ref(), now()) + .dgram(); assert!(retry.is_some()); - let client_initial2 = client.process(retry, now()).dgram(); + let client_initial2 = client.process(retry.as_ref(), now()).dgram(); assert!(client_initial2.is_some()); // Now to start the epic process of decrypting the packet, @@ -421,18 +447,20 @@ fn mitm_retry() { let new_datagram = Datagram::new( client_initial2.source(), client_initial2.destination(), + client_initial2.tos(), + client_initial2.ttl(), notoken_packet, ); qdebug!("passing modified Initial to the main server"); - let dgram = server.process(Some(new_datagram), now()).dgram(); + let dgram = server.process(Some(&new_datagram), now()).dgram(); assert!(dgram.is_some()); - let dgram = client.process(dgram, now()).dgram(); // Generate an ACK. + let dgram = client.process(dgram.as_ref(), now()).dgram(); // Generate an ACK. 
assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_none()); assert!(test_fixture::maybe_authenticate(&mut client)); - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); // Client sending CLOSE_CONNECTIONs assert!(matches!( *client.state(), diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index fe03b2df1d..7388e0fee7 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -4,16 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] - mod common; +use std::{cell::RefCell, mem, net::SocketAddr, rc::Rc, time::Duration}; + use common::{ apply_header_protection, connect, connected_server, decode_initial_header, default_server, find_ticket, generate_ticket, initial_aead_and_hp, new_server, remove_header_protection, }; - use neqo_common::{qtrace, Datagram, Decoder, Encoder, Role}; use neqo_crypto::{ generate_ech_keys, AllowZeroRtt, AuthenticationStatus, ZeroRttCheckResult, ZeroRttChecker, @@ -23,16 +21,16 @@ use neqo_transport::{ Connection, ConnectionError, ConnectionParameters, Error, Output, State, StreamType, Version, }; use test_fixture::{ - self, assertions, default_client, new_client, now, split_datagram, + assertions, datagram, default_client, new_client, now, split_datagram, CountingConnectionIdGenerator, }; -use std::{cell::RefCell, convert::TryFrom, mem, net::SocketAddr, rc::Rc, time::Duration}; - /// Take a pair of connections in any state and complete the handshake. /// The `datagram` argument is a packet that was received from the server. /// See `connect` for what this returns. +/// /// # Panics +/// /// Only when the connection fails. 
pub fn complete_connection( client: &mut Connection, @@ -47,8 +45,8 @@ pub fn complete_connection( }; while !is_done(client) { _ = test_fixture::maybe_authenticate(client); - let out = client.process(datagram, now()); - let out = server.process(out.dgram(), now()); + let out = client.process(datagram.as_ref(), now()); + let out = server.process(out.as_dgram_ref(), now()); datagram = out.dgram(); } @@ -109,11 +107,11 @@ fn connect_single_version_server() { if client.version() != version { // Run the version negotiation exchange if necessary. - let dgram = client.process_output(now()).dgram(); - assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let out = client.process_output(now()); + assert!(out.as_dgram_ref().is_some()); + let dgram = server.process(out.as_dgram_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); } let server_conn = connect(&mut client, &mut server); @@ -133,14 +131,14 @@ fn duplicate_initial() { let mut client = default_client(); assert_eq!(*client.state(), State::Init); - let initial = client.process(None, now()).dgram(); - assert!(initial.is_some()); + let initial = client.process(None, now()); + assert!(initial.as_dgram_ref().is_some()); // The server should ignore a packets with the same remote address and // destination connection ID as an existing connection attempt. 
- let server_initial = server.process(initial.clone(), now()).dgram(); + let server_initial = server.process(initial.as_dgram_ref(), now()).dgram(); assert!(server_initial.is_some()); - let dgram = server.process(initial, now()).dgram(); + let dgram = server.process(initial.as_dgram_ref(), now()).dgram(); assert!(dgram.is_none()); assert_eq!(server.active_connections().len(), 1); @@ -157,14 +155,16 @@ fn duplicate_initial_new_path() { let other = Datagram::new( SocketAddr::new(initial.source().ip(), initial.source().port() ^ 23), initial.destination(), + initial.tos(), + initial.ttl(), &initial[..], ); // The server should respond to both as these came from different addresses. - let dgram = server.process(Some(other), now()).dgram(); + let dgram = server.process(Some(&other), now()).dgram(); assert!(dgram.is_some()); - let server_initial = server.process(Some(initial), now()).dgram(); + let server_initial = server.process(Some(&initial), now()).dgram(); assert!(server_initial.is_some()); assert_eq!(server.active_connections().len(), 2); @@ -177,16 +177,20 @@ fn different_initials_same_path() { let mut client1 = default_client(); let mut client2 = default_client(); - let client_initial1 = client1.process(None, now()).dgram(); - assert!(client_initial1.is_some()); - let client_initial2 = client2.process(None, now()).dgram(); - assert!(client_initial2.is_some()); + let client_initial1 = client1.process(None, now()); + assert!(client_initial1.as_dgram_ref().is_some()); + let client_initial2 = client2.process(None, now()); + assert!(client_initial2.as_dgram_ref().is_some()); // The server should respond to both as these came from different addresses. 
- let server_initial1 = server.process(client_initial1, now()).dgram(); + let server_initial1 = server + .process(client_initial1.as_dgram_ref(), now()) + .dgram(); assert!(server_initial1.is_some()); - let server_initial2 = server.process(client_initial2, now()).dgram(); + let server_initial2 = server + .process(client_initial2.as_dgram_ref(), now()) + .dgram(); assert!(server_initial2.is_some()); assert_eq!(server.active_connections().len(), 2); @@ -199,10 +203,10 @@ fn same_initial_after_connected() { let mut server = default_server(); let mut client = default_client(); - let client_initial = client.process(None, now()).dgram(); - assert!(client_initial.is_some()); + let client_initial = client.process(None, now()); + assert!(client_initial.as_dgram_ref().is_some()); - let server_initial = server.process(client_initial.clone(), now()).dgram(); + let server_initial = server.process(client_initial.as_dgram_ref(), now()).dgram(); assert!(server_initial.is_some()); complete_connection(&mut client, &mut server, server_initial); // This removes the connection from the active set until something happens to it. @@ -210,7 +214,7 @@ fn same_initial_after_connected() { // Now make a new connection using the exact same initial as before. // The server should respond to an attempt to connect with the same Initial. - let dgram = server.process(client_initial, now()).dgram(); + let dgram = server.process(client_initial.as_dgram_ref(), now()).dgram(); assert!(dgram.is_some()); // The server should make a new connection object. 
assert_eq!(server.active_connections().len(), 1); @@ -231,8 +235,8 @@ fn drop_non_initial() { let mut bogus_data: Vec = header.into(); bogus_data.resize(1200, 66); - let bogus = Datagram::new(test_fixture::addr(), test_fixture::addr(), bogus_data); - assert!(server.process(Some(bogus), now()).dgram().is_none()); + let bogus = datagram(bogus_data); + assert!(server.process(Some(&bogus), now()).dgram().is_none()); } #[test] @@ -250,8 +254,8 @@ fn drop_short_initial() { let mut bogus_data: Vec = header.into(); bogus_data.resize(1199, 66); - let bogus = Datagram::new(test_fixture::addr(), test_fixture::addr(), bogus_data); - assert!(server.process(Some(bogus), now()).dgram().is_none()); + let bogus = datagram(bogus_data); + assert!(server.process(Some(&bogus), now()).dgram().is_none()); } /// Verify that the server can read 0-RTT properly. A more robust server would buffer @@ -296,12 +300,12 @@ fn zero_rtt() { let c4 = client_send(); // 0-RTT packets that arrive before the handshake get dropped. - mem::drop(server.process(Some(c2), now)); + mem::drop(server.process(Some(&c2), now)); assert!(server.active_connections().is_empty()); // Now handshake and let another 0-RTT packet in. - let shs = server.process(Some(c1), now).dgram(); - mem::drop(server.process(Some(c3), now)); + let shs = server.process(Some(&c1), now); + mem::drop(server.process(Some(&c3), now)); // The server will have received two STREAM frames now if it processed both packets. let active = server.active_connections(); assert_eq!(active.len(), 1); @@ -310,11 +314,11 @@ fn zero_rtt() { // Complete the handshake. As the client was pacing 0-RTT packets, extend the time // a little so that the pacer doesn't prevent the Finished from being sent. 
now += now - start_time; - let cfin = client.process(shs, now).dgram(); - mem::drop(server.process(cfin, now)); + let cfin = client.process(shs.as_dgram_ref(), now); + mem::drop(server.process(cfin.as_dgram_ref(), now)); // The server will drop this last 0-RTT packet. - mem::drop(server.process(Some(c4), now)); + mem::drop(server.process(Some(&c4), now)); let active = server.active_connections(); assert_eq!(active.len(), 1); assert_eq!(active[0].borrow().stats().frame_rx.stream, 2); @@ -332,21 +336,21 @@ fn new_token_0rtt() { let client_stream = client.stream_create(StreamType::UniDi).unwrap(); client.stream_send(client_stream, &[1, 2, 3]).unwrap(); - let dgram = client.process(None, now()).dgram(); // Initial w/0-RTT - assert!(dgram.is_some()); - assertions::assert_initial(dgram.as_ref().unwrap(), true); - assertions::assert_coalesced_0rtt(dgram.as_ref().unwrap()); - let dgram = server.process(dgram, now()).dgram(); // Initial - assert!(dgram.is_some()); - assertions::assert_initial(dgram.as_ref().unwrap(), false); + let out = client.process(None, now()); // Initial w/0-RTT + assert!(out.as_dgram_ref().is_some()); + assertions::assert_initial(out.as_dgram_ref().unwrap(), true); + assertions::assert_coalesced_0rtt(out.as_dgram_ref().unwrap()); + let out = server.process(out.as_dgram_ref(), now()); // Initial + assert!(out.as_dgram_ref().is_some()); + assertions::assert_initial(out.as_dgram_ref().unwrap(), false); - let dgram = client.process(dgram, now()).dgram(); + let dgram = client.process(out.as_dgram_ref(), now()); // Note: the client doesn't need to authenticate the server here // as there is no certificate; authentication is based on the ticket. 
- assert!(dgram.is_some()); + assert!(out.as_dgram_ref().is_some()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // (done) - assert!(dgram.is_some()); + let dgram = server.process(dgram.as_dgram_ref(), now()); // (done) + assert!(dgram.as_dgram_ref().is_some()); connected_server(&mut server); assert!(client.tls_info().unwrap().resumed()); } @@ -367,8 +371,14 @@ fn new_token_different_port() { // Now rewrite the source port, which should not change that the token is OK. let d = dgram.unwrap(); let src = SocketAddr::new(d.source().ip(), d.source().port() + 1); - let dgram = Some(Datagram::new(src, d.destination(), &d[..])); - let dgram = server.process(dgram, now()).dgram(); // Retry + let dgram = Some(Datagram::new( + src, + d.destination(), + d.tos(), + d.ttl(), + &d[..], + )); + let dgram = server.process(dgram.as_ref(), now()).dgram(); // Retry assert!(dgram.is_some()); assertions::assert_initial(dgram.as_ref().unwrap(), false); } @@ -422,10 +432,16 @@ fn bad_client_initial() { &mut ciphertext, (header_enc.len() - 1)..header_enc.len(), ); - let bad_dgram = Datagram::new(dgram.source(), dgram.destination(), ciphertext); + let bad_dgram = Datagram::new( + dgram.source(), + dgram.destination(), + dgram.tos(), + dgram.ttl(), + ciphertext, + ); // The server should reject this. - let response = server.process(Some(bad_dgram), now()); + let response = server.process(Some(&bad_dgram), now()); let close_dgram = response.dgram().unwrap(); // The resulting datagram might contain multiple packets, but each is small. let (initial_close, rest) = split_datagram(&close_dgram); @@ -439,7 +455,7 @@ fn bad_client_initial() { // The client should accept this new and stop trying to connect. // It will generate a CONNECTION_CLOSE first though. 
- let response = client.process(Some(close_dgram), now()).dgram(); + let response = client.process(Some(&close_dgram), now()).dgram(); assert!(response.is_some()); // The client will now wait out its closing period. let delay = client.process(None, now()).callback(); @@ -470,8 +486,14 @@ fn version_negotiation_ignored() { let dgram = client.process(None, now()).dgram().expect("a datagram"); let mut input = dgram.to_vec(); input[1] ^= 0x12; - let damaged = Datagram::new(dgram.source(), dgram.destination(), input.clone()); - let vn = server.process(Some(damaged), now()).dgram(); + let damaged = Datagram::new( + dgram.source(), + dgram.destination(), + dgram.tos(), + dgram.ttl(), + input.clone(), + ); + let vn = server.process(Some(&damaged), now()).dgram(); let mut dec = Decoder::from(&input[5..]); // Skip past version. let d_cid = dec.decode_vec(1).expect("client DCID").to_vec(); @@ -492,7 +514,7 @@ fn version_negotiation_ignored() { assert!(found, "valid version not found"); // Client ignores VN packet that contain negotiated version. - let res = client.process(Some(vn), now()); + let res = client.process(Some(&vn), now()); assert!(res.callback() > Duration::new(0, 120)); assert_eq!(client.state(), &State::WaitInitial); } @@ -512,9 +534,9 @@ fn version_negotiation() { // `connect()` runs a fixed exchange, so manually run the Version Negotiation. 
let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let sconn = connect(&mut client, &mut server); assert_eq!(client.version(), VN_VERSION); @@ -548,22 +570,22 @@ fn version_negotiation_and_compatible() { let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); assertions::assert_version(dgram.as_ref().unwrap(), ORIG_VERSION.wire_version()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let dgram = client.process(None, now()).dgram(); // ClientHello assertions::assert_version(dgram.as_ref().unwrap(), VN_VERSION.wire_version()); - let dgram = server.process(dgram, now()).dgram(); // ServerHello... + let dgram = server.process(dgram.as_ref(), now()).dgram(); // ServerHello... 
assertions::assert_version(dgram.as_ref().unwrap(), COMPAT_VERSION.wire_version()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); client.authenticated(AuthenticationStatus::Ok, now()); let dgram = client.process_output(now()).dgram(); assertions::assert_version(dgram.as_ref().unwrap(), COMPAT_VERSION.wire_version()); assert_eq!(*client.state(), State::Connected); - let dgram = server.process(dgram, now()).dgram(); // ACK + HANDSHAKE_DONE + NST - client.process_input(dgram.unwrap(), now()); + let dgram = server.process(dgram.as_ref(), now()).dgram(); // ACK + HANDSHAKE_DONE + NST + client.process_input(&dgram.unwrap(), now()); assert_eq!(*client.state(), State::Confirmed); let sconn = connected_server(&mut server); @@ -596,7 +618,7 @@ fn compatible_upgrade_resumption_and_vn() { server_conn.borrow_mut().send_ticket(now(), &[]).unwrap(); let dgram = server.process(None, now()).dgram(); - client.process_input(dgram.unwrap(), now()); // Consume ticket, ignore output. + client.process_input(&dgram.unwrap(), now()); // Consume ticket, ignore output. let ticket = find_ticket(&mut client); // This new server will reject the ticket, but it will also generate a VN packet. 
@@ -610,9 +632,9 @@ fn compatible_upgrade_resumption_and_vn() { let dgram = client.process_output(now()).dgram(); assert!(dgram.is_some()); assertions::assert_version(dgram.as_ref().unwrap(), COMPAT_VERSION.wire_version()); - let dgram = server.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); assertions::assert_vn(dgram.as_ref().unwrap()); - client.process_input(dgram.unwrap(), now()); + client.process_input(&dgram.unwrap(), now()); let server_conn = connect(&mut client, &mut server); assert_eq!(client.version(), RESUMPTION_VERSION); @@ -722,8 +744,8 @@ fn max_streams_after_0rtt_rejection() { client.enable_resumption(now(), &token).unwrap(); _ = client.stream_create(StreamType::BiDi).unwrap(); let dgram = client.process_output(now()).dgram(); - let dgram = server.process(dgram, now()).dgram(); - let dgram = client.process(dgram, now()).dgram(); + let dgram = server.process(dgram.as_ref(), now()).dgram(); + let dgram = client.process(dgram.as_ref(), now()).dgram(); assert!(dgram.is_some()); // We're far enough along to complete the test now. // Make sure that we can create MAX_STREAMS uni- and bidirectional streams. diff --git a/qns/.dockerignore b/qns/.dockerignore index acdb180198..2f10ed7b44 100644 --- a/qns/.dockerignore +++ b/qns/.dockerignore @@ -1 +1,2 @@ .last-update-* +/target/ diff --git a/qns/Dockerfile b/qns/Dockerfile index dd18af0e25..cdb192f203 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -1,67 +1,47 @@ FROM martenseemann/quic-network-simulator-endpoint:latest AS buildimage -# Which branch to build from. 
-ARG NEQO_BRANCH=main - RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates coreutils curl git make mercurial ssh \ - build-essential clang llvm libclang-dev lld \ - gyp ninja-build pkg-config zlib1g-dev python \ - && apt-get autoremove -y && apt-get clean -y \ - && rm -rf /var/lib/apt/lists/* + curl git mercurial coreutils \ + build-essential libclang-dev lld \ + gyp ninja-build zlib1g-dev python \ + && apt-get autoremove -y && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* + +ARG RUST_VERSION=stable ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH \ - RUST_VERSION=1.45.2 + PATH=/usr/local/cargo/bin:$PATH -RUN set -eux; \ - curl -sSLf "https://static.rust-lang.org/rustup/archive/1.22.1/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ - echo '49c96f3f74be82f4752b8bffcf81961dea5e6e94ce1ccba94435f12e871c3bdb *rustup-init' | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y -q --no-modify-path --profile minimal --default-toolchain "$RUST_VERSION"; \ - rm -f rustup-init; \ - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" +RUN curl https://sh.rustup.rs -sSf | \ + sh -s -- -y -q --no-modify-path --profile minimal --default-toolchain $RUST_VERSION ENV NSS_DIR=/nss \ NSPR_DIR=/nspr \ LD_LIBRARY_PATH=/dist/Release/lib RUN set -eux; \ - hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ - hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" + git clone --depth=1 https://github.com/nss-dev/nspr "$NSPR_DIR"; \ + git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" RUN "$NSS_DIR"/build.sh --static -Ddisable_tests=1 -o -# Copy the .git directory from the local clone so that it is possible to create -# an image that includes local updates. -RUN mkdir -p /neqo-reference -ADD . 
/neqo-reference -RUN if [ -d /neqo-reference/.git ]; then \ - source=/neqo-reference; \ - else \ - source=https://github.com/mozilla/neqo; \ - fi; \ - git clone --depth 1 --branch "$NEQO_BRANCH" "$source" /neqo; \ - rm -rf /neqo-reference +ADD . /neqo RUN set -eux; \ cd /neqo; \ - RUSTFLAGS="-g -C link-arg=-fuse-ld=lld" cargo build --release \ - --bin neqo-client --bin neqo-server; \ - cp target/release/neqo-client target; \ - cp target/release/neqo-server target; \ - rm -rf target/release + RUSTFLAGS="-C link-arg=-fuse-ld=lld" cargo build --release \ + --bin neqo-client --bin neqo-server # Copy only binaries to the final image to keep it small. FROM martenseemann/quic-network-simulator-endpoint:latest ENV LD_LIBRARY_PATH=/neqo/lib -COPY --from=buildimage /neqo/target/neqo-client /neqo/target/neqo-server /neqo/bin/ +COPY --from=buildimage /neqo/target/release/neqo-client /neqo/target/release/neqo-server /neqo/bin/ COPY --from=buildimage /dist/Release/lib/*.so /neqo/lib/ COPY --from=buildimage /dist/Release/bin/certutil /dist/Release/bin/pk12util /neqo/bin/ -COPY interop.sh /neqo/ +COPY qns/interop.sh /neqo/ RUN chmod +x /neqo/interop.sh ENTRYPOINT [ "/neqo/interop.sh" ] diff --git a/qns/interop.sh b/qns/interop.sh index 3c828ead9e..e216e49866 100755 --- a/qns/interop.sh +++ b/qns/interop.sh @@ -10,28 +10,27 @@ export PATH="${PATH}:/neqo/bin" [ -n "$QLOGDIR" ] case "$ROLE" in - client) - /wait-for-it.sh sim:57832 -s -t 30 - sleep 5 - RUST_LOG=debug RUST_BACKTRACE=1 neqo-client --cc cubic --qns-test "$TESTCASE" \ - --qlog-dir "$QLOGDIR" --output-dir /downloads $REQUESTS - ;; +client) + /wait-for-it.sh sim:57832 -s -t 30 + RUST_LOG=debug RUST_BACKTRACE=1 neqo-client --cc cubic --qns-test "$TESTCASE" \ + --qlog-dir "$QLOGDIR" --output-dir /downloads $REQUESTS 2> >(tee -i -a "/logs/$ROLE.log" >&2) + ;; - server) - DB=/neqo/db - CERT=cert - P12CERT=$(mktemp) - mkdir -p "$DB" - certutil -N -d "sql:$DB" --empty-password - openssl pkcs12 -export -nodes -in /certs/cert.pem 
-inkey /certs/priv.key \ - -name "$CERT" -passout pass: -out "$P12CERT" - pk12util -d "sql:$DB" -i "$P12CERT" -W '' - certutil -L -d "sql:$DB" -n "$CERT" - RUST_LOG=info RUST_BACKTRACE=1 neqo-server --cc cubic --qns-test "$TESTCASE" \ - --qlog-dir "$QLOGDIR" -d "$DB" -k "$CERT" [::]:443 - ;; +server) + DB=/neqo/db + CERT=cert + P12CERT=$(mktemp) + mkdir -p "$DB" + certutil -N -d "sql:$DB" --empty-password + openssl pkcs12 -export -nodes -in /certs/cert.pem -inkey /certs/priv.key \ + -name "$CERT" -passout pass: -out "$P12CERT" + pk12util -d "sql:$DB" -i "$P12CERT" -W '' + certutil -L -d "sql:$DB" -n "$CERT" + RUST_LOG=info RUST_BACKTRACE=1 neqo-server --cc cubic --qns-test "$TESTCASE" \ + --qlog-dir "$QLOGDIR" -d "$DB" -k "$CERT" '[::]:443' 2> >(tee -i -a "/logs/$ROLE.log" >&2) + ;; - *) - exit 1 - ;; +*) + exit 1 + ;; esac diff --git a/qns/update.sh b/qns/update.sh deleted file mode 100755 index 2243ee23a0..0000000000 --- a/qns/update.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -set -e - -if [[ "$1" == "-p" ]]; then - shift - push=1 -else - push=0 -fi - -branch="${1:-$(git rev-parse --abbrev-ref HEAD)}" -if [[ "$branch" == "main" ]]; then - tag="neqoquic/neqo-qns:latest" -else - tag="neqoquic/neqo-qns:${branch}" -fi - -cd "$(dirname "$0")" - -rev=$(git log -n 1 --format='format:%H') -if [[ "$rev" == "$(cat ".last-update-$branch")" ]]; then - echo "No change since $rev." - exit 0 -fi - -# This makes the local .git directory the source, allowing for the current -# build to be build and pushed. -[[ ! -e .git ]] || ! echo "Found .git directory. Script still active. Exiting." -trap 'rm -rf .git' EXIT -cp -R ../.git .git - -docker build -t "$tag" --build-arg NEQO_BRANCH="$branch" . 
-if [[ "$push" == "1" ]]; then - docker login - docker push "$tag" -fi - -echo "$rev" > ".last-update-$branch" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index f7c54fbf27..9de2a24cce 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,20 +1,29 @@ [package] name = "test-fixture" -version = "0.6.4" -authors = ["Martin Thomson "] -edition = "2018" -rust-version = "1.65.0" -license = "MIT/Apache-2.0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true + +[lints] +workspace = true [dependencies] +# Sync with https://searchfox.org/mozilla-central/source/Cargo.lock 2024-02-08 +log = { version = "0.4", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -log = {version = "0.4.0", default-features = false} -lazy_static = "1.3.0" +qlog = { version = "0.12", default-features = false } [features] -default = ["deny-warnings"] -deny-warnings = [] +bench = [] + +[lib] +# See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false diff --git a/test-fixture/src/assertions.rs b/test-fixture/src/assertions.rs index 339f11df64..191c81f7ab 100644 --- a/test-fixture/src/assertions.rs +++ b/test-fixture/src/assertions.rs @@ -4,13 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{addr, addr_v4}; -use neqo_common::{Datagram, Decoder}; -use neqo_transport::version::WireVersion; -use neqo_transport::Version; -use std::convert::{TryFrom, TryInto}; use std::net::SocketAddr; +use neqo_common::{Datagram, Decoder}; +use neqo_transport::{version::WireVersion, Version}; + +use crate::{DEFAULT_ADDR, DEFAULT_ADDR_V4}; + const PACKET_TYPE_MASK: u8 = 0b1011_0000; fn assert_default_version(dec: &mut Decoder) -> Version { @@ -32,7 +32,9 @@ fn assert_long_packet_type(b: u8, v1_expected: u8, version: Version) { } /// Simple checks for the version being correct. +/// /// # Panics +/// /// If this is not a long header packet with the given version. pub fn assert_version(payload: &[u8], v: u32) { let mut dec = Decoder::from(payload); @@ -41,7 +43,9 @@ pub fn assert_version(payload: &[u8], v: u32) { } /// Simple checks for a Version Negotiation packet. +/// /// # Panics +/// /// If this is clearly not a Version Negotiation packet. pub fn assert_vn(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -53,7 +57,9 @@ pub fn assert_vn(payload: &[u8]) { } /// Do a simple decode of the datagram to verify that it is coalesced. +/// /// # Panics +/// /// If the tests fail. pub fn assert_coalesced_0rtt(payload: &[u8]) { assert!(payload.len() >= 1200); @@ -71,6 +77,7 @@ pub fn assert_coalesced_0rtt(payload: &[u8]) { } /// # Panics +/// /// If the tests fail. pub fn assert_retry(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -80,7 +87,9 @@ pub fn assert_retry(payload: &[u8]) { } /// Assert that this is an Initial packet with (or without) a token. +/// /// # Panics +/// /// If the tests fail. pub fn assert_initial(payload: &[u8], expect_token: bool) { let mut dec = Decoder::from(payload); @@ -94,7 +103,9 @@ pub fn assert_initial(payload: &[u8], expect_token: bool) { } /// Assert that this is a Handshake packet. +/// /// # Panics +/// /// If the tests fail. 
pub fn assert_handshake(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -104,6 +115,7 @@ pub fn assert_handshake(payload: &[u8]) { } /// # Panics +/// /// If the tests fail. pub fn assert_no_1rtt(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -135,6 +147,7 @@ pub fn assert_no_1rtt(payload: &[u8]) { } /// # Panics +/// /// When the path doesn't use the given socket address at both ends. pub fn assert_path(dgram: &Datagram, path_addr: SocketAddr) { assert_eq!(dgram.source(), path_addr); @@ -142,18 +155,20 @@ pub fn assert_path(dgram: &Datagram, path_addr: SocketAddr) { } /// # Panics +/// /// When the path doesn't use the default v4 socket address at both ends. pub fn assert_v4_path(dgram: &Datagram, padded: bool) { - assert_path(dgram, addr_v4()); + assert_path(dgram, DEFAULT_ADDR_V4); if padded { assert_eq!(dgram.len(), 1357 /* PATH_MTU_V4 */); } } /// # Panics +/// /// When the path doesn't use the default v6 socket address at both ends. pub fn assert_v6_path(dgram: &Datagram, padded: bool) { - assert_path(dgram, addr()); + assert_path(dgram, DEFAULT_ADDR); if padded { assert_eq!(dgram.len(), 1337 /* PATH_MTU_V6 */); } diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 8b1b5ef419..e34fb522ff 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -4,31 +4,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] - -use neqo_common::{event::Provider, hex, qtrace, Datagram, Decoder}; - -use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; -use neqo_http3::{Http3Client, Http3Parameters, Http3Server}; -use neqo_transport::{ - version::WireVersion, Connection, ConnectionEvent, ConnectionId, ConnectionIdDecoder, - ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, State, Version, -}; +#![allow(clippy::module_name_repetitions)] // This lint doesn't work here. use std::{ - cell::RefCell, + cell::{OnceCell, RefCell}, cmp::max, - convert::TryFrom, + fmt::Display, + io::{Cursor, Result, Write}, mem, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, rc::Rc, + sync::{Arc, Mutex}, time::{Duration, Instant}, }; -use lazy_static::lazy_static; +use neqo_common::{ + event::Provider, + hex, + qlog::{new_trace, NeqoQlog}, + qtrace, Datagram, Decoder, IpTos, Role, +}; +use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; +use neqo_http3::{Http3Client, Http3Parameters, Http3Server}; +use neqo_transport::{ + version::WireVersion, Connection, ConnectionEvent, ConnectionId, ConnectionIdDecoder, + ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, State, Version, +}; +use qlog::{events::EventImportance, streamer::QlogStreamer}; pub mod assertions; +pub mod sim; /// The path for the database used in tests. pub const NSS_DB_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/db"); @@ -36,34 +41,43 @@ pub const NSS_DB_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/db"); /// Initialize the test fixture. Only call this if you aren't also calling a /// fixture function that depends on setup. Other functions in the fixture /// that depend on this setup call the function for you. +/// +/// # Panics +/// +/// When the NSS initialization fails. 
pub fn fixture_init() { - init_db(NSS_DB_PATH); + init_db(NSS_DB_PATH).unwrap(); } // This needs to be > 2ms to avoid it being rounded to zero. // NSS operates in milliseconds and halves any value it is provided. pub const ANTI_REPLAY_WINDOW: Duration = Duration::from_millis(10); -lazy_static! { - static ref BASE_TIME: Instant = Instant::now(); -} - +/// A baseline time for all tests. This needs to be earlier than what `now()` produces +/// because of the need to have a span of time elapse for anti-replay purposes. fn earlier() -> Instant { + // Note: It is only OK to have a different base time for each thread because our tests are + // single-threaded. + thread_local!(static EARLIER: OnceCell = const { OnceCell::new() }); fixture_init(); - *BASE_TIME + EARLIER.with(|b| *b.get_or_init(Instant::now)) } /// The current time for the test. Which is in the future, /// because 0-RTT tests need to run at least `ANTI_REPLAY_WINDOW` in the past. +/// /// # Panics +/// /// When the setup fails. #[must_use] pub fn now() -> Instant { earlier().checked_add(ANTI_REPLAY_WINDOW).unwrap() } -// Create a default anti-replay context. +/// Create a default anti-replay context. +/// /// # Panics +/// /// When the setup fails. #[must_use] pub fn anti_replay() -> AntiReplay { @@ -75,20 +89,33 @@ pub const DEFAULT_KEYS: &[&str] = &["key"]; pub const LONG_CERT_KEYS: &[&str] = &["A long cert"]; pub const DEFAULT_ALPN: &[&str] = &["alpn"]; pub const DEFAULT_ALPN_H3: &[&str] = &["h3"]; +pub const DEFAULT_ADDR: SocketAddr = addr(); +pub const DEFAULT_ADDR_V4: SocketAddr = addr_v4(); + +// Create a default datagram with the given data. +#[must_use] +pub fn datagram(data: Vec) -> Datagram { + Datagram::new( + DEFAULT_ADDR, + DEFAULT_ADDR, + IpTos::default(), + Some(128), + data, + ) +} /// Create a default socket address. #[must_use] -pub fn addr() -> SocketAddr { - // These could be const functions, but they aren't... 
+const fn addr() -> SocketAddr { let v6ip = IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 1)); SocketAddr::new(v6ip, 443) } /// An IPv4 version of the default socket address. #[must_use] -pub fn addr_v4() -> SocketAddr { - let localhost_v4 = IpAddr::V4(Ipv4Addr::from(0xc000_0201)); - SocketAddr::new(localhost_v4, addr().port()) +const fn addr_v4() -> SocketAddr { + let v4ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 1)); + SocketAddr::new(v4ip, DEFAULT_ADDR.port()) } /// This connection ID generation scheme is the worst, but it doesn't produce collisions. @@ -107,7 +134,7 @@ impl ConnectionIdDecoder for CountingConnectionIdGenerator { impl ConnectionIdGenerator for CountingConnectionIdGenerator { fn generate_cid(&mut self) -> Option { - let mut r = random(20); + let mut r = random::<20>(); // Randomize length, but ensure that the connection ID is long // enough to pass for an original destination connection ID. r[0] = max(8, 5 + ((r[0] >> 4) & r[0])); @@ -124,19 +151,27 @@ impl ConnectionIdGenerator for CountingConnectionIdGenerator { } } +/// Create a new client. +/// +/// # Panics +/// +/// If this doesn't work. #[must_use] pub fn new_client(params: ConnectionParameters) -> Connection { fixture_init(); - Connection::new_client( + let (log, _contents) = new_neqo_qlog(); + let mut client = Connection::new_client( DEFAULT_SERVER_NAME, DEFAULT_ALPN, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, params.ack_ratio(255), // Tests work better with this set this way. now(), ) - .expect("create a client") + .expect("create a client"); + client.set_qlog(log); + client } /// Create a transport client with default configuration. @@ -158,10 +193,14 @@ pub fn default_server_h3() -> Connection { } /// Create a transport server with a configuration. +/// +/// # Panics +/// +/// If this doesn't work. 
#[must_use] pub fn new_server(alpn: &[impl AsRef], params: ConnectionParameters) -> Connection { fixture_init(); - + let (log, _contents) = new_neqo_qlog(); let mut c = Connection::new_server( DEFAULT_KEYS, alpn, @@ -169,6 +208,7 @@ pub fn new_server(alpn: &[impl AsRef], params: ConnectionParameters) -> Con params.ack_ratio(255), ) .expect("create a server"); + c.set_qlog(log); c.server_enable_0rtt(&anti_replay(), AllowZeroRtt {}) .expect("enable 0-RTT"); c @@ -198,13 +238,14 @@ pub fn handshake(client: &mut Connection, server: &mut Connection) { }; while !is_done(a) { _ = maybe_authenticate(a); - let d = a.process(datagram, now()); + let d = a.process(datagram.as_ref(), now()); datagram = d.dgram(); mem::swap(&mut a, &mut b); } } /// # Panics +/// /// When the connection fails. #[must_use] pub fn connect() -> (Connection, Connection) { @@ -217,7 +258,9 @@ pub fn connect() -> (Connection, Connection) { } /// Create a http3 client with default configuration. +/// /// # Panics +/// /// When the client can't be created. #[must_use] pub fn default_http3_client() -> Http3Client { @@ -225,8 +268,8 @@ pub fn default_http3_client() -> Http3Client { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, Http3Parameters::default() .max_table_size_encoder(100) .max_table_size_decoder(100) @@ -238,7 +281,9 @@ pub fn default_http3_client() -> Http3Client { } /// Create a http3 client. +/// /// # Panics +/// /// When the client can't be created. 
#[must_use] pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { @@ -246,8 +291,8 @@ pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { Http3Client::new( DEFAULT_SERVER_NAME, Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), - addr(), - addr(), + DEFAULT_ADDR, + DEFAULT_ADDR, params, now(), ) @@ -255,7 +300,9 @@ pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { } /// Create a http3 server with default configuration. +/// /// # Panics +/// /// When the server can't be created. #[must_use] pub fn default_http3_server() -> Http3Server { @@ -314,7 +361,66 @@ fn split_packet(buf: &[u8]) -> (&[u8], Option<&[u8]>) { pub fn split_datagram(d: &Datagram) -> (Datagram, Option) { let (a, b) = split_packet(&d[..]); ( - Datagram::new(d.source(), d.destination(), a), - b.map(|b| Datagram::new(d.source(), d.destination(), b)), + Datagram::new(d.source(), d.destination(), d.tos(), d.ttl(), a), + b.map(|b| Datagram::new(d.source(), d.destination(), d.tos(), d.ttl(), b)), ) } + +#[derive(Clone, Default)] +pub struct SharedVec { + buf: Arc>>>, +} + +impl Write for SharedVec { + fn write(&mut self, buf: &[u8]) -> Result { + self.buf.lock().unwrap().write(buf) + } + fn flush(&mut self) -> Result<()> { + self.buf.lock().unwrap().flush() + } +} + +impl Display for SharedVec { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&String::from_utf8(self.buf.lock().unwrap().clone().into_inner()).unwrap()) + } +} + +/// Returns a pair of new enabled `NeqoQlog` that is backed by a [`Vec`] +/// together with a [`Cursor>`] that can be used to read the contents of +/// the log. +/// +/// # Panics +/// +/// Panics if the log cannot be created. 
+#[must_use] +pub fn new_neqo_qlog() -> (NeqoQlog, SharedVec) { + let buf = SharedVec::default(); + + if cfg!(feature = "bench") { + return (NeqoQlog::disabled(), buf); + } + + let mut trace = new_trace(Role::Client); + // Set reference time to 0.0 for testing. + trace.common_fields.as_mut().unwrap().reference_time = Some(0.0); + let contents = buf.clone(); + let streamer = QlogStreamer::new( + qlog::QLOG_VERSION.to_string(), + None, + None, + None, + std::time::Instant::now(), + trace, + EventImportance::Base, + Box::new(buf), + ); + let log = NeqoQlog::enabled(streamer, ""); + (log.expect("to be able to write to new log"), contents) +} + +pub const EXPECTED_LOG_HEADER: &str = concat!( + "\u{1e}", + r#"{"qlog_version":"0.3","qlog_format":"JSON-SEQ","trace":{"vantage_point":{"name":"neqo-Client","type":"client"},"title":"neqo-Client trace","description":"Example qlog trace description","configuration":{"time_offset":0.0},"common_fields":{"reference_time":0.0,"time_format":"relative"}}}"#, + "\n" +); diff --git a/neqo-transport/tests/sim/connection.rs b/test-fixture/src/sim/connection.rs similarity index 81% rename from neqo-transport/tests/sim/connection.rs rename to test-fixture/src/sim/connection.rs index 5768941e4a..d05979cfca 100644 --- a/neqo-transport/tests/sim/connection.rs +++ b/test-fixture/src/sim/connection.rs @@ -6,18 +6,23 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; -use neqo_common::{event::Provider, qdebug, qtrace, Datagram}; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{ - Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, -}; use std::{ cmp::min, fmt::{self, Debug}, time::Instant, }; +use neqo_common::{event::Provider, qdebug, qinfo, qtrace, Datagram}; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{ + Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, +}; + +use crate::{ + boxed, + sim::{Node, Rng}, +}; + /// The 
status of the processing of an event. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum GoalStatus { @@ -31,7 +36,7 @@ pub enum GoalStatus { /// A goal for the connection. /// Goals can be accomplished in any order. -pub trait ConnectionGoal { +pub trait ConnectionGoal: Debug { fn init(&mut self, _c: &mut Connection, _now: Instant) {} /// Perform some processing. fn process(&mut self, _c: &mut Connection, _now: Instant) -> GoalStatus { @@ -45,36 +50,49 @@ pub trait ConnectionGoal { pub struct ConnectionNode { c: Connection, + setup_goals: Vec>, goals: Vec>, } impl ConnectionNode { pub fn new_client( params: ConnectionParameters, + setup: impl IntoIterator>, goals: impl IntoIterator>, ) -> Self { Self { - c: test_fixture::new_client(params), + c: crate::new_client(params), + setup_goals: setup.into_iter().collect(), goals: goals.into_iter().collect(), } } pub fn new_server( params: ConnectionParameters, + setup: impl IntoIterator>, goals: impl IntoIterator>, ) -> Self { Self { - c: test_fixture::new_server(test_fixture::DEFAULT_ALPN, params), + c: crate::new_server(crate::DEFAULT_ALPN, params), + setup_goals: setup.into_iter().collect(), goals: goals.into_iter().collect(), } } pub fn default_client(goals: impl IntoIterator>) -> Self { - Self::new_client(ConnectionParameters::default(), goals) + Self::new_client( + ConnectionParameters::default(), + boxed![ReachState::new(State::Confirmed)], + goals, + ) } pub fn default_server(goals: impl IntoIterator>) -> Self { - Self::new_server(ConnectionParameters::default(), goals) + Self::new_server( + ConnectionParameters::default(), + boxed![ReachState::new(State::Confirmed)], + goals, + ) } #[allow(dead_code)] @@ -87,13 +105,20 @@ impl ConnectionNode { self.goals.push(goal); } + /// On the first call to this method, the setup goals will turn into the active goals. + /// On the second call, they will be swapped back and the main goals will run. 
+ fn setup_goals(&mut self, now: Instant) { + std::mem::swap(&mut self.goals, &mut self.setup_goals); + for g in &mut self.goals { + g.init(&mut self.c, now); + } + } + /// Process all goals using the given closure and return whether any were active. fn process_goals(&mut self, mut f: F) -> bool where F: FnMut(&mut Box, &mut Connection) -> GoalStatus, { - // Waiting on drain_filter... - // self.goals.drain_filter(|g| f(g, &mut self.c, &e)).count(); let mut active = false; let mut i = 0; while i < self.goals.len() { @@ -112,15 +137,13 @@ impl ConnectionNode { impl Node for ConnectionNode { fn init(&mut self, _rng: Rng, now: Instant) { - for g in &mut self.goals { - g.init(&mut self.c, now); - } + self.setup_goals(now); } - fn process(&mut self, mut d: Option, now: Instant) -> Output { + fn process(&mut self, mut dgram: Option, now: Instant) -> Output { _ = self.process_goals(|goal, c| goal.process(c, now)); loop { - let res = self.c.process(d.take(), now); + let res = self.c.process(dgram.take().as_ref(), now); let mut active = false; while let Some(e) = self.c.next_event() { @@ -143,12 +166,18 @@ impl Node for ConnectionNode { } } + fn prepare(&mut self, now: Instant) { + assert!(self.done(), "ConnectionNode::prepare: setup not complete"); + self.setup_goals(now); + assert!(!self.done(), "ConnectionNode::prepare: setup not complete"); + } + fn done(&self) -> bool { self.goals.is_empty() } fn print_summary(&self, test_name: &str) { - println!("{}: {:?}", test_name, self.c.stats()); + qinfo!("{}: {:?}", test_name, self.c.stats()); } } @@ -158,12 +187,15 @@ impl Debug for ConnectionNode { } } +/// A target for a connection that involves reaching a given connection state. #[derive(Debug, Clone)] pub struct ReachState { target: State, } impl ReachState { + /// Create a new instance that intends to reach the indicated state. 
+ #[must_use] pub fn new(target: State) -> Self { Self { target } } @@ -184,13 +216,15 @@ impl ConnectionGoal for ReachState { } } -#[derive(Debug)] +/// A target for a connection that involves sending a given amount of data on the indicated stream. +#[derive(Debug, Clone)] pub struct SendData { remaining: usize, stream_id: Option, } impl SendData { + #[must_use] pub fn new(amount: usize) -> Self { Self { remaining: amount, @@ -246,9 +280,7 @@ impl ConnectionGoal for SendData { match e { ConnectionEvent::SendStreamCreatable { stream_type: StreamType::UniDi, - } - // TODO(mt): remove the second condition when #842 is fixed. - | ConnectionEvent::StateChange(_) => { + } => { self.make_stream(c); GoalStatus::Active } @@ -268,12 +300,13 @@ impl ConnectionGoal for SendData { } /// Receive a prescribed amount of data from any stream. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ReceiveData { remaining: usize, } impl ReceiveData { + #[must_use] pub fn new(amount: usize) -> Self { Self { remaining: amount } } diff --git a/neqo-transport/tests/sim/delay.rs b/test-fixture/src/sim/delay.rs similarity index 94% rename from neqo-transport/tests/sim/delay.rs rename to test-fixture/src/sim/delay.rs index 95188c0562..c8de66758c 100644 --- a/neqo-transport/tests/sim/delay.rs +++ b/test-fixture/src/sim/delay.rs @@ -6,14 +6,17 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; +use std::{ + collections::BTreeMap, + fmt::{self, Debug}, + ops::Range, + time::{Duration, Instant}, +}; + use neqo_common::Datagram; use neqo_transport::Output; -use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::fmt::{self, Debug}; -use std::ops::Range; -use std::time::{Duration, Instant}; + +use super::{Node, Rng}; /// An iterator that shares a `Random` instance and produces uniformly /// random `Duration`s within a specified range. 
@@ -54,6 +57,7 @@ pub struct Delay { } impl Delay { + #[must_use] pub fn new(bounds: Range) -> Self { Self { random: RandomDelay::new(bounds), diff --git a/neqo-transport/tests/sim/drop.rs b/test-fixture/src/sim/drop.rs similarity index 87% rename from neqo-transport/tests/sim/drop.rs rename to test-fixture/src/sim/drop.rs index d42913d99d..6529a95d04 100644 --- a/neqo-transport/tests/sim/drop.rs +++ b/test-fixture/src/sim/drop.rs @@ -6,11 +6,15 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; +use std::{ + fmt::{self, Debug}, + time::Instant, +}; + use neqo_common::{qtrace, Datagram}; use neqo_transport::Output; -use std::fmt::{self, Debug}; -use std::time::Instant; + +use super::{Node, Rng}; /// A random dropper. pub struct Drop { @@ -23,6 +27,7 @@ impl Drop { /// Make a new random drop generator. Each `drop` is called, this generates a /// random value between 0 and `max` (exclusive). If this value is less than /// `threshold` a value of `true` is returned. + #[must_use] pub fn new(threshold: u64, max: u64) -> Self { Self { threshold, @@ -32,11 +37,16 @@ impl Drop { } /// Generate random drops with the given percentage. + #[must_use] pub fn percentage(pct: u8) -> Self { // Multiply by 10 so that the random number generator works more efficiently. Self::new(u64::from(pct) * 10, 1000) } + /// Determine whether or not to drop a packet. + /// # Panics + /// When this is invoked after test configuration has been torn down, + /// such that the RNG is no longer available. pub fn drop(&mut self) -> bool { let mut rng = self.rng.as_ref().unwrap().borrow_mut(); let r = rng.random_from(0..self.max); diff --git a/neqo-transport/tests/sim/mod.rs b/test-fixture/src/sim/mod.rs similarity index 69% rename from neqo-transport/tests/sim/mod.rs rename to test-fixture/src/sim/mod.rs index f7646aac56..9cf43b4b67 100644 --- a/neqo-transport/tests/sim/mod.rs +++ b/test-fixture/src/sim/mod.rs @@ -4,33 +4,31 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -// Tests with simulated network -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] - +/// Tests with simulated network components. pub mod connection; mod delay; mod drop; pub mod rng; mod taildrop; +use std::{ + cell::RefCell, + cmp::min, + fmt::Debug, + ops::{Deref, DerefMut}, + rc::Rc, + time::{Duration, Instant}, +}; + use neqo_common::{qdebug, qinfo, qtrace, Datagram, Encoder}; use neqo_transport::Output; use rng::Random; -use std::cell::RefCell; -use std::cmp::min; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::rc::Rc; -use std::time::{Duration, Instant}; -use test_fixture::{self, now}; - use NodeState::{Active, Idle, Waiting}; +use crate::now; + pub mod network { - pub use super::delay::Delay; - pub use super::drop::Drop; - pub use super::taildrop::TailDrop; + pub use super::{delay::Delay, drop::Drop, taildrop::TailDrop}; } type Rng = Rc>; @@ -78,17 +76,21 @@ pub trait Node: Debug { /// Perform processing. This optionally takes a datagram and produces either /// another data, a time that the simulator needs to wait, or nothing. fn process(&mut self, d: Option, now: Instant) -> Output; + /// This is called after setup is complete and before the main processing starts. + fn prepare(&mut self, _now: Instant) {} /// An node can report when it considers itself "done". + /// Prior to calling `prepare`, this should return `true` if it is ready. fn done(&self) -> bool { true } + /// Print out a summary of the state of the node. fn print_summary(&self, _test_name: &str) {} } /// The state of a single node. Nodes will be activated if they are `Active` /// or if the previous node in the loop generated a datagram. Nodes that return /// `true` from `Node::done` will be activated as normal. -#[derive(Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] enum NodeState { /// The node just produced a datagram. 
It should be activated again as soon as possible. Active, @@ -114,6 +116,19 @@ impl NodeHolder { } } +impl Deref for NodeHolder { + type Target = dyn Node; + fn deref(&self) -> &Self::Target { + self.node.as_ref() + } +} + +impl DerefMut for NodeHolder { + fn deref_mut(&mut self) -> &mut Self::Target { + self.node.as_mut() + } +} + pub struct Simulator { name: String, nodes: Vec, @@ -142,11 +157,12 @@ impl Simulator { } pub fn seed(&mut self, seed: [u8; 32]) { - self.rng = Rc::new(RefCell::new(Random::new(seed))); + self.rng = Rc::new(RefCell::new(Random::new(&seed))); } /// Seed from a hex string. - /// Though this is convenient, it panics if this isn't a 64 character hex string. + /// # Panics + /// When the provided string is not 32 bytes of hex (64 characters). pub fn seed_str(&mut self, seed: impl AsRef) { let seed = Encoder::from_hex(seed); self.seed(<[u8; 32]>::try_from(seed.as_ref()).unwrap()); @@ -164,18 +180,8 @@ impl Simulator { next.expect("a node cannot be idle and not done") } - /// Runs the simulation. 
- pub fn run(mut self) -> Duration { - let start = now(); - let mut now = start; + fn process_loop(&mut self, start: Instant, mut now: Instant) -> Instant { let mut dgram = None; - - for n in &mut self.nodes { - n.node.init(self.rng.clone(), now); - } - println!("{}: seed {}", self.name, self.rng.borrow().seed_str()); - - let real_start = Instant::now(); loop { for n in &mut self.nodes { if dgram.is_none() && !n.ready(now) { @@ -184,7 +190,7 @@ impl Simulator { } qdebug!([self.name], "processing {:?}", n.node); - let res = n.node.process(dgram.take(), now); + let res = n.process(dgram.take(), now); n.state = match res { Output::Datagram(d) => { qtrace!([self.name], " => datagram {}", d.len()); @@ -198,21 +204,14 @@ impl Simulator { } Output::None => { qtrace!([self.name], " => nothing"); - assert!(n.node.done(), "nodes have to be done when they go idle"); + assert!(n.done(), "nodes should be done when they go idle"); Idle } }; } - if self.nodes.iter().all(|n| n.node.done()) { - let real_elapsed = real_start.elapsed(); - println!("{}: real elapsed time: {:?}", self.name, real_elapsed); - let elapsed = now - start; - println!("{}: simulated elapsed time: {:?}", self.name, elapsed); - for n in &self.nodes { - n.node.print_summary(&self.name); - } - return elapsed; + if self.nodes.iter().all(|n| n.done()) { + return now; } if dgram.is_none() { @@ -229,4 +228,66 @@ impl Simulator { } } } + + #[must_use] + pub fn setup(mut self) -> ReadySimulator { + let start = now(); + + qinfo!("{}: seed {}", self.name, self.rng.borrow().seed_str()); + for n in &mut self.nodes { + n.init(self.rng.clone(), start); + } + + let setup_start = Instant::now(); + let now = self.process_loop(start, start); + let setup_time = now - start; + qinfo!( + "{t}: Setup took {wall:?} (wall) {setup_time:?} (simulated)", + t = self.name, + wall = setup_start.elapsed(), + ); + + for n in &mut self.nodes { + n.prepare(now); + } + + ReadySimulator { + sim: self, + start, + now, + } + } + + /// Runs the 
simulation. + /// # Panics + /// When sanity checks fail in unexpected ways; this is a testing function after all. + pub fn run(self) { + self.setup().run(); + } + + fn print_summary(&self) { + for n in &self.nodes { + n.print_summary(&self.name); + } + } +} + +pub struct ReadySimulator { + sim: Simulator, + start: Instant, + now: Instant, +} + +impl ReadySimulator { + pub fn run(mut self) { + let real_start = Instant::now(); + let end = self.sim.process_loop(self.start, self.now); + let sim_time = end - self.now; + qinfo!( + "{t}: Simulation took {wall:?} (wall) {sim_time:?} (simulated)", + t = self.sim.name, + wall = real_start.elapsed(), + ); + self.sim.print_summary(); + } } diff --git a/neqo-transport/tests/sim/net.rs b/test-fixture/src/sim/net.rs similarity index 100% rename from neqo-transport/tests/sim/net.rs rename to test-fixture/src/sim/net.rs diff --git a/neqo-transport/tests/sim/rng.rs b/test-fixture/src/sim/rng.rs similarity index 88% rename from neqo-transport/tests/sim/rng.rs rename to test-fixture/src/sim/rng.rs index d314e8b36f..913d7eae7a 100644 --- a/neqo-transport/tests/sim/rng.rs +++ b/test-fixture/src/sim/rng.rs @@ -4,17 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use neqo_common::Decoder; -use std::convert::TryFrom; use std::ops::Range; +use neqo_common::Decoder; + /// An implementation of a xoshiro256** pseudorandom generator. pub struct Random { state: [u64; 4], } impl Random { - pub fn new(seed: [u8; 32]) -> Self { + #[must_use] + #[allow(clippy::missing_panics_doc)] // These are impossible. + pub fn new(seed: &[u8; 32]) -> Self { assert!(seed.iter().any(|&x| x != 0)); let mut dec = Decoder::from(&seed); Self { @@ -48,6 +50,7 @@ impl Random { /// Generate a random value from the range. /// If the range is empty or inverted (`range.start > range.end`), then /// this returns the value of `range.start` without generating any random values. 
+ #[must_use] pub fn random_from(&mut self, range: Range) -> u64 { let max = range.end.saturating_sub(range.start); if max == 0 { @@ -55,7 +58,6 @@ impl Random { } let shift = (max - 1).leading_zeros(); - assert_ne!(max, 0); loop { let r = self.random() >> shift; if r < max { @@ -64,7 +66,8 @@ impl Random { } } - /// Get the seed necessary to continue from this point. + /// Get the seed necessary to continue from the current state of the RNG. + #[must_use] pub fn seed_str(&self) -> String { format!( "{:8x}{:8x}{:8x}{:8x}", @@ -75,7 +78,6 @@ impl Random { impl Default for Random { fn default() -> Self { - let buf = neqo_crypto::random(32); - Random::new(<[u8; 32]>::try_from(&buf[..]).unwrap()) + Random::new(&neqo_crypto::random::<32>()) } } diff --git a/neqo-transport/tests/sim/taildrop.rs b/test-fixture/src/sim/taildrop.rs similarity index 93% rename from neqo-transport/tests/sim/taildrop.rs rename to test-fixture/src/sim/taildrop.rs index 7346b27178..fc093e461d 100644 --- a/neqo-transport/tests/sim/taildrop.rs +++ b/test-fixture/src/sim/taildrop.rs @@ -6,19 +6,23 @@ #![allow(clippy::module_name_repetitions)] -use super::Node; -use neqo_common::{qtrace, Datagram}; +use std::{ + cmp::max, + collections::VecDeque, + fmt::{self, Debug}, + time::{Duration, Instant}, +}; + +use neqo_common::{qinfo, qtrace, Datagram}; use neqo_transport::Output; -use std::cmp::max; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::{self, Debug}; -use std::time::{Duration, Instant}; + +use super::Node; /// One second in nanoseconds. const ONE_SECOND_NS: u128 = 1_000_000_000; /// This models a link with a tail drop router at the front of it. +#[derive(Clone)] pub struct TailDrop { /// An overhead associated with each entry. This accounts for /// layer 2, IP, and UDP overheads. @@ -56,6 +60,7 @@ pub struct TailDrop { impl TailDrop { /// Make a new taildrop node with the given rate, queue capacity, and link delay. 
+ #[must_use] pub fn new(rate: usize, capacity: usize, delay: Duration) -> Self { Self { overhead: 64, @@ -76,12 +81,14 @@ impl TailDrop { /// A tail drop queue on a 10Mbps link (approximated to 1 million bytes per second) /// with a fat 32k buffer (about 30ms), and the default forward delay of 50ms. - pub fn dsl_uplink() -> Self { + #[must_use] + pub fn dsl_downlink() -> Self { TailDrop::new(1_000_000, 32_768, Duration::from_millis(50)) } - /// Cut downlink to one fifth of the uplink (2Mbps), and reduce the buffer to 1/4. - pub fn dsl_downlink() -> Self { + /// Cut uplink to one fifth of the downlink (2Mbps), and reduce the buffer to 1/4. + #[must_use] + pub fn dsl_uplink() -> Self { TailDrop::new(200_000, 8_192, Duration::from_millis(50)) } @@ -170,9 +177,13 @@ impl Node for TailDrop { } fn print_summary(&self, test_name: &str) { - println!( + qinfo!( "{}: taildrop: rx {} drop {} tx {} maxq {}", - test_name, self.received, self.dropped, self.delivered, self.maxq, + test_name, + self.received, + self.dropped, + self.delivered, + self.maxq, ); } } diff --git a/test/README.md b/test/README.md new file mode 100644 index 0000000000..38acb7a90f --- /dev/null +++ b/test/README.md @@ -0,0 +1,35 @@ +## Steps to run an upload test with neqo-client and neqo-server: + +1. Build the release version of neqo-client and neqo-server by running + `cargo build --release` +1. Start neqo-server. `./target/release/neqo-server --db ./test-fixture/db` +1. Start neqo-client and specify parameters to start the upload test. + ` ./target/release/neqo-client http://127.0.0.1:4433/ --test upload --upload-size ${size_in_bytes}` + +## To enable log messages for analyzing upload performance + +This can be done by setting the `RUST_LOG` environment variable to `neqo_transport=info`. +For example, the command below starts neqo-client and uploads 8MB of content to the server. 
+``` +RUST_LOG=neqo_transport=info ./target/release/neqo-client http://127.0.0.1:4433/ --test upload --upload-size 8388608 &>upload.log +``` + +## To run the upload test with `upload_test.sh` script + +### Overview +The `upload_test.sh` script automates testing network conditions for `neqo-client` and `neqo-server`. It runs the upload test under various network parameters like bandwidth, RTT (Round-Trip Time), and PLR (Packet Loss Rate). + +### Configuration +- **Server Address and Port**: Defaults to `127.0.0.1` and `4433`. +- **Upload Size**: Set to 8MB by default. +- **Network Conditions**: Modify `network_conditions`, `network_bandwidths`, `network_rtts`, and `plrs` arrays for different conditions. +- **Runs**: Number of test iterations, default is `1`. + +### Usage +1. **Start the Script**: Execute with `./upload_test.sh`. +2. **Root Password Prompt**: Enter the root password when prompted for executing network configuration commands. +3. **Automated Test Execution**: The script sets up network conditions and runs `neqo-client` and `neqo-server` tests. +4. **Cleanup**: At the end, it resets network conditions and stops the server. + +## Visualize log file +Run `./mozlog-neqo-cwnd.py upload.log` to view the logs with matplotlib and python. diff --git a/test/mozlog-neqo-cwnd.py b/test/mozlog-neqo-cwnd.py new file mode 100755 index 0000000000..f40c3d5606 --- /dev/null +++ b/test/mozlog-neqo-cwnd.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python3 + +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +# Author: Manuel Bucher +# Date: 2023-11-02 + +import matplotlib.pyplot as plt +import re +import sys +from collections import defaultdict +from datetime import datetime + +# 2023-11-02 13:32:28.450290 UTC - [Parent 31525: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] packet_sent this=0x7f84d3d31100, pn=111, ps=36 +# 2023-11-02 13:32:28.477635 UTC - [Parent 31525: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] packet_acked this=0x7f84d3d31100, pn=111, ps=36, ignored=0, lost=0 +# 2023-11-02 13:55:02.954829 UTC - [Parent 41203: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] packet_lost this=0x7f2864efcc80, pn=308694, ps=1337 +PATTERN = r" ([a-z_]+) this=0x([0-9a-f]+), pn=(\d+), ps=(\d+)" +events = re.compile(PATTERN) + +# 2023-11-02 13:32:28.477655 UTC - [Parent 31525: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] on_packets_acked this=0x7f84d3d31100, limited=1, bytes_in_flight=0, cwnd=13370, state=SlowStart, new_acked=36 +PATTERN = r" on_packets_acked this=0x([0-9a-f]+), limited=(\d+), bytes_in_flight=(\d+), cwnd=(\d+), state=([a-zA-Z]+), new_acked=(\d+)" +acked = re.compile(PATTERN) +# 2023-11-02 13:55:02.954909 UTC - [Parent 41203: Socket Thread]: I/neqo_transport::* [neqo_transport::cc::classic_cc] on_packets_lost this=0x7f2864efcc80, bytes_in_flight=690883, cwnd=1520187, state=RecoveryStart +PATTERN = r" on_packets_lost this=0x([0-9a-f]+), bytes_in_flight=(\d+), cwnd=(\d+), state=([a-zA-Z]+)" +lost = re.compile(PATTERN) + + +def get_time(line): + # allow garbage data before timestamp + timestamp = line.split(" UTC", 1)[0].split(" ") + timestamp = timestamp[-2] + " " + timestamp[-1] + return datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f") + + +def main(): + if len(sys.argv) < 2: + print("usage:", sys.argv[0], "LOG_FILE") + return + + data = defaultdict( + lambda: { + "time": [], + "cwnd": [], + "bif": [], + "bif_limited": [], + "bif_limited_time": [], + 
"last_bytes_in_flight": 0, + "last_state": ("SlowStart", 0), + # color background depending on state + "bg_state": [], + "bg_time": [], + # event occurences + "p": {}, # pn -> y-axis (bytes in flight after packet sent) + "ps": defaultdict( + lambda: defaultdict(lambda: []) + ), # x/y coords of packet_sent/_acked/_lost + } + ) + + for line in open(sys.argv[1]): + if (result := acked.search(line)) is not None: + this = result.group(1) + now = get_time(line) + data[this]["time"].append(now) + data[this]["limited"] = bool(int(result.group(2))) + data[this]["last_bytes_in_flight"] = int(result.group(3)) + data[this]["bif"].append(data[this]["last_bytes_in_flight"]) + data[this]["cwnd"].append(int(result.group(4))) + state = result.group(5) + if data[this]["last_state"][0] != state: + data[this]["bg_state"].append(state) + data[this]["bg_time"].append(now) + data[this]["last_state"] = (state, now) + data[this]["new_acked"] = result.group(6) + if data[this]["limited"]: + data[this]["bif_limited"].append(data[this]["last_bytes_in_flight"]) + data[this]["bif_limited_time"].append(now) + elif (result := events.search(line)) is not None: + this = result.group(2) + now = get_time(line) + event = result.group(1) + pn = int(result.group(3)) + packet_size = int(result.group(4)) + if ( + event == "packet_sent" + or event == "packet_acked" + or event == "packet_lost" + ): + if event == "packet_sent": + data[this]["last_bytes_in_flight"] += packet_size + data[this]["p"][pn] = data[this]["last_bytes_in_flight"] + if data[this]["last_state"][0] == "RecoveryStart": + data[this]["bg_state"].append("CongestionAvoidance") + data[this]["bg_time"].append(now) + data[this]["last_state"] = ("CongestionAvoidance", now) + if data[this]["last_state"] == "PersistentCongestion": + data[this]["bg_state"].append("SlowStart") + data[this]["bg_time"].append(now) + data[this]["last_state"] = ("SlowStart", now) + # only remember events for packets where we sent the packet + if pn in data[this]["p"]: + 
data[this]["ps"][event]["time"].append(now) + data[this]["ps"][event]["bif"].append(data[this]["p"][pn]) + data[this]["ps"][event]["pn"].append(pn) + elif (result := lost.search(line)) is not None: + this = result.group(1) + now = get_time(line) + data[this]["time"].append(now) + data[this]["last_bytes_in_flight"] = int(result.group(2)) + data[this]["bif"].append(data[this]["last_bytes_in_flight"]) + data[this]["cwnd"].append(int(result.group(3))) + state = result.group(4) + if data[this]["last_state"][0] != state: + data[this]["bg_state"].append(state) + data[this]["bg_time"].append(now) + data[this]["last_state"] = (state, now) + + output = "" + output_num = 0 + for el in data: + if len(data[el]["time"]) > output_num: + output_num = len(data[el]["time"]) + output = el + fig, axs = plt.subplots(2, 1) + + data[output]["bg_time"].append(data[output]["time"][-1]) + for ax in axs: + color_background(ax, data[output]["bg_time"], data[output]["bg_state"]) + + # add plots + graph_pn(axs[0], data[output]) + graph_cwnd(axs[1], data[output]) + + # configure graph + axs[0].set_title(sys.argv[1].split("/")[-1]) + for ax in axs: + ax.legend() + plt.show() + + +COLORS = { + "packet_sent": "black", + "packet_lost": "red", + "packet_acked": "green", +} + + +# plot pn graph +def graph_pn(ax, output_data): + for event in ["packet_sent", "packet_acked", "packet_lost"]: + ax.scatter( + output_data["ps"][event]["time"], + output_data["ps"][event]["pn"], + label=event, + s=10, + color=COLORS[event], + ) + ax.set_xlabel("time in s") + ax.set_ylabel("packet_number") + + +# plot cwnd graph +def graph_cwnd(ax, output_data): + ax.plot(output_data["time"], output_data["cwnd"], label="cwnd") + ax.plot(output_data["time"], output_data["bif"], ".-", label="bytes in flight") + ax.plot( + output_data["bif_limited_time"], + output_data["bif_limited"], + "s", + label="app_limited", + ) + for event in ["packet_sent", "packet_lost"]: + ax.scatter( + output_data["ps"][event]["time"], + 
output_data["ps"][event]["bif"], + label=event, + s=10, + color=COLORS[event], + ) + ax.set_xlabel("time in s") + ax.set_ylabel("bytes") + + +def color_background(ax, time, states): + # change background depending on congestion controller state + state_colors = { + "SlowStart": "green", + "CongestionAvoidance": "blue", + "RecoveryStart": "gray", + "Recovery": "orange", + "PersistentCongestion": "purple", + } + legend = set() + for time_from, time_to, state in zip(time[:-1], time[1:], states): + color = state_colors[state] + if state in legend: + ax.axvspan(time_from, time_to, facecolor=color, alpha=0.3) + else: + legend.add(state) + ax.axvspan(time_from, time_to, facecolor=color, alpha=0.3, label=state) + + +if __name__ == "__main__": + main() diff --git a/test/test.sh b/test/test.sh new file mode 100755 index 0000000000..dc02b2161c --- /dev/null +++ b/test/test.sh @@ -0,0 +1,40 @@ +#! /usr/bin/env bash + +# This script builds the client and server binaries and runs them in a tmux +# session side-by-side. The client connects to the server and the server +# responds with a simple HTTP response. The client and server are run with +# verbose logging and the qlog output is stored in a temporary directory. The +# script also runs tcpdump to capture the packets exchanged between the client +# and server. The script uses tmux to create a split terminal window to display +# the qlog output and the packet capture. 
+ +set -e +tmp=$(mktemp -d) +trap 'rm -rf "$tmp"' EXIT + +cargo build --bin neqo-client --bin neqo-server + +addr=127.0.0.1 +port=4433 +path=/20000 +flags="--verbose --qlog-dir $tmp --use-old-http --alpn hq-interop --quic-version 1" +if [ "$(uname -s)" != "Linux" ]; then + iface=lo0 +else + iface=lo +fi + +client="./target/debug/neqo-client $flags --output-dir $tmp --stats https://$addr:$port$path" +server="SSLKEYLOGFILE=$tmp/test.tlskey ./target/debug/neqo-server $flags $addr:$port" + +tcpdump -U -i "$iface" -w "$tmp/test.pcap" host $addr and port $port >/dev/null 2>&1 & +tcpdump_pid=$! + +tmux -CC \ + set-option -g default-shell "$(which bash)" \; \ + new-session "$client && kill -USR2 $tcpdump_pid && touch $tmp/done" \; \ + split-window -h "$server" \; \ + split-window -v -f "\ + until [ -e $tmp/done ]; do sleep 1; done && \ + tshark -r $tmp/test.pcap -o tls.keylog_file:$tmp/test.tlskey" \; \ + set remain-on-exit on diff --git a/test/upload_test.sh b/test/upload_test.sh new file mode 100755 index 0000000000..8edb55e75d --- /dev/null +++ b/test/upload_test.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +set -e + +export RUST_LOG=neqo_transport::cc=debug + +server_address=127.0.0.1 +server_port=4433 +upload_size=8388608 +cc=cubic +client="cargo run --release --bin neqo-client -- http://$server_address:$server_port/ --test upload --upload-size $upload_size --cc $cc" +server="cargo run --release --bin neqo-server -- --db ../test-fixture/db $server_address:$server_port" +server_pid=0 +no_pacing=false +if [ "$no_pacing" = true ]; then + client="$client --no-pacing" + server="$server --no-pacing" +fi + +# Define two indexed arrays to store network conditions +network_conditions=("cable" "3g_slow" "DSL" "LTE" "fast wifi") +network_bandwidths=("5Mbit/s" "400Kbit/s" "2Mbit/s" "12Mbit/s" "100Mbit/s") +network_rtts=("14" "200" "25" "35" "10") +plrs=("0.0001" "0.0005" "0.001" "0.002" "0.005") + +runs=1 + +setup_network_conditions() { + bw="$1" + delay_ms="$2" + plr="$3" + 
delay_s=$(echo "scale=5; $delay_ms / 1000" | bc -l) + if [[ $bw == *"Mbit/s"* ]]; then + bw_value=$(echo "$bw" | sed 's/Mbit\/s//') # Remove 'Mbit/s' + bw_bits_per_second=$(echo "$bw_value * 1000000" | bc) # Convert from Mbits to bits + elif [[ $bw == *"Kbit/s"* ]]; then + bw_value=$(echo "$bw" | sed 's/Kbit\/s//') # Remove 'Kbit/s' + bw_bits_per_second=$(echo "$bw_value * 1000" | bc) # Convert from Kbits to bits + fi + + bdp_bits=$(echo "$bw_bits_per_second * $delay_s" | bc) + + # Convert BDP to kilobytes + bdp_kb=$(echo "scale=2; $bdp_bits / 8 / 1024" | bc) + bdp_kb_rounded_up=$(LC_NUMERIC=C printf "%.0f" "$bdp_kb") + + + # if we are on MacOS X, configure the firewall to add delay and queue traffic + if [ -x /usr/sbin/dnctl ]; then + set_condition_commands=( + "sudo dnctl pipe 1 config bw $bw delay $delay_ms plr $plr queue ${bdp_kb_rounded_up}Kbytes noerror" + "sudo dnctl pipe 2 config bw $bw delay $delay_ms plr $plr queue ${bdp_kb_rounded_up}Kbytes noerror" + "sudo echo 'dummynet in proto {udp} from any to localhost pipe 1' | sudo pfctl -f -" + "sudo echo 'dummynet in proto {udp} from localhost to any pipe 2' | sudo pfctl -f -" + "sudo pfctl -e || true" + ) + else + bw_in_bits_per_sec="${bw%/s}" + bdp_bytes=$(echo "scale=2; $bdp_bits / 8" | bc) + bdp_bytes_rounded_up=$(LC_NUMERIC=C printf "%.0f" "$bdp_bytes") + plr_p=$(echo "scale=4; $plr * 100" | bc) + plr_p=$(LC_NUMERIC=C printf "%.2f" "$plr_p") + set_condition_commands=( + "sudo tc qdisc add dev lo root handle 1: tbf rate $bw_in_bits_per_sec burst $bdp_bytes_rounded_up limit 30000" + "sudo tc qdisc add dev lo parent 1:1 handle 10: netem delay ${delay_ms}ms loss ${plr_p}%" + ) + fi + + for command in "${set_condition_commands[@]}"; do + echo "$command" + eval "$command" + done +} + +stop_network_conditions() { + if [ -x /usr/sbin/dnctl ]; then + stop_condition_commands=( + "sudo pfctl -f /etc/pf.conf" + "sudo dnctl -q flush" + ) + else + stop_condition_commands=( + "sudo tc qdisc del dev lo root" + ) + fi + + 
for command in "${stop_condition_commands[@]}"; do + eval "$command" + done +} + +stop_server() { + echo "stop server" + server_pid=$(pgrep -f "neqo-server") + # Kill the server + kill "$server_pid" +} + +start_test() { + echo "start_test" + eval "$server" > /dev/null 2>&1 & sleep 1 + + # Run the client command and capture its output + echo "Running client..." + client_output=$(eval "$client") + echo "Client output: $client_output" +} + +cleanup() { + echo "clean up" + stop_server + stop_network_conditions +} + +trap cleanup SIGINT + +for i in "${!network_conditions[@]}"; do + condition=${network_conditions[$i]} + bandwidth=${network_bandwidths[$i]} + rtt=${network_rtts[$i]} + + for plr in "${plrs[@]}"; do + echo "Setting up tests for condition: $condition, Bandwidth: $bandwidth, RTT: $rtt, Packet Loss Rate: $plr" + + for r in $(seq 1 $runs); do + echo "Test Run: $r | Condition: $condition | Bandwidth: $bandwidth | RTT: $rtt | PLR: $plr | Start" + setup_network_conditions "$bandwidth" "$rtt" "$plr" + start_test + cleanup + echo "Test Run: $r | Condition: $condition | Bandwidth: $bandwidth | RTT: $rtt | PLR: $plr | End" + done + done + + echo "Completed tests for condition: $condition." +done + +echo "All test runs completed."