diff --git a/.github/env b/.github/env index 2e4d5b48100d..bb61e1f4cd99 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v202407161507" +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py index d3b6b523ecfd..f05d517a5802 100644 --- a/.github/scripts/generate-prdoc.py +++ b/.github/scripts/generate-prdoc.py @@ -67,7 +67,6 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): # Go up until we find a Cargo.toml p = os.path.join(workspace.path, p) while not os.path.exists(os.path.join(p, "Cargo.toml")): - print(f"Could not find Cargo.toml in {p}") if p == '/': exit(1) p = os.path.dirname(p) @@ -76,7 +75,6 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): manifest = toml.load(f) if not "package" in manifest: - print(f"File was not in any crate: {p}") continue crate_name = manifest["package"]["name"] @@ -85,8 +83,6 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): else: print(f"Skipping unpublished crate: {crate_name}") - print(f"Modified crates: {modified_crates.keys()}") - for crate_name in modified_crates.keys(): entry = { "name": crate_name } @@ -137,4 +133,4 @@ def main(args): if __name__ == "__main__": args = setup_parser().parse_args() - main(args) \ No newline at end of file + main(args) diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 8185cf171aef..8d30e752b115 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -17,13 +17,13 @@ concurrency: cancel-in-progress: true jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml # More info can be found here: https://github.com/paritytech/polkadot/pull/5865 check-runtime-migration: runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} # We need to set this to rather long to allow the snapshot to be created, but the average time # should be much lower. 
timeout-minutes: 60 diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index f765d79254c8..0793c31dbb87 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -15,14 +15,13 @@ concurrency: permissions: {} jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml cargo-clippy: runs-on: ${{ needs.preflight.outputs.RUNNER }} needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 40 container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -33,12 +32,12 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script run: | - forklift cargo clippy --all-targets --locked --workspace - forklift cargo clippy --all-targets --all-features --locked --workspace + forklift cargo clippy --all-targets --locked --workspace --quiet + forklift cargo clippy --all-targets --all-features --locked --workspace --quiet check-try-runtime: runs-on: ${{ needs.preflight.outputs.RUNNER }} needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 40 container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -46,18 +45,18 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script run: | - forklift cargo check --locked --all --features try-runtime + forklift cargo check --locked --all --features try-runtime --quiet # this is taken from cumulus # Check that parachain-template will compile with `try-runtime` feature flag. forklift cargo check --locked -p parachain-template-node --features try-runtime # add after https://github.com/paritytech/substrate/pull/14502 is merged # experimental code may rely on try-runtime and vice-versa - forklift cargo check --locked --all --features try-runtime,experimental + forklift cargo check --locked --all --features try-runtime,experimental --quiet # check-core-crypto-features works fast without forklift check-core-crypto-features: runs-on: ${{ needs.preflight.outputs.RUNNER }} needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 30 container: image: ${{ needs.preflight.outputs.IMAGE }} diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml index aa9de9474a7b..2154c8a987f2 100644 --- a/.github/workflows/command-prdoc.yml +++ b/.github/workflows/command-prdoc.yml @@ -28,7 +28,7 @@ on: - "Runtime Dev" - "Runtime User" - "Node Dev" - - "Node User" + - "Node Operator" overwrite: type: choice description: Overwrite existing PrDoc diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 514bac3973bf..44ad9613997a 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,15 +9,17 @@ on: merge_group: concurrency: - group: ${{ github.ref }} - cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true jobs: preflight: + if: contains(github.event.label.name, 'GHA-migration') uses: ./.github/workflows/reusable-preflight.yml test-rustdoc: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} needs: [preflight] container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -38,7 +40,8 @@ jobs: RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" build-rustdoc: - runs-on: 
arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} needs: [preflight, test-rustdoc] container: image: ${{ needs.preflight.outputs.IMAGE }} diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 994f1c9b78ea..599edd8e69a1 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -13,13 +13,12 @@ concurrency: cancel-in-progress: true jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml test-linux-stable-int: needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -40,7 +39,7 @@ jobs: # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -54,11 +53,11 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet --cargo-quiet test-linux-stable: needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ matrix.runners }} timeout-minutes: 60 strategy: @@ -91,12 +90,13 @@ jobs: --locked \ --release \ --no-fail-fast \ + --cargo-quiet \ --features try-runtime,experimental,riscv,ci-only-tests \ --partition count:${{ matrix.partition }} # run runtime-api tests with `enable-staging-api` feature on the 1st node - name: runtime-api tests if: ${{ matrix.partition == '1/3' }} - run: forklift cargo nextest run -p sp-api-test --features enable-staging-api + run: forklift cargo nextest run -p sp-api-test --features enable-staging-api --cargo-quiet confirm-required-jobs-passed: runs-on: ubuntu-latest diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 90162adc2258..45ed4ebdc6cb 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -22,6 +22,7 @@ jobs: test-full-crypto-feature: needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 60 container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -46,6 +47,7 @@ jobs: # into one job needs: [preflight, test-full-crypto-feature] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -67,6 +69,7 @@ jobs: timeout-minutes: 60 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -74,8 +77,7 @@ jobs: # but still want to have debug assertions. RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + SKIP_WASM_BUILD: 1 # Ensure we run the UI tests. 
RUN_UI_TESTS: 1 steps: @@ -83,18 +85,20 @@ jobs: uses: actions/checkout@v4 - name: script run: | - forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental - forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental - forklift cargo test --locked -q --profile testnet -p xcm-procedural - forklift cargo test --locked -q --profile testnet -p frame-election-provider-solution-type - forklift cargo test --locked -q --profile testnet -p sp-api-test + cargo version + forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental ui + forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental ui + forklift cargo test --locked -q --profile testnet -p xcm-procedural ui + forklift cargo test --locked -q --profile testnet -p frame-election-provider-solution-type ui + forklift cargo test --locked -q --profile testnet -p sp-api-test ui # There is multiple version of sp-runtime-interface in the repo. So we point to the manifest. - forklift cargo test --locked -q --profile testnet --manifest-path substrate/primitives/runtime-interface/Cargo.toml + forklift cargo test --locked -q --profile testnet --manifest-path substrate/primitives/runtime-interface/Cargo.toml ui test-deterministic-wasm: timeout-minutes: 20 needs: [preflight, test-frame-examples-compile-to-wasm] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -194,6 +198,7 @@ jobs: needs: [preflight] timeout-minutes: 30 runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} steps: @@ -226,6 +231,7 @@ jobs: timeout-minutes: 20 needs: [preflight, test-node-metrics] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} steps: @@ -241,6 +247,7 @@ jobs: timeout-minutes: 20 needs: [preflight, check-tracing] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} steps: @@ -251,50 +258,52 @@ jobs: run: | forklift cargo build --locked -p westend-runtime --features metadata-hash - cargo-hfuzz: - timeout-minutes: 20 - needs: [preflight, check-metadata-hash] - runs-on: ${{ needs.preflight.outputs.RUNNER }} - container: - image: ${{ needs.preflight.outputs.IMAGE }} - env: - # max 10s per iteration, 60s per file - HFUZZ_RUN_ARGS: | - --exit_upon_crash - --exit_code_upon_crash 1 - --timeout 10 - --run_time 60 - - # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: - # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian - # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr - # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR - HFUZZ_BUILD_ARGS: | - --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" - --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" - steps: - - name: Checkout - uses: 
actions/checkout@v4 - - - name: Run honggfuzz - run: | - cd substrate/primitives/arithmetic/fuzzer - forklift cargo hfuzz build - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); - do - forklift cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; - done - - - name: Upload artifacts - uses: actions/upload-artifact@v4.3.6 - with: - path: substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/ - name: hfuzz-${{ github.sha }} + # disabled until https://github.com/paritytech/polkadot-sdk/issues/5812 is resolved + # cargo-hfuzz: + # timeout-minutes: 20 + # needs: [preflight, check-metadata-hash] + # runs-on: ${{ needs.preflight.outputs.RUNNER }} + # container: + # image: ${{ needs.preflight.outputs.IMAGE }} + # env: + # # max 10s per iteration, 60s per file + # HFUZZ_RUN_ARGS: | + # --exit_upon_crash + # --exit_code_upon_crash 1 + # --timeout 10 + # --run_time 60 + + # # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: + # # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian + # # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr + # # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR + # HFUZZ_BUILD_ARGS: | + # --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" + # --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + + # - name: Run honggfuzz + # run: | + # cd substrate/primitives/arithmetic/fuzzer + # forklift cargo hfuzz build + # for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); + # do + # forklift cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; + # done + + # - name: Upload artifacts + # uses: actions/upload-artifact@v4.3.6 + # with: + # path: substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/ + # name: hfuzz-${{ github.sha }} cargo-check-each-crate: timeout-minutes: 140 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -322,6 +331,7 @@ jobs: timeout-minutes: 30 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER_MACOS }} + if: ${{ needs.preflight.outputs.changes_rust }} env: SKIP_WASM_BUILD: 1 steps: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1132c2ca4dd5..6d6e393b0410 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,21 +5,20 @@ on: branches: - master pull_request: - types: [ opened, synchronize, reopened, ready_for_review ] + types: [opened, synchronize, reopened, ready_for_review] merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml # This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors. 
quick-benchmarks: - needs: [ preflight ] - # if: ${{ needs.preflight.outputs.changes_rust }} + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -33,12 +32,12 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet + run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: - needs: [ preflight ] - # if: ${{ needs.preflight.outputs.changes_rust }} + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -52,7 +51,7 @@ jobs: - name: script id: test run: | - forklift cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker + forklift cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker --quiet cd polkadot/scripts/list-syscalls ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-execute-worker --only-used-syscalls | diff -u execute-worker-syscalls - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls - @@ -61,10 +60,9 @@ jobs: run: | echo "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed." 
>> $GITHUB_STEP_SUMMARY - cargo-check-all-benches: - needs: [ preflight ] - # if: ${{ needs.preflight.outputs.changes_rust }} + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -75,4 +73,4 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: forklift cargo check --all --benches + run: forklift cargo check --all --benches --quiet diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5c6b3928b46e..dbc5dafeb0ae 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,8 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 6fb35c61e48d..0252620cd0f3 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -112,6 +112,7 @@ test-linux-stable-codecov: # some tests do not run with `try-runtime` feature enabled # https://github.com/paritytech/polkadot-sdk/pull/4251#discussion_r1624282143 +# Move to github after https://github.com/paritytech/ci_cd/issues/1056 is fixed test-linux-stable-no-try-runtime: stage: test extends: diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index c17366dbe4c2..e43b5a54b042 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,7 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.105" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.110" PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs" ZOMBIE_PROVIDER: "k8s" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 93fc4bbb578a..9a3bed24cfa0 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -108,7 +108,7 @@ zombienet-polkadot-functional-0004-parachains-disputes-garbage-candidate: --local-dir="${LOCAL_DIR}/functional" --test="0004-parachains-garbage-candidate.zndsl" -zombienet-polkadot-functional-0005-parachains-disputes-past-session: +.zombienet-polkadot-functional-0005-parachains-disputes-past-session: extends: - .zombienet-polkadot-common script: @@ -223,6 +223,14 @@ zombienet-polkadot-functional-0015-coretime-shared-core: --local-dir="${LOCAL_DIR}/functional" --test="0015-coretime-shared-core.zndsl" +zombienet-polkadot-functional-0016-approval-voting-parallel: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0016-approval-voting-parallel.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common @@ -343,7 +351,7 @@ zombienet-polkadot-malus-0001-dispute-valid: --local-dir="${LOCAL_DIR}/integrationtests" --test="0001-dispute-valid-block.zndsl" -zombienet-polkadot-coretime-revenue: +.zombienet-polkadot-coretime-revenue: extends: - .zombienet-polkadot-common needs: diff --git a/Cargo.lock b/Cargo.lock index 61f485bcecb3..2e8eda3ca9e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-13902,7 +13902,6 @@ dependencies = [ "futures-timer", "itertools 0.11.0", "log", - "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -14240,7 +14239,6 @@ dependencies = [ "merlin", "parity-scale-codec", "parking_lot 0.12.3", - "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -14268,6 +14266,49 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-approval-voting-parallel" +version = "7.0.0" +dependencies = [ + "assert_matches", + "async-trait", + "futures", + "futures-timer", + "itertools 0.11.0", + "kvdb-memorydb", + "log", + "parking_lot 0.12.3", + "polkadot-approval-distribution", + "polkadot-node-core-approval-voting", + "polkadot-node-jaeger", + "polkadot-node-metrics", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-util", + "polkadot-overseer", + "polkadot-primitives", + "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", + "rand", + "rand_chacha", + "rand_core 0.6.4", + "sc-keystore", + "schnorrkel 0.11.4", + "sp-application-crypto 30.0.0", + "sp-consensus", + "sp-consensus-babe", + "sp-consensus-slots", + "sp-core 28.0.0", + "sp-keyring", + "sp-keystore 0.34.0", + "sp-runtime 31.0.1", + "sp-tracing 16.0.0", + "thiserror", + "tracing-gum", +] + [[package]] name = "polkadot-node-core-av-store" version = "7.0.0" @@ -15397,6 +15438,7 @@ dependencies = [ "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", @@ -15736,6 +15778,7 @@ dependencies = [ "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", @@ -15892,6 +15935,7 @@ dependencies = [ "polkadot-availability-recovery", "polkadot-erasure-coding", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-chain-api", "polkadot-node-metrics", diff --git a/Cargo.toml b/Cargo.toml index b7c9c0cdcbf1..c40d59fce3c0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -158,6 +158,7 @@ members = [ "polkadot/erasure-coding/fuzzer", "polkadot/node/collation-generation", "polkadot/node/core/approval-voting", + "polkadot/node/core/approval-voting-parallel", "polkadot/node/core/av-store", "polkadot/node/core/backing", "polkadot/node/core/bitfield-signing", @@ -551,7 +552,7 @@ default-members = [ [workspace.lints.rust] suspicious_double_ref_op = { level = "allow", priority = 2 } # `substrate_runtime` is a common `cfg` condition name used in the repo. 
-unexpected_cfgs = { level = "warn", check-cfg = ['cfg(substrate_runtime)'] } +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(build_opt_level, values("3"))', 'cfg(build_profile, values("debug", "release"))', 'cfg(enable_alloc_error_handler)', 'cfg(fuzzing)', 'cfg(substrate_runtime)'] } [workspace.lints.clippy] all = { level = "allow", priority = 0 } @@ -1032,6 +1033,7 @@ polkadot-gossip-support = { path = "polkadot/node/network/gossip-support", defau polkadot-network-bridge = { path = "polkadot/node/network/bridge", default-features = false } polkadot-node-collation-generation = { path = "polkadot/node/collation-generation", default-features = false } polkadot-node-core-approval-voting = { path = "polkadot/node/core/approval-voting", default-features = false } +polkadot-node-core-approval-voting-parallel = { path = "polkadot/node/core/approval-voting-parallel", default-features = false } polkadot-node-core-av-store = { path = "polkadot/node/core/av-store", default-features = false } polkadot-node-core-backing = { path = "polkadot/node/core/backing", default-features = false } polkadot-node-core-bitfield-signing = { path = "polkadot/node/core/bitfield-signing", default-features = false } diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index dff4b98fd919..22a15ec4062f 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -728,15 +728,13 @@ pub mod pallet { init_params; let authority_set_length = authority_list.len(); let authority_set = StoredAuthoritySet::::try_new(authority_list, set_id) - .map_err(|e| { + .inspect_err(|_| { log::error!( target: LOG_TARGET, "Failed to initialize bridge. Number of authorities in the set {} is larger than the configured value {}", authority_set_length, T::BridgedChain::MAX_AUTHORITIES_COUNT, ); - - e })?; let initial_hash = header.hash(); diff --git a/bridges/modules/xcm-bridge-hub/src/lib.rs b/bridges/modules/xcm-bridge-hub/src/lib.rs index 22c60fb4ad60..1b2536598a20 100644 --- a/bridges/modules/xcm-bridge-hub/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub/src/lib.rs @@ -415,7 +415,7 @@ pub mod pallet { bridge.deposit, Precision::BestEffort, ) - .map_err(|e| { + .inspect_err(|e| { // we can't do anything here - looks like funds have been (partially) unreserved // before by someone else. 
Let's not fail, though - it'll be worse for the caller log::error!( @@ -423,7 +423,6 @@ pub mod pallet { "Failed to unreserve during the bridge {:?} closure with error: {e:?}", locations.bridge_id(), ); - e }) .ok() .unwrap_or(BalanceOf::>::zero()); @@ -1456,25 +1455,26 @@ mod tests { let lane_id = TestLaneIdType::try_new(1, 2).unwrap(); let lane_id_mismatch = TestLaneIdType::try_new(3, 4).unwrap(); - let test_bridge_state = |id, - bridge, - (lane_id, bridge_id), - (inbound_lane_id, outbound_lane_id), - expected_error: Option| { - Bridges::::insert(id, bridge); - LaneToBridge::::insert(lane_id, bridge_id); - - let lanes_manager = LanesManagerOf::::new(); - lanes_manager.create_inbound_lane(inbound_lane_id).unwrap(); - lanes_manager.create_outbound_lane(outbound_lane_id).unwrap(); + let test_bridge_state = + |id, + bridge, + (lane_id, bridge_id), + (inbound_lane_id, outbound_lane_id), + expected_error: Option| { + Bridges::::insert(id, bridge); + LaneToBridge::::insert(lane_id, bridge_id); - let result = XcmOverBridge::do_try_state(); - if let Some(e) = expected_error { - assert_err!(result, e); - } else { - assert_ok!(result); - } - }; + let lanes_manager = LanesManagerOf::::new(); + lanes_manager.create_inbound_lane(inbound_lane_id).unwrap(); + lanes_manager.create_outbound_lane(outbound_lane_id).unwrap(); + + let result = XcmOverBridge::do_try_state(); + if let Some(e) = expected_error { + assert_err!(result, e); + } else { + assert_ok!(result); + } + }; let cleanup = |bridge_id, lane_ids| { Bridges::::remove(bridge_id); for lane_id in lane_ids { diff --git a/bridges/primitives/header-chain/src/justification/mod.rs b/bridges/primitives/header-chain/src/justification/mod.rs index d7c2cbf429e2..87f53dac6463 100644 --- a/bridges/primitives/header-chain/src/justification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/mod.rs @@ -32,7 +32,6 @@ pub use verification::{ use bp_runtime::{BlockNumberOf, Chain, HashOf, HeaderId}; use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::RuntimeDebugNoBound; use scale_info::TypeInfo; use sp_consensus_grandpa::{AuthorityId, AuthoritySignature}; use sp_runtime::{traits::Header as HeaderT, RuntimeDebug, SaturatedConversion}; @@ -43,7 +42,8 @@ use sp_std::prelude::*; /// /// This particular proof is used to prove that headers on a bridged chain /// (so not our chain) have been finalized correctly. -#[derive(Encode, Decode, Clone, PartialEq, Eq, TypeInfo, RuntimeDebugNoBound)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] +#[cfg_attr(feature = "std", derive(Debug))] pub struct GrandpaJustification { /// The round (voting period) this justification is valid for. pub round: u64, @@ -54,6 +54,17 @@ pub struct GrandpaJustification { pub votes_ancestries: Vec
, } +// A proper Debug impl for no-std is not possible for the `GrandpaJustification` since the `Commit` +// type only implements Debug that for `std` here: +// https://github.com/paritytech/finality-grandpa/blob/8c45a664c05657f0c71057158d3ba555ba7d20de/src/lib.rs#L275 +// so we do a manual impl. +#[cfg(not(feature = "std"))] +impl core::fmt::Debug for GrandpaJustification { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "GrandpaJustification {{ round: {:?}, commit: , votes_ancestries: {:?} }}", self.round, self.votes_ancestries) + } +} + impl GrandpaJustification { /// Returns reasonable size of justification using constants from the provided chain. /// diff --git a/bridges/primitives/messages/src/lane.rs b/bridges/primitives/messages/src/lane.rs index c835449db27d..0f14ce93e114 100644 --- a/bridges/primitives/messages/src/lane.rs +++ b/bridges/primitives/messages/src/lane.rs @@ -71,7 +71,7 @@ impl LaneIdType for LegacyLaneId { } } -#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] +#[cfg(feature = "std")] impl TryFrom> for LegacyLaneId { type Error = (); @@ -140,7 +140,7 @@ impl HashedLaneId { /// There's no `From` implementation for the `LaneId`, because using this conversion /// in a wrong way (i.e. computing hash of endpoints manually) may lead to issues. So we /// want the call to be explicit. - #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] + #[cfg(feature = "std")] pub const fn from_inner(inner: H256) -> Self { Self(inner) } @@ -184,7 +184,7 @@ impl LaneIdType for HashedLaneId { } } -#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] +#[cfg(feature = "std")] impl TryFrom> for HashedLaneId { type Error = (); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index 9261dc437536..e875c53a2e7e 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -325,7 +325,9 @@ where _, Self::Left, MessagesLaneIdOf, - >(common.right.client.clone(), &common.metrics_params, &common.right.accounts, &lanes) + >( + common.right.client.clone(), &common.metrics_params, &common.right.accounts, &lanes + ) .await?; } diff --git a/bridges/relays/lib-substrate-relay/src/messages/metrics.rs b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs index 9d45a4b3d668..efe429701c41 100644 --- a/bridges/relays/lib-substrate-relay/src/messages/metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs @@ -53,9 +53,8 @@ where let token_decimals = client .token_decimals() .await? - .map(|token_decimals| { + .inspect(|token_decimals| { log::info!(target: "bridge", "Read `tokenDecimals` for {}: {}", C::NAME, token_decimals); - token_decimals }) .unwrap_or_else(|| { // turns out it is normal not to have this property - e.g. 
when polkadot binary is diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index 2ef86f48ecbe..96eba0af988c 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -664,7 +664,8 @@ impl<'a, P: SubstrateParachainsPipeline, SourceRelayClnt, TargetClnt> for ( &'a OnDemandParachainsRelay, &'a ParachainsSource, - ) where + ) +where SourceRelayClnt: Client, TargetClnt: Client, { diff --git a/bridges/relays/messages/src/message_race_delivery.rs b/bridges/relays/messages/src/message_race_delivery.rs index cbb89baabcc5..b09533a4ddc1 100644 --- a/bridges/relays/messages/src/message_race_delivery.rs +++ b/bridges/relays/messages/src/message_race_delivery.rs @@ -59,9 +59,7 @@ pub async fn run( _phantom: Default::default(), }, target_state_updates, - MessageDeliveryStrategy:: { - lane_source_client: source_client, - lane_target_client: target_client, + MessageDeliveryStrategy::
<P>
{ max_unrewarded_relayer_entries_at_target: params .max_unrewarded_relayer_entries_at_target, max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, @@ -71,7 +69,6 @@ pub async fn run( latest_confirmed_nonces_at_source: VecDeque::new(), target_nonces: None, strategy: BasicStrategy::new(), - metrics_msg, }, ) .await @@ -300,11 +297,7 @@ struct DeliveryRaceTargetNoncesData { } /// Messages delivery strategy. -struct MessageDeliveryStrategy { - /// The client that is connected to the message lane source node. - lane_source_client: SC, - /// The client that is connected to the message lane target node. - lane_target_client: TC, +struct MessageDeliveryStrategy { /// Maximal unrewarded relayer entries at target client. max_unrewarded_relayer_entries_at_target: MessageNonce, /// Maximal unconfirmed nonces at target client. @@ -322,8 +315,6 @@ struct MessageDeliveryStrategy { target_nonces: Option>, /// Basic delivery strategy. strategy: MessageDeliveryStrategyBase
<P>
, - /// Message lane metrics. - metrics_msg: Option, } type MessageDeliveryStrategyBase
<P>
= BasicStrategy< @@ -335,7 +326,7 @@ type MessageDeliveryStrategyBase
<P>
= BasicStrategy<

::MessagesProof, >; -impl std::fmt::Debug for MessageDeliveryStrategy { +impl std::fmt::Debug for MessageDeliveryStrategy
<P>
{ fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("MessageDeliveryStrategy") .field( @@ -353,11 +344,9 @@ impl std::fmt::Debug for MessageDeliveryStrategy MessageDeliveryStrategy +impl MessageDeliveryStrategy
<P>
where P: MessageLane, - SC: MessageLaneSourceClient
<P>
, - TC: MessageLaneTargetClient
<P>
, { /// Returns true if some race action can be selected (with `select_race_action`) at given /// `best_finalized_source_header_id_at_best_target` source header at target. @@ -465,23 +454,18 @@ where let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; - let lane_source_client = self.lane_source_client.clone(); - let lane_target_client = self.lane_target_client.clone(); // select nonces from nonces, available for delivery let selected_nonces = match self.strategy.available_source_queue_indices(race_state) { Some(available_source_queue_indices) => { let source_queue = self.strategy.source_queue(); - let reference = RelayMessagesBatchReference { + let reference = RelayMessagesBatchReference::
<P>
{ max_messages_in_this_batch: max_nonces, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, - lane_source_client: lane_source_client.clone(), - lane_target_client: lane_target_client.clone(), best_target_nonce, nonces_queue: source_queue.clone(), nonces_queue_range: available_source_queue_indices, - metrics: self.metrics_msg.clone(), }; MessageRaceLimits::decide(reference).await @@ -534,12 +518,10 @@ where } #[async_trait] -impl RaceStrategy, TargetHeaderIdOf
<P>
, P::MessagesProof> - for MessageDeliveryStrategy<P, SC, TC> +impl
<P>
RaceStrategy<SourceHeaderIdOf<P>, TargetHeaderIdOf
<P>
, P::MessagesProof> + for MessageDeliveryStrategy
<P>
where P: MessageLane, - SC: MessageLaneSourceClient
<P>
, - TC: MessageLaneTargetClient
<P>
, { type SourceNoncesRange = MessageDetailsMap; type ProofParameters = MessageProofParameters; @@ -707,8 +689,7 @@ mod tests { message_lane_loop::{ tests::{ header_id, TestMessageLane, TestMessagesBatchTransaction, TestMessagesProof, - TestSourceChainBalance, TestSourceClient, TestSourceHeaderId, TestTargetClient, - TestTargetHeaderId, + TestSourceChainBalance, TestSourceHeaderId, TestTargetHeaderId, }, MessageDetails, }, @@ -726,8 +707,7 @@ mod tests { TestMessagesProof, TestMessagesBatchTransaction, >; - type TestStrategy = - MessageDeliveryStrategy; + type TestStrategy = MessageDeliveryStrategy; fn source_nonces( new_nonces: RangeInclusive, @@ -770,9 +750,6 @@ mod tests { max_messages_weight_in_single_batch: Weight::from_parts(4, 0), max_messages_size_in_single_batch: 4, latest_confirmed_nonces_at_source: vec![(header_id(1), 19)].into_iter().collect(), - lane_source_client: TestSourceClient::default(), - lane_target_client: TestTargetClient::default(), - metrics_msg: None, target_nonces: Some(TargetClientNonces { latest_nonce: 19, nonces_data: DeliveryRaceTargetNoncesData { @@ -1167,9 +1144,6 @@ mod tests { max_messages_weight_in_single_batch: Weight::from_parts(4, 0), max_messages_size_in_single_batch: 4, latest_confirmed_nonces_at_source: VecDeque::new(), - lane_source_client: TestSourceClient::default(), - lane_target_client: TestTargetClient::default(), - metrics_msg: None, target_nonces: None, strategy: BasicStrategy::new(), }; diff --git a/bridges/relays/messages/src/message_race_limits.rs b/bridges/relays/messages/src/message_race_limits.rs index 873bb6aad042..8fcd1f911f68 100644 --- a/bridges/relays/messages/src/message_race_limits.rs +++ b/bridges/relays/messages/src/message_race_limits.rs @@ -23,33 +23,16 @@ use bp_messages::{MessageNonce, Weight}; use crate::{ message_lane::MessageLane, - message_lane_loop::{ - MessageDetails, MessageDetailsMap, SourceClient as MessageLaneSourceClient, - TargetClient as MessageLaneTargetClient, - }, + message_lane_loop::{MessageDetails, MessageDetailsMap}, message_race_loop::NoncesRange, message_race_strategy::SourceRangesQueue, - metrics::MessageLaneLoopMetrics, }; /// Reference data for participating in relay -pub struct RelayReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, -> { - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Metrics reference. - pub metrics: Option, +pub struct RelayReference { /// Messages size summary pub selected_size: u32, - /// Hard check begin nonce - pub hard_selected_begin_nonce: MessageNonce, - /// Index by all ready nonces pub index: usize, /// Current nonce @@ -59,23 +42,13 @@ pub struct RelayReference< } /// Relay reference data -pub struct RelayMessagesBatchReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, -> { +pub struct RelayMessagesBatchReference { /// Maximal number of relayed messages in single delivery transaction. pub max_messages_in_this_batch: MessageNonce, /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. pub max_messages_weight_in_single_batch: Weight, /// Maximal cumulative size of relayed messages in single delivery transaction. pub max_messages_size_in_single_batch: u32, - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Metrics reference. - pub metrics: Option, /// Best available nonce at the **best** target block. We do not want to deliver nonces /// less than this nonce, even though the block may be retracted. pub best_target_nonce: MessageNonce, @@ -94,12 +67,8 @@ pub struct RelayMessagesBatchReference< pub struct MessageRaceLimits; impl MessageRaceLimits { - pub async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, - >( - reference: RelayMessagesBatchReference, + pub async fn decide( + reference: RelayMessagesBatchReference
<P>
, ) -> Option> { let mut hard_selected_count = 0; @@ -112,15 +81,9 @@ impl MessageRaceLimits { ); // relay reference - let mut relay_reference = RelayReference { - lane_source_client: reference.lane_source_client.clone(), - lane_target_client: reference.lane_target_client.clone(), - metrics: reference.metrics.clone(), - + let mut relay_reference = RelayReference::
<P>
{ selected_size: 0, - hard_selected_begin_nonce, - index: 0, nonce: 0, details: MessageDetails { diff --git a/bridges/relays/messages/src/message_race_loop.rs b/bridges/relays/messages/src/message_race_loop.rs index 31341a9a0c0c..ea6a2371dc90 100644 --- a/bridges/relays/messages/src/message_race_loop.rs +++ b/bridges/relays/messages/src/message_race_loop.rs @@ -225,15 +225,9 @@ pub trait RaceState: Clone + Send + Sync { /// client (at the `best_finalized_source_header_id_at_best_target`). fn set_best_finalized_source_header_id_at_best_target(&mut self, id: SourceHeaderId); - /// Best finalized source header id at the source client. - fn best_finalized_source_header_id_at_source(&self) -> Option; /// Best finalized source header id at the best block on the target /// client (at the `best_finalized_source_header_id_at_best_target`). fn best_finalized_source_header_id_at_best_target(&self) -> Option; - /// The best header id at the target client. - fn best_target_header_id(&self) -> Option; - /// Best finalized header id at the target client. - fn best_finalized_target_header_id(&self) -> Option; /// Returns `true` if we have selected nonces to submit to the target node. fn nonces_to_submit(&self) -> Option>; @@ -296,22 +290,10 @@ where self.best_finalized_source_header_id_at_best_target = Some(id); } - fn best_finalized_source_header_id_at_source(&self) -> Option { - self.best_finalized_source_header_id_at_source.clone() - } - fn best_finalized_source_header_id_at_best_target(&self) -> Option { self.best_finalized_source_header_id_at_best_target.clone() } - fn best_target_header_id(&self) -> Option { - self.best_target_header_id.clone() - } - - fn best_finalized_target_header_id(&self) -> Option { - self.best_finalized_target_header_id.clone() - } - fn nonces_to_submit(&self) -> Option> { self.nonces_to_submit.clone().map(|(_, nonces, _)| nonces) } diff --git a/bridges/relays/messages/src/message_race_strategy.rs b/bridges/relays/messages/src/message_race_strategy.rs index 3a532331d79d..1303fcfedebd 100644 --- a/bridges/relays/messages/src/message_race_strategy.rs +++ b/bridges/relays/messages/src/message_race_strategy.rs @@ -67,7 +67,8 @@ impl< TargetHeaderHash, SourceNoncesRange, Proof, - > where + > +where SourceHeaderHash: Clone, SourceHeaderNumber: Clone + Ord, SourceNoncesRange: NoncesRange, @@ -189,7 +190,8 @@ impl< TargetHeaderHash, SourceNoncesRange, Proof, - > where + > +where SourceHeaderHash: Clone + Debug + Send + Sync, SourceHeaderNumber: Clone + Ord + Debug + Send + Sync, SourceNoncesRange: NoncesRange + Debug + Send + Sync, diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs index d5c89b9c0987..eeeaa6e68cf9 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs @@ -182,12 +182,6 @@ where let root = merkelize::(hashes.into_iter(), &mut collect_proof); let leaf = leaf.expect("Requested `leaf_index` is greater than number of leaves."); - #[cfg(feature = "debug")] - log::debug!( - "[merkle_proof] Proof: {:?}", - collect_proof.proof.iter().map(hex::encode).collect::>() - ); - MerkleProof { root, proof: collect_proof.proof, number_of_leaves, leaf_index, leaf } } @@ -274,8 +268,6 @@ where V: Visitor, I: Iterator, { - #[cfg(feature = "debug")] - log::debug!("[merkelize_row]"); next.clear(); let hash_len = ::LENGTH; @@ -286,9 +278,6 @@ where let b = iter.next(); visitor.visit(index, &a, &b); - 
#[cfg(feature = "debug")] - log::debug!(" {:?}\n {:?}", a.as_ref().map(hex::encode), b.as_ref().map(hex::encode)); - index += 2; match (a, b) { (Some(a), Some(b)) => { @@ -309,14 +298,7 @@ where // Last item = root. (Some(a), None) => return Ok(a), // Finish up, no more items. - _ => { - #[cfg(feature = "debug")] - log::debug!( - "[merkelize_row] Next: {:?}", - next.iter().map(hex::encode).collect::>() - ); - return Err(next) - }, + _ => return Err(next), } } } diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index a10884b45531..fbfc52d01c83 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -168,7 +168,8 @@ impl< ConvertAssetId, EthereumUniversalLocation, GlobalAssetHubLocation, - > where + > +where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, @@ -226,7 +227,8 @@ impl< ConvertAssetId, EthereumUniversalLocation, GlobalAssetHubLocation, - > where + > +where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/mod.rs index d3b6c116dd7a..b23bce0e2185 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/mod.rs @@ -44,7 +44,8 @@ impl where + > +where UniversalLocation: Get, EthereumNetwork: Get, OutboundQueue: SendMessage, diff --git a/bridges/snowbridge/runtime/runtime-common/src/lib.rs b/bridges/snowbridge/runtime/runtime-common/src/lib.rs index aae45520ff4b..0b1a74b232a0 100644 --- a/bridges/snowbridge/runtime/runtime-common/src/lib.rs +++ b/bridges/snowbridge/runtime/runtime-common/src/lib.rs @@ -50,7 +50,8 @@ impl where + > +where Balance: BaseArithmetic + Unsigned + Copy + From + Into + Debug, AccountId: Clone + FullCodec, FeeAssetLocation: Get, diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 4fea055203d8..f0a082dce534 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -379,6 +379,7 @@ fn build_polkadot_full_node( execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, )?; diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 9dc41aa03d9b..21af35fe3de3 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -390,7 +390,8 @@ pub mod pallet { let maximum_channels = host_config .hrmp_max_message_num_per_candidate - .min(>::take()) as usize; + .min(>::take()) + as usize; // Note: this internally calls the `GetChannelInfo` implementation for this // pallet, which draws on the `RelevantMessagingState`. 
That in turn has diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 470e00fe94e1..3964ecf28cac 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -20,7 +20,7 @@ use cumulus_pallet_parachain_system::AnyRelayNumber; use cumulus_primitives_core::{ChannelInfo, IsSystem, ParaId}; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, Everything, Nothing, OriginTrait}, + traits::{ConstU32, Everything, OriginTrait}, BoundedSlice, }; use frame_system::EnsureRoot; @@ -30,10 +30,6 @@ use sp_runtime::{ BuildStorage, }; use xcm::prelude::*; -use xcm_builder::{ - FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, - ParentIsPreset, -}; use xcm_executor::traits::ConvertOrigin; type Block = frame_system::mocking::MockBlock; @@ -119,61 +115,6 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } -/// Means for transacting assets on this chain. -pub type LocalAssetTransactor = FungibleAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Do a simple punn to convert an AccountId32 Location into a native chain account ID: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports. - (), ->; - -pub type LocationToAccountId = (ParentIsPreset,); - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - // How to withdraw and deposit an asset. - type AssetTransactor = LocalAssetTransactor; - type OriginConverter = (); - type IsReserve = NativeAsset; - type IsTeleporter = NativeAsset; - type UniversalLocation = UniversalLocation; - type Barrier = (); - type Weigher = FixedWeightBounds; - type Trader = (); - type ResponseHandler = (); - type AssetTrap = (); - type AssetClaims = (); - type SubscriptionService = (); - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; - type TransactionalProcessor = FrameTransactionalProcessor; - type HrmpNewChannelOpenRequestHandler = (); - type HrmpChannelAcceptedHandler = (); - type HrmpChannelClosingHandler = (); - type XcmRecorder = (); -} - -pub type XcmRouter = ( - // XCMP to communicate with the sibling chains. 
- XcmpQueue, -); - pub struct SystemParachainAsSuperuser(PhantomData); impl ConvertOrigin for SystemParachainAsSuperuser diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index d6cf819118e9..470b4d0f389e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -265,7 +265,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = AssetHubRococo::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index ddb82a954a87..ee0f297792f8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -265,7 +265,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = AssetHubWestend::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 44e6b3934f0e..2619ca7591d0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -107,7 +107,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = PeopleRococo::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index 83888031723f..d9a2c23ac0c6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -107,7 +107,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = PeopleWestend::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + 
test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index e46734dfa97d..430c5bd18561 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -545,7 +545,8 @@ impl InstanceFilter for ProxyType { RuntimeCall::Utility { .. } | RuntimeCall::Multisig { .. } | RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } + RuntimeCall::Nfts { .. } | + RuntimeCall::Uniques { .. } ) }, ProxyType::AssetOwner => matches!( @@ -866,7 +867,7 @@ impl pallet_nft_fractionalization::Config for Runtime { type Assets = Assets; type Nfts = Nfts; type PalletId = NftFractionalizationPalletId; - type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; + type WeightInfo = weights::pallet_nft_fractionalization::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 946b72e966e9..8565cd30f430 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -543,7 +543,8 @@ impl InstanceFilter for ProxyType { RuntimeCall::Utility { .. } | RuntimeCall::Multisig { .. } | RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } + RuntimeCall::Nfts { .. } | + RuntimeCall::Uniques { .. 
} ) }, ProxyType::AssetOwner => matches!( @@ -859,7 +860,7 @@ impl pallet_nft_fractionalization::Config for Runtime { type Assets = Assets; type Nfts = Nfts; type PalletId = NftFractionalizationPalletId; - type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; + type WeightInfo = weights::pallet_nft_fractionalization::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 67b585ecfe86..c80222142304 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -1143,7 +1143,8 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor .with_balances(vec![( foreign_creator_as_account_id.clone(), existential_deposit + - asset_deposit + metadata_deposit_base + + asset_deposit + + metadata_deposit_base + metadata_deposit_per_byte_eta + buy_execution_fee_amount.into() + buy_execution_fee_amount.into(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 4ff388f4ba2b..d168d1b7a13c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -301,12 +301,16 @@ fn relayed_incoming_message_works() { XcmOverBridgeHubRococoInstance, LocationToAccountId, WestendLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get(), |locations, fee| { - bridge_hub_test_utils::open_bridge_with_storage::< - Runtime, - XcmOverBridgeHubRococoInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) - }) + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + |locations, fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverBridgeHubRococoInstance, + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + }, + ) .1 }, construct_and_apply_extrinsic, @@ -331,12 +335,16 @@ fn free_relay_extrinsic_works() { XcmOverBridgeHubRococoInstance, LocationToAccountId, WestendLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get(), |locations, fee| { - bridge_hub_test_utils::open_bridge_with_storage::< - Runtime, - XcmOverBridgeHubRococoInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) - }) + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + |locations, fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverBridgeHubRococoInstance, + >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + }, + ) .1 }, construct_and_apply_extrinsic, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs index 663558f5fd5c..9c5d6269dc0e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -506,11 +506,12 @@ pub fn message_dispatch_routing_works< // 2. 
this message is sent from other global consensus with destination of this Runtime // sibling parachain (HRMP) - let bridging_message = test_data::simulate_message_exporter_on_bridged_chain::< - BridgedNetwork, - NetworkWithParentCount, - AlwaysLatest, - >((RuntimeNetwork::get(), [Parachain(sibling_parachain_id)].into())); + let bridging_message = + test_data::simulate_message_exporter_on_bridged_chain::< + BridgedNetwork, + NetworkWithParentCount, + AlwaysLatest, + >((RuntimeNetwork::get(), [Parachain(sibling_parachain_id)].into())); // 2.1. WITHOUT opened hrmp channel -> RoutingError let result = diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index a7c8bc239358..c7e81d67bbab 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -218,11 +218,7 @@ parameter_types! { impl pallet_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] + // NOTE: Changes are needed here if you add benchmarking to the runtime. type MessageProcessor = xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, xcm_executor::XcmExecutor, diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/mod.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/mod.rs index 02aa867d70fe..bd4ff167d8f1 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/mod.rs +++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/mod.rs @@ -24,12 +24,14 @@ use utils::{impl_node_runtime_apis, imports::*}; type CustomBlock = crate::common::types::Block; pub mod aura_sr25519 { use super::*; + #[allow(dead_code)] struct FakeRuntime; impl_node_runtime_apis!(FakeRuntime, CustomBlock, sp_consensus_aura::sr25519::AuthorityId); } pub mod aura_ed25519 { use super::*; + #[allow(dead_code)] struct FakeRuntime; impl_node_runtime_apis!(FakeRuntime, CustomBlock, sp_consensus_aura::ed25519::AuthorityId); } diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index 6bd14d136a62..8530f5b87487 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -385,7 +385,8 @@ impl< FungiblesAssetMatcher, OnUnbalanced, AccountId, - > where + > +where Fungibles::Balance: Into, { fn new() -> Self { @@ -545,7 +546,8 @@ impl< FungiblesAssetMatcher, OnUnbalanced, AccountId, - > where + > +where Fungibles::Balance: Into, { fn drop(&mut self) { diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 3e5a6ccdd3c2..1445ade08e20 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -151,6 +151,13 @@ pub struct RunCmd { /// TESTING ONLY: disable the version check between nodes and workers. #[arg(long, hide = true)] pub disable_worker_version_check: bool, + + /// Enable approval-voting message processing in parallel. + /// + ///**Dangerous!** This is an experimental feature and should not be used in production, unless + /// explicitly advised to. 
+ #[arg(long)] + pub enable_approval_voting_parallel: bool, } #[allow(missing_docs)] diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index 89e21bf135b6..34ada58bbcdb 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -29,8 +29,6 @@ use sp_keyring::Sr25519Keyring; use std::net::ToSocketAddrs; pub use crate::error::Error; -#[cfg(feature = "hostperfcheck")] -pub use polkadot_performance_test::PerfCheckError; #[cfg(feature = "pyroscope")] use pyroscope_pprofrs::{pprof_backend, PprofConfig}; @@ -244,6 +242,7 @@ where execute_workers_max_num: cli.run.execute_workers_max_num, prepare_workers_hard_max_num: cli.run.prepare_workers_hard_max_num, prepare_workers_soft_max_num: cli.run.prepare_workers_soft_max_num, + enable_approval_voting_parallel: cli.run.enable_approval_voting_parallel, }, ) .map(|full| full.task_manager)?; diff --git a/polkadot/node/core/approval-voting-parallel/Cargo.toml b/polkadot/node/core/approval-voting-parallel/Cargo.toml new file mode 100644 index 000000000000..e62062eab409 --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "polkadot-node-core-approval-voting-parallel" +version = "7.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +description = "Approval Voting Subsystem running approval work in parallel" + +[lints] +workspace = true + +[dependencies] +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true } +itertools = { workspace = true } +thiserror = { workspace = true } + +polkadot-node-core-approval-voting = { workspace = true, default-features = true } +polkadot-approval-distribution = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-jaeger = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } + +sc-keystore = { workspace = true, default-features = false } +sp-consensus = { workspace = true, default-features = false } +sp-consensus-slots = { workspace = true, default-features = false } +sp-application-crypto = { workspace = true, default-features = false, features = ["full_crypto"] } +sp-runtime = { workspace = true, default-features = false } + +rand = { workspace = true } +rand_core = { workspace = true } +rand_chacha = { workspace = true } + +[dev-dependencies] +async-trait = { workspace = true } +parking_lot = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-tracing = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true, default-features = true } +assert_matches = { workspace = true } +kvdb-memorydb = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true, default-features = true } +log = { workspace = true, default-features = true } +polkadot-subsystem-bench = { workspace = 
true, default-features = true } +schnorrkel = { workspace = true, default-features = true } diff --git a/polkadot/node/core/approval-voting-parallel/src/lib.rs b/polkadot/node/core/approval-voting-parallel/src/lib.rs new file mode 100644 index 000000000000..1a7ef756bdfc --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/src/lib.rs @@ -0,0 +1,958 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Approval Voting Parallel Subsystem. +//! +//! This subsystem is responsible for orchestrating the work done by +//! approval-voting and approval-distribution subsystem, so they can +//! do their work in parallel, rather than serially, when they are run +//! as independent subsystems. +use itertools::Itertools; +use metrics::{Meters, MetricsWatcher}; +use polkadot_node_core_approval_voting::{Config, RealAssignmentCriteria}; +use polkadot_node_metrics::metered::{ + self, channel, unbounded, MeteredReceiver, MeteredSender, UnboundedMeteredReceiver, + UnboundedMeteredSender, +}; + +use polkadot_node_primitives::{ + approval::time::{Clock, SystemClock}, + DISPUTE_WINDOW, +}; +use polkadot_node_subsystem::{ + messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage}, + overseer, FromOrchestra, SpawnedSubsystem, SubsystemError, SubsystemResult, +}; +use polkadot_node_subsystem_util::{ + self, + database::Database, + runtime::{Config as RuntimeInfoConfig, RuntimeInfo}, +}; +use polkadot_overseer::{OverseerSignal, Priority, SubsystemSender, TimeoutExt}; +use polkadot_primitives::{CandidateIndex, Hash, ValidatorIndex, ValidatorSignature}; +use rand::SeedableRng; + +use sc_keystore::LocalKeystore; +use sp_consensus::SyncOracle; + +use futures::{channel::oneshot, prelude::*, StreamExt}; +pub use metrics::Metrics; +use polkadot_node_core_approval_voting::{ + approval_db::common::Config as DatabaseConfig, ApprovalVotingWorkProvider, +}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + sync::Arc, + time::Duration, +}; +use stream::{select_with_strategy, PollNext, SelectWithStrategy}; +pub mod metrics; + +#[cfg(test)] +mod tests; + +pub(crate) const LOG_TARGET: &str = "parachain::approval-voting-parallel"; +// Value rather arbitrarily: Should not be hit in practice, it exists to more easily diagnose dead +// lock issues for example. +const WAIT_FOR_SIGS_GATHER_TIMEOUT: Duration = Duration::from_millis(2000); + +/// The number of workers used for running the approval-distribution logic. +pub const APPROVAL_DISTRIBUTION_WORKER_COUNT: usize = 4; + +/// The default channel size for the workers, can be overridden by the user through +/// `overseer_channel_capacity_override` +pub const DEFAULT_WORKERS_CHANNEL_SIZE: usize = 64000 / APPROVAL_DISTRIBUTION_WORKER_COUNT; + +fn prio_right<'a>(_val: &'a mut ()) -> PollNext { + PollNext::Right +} + +/// The approval voting parallel subsystem. 
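+///
+/// Illustrative note on the fan-out (summarising the constants above, not part of the upstream
+/// docs): one approval-voting worker plus `APPROVAL_DISTRIBUTION_WORKER_COUNT` (4)
+/// approval-distribution workers are spawned. Unless
+/// `overseer_message_channel_capacity_override` is set, each worker is fed through a bounded
+/// channel of `DEFAULT_WORKERS_CHANNEL_SIZE` = 64_000 / 4 = 16_000 messages, plus an unbounded
+/// channel used for overseer signals and high-priority traffic.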
+pub struct ApprovalVotingParallelSubsystem { + /// `LocalKeystore` is needed for assignment keys, but not necessarily approval keys. + /// + /// We do a lot of VRF signing and need the keys to have low latency. + keystore: Arc, + db_config: DatabaseConfig, + slot_duration_millis: u64, + db: Arc, + sync_oracle: Box, + metrics: Metrics, + spawner: Arc, + clock: Arc, + overseer_message_channel_capacity_override: Option, +} + +impl ApprovalVotingParallelSubsystem { + /// Create a new approval voting subsystem with the given keystore, config, and database. + pub fn with_config( + config: Config, + db: Arc, + keystore: Arc, + sync_oracle: Box, + metrics: Metrics, + spawner: impl overseer::gen::Spawner + 'static + Clone, + overseer_message_channel_capacity_override: Option, + ) -> Self { + ApprovalVotingParallelSubsystem::with_config_and_clock( + config, + db, + keystore, + sync_oracle, + metrics, + Arc::new(SystemClock {}), + spawner, + overseer_message_channel_capacity_override, + ) + } + + /// Create a new approval voting subsystem with the given keystore, config, clock, and database. + pub fn with_config_and_clock( + config: Config, + db: Arc, + keystore: Arc, + sync_oracle: Box, + metrics: Metrics, + clock: Arc, + spawner: impl overseer::gen::Spawner + 'static, + overseer_message_channel_capacity_override: Option, + ) -> Self { + ApprovalVotingParallelSubsystem { + keystore, + slot_duration_millis: config.slot_duration_millis, + db, + db_config: DatabaseConfig { col_approval_data: config.col_approval_data }, + sync_oracle, + metrics, + spawner: Arc::new(spawner), + clock, + overseer_message_channel_capacity_override, + } + } + + /// The size of the channel used for the workers. + fn workers_channel_size(&self) -> usize { + self.overseer_message_channel_capacity_override + .unwrap_or(DEFAULT_WORKERS_CHANNEL_SIZE) + } +} + +#[overseer::subsystem(ApprovalVotingParallel, error = SubsystemError, prefix = self::overseer)] +impl ApprovalVotingParallelSubsystem { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = run::(ctx, self) + .map_err(|e| SubsystemError::with_origin("approval-voting-parallel", e)) + .boxed(); + + SpawnedSubsystem { name: "approval-voting-parallel-subsystem", future } + } +} + +// It starts worker for the approval voting subsystem and the `APPROVAL_DISTRIBUTION_WORKER_COUNT` +// workers for the approval distribution subsystem. +// +// It returns handles that can be used to send messages to the workers. +#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn start_workers( + ctx: &mut Context, + subsystem: ApprovalVotingParallelSubsystem, + metrics_watcher: &mut MetricsWatcher, +) -> SubsystemResult<(ToWorker, Vec>)> +where +{ + gum::info!(target: LOG_TARGET, "Starting approval distribution workers"); + + // Build approval voting handles. 
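+	// Each `build_worker_handles` call yields a `ToWorker` handle (a bounded sender for regular
+	// messages plus an unbounded one for signals and high-priority traffic) and a `WorkProvider`
+	// that the worker polls; `prio_right` makes the provider poll the unbounded side first, so
+	// signals overtake already-queued messages.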
+ let (to_approval_voting_worker, approval_voting_work_provider) = build_worker_handles( + "approval-voting-parallel-db".into(), + subsystem.workers_channel_size(), + metrics_watcher, + prio_right, + ); + let mut to_approval_distribution_workers = Vec::new(); + let slot_duration_millis = subsystem.slot_duration_millis; + + for i in 0..APPROVAL_DISTRIBUTION_WORKER_COUNT { + let mut network_sender = ctx.sender().clone(); + let mut runtime_api_sender = ctx.sender().clone(); + let mut approval_distribution_to_approval_voting = to_approval_voting_worker.clone(); + + let approval_distr_instance = + polkadot_approval_distribution::ApprovalDistribution::new_with_clock( + subsystem.metrics.approval_distribution_metrics(), + subsystem.slot_duration_millis, + subsystem.clock.clone(), + Arc::new(RealAssignmentCriteria {}), + ); + let task_name = format!("approval-voting-parallel-{}", i); + let (to_approval_distribution_worker, mut approval_distribution_work_provider) = + build_worker_handles( + task_name.clone(), + subsystem.workers_channel_size(), + metrics_watcher, + prio_right, + ); + + metrics_watcher.watch(task_name.clone(), to_approval_distribution_worker.meter()); + + subsystem.spawner.spawn_blocking( + task_name.leak(), + Some("approval-voting-parallel"), + Box::pin(async move { + let mut state = + polkadot_approval_distribution::State::with_config(slot_duration_millis); + let mut rng = rand::rngs::StdRng::from_entropy(); + let mut session_info_provider = RuntimeInfo::new_with_config(RuntimeInfoConfig { + keystore: None, + session_cache_lru_size: DISPUTE_WINDOW.get(), + }); + + loop { + let message = match approval_distribution_work_provider.next().await { + Some(message) => message, + None => { + gum::info!( + target: LOG_TARGET, + "Approval distribution stream finished, most likely shutting down", + ); + break; + }, + }; + if approval_distr_instance + .handle_from_orchestra( + message, + &mut approval_distribution_to_approval_voting, + &mut network_sender, + &mut runtime_api_sender, + &mut state, + &mut rng, + &mut session_info_provider, + ) + .await + { + gum::info!( + target: LOG_TARGET, + "Approval distribution worker {}, exiting because of shutdown", i + ); + }; + } + }), + ); + to_approval_distribution_workers.push(to_approval_distribution_worker); + } + + gum::info!(target: LOG_TARGET, "Starting approval voting workers"); + + let sender = ctx.sender().clone(); + let to_approval_distribution = ApprovalVotingToApprovalDistribution(sender.clone()); + polkadot_node_core_approval_voting::start_approval_worker( + approval_voting_work_provider, + sender.clone(), + to_approval_distribution, + polkadot_node_core_approval_voting::Config { + slot_duration_millis: subsystem.slot_duration_millis, + col_approval_data: subsystem.db_config.col_approval_data, + }, + subsystem.db.clone(), + subsystem.keystore.clone(), + subsystem.sync_oracle, + subsystem.metrics.approval_voting_metrics(), + subsystem.spawner.clone(), + "approval-voting-parallel-db", + "approval-voting-parallel", + subsystem.clock.clone(), + ) + .await?; + + Ok((to_approval_voting_worker, to_approval_distribution_workers)) +} + +// The main run function of the approval parallel voting subsystem. 
+#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn run( + mut ctx: Context, + subsystem: ApprovalVotingParallelSubsystem, +) -> SubsystemResult<()> { + let mut metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone()); + gum::info!( + target: LOG_TARGET, + "Starting workers" + ); + + let (to_approval_voting_worker, to_approval_distribution_workers) = + start_workers(&mut ctx, subsystem, &mut metrics_watcher).await?; + + gum::info!( + target: LOG_TARGET, + "Starting main subsystem loop" + ); + + run_main_loop(ctx, to_approval_voting_worker, to_approval_distribution_workers, metrics_watcher) + .await +} + +// Main loop of the subsystem, it shouldn't include any logic just dispatching of messages to +// the workers. +// +// It listens for messages from the overseer and dispatches them to the workers. +#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn run_main_loop( + mut ctx: Context, + mut to_approval_voting_worker: ToWorker, + mut to_approval_distribution_workers: Vec>, + metrics_watcher: MetricsWatcher, +) -> SubsystemResult<()> { + loop { + futures::select! { + next_msg = ctx.recv().fuse() => { + let next_msg = match next_msg { + Ok(msg) => msg, + Err(err) => { + gum::info!(target: LOG_TARGET, ?err, "Approval voting parallel subsystem received an error"); + return Err(err); + } + }; + + match next_msg { + FromOrchestra::Signal(msg) => { + if matches!(msg, OverseerSignal::ActiveLeaves(_)) { + metrics_watcher.collect_metrics(); + } + + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_signal(msg.clone()).await?; + } + + to_approval_voting_worker.send_signal(msg.clone()).await?; + if matches!(msg, OverseerSignal::Conclude) { + break; + } + }, + FromOrchestra::Communication { msg } => match msg { + // The message the approval voting subsystem would've handled. + ApprovalVotingParallelMessage::ApprovedAncestor(_, _,_) | + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate(_, _) => { + to_approval_voting_worker.send_message( + msg.try_into().expect( + "Message is one of ApprovedAncestor, GetApprovalSignaturesForCandidate + and that can be safely converted to ApprovalVotingMessage; qed" + ) + ).await; + }, + // Now the message the approval distribution subsystem would've handled and need to + // be forwarded to the workers. 
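+					// Routing summary for the arms below: `NewBlocks` and `ApprovalCheckingLagUpdate`
+					// are broadcast to every approval-distribution worker; `DistributeAssignment`,
+					// `DistributeApproval` and peer `NetworkBridgeUpdate` messages go only to the
+					// worker owning the originating validator index; all other network bridge
+					// events are broadcast to every worker.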
+ ApprovalVotingParallelMessage::NewBlocks(msg) => { + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_message( + ApprovalDistributionMessage::NewBlocks(msg.clone()), + ) + .await; + } + }, + ApprovalVotingParallelMessage::DistributeAssignment(assignment, claimed) => { + let worker = assigned_worker_for_validator(assignment.validator, &mut to_approval_distribution_workers); + worker + .send_message( + ApprovalDistributionMessage::DistributeAssignment(assignment, claimed) + ) + .await; + + }, + ApprovalVotingParallelMessage::DistributeApproval(vote) => { + let worker = assigned_worker_for_validator(vote.validator, &mut to_approval_distribution_workers); + worker + .send_message( + ApprovalDistributionMessage::DistributeApproval(vote) + ).await; + + }, + ApprovalVotingParallelMessage::NetworkBridgeUpdate(msg) => { + if let polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + peer_id, + msg, + ) = msg + { + let (all_msgs_from_same_validator, messages_split_by_validator) = validator_index_for_msg(msg); + + for (validator_index, msg) in all_msgs_from_same_validator.into_iter().chain(messages_split_by_validator.into_iter().flatten()) { + let worker = assigned_worker_for_validator(validator_index, &mut to_approval_distribution_workers); + + worker + .send_message( + ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + peer_id, msg, + ), + ), + ).await; + } + } else { + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_message_with_priority::( + ApprovalDistributionMessage::NetworkBridgeUpdate(msg.clone()), + ).await; + } + } + }, + ApprovalVotingParallelMessage::GetApprovalSignatures(indices, tx) => { + handle_get_approval_signatures(&mut ctx, &mut to_approval_distribution_workers, indices, tx).await; + }, + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag) => { + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_message( + ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag) + ).await; + } + }, + }, + }; + + }, + }; + } + Ok(()) +} + +// It sends a message to all approval workers to get the approval signatures for the requested +// candidates and then merges them all together and sends them back to the requester. +#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn handle_get_approval_signatures( + ctx: &mut Context, + to_approval_distribution_workers: &mut Vec>, + requested_candidates: HashSet<(Hash, CandidateIndex)>, + result_channel: oneshot::Sender< + HashMap, ValidatorSignature)>, + >, +) { + let mut sigs = HashMap::new(); + let mut signatures_channels = Vec::new(); + for worker in to_approval_distribution_workers.iter_mut() { + let (tx, rx) = oneshot::channel(); + worker.send_unbounded_message(ApprovalDistributionMessage::GetApprovalSignatures( + requested_candidates.clone(), + tx, + )); + signatures_channels.push(rx); + } + + let gather_signatures = async move { + let Some(results) = futures::future::join_all(signatures_channels) + .timeout(WAIT_FOR_SIGS_GATHER_TIMEOUT) + .await + else { + gum::warn!( + target: LOG_TARGET, + "Waiting for approval signatures timed out - dead lock?" 
+ ); + return; + }; + + for result in results { + let worker_sigs = match result { + Ok(sigs) => sigs, + Err(_) => { + gum::error!( + target: LOG_TARGET, + "Getting approval signatures failed, oneshot got closed" + ); + continue; + }, + }; + sigs.extend(worker_sigs); + } + + if let Err(_) = result_channel.send(sigs) { + gum::debug!( + target: LOG_TARGET, + "Sending back approval signatures failed, oneshot got closed" + ); + } + }; + + if let Err(err) = ctx.spawn("approval-voting-gather-signatures", Box::pin(gather_signatures)) { + gum::warn!(target: LOG_TARGET, "Failed to spawn gather signatures task: {:?}", err); + } +} + +// Returns the worker that should receive the message for the given validator. +fn assigned_worker_for_validator( + validator: ValidatorIndex, + to_approval_distribution_workers: &mut Vec>, +) -> &mut ToWorker { + let worker_index = validator.0 as usize % to_approval_distribution_workers.len(); + to_approval_distribution_workers + .get_mut(worker_index) + .expect("Worker index is obtained modulo len; qed") +} + +// Returns the validators that initially created this assignments/votes, the validator index +// is later used to decide which approval-distribution worker should receive the message. +// +// Because this is on the hot path and we don't want to be unnecessarily slow, it contains two logic +// paths. The ultra fast path where all messages have the same validator index and we don't do +// any cloning or allocation and the path where we need to split the messages into multiple +// messages, because they have different validator indices, where we do need to clone and allocate. +// In practice most of the message will fall on the ultra fast path. +fn validator_index_for_msg( + msg: polkadot_node_network_protocol::ApprovalDistributionMessage, +) -> ( + Option<(ValidatorIndex, polkadot_node_network_protocol::ApprovalDistributionMessage)>, + Option>, +) { + match msg { + polkadot_node_network_protocol::Versioned::V1(ref message) => match message { + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments(msgs) => + if let Ok(validator) = msgs.iter().map(|(msg, _)| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|(msg, claimed_candidates)| { + ( + msg.validator, + polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments( + vec![(msg.clone(), *claimed_candidates)] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals(msgs) => + if let Ok(validator) = msgs.iter().map(|msg| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|vote| { + ( + vote.validator, + polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals( + vec![vote.clone()] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + }, + polkadot_node_network_protocol::Versioned::V2(ref message) => match message { + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments(msgs) => + if let Ok(validator) = msgs.iter().map(|(msg, _)| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|(msg, claimed_candidates)| { + ( + msg.validator, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments( 
+ vec![(msg.clone(), *claimed_candidates)] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals(msgs) => + if let Ok(validator) = msgs.iter().map(|msg| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|vote| { + ( + vote.validator, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals( + vec![vote.clone()] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + }, + polkadot_node_network_protocol::Versioned::V3(ref message) => match message { + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(msgs) => + if let Ok(validator) = msgs.iter().map(|(msg, _)| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|(msg, claimed_candidates)| { + ( + msg.validator, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments( + vec![(msg.clone(), claimed_candidates.clone())] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(msgs) => + if let Ok(validator) = msgs.iter().map(|msg| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|vote| { + ( + vote.validator, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + vec![vote.clone()] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + }, + } +} + +/// A handler object that both type of workers use for receiving work. +/// +/// In practive this is just a wrapper over two channels Receiver, that is injected into +/// approval-voting worker and approval-distribution workers. +type WorkProvider = WorkProviderImpl< + SelectWithStrategy< + MeteredReceiver>, + UnboundedMeteredReceiver>, + Clos, + State, + >, +>; + +pub struct WorkProviderImpl(T); + +impl Stream for WorkProviderImpl +where + T: Stream> + Unpin + Send, +{ + type Item = FromOrchestra; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.0.poll_next_unpin(cx) + } +} + +#[async_trait::async_trait] +impl ApprovalVotingWorkProvider for WorkProviderImpl +where + T: Stream> + Unpin + Send, +{ + async fn recv(&mut self) -> SubsystemResult> { + self.0.next().await.ok_or(SubsystemError::Context( + "ApprovalVotingWorkProviderImpl: Channel closed".to_string(), + )) + } +} + +impl WorkProvider +where + M: Send + Sync + 'static, + Clos: FnMut(&mut State) -> PollNext, + State: Default, +{ + // Constructs a work providers from the channels handles. + fn from_rx_worker(rx: RxWorker, prio: Clos) -> Self { + let prioritised = select_with_strategy(rx.0, rx.1, prio); + WorkProviderImpl(prioritised) + } +} + +/// Just a wrapper for implementing `overseer::SubsystemSender` and +/// `overseer::SubsystemSender`. +/// +/// The instance of this struct can be injected into the workers, so they can talk +/// directly with each other without intermediating in this subsystem loop. 
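+///
+/// The first (bounded) channel carries regular messages, while the second (unbounded) one
+/// carries overseer signals, unbounded sends and high-priority sends, so they cannot get stuck
+/// behind a full bounded queue.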
+pub struct ToWorker( + MeteredSender>, + UnboundedMeteredSender>, +); + +impl Clone for ToWorker { + fn clone(&self) -> Self { + Self(self.0.clone(), self.1.clone()) + } +} + +impl ToWorker { + async fn send_signal(&mut self, signal: OverseerSignal) -> Result<(), SubsystemError> { + self.1 + .unbounded_send(FromOrchestra::Signal(signal)) + .map_err(|err| SubsystemError::QueueError(err.into_send_error())) + } + + fn meter(&self) -> Meters { + Meters::new(self.0.meter(), self.1.meter()) + } +} + +impl overseer::SubsystemSender for ToWorker { + fn send_message<'life0, 'async_trait>( + &'life0 mut self, + msg: T, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + 'life0: 'async_trait, + Self: 'async_trait, + { + async { + if let Err(err) = + self.0.send(polkadot_overseer::FromOrchestra::Communication { msg }).await + { + gum::error!( + target: LOG_TARGET, + "Failed to send message to approval voting worker: {:?}, subsystem is probably shutting down.", + err + ); + } + } + .boxed() + } + + fn try_send_message(&mut self, msg: T) -> Result<(), metered::TrySendError> { + self.0 + .try_send(polkadot_overseer::FromOrchestra::Communication { msg }) + .map_err(|result| { + let is_full = result.is_full(); + let msg = match result.into_inner() { + polkadot_overseer::FromOrchestra::Signal(_) => + panic!("Cannot happen variant is never built"), + polkadot_overseer::FromOrchestra::Communication { msg } => msg, + }; + if is_full { + metered::TrySendError::Full(msg) + } else { + metered::TrySendError::Closed(msg) + } + }) + } + + fn send_messages<'life0, 'async_trait, I>( + &'life0 mut self, + msgs: I, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + I: IntoIterator + Send, + I::IntoIter: Send, + I: 'async_trait, + 'life0: 'async_trait, + Self: 'async_trait, + { + async { + for msg in msgs { + self.send_message(msg).await; + } + } + .boxed() + } + + fn send_unbounded_message(&mut self, msg: T) { + if let Err(err) = + self.1.unbounded_send(polkadot_overseer::FromOrchestra::Communication { msg }) + { + gum::error!( + target: LOG_TARGET, + "Failed to send unbounded message to approval voting worker: {:?}, subsystem is probably shutting down.", + err + ); + } + } + + fn send_message_with_priority<'life0, 'async_trait, P>( + &'life0 mut self, + msg: T, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + P: 'async_trait + Priority, + 'life0: 'async_trait, + Self: 'async_trait, + { + match P::priority() { + polkadot_overseer::PriorityLevel::Normal => self.send_message(msg), + polkadot_overseer::PriorityLevel::High => + async { self.send_unbounded_message(msg) }.boxed(), + } + } + + fn try_send_message_with_priority( + &mut self, + msg: T, + ) -> Result<(), metered::TrySendError> { + match P::priority() { + polkadot_overseer::PriorityLevel::Normal => self.try_send_message(msg), + polkadot_overseer::PriorityLevel::High => Ok(self.send_unbounded_message(msg)), + } + } +} + +/// Handles that are used by an worker to receive work. +pub struct RxWorker( + MeteredReceiver>, + UnboundedMeteredReceiver>, +); + +// Build all the necessary channels for sending messages to an worker +// and for the worker to receive them. 
+fn build_channels( + channel_name: String, + channel_size: usize, + metrics_watcher: &mut MetricsWatcher, +) -> (ToWorker, RxWorker) { + let (tx_work, rx_work) = channel::>(channel_size); + let (tx_work_unbounded, rx_work_unbounded) = unbounded::>(); + let to_worker = ToWorker(tx_work, tx_work_unbounded); + + metrics_watcher.watch(channel_name, to_worker.meter()); + + (to_worker, RxWorker(rx_work, rx_work_unbounded)) +} + +/// Build the worker handles used for interacting with the workers. +/// +/// `ToWorker` is used for sending messages to the workers. +/// `WorkProvider` is used by the workers for receiving the messages. +fn build_worker_handles( + channel_name: String, + channel_size: usize, + metrics_watcher: &mut MetricsWatcher, + prio_right: Clos, +) -> (ToWorker, WorkProvider) +where + M: Send + Sync + 'static, + Clos: FnMut(&mut State) -> PollNext, + State: Default, +{ + let (to_worker, rx_worker) = build_channels(channel_name, channel_size, metrics_watcher); + (to_worker, WorkProviderImpl::from_rx_worker(rx_worker, prio_right)) +} + +/// Just a wrapper for implementing `overseer::SubsystemSender`, so +/// that we can inject into the approval voting subsystem. +#[derive(Clone)] +pub struct ApprovalVotingToApprovalDistribution>( + S, +); + +impl> + overseer::SubsystemSender + for ApprovalVotingToApprovalDistribution +{ + #[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)] + fn send_message<'life0, 'async_trait>( + &'life0 mut self, + msg: ApprovalDistributionMessage, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + 'life0: 'async_trait, + Self: 'async_trait, + { + self.0.send_message(msg.into()) + } + + fn try_send_message( + &mut self, + msg: ApprovalDistributionMessage, + ) -> Result<(), metered::TrySendError> { + self.0.try_send_message(msg.into()).map_err(|err| match err { + // Safe to unwrap because it was built from the same type. + metered::TrySendError::Closed(msg) => + metered::TrySendError::Closed(msg.try_into().unwrap()), + metered::TrySendError::Full(msg) => + metered::TrySendError::Full(msg.try_into().unwrap()), + }) + } + + #[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)] + fn send_messages<'life0, 'async_trait, I>( + &'life0 mut self, + msgs: I, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + I: IntoIterator + Send, + I::IntoIter: Send, + I: 'async_trait, + 'life0: 'async_trait, + Self: 'async_trait, + { + self.0.send_messages(msgs.into_iter().map(|msg| msg.into())) + } + + fn send_unbounded_message(&mut self, msg: ApprovalDistributionMessage) { + self.0.send_unbounded_message(msg.into()) + } + + fn send_message_with_priority<'life0, 'async_trait, P>( + &'life0 mut self, + msg: ApprovalDistributionMessage, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + P: 'async_trait + Priority, + 'life0: 'async_trait, + Self: 'async_trait, + { + self.0.send_message_with_priority::

<P>(msg.into()) + } + + fn try_send_message_with_priority<P: Priority>( + &mut self, + msg: ApprovalDistributionMessage, + ) -> Result<(), metered::TrySendError<ApprovalDistributionMessage>> { + self.0.try_send_message_with_priority::<P>

(msg.into()).map_err(|err| match err { + // Safe to unwrap because it was built from the same type. + metered::TrySendError::Closed(msg) => + metered::TrySendError::Closed(msg.try_into().unwrap()), + metered::TrySendError::Full(msg) => + metered::TrySendError::Full(msg.try_into().unwrap()), + }) + } +} diff --git a/polkadot/node/core/approval-voting-parallel/src/metrics.rs b/polkadot/node/core/approval-voting-parallel/src/metrics.rs new file mode 100644 index 000000000000..1b4ab4bd9b88 --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/src/metrics.rs @@ -0,0 +1,236 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Metrics for Approval Voting Parallel Subsystem. + +use std::collections::HashMap; + +use polkadot_node_metrics::{metered::Meter, metrics}; +use polkadot_overseer::prometheus; + +#[derive(Default, Clone)] +pub struct Metrics(Option); + +/// Approval Voting parallel metrics. +#[derive(Clone)] +pub struct MetricsInner { + // The inner metrics of the approval distribution workers. + approval_distribution: polkadot_approval_distribution::metrics::Metrics, + // The inner metrics of the approval voting workers. + approval_voting: polkadot_node_core_approval_voting::Metrics, + + // Time of flight metrics for bounded channels. + to_worker_bounded_tof: prometheus::HistogramVec, + // Number of elements sent to the worker's bounded queue. + to_worker_bounded_sent: prometheus::GaugeVec, + // Number of elements received by the worker's bounded queue. + to_worker_bounded_received: prometheus::GaugeVec, + // Number of times senders blocked while sending messages to the worker. + to_worker_bounded_blocked: prometheus::GaugeVec, + // Time of flight metrics for unbounded channels. + to_worker_unbounded_tof: prometheus::HistogramVec, + // Number of elements sent to the worker's unbounded queue. + to_worker_unbounded_sent: prometheus::GaugeVec, + // Number of elements received by the worker's unbounded queue. + to_worker_unbounded_received: prometheus::GaugeVec, +} + +impl Metrics { + /// Get the approval distribution metrics. + pub fn approval_distribution_metrics( + &self, + ) -> polkadot_approval_distribution::metrics::Metrics { + self.0 + .as_ref() + .map(|metrics_inner| metrics_inner.approval_distribution.clone()) + .unwrap_or_default() + } + + /// Get the approval voting metrics. + pub fn approval_voting_metrics(&self) -> polkadot_node_core_approval_voting::Metrics { + self.0 + .as_ref() + .map(|metrics_inner| metrics_inner.approval_voting.clone()) + .unwrap_or_default() + } +} + +impl metrics::Metrics for Metrics { + /// Try to register the metrics. 
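+    ///
+    /// Every per-worker gauge and histogram registered here is labelled by `worker_name`,
+    /// matching the channel names handed to `MetricsWatcher::watch`.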
+ fn try_register( + registry: &prometheus::Registry, + ) -> std::result::Result { + Ok(Metrics(Some(MetricsInner { + approval_distribution: polkadot_approval_distribution::metrics::Metrics::try_register( + registry, + )?, + approval_voting: polkadot_node_core_approval_voting::Metrics::try_register(registry)?, + to_worker_bounded_tof: prometheus::register( + prometheus::HistogramVec::new( + prometheus::HistogramOpts::new( + "polkadot_approval_voting_parallel_worker_bounded_tof", + "Duration spent in a particular approval voting worker channel from entrance to removal", + ) + .buckets(vec![ + 0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768, + 4.9152, 6.5536, + ]), + &["worker_name"], + )?, + registry, + )?, + to_worker_bounded_sent: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_bounded_sent", + "Number of elements sent to approval voting workers' bounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_bounded_received: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_bounded_received", + "Number of elements received by approval voting workers' bounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_bounded_blocked: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_bounded_blocked", + "Number of times approval voting workers blocked while sending messages to a subsystem", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_unbounded_tof: prometheus::register( + prometheus::HistogramVec::new( + prometheus::HistogramOpts::new( + "polkadot_approval_voting_parallel_worker_unbounded_tof", + "Duration spent in a particular approval voting worker channel from entrance to removal", + ) + .buckets(vec![ + 0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768, + 4.9152, 6.5536, + ]), + &["worker_name"], + )?, + registry, + )?, + to_worker_unbounded_sent: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_unbounded_sent", + "Number of elements sent to approval voting workers' unbounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_unbounded_received: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_unbounded_received", + "Number of elements received by approval voting workers' unbounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + }))) + } +} + +/// The meters to watch. +#[derive(Clone)] +pub struct Meters { + bounded: Meter, + unbounded: Meter, +} + +impl Meters { + pub fn new(bounded: &Meter, unbounded: &Meter) -> Self { + Self { bounded: bounded.clone(), unbounded: unbounded.clone() } + } +} + +/// A metrics watcher that watches the meters and updates the metrics. +pub struct MetricsWatcher { + to_watch: HashMap, + metrics: Metrics, +} + +impl MetricsWatcher { + /// Create a new metrics watcher. + pub fn new(metrics: Metrics) -> Self { + Self { to_watch: HashMap::new(), metrics } + } + + /// Watch the meters of a worker with this name. + pub fn watch(&mut self, worker_name: String, meters: Meters) { + self.to_watch.insert(worker_name, meters); + } + + /// Collect all the metrics. 
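+    ///
+    /// Expected usage, as a sketch mirroring how the main subsystem loop drives it (`metrics`
+    /// and `meters` here stand for a `Metrics` handle and a `ToWorker::meter()` readout):
+    /// ```ignore
+    /// let mut watcher = MetricsWatcher::new(metrics);
+    /// // `build_channels` registers every worker channel with the watcher ...
+    /// watcher.watch("approval-voting-parallel-0".to_string(), meters);
+    /// // ... and the readouts are flushed on every `ActiveLeaves` signal.
+    /// watcher.collect_metrics();
+    /// ```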
+ pub fn collect_metrics(&self) { + for (name, meter) in &self.to_watch { + let bounded_readouts = meter.bounded.read(); + let unbounded_readouts = meter.unbounded.read(); + if let Some(metrics) = self.metrics.0.as_ref() { + metrics + .to_worker_bounded_sent + .with_label_values(&[name]) + .set(bounded_readouts.sent as u64); + + metrics + .to_worker_bounded_received + .with_label_values(&[name]) + .set(bounded_readouts.received as u64); + + metrics + .to_worker_bounded_blocked + .with_label_values(&[name]) + .set(bounded_readouts.blocked as u64); + + metrics + .to_worker_unbounded_sent + .with_label_values(&[name]) + .set(unbounded_readouts.sent as u64); + + metrics + .to_worker_unbounded_received + .with_label_values(&[name]) + .set(unbounded_readouts.received as u64); + + let hist_bounded = metrics.to_worker_bounded_tof.with_label_values(&[name]); + for tof in bounded_readouts.tof { + hist_bounded.observe(tof.as_f64()); + } + + let hist_unbounded = metrics.to_worker_unbounded_tof.with_label_values(&[name]); + for tof in unbounded_readouts.tof { + hist_unbounded.observe(tof.as_f64()); + } + } + } + } +} diff --git a/polkadot/node/core/approval-voting-parallel/src/tests.rs b/polkadot/node/core/approval-voting-parallel/src/tests.rs new file mode 100644 index 000000000000..215a707147fc --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/src/tests.rs @@ -0,0 +1,1178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The tests for Approval Voting Parallel Subsystem. 
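+//!
+//! The harness below drives `run_main_loop` with mocked worker channels and exercises message
+//! routing, approval-signature gathering, signal ordering and error propagation when a worker
+//! end of a channel is dropped.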
+ +use std::{ + collections::{HashMap, HashSet}, + future::Future, + sync::Arc, + time::Duration, +}; + +use crate::{ + build_worker_handles, metrics::MetricsWatcher, prio_right, run_main_loop, start_workers, + validator_index_for_msg, ApprovalVotingParallelSubsystem, Metrics, WorkProvider, +}; +use assert_matches::assert_matches; +use futures::{channel::oneshot, future, stream::PollNext, StreamExt}; +use itertools::Itertools; +use polkadot_node_core_approval_voting::{ApprovalVotingWorkProvider, Config}; +use polkadot_node_network_protocol::{peer_set::ValidationVersion, ObservedRole, PeerId, View}; +use polkadot_node_primitives::approval::{ + time::SystemClock, + v1::{ + AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, IndirectSignedApprovalVote, + RELAY_VRF_MODULO_CONTEXT, + }, + v2::{ + AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, IndirectAssignmentCertV2, + IndirectSignedApprovalVoteV2, + }, +}; +use polkadot_node_subsystem::{ + messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage}, + FromOrchestra, +}; +use polkadot_node_subsystem_test_helpers::{mock::new_leaf, TestSubsystemContext}; +use polkadot_overseer::{ActiveLeavesUpdate, OverseerSignal, SpawnGlue, TimeoutExt}; +use polkadot_primitives::{CandidateHash, CoreIndex, Hash, ValidatorIndex}; +use sc_keystore::{Keystore, LocalKeystore}; +use sp_consensus::SyncOracle; +use sp_consensus_babe::{VrfPreOutput, VrfProof, VrfSignature}; +use sp_core::{testing::TaskExecutor, H256}; +use sp_keyring::Sr25519Keyring; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; + +const SLOT_DURATION_MILLIS: u64 = 6000; + +pub mod test_constants { + pub(crate) const DATA_COL: u32 = 0; + pub(crate) const NUM_COLUMNS: u32 = 1; +} + +fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> IndirectAssignmentCert { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"WhenParachains?"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let preout = inout.to_preout(); + + IndirectAssignmentCert { + block_hash, + validator, + cert: AssignmentCert { + kind: AssignmentCertKind::RelayVRFModulo { sample: 1 }, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, + }, + } +} + +fn fake_assignment_cert_v2( + block_hash: Hash, + validator: ValidatorIndex, + core_bitfield: CoreBitfield, +) -> IndirectAssignmentCertV2 { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"WhenParachains?"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let preout = inout.to_preout(); + + IndirectAssignmentCertV2 { + block_hash, + validator, + cert: AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield }, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, + }, + } +} + +/// Creates a meaningless signature +pub fn dummy_signature() -> polkadot_primitives::ValidatorSignature { + sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]) +} + +fn build_subsystem( + sync_oracle: Box, +) -> ( + ApprovalVotingParallelSubsystem, + TestSubsystemContext>, + VirtualOverseer, +) { + sp_tracing::init_for_tests(); + + let pool = sp_core::testing::TaskExecutor::new(); + let (context, virtual_overseer) = 
polkadot_node_subsystem_test_helpers::make_subsystem_context::< + ApprovalVotingParallelMessage, + _, + >(pool.clone()); + + let keystore = LocalKeystore::in_memory(); + let _ = keystore.sr25519_generate_new( + polkadot_primitives::PARACHAIN_KEY_TYPE_ID, + Some(&Sr25519Keyring::Alice.to_seed()), + ); + + let clock = Arc::new(SystemClock {}); + let db = kvdb_memorydb::create(test_constants::NUM_COLUMNS); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + + ( + ApprovalVotingParallelSubsystem::with_config_and_clock( + Config { + col_approval_data: test_constants::DATA_COL, + slot_duration_millis: SLOT_DURATION_MILLIS, + }, + Arc::new(db), + Arc::new(keystore), + sync_oracle, + Metrics::default(), + clock.clone(), + SpawnGlue(pool), + None, + ), + context, + virtual_overseer, + ) +} + +#[derive(Clone)] +struct TestSyncOracle {} + +impl SyncOracle for TestSyncOracle { + fn is_major_syncing(&self) -> bool { + false + } + + fn is_offline(&self) -> bool { + unimplemented!("not used in network bridge") + } +} + +fn test_harness( + num_approval_distro_workers: usize, + prio_right: Clos, + subsystem_gracefully_exits: bool, + test_fn: impl FnOnce( + VirtualOverseer, + WorkProvider, + Vec>, + ) -> T, +) where + T: Future, + Clos: Clone + FnMut(&mut State) -> PollNext, + State: Default, +{ + let (subsystem, context, virtual_overseer) = build_subsystem(Box::new(TestSyncOracle {})); + let mut metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone()); + let channel_size = 5; + + let (to_approval_voting_worker, approval_voting_work_provider) = + build_worker_handles::( + "to_approval_voting_worker".into(), + channel_size, + &mut metrics_watcher, + prio_right.clone(), + ); + + let approval_distribution_channels = { 0..num_approval_distro_workers } + .into_iter() + .map(|worker_index| { + build_worker_handles::( + format!("to_approval_distro/{}", worker_index), + channel_size, + &mut metrics_watcher, + prio_right.clone(), + ) + }) + .collect_vec(); + + let to_approval_distribution_workers = + approval_distribution_channels.iter().map(|(tx, _)| tx.clone()).collect_vec(); + let approval_distribution_work_providers = + approval_distribution_channels.into_iter().map(|(_, rx)| rx).collect_vec(); + + let subsystem = async move { + let result = run_main_loop( + context, + to_approval_voting_worker, + to_approval_distribution_workers, + metrics_watcher, + ) + .await; + + if subsystem_gracefully_exits && result.is_err() { + result + } else { + Ok(()) + } + }; + + let test_fut = test_fn( + virtual_overseer, + approval_voting_work_provider, + approval_distribution_work_providers, + ); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + + futures::executor::block_on(future::join( + async move { + let _overseer = test_fut.await; + }, + subsystem, + )) + .1 + .unwrap(); +} + +const TIMEOUT: Duration = Duration::from_millis(2000); + +async fn overseer_signal(overseer: &mut VirtualOverseer, signal: OverseerSignal) { + overseer + .send(FromOrchestra::Signal(signal)) + .timeout(TIMEOUT) + .await + .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); +} + +async fn overseer_message(overseer: &mut VirtualOverseer, msg: ApprovalVotingParallelMessage) { + overseer + .send(FromOrchestra::Communication { msg }) + .timeout(TIMEOUT) + .await + .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); +} + +async fn run_start_workers() { + let (subsystem, mut context, _) = build_subsystem(Box::new(TestSyncOracle {})); + let mut 
metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone()); + let _workers = start_workers(&mut context, subsystem, &mut metrics_watcher).await.unwrap(); +} + +// Test starting the workers succeeds. +#[test] +fn start_workers_succeeds() { + futures::executor::block_on(run_start_workers()); +} + +// Test main loop forwards messages to the correct worker for all type of messages. +#[test] +fn test_main_loop_forwards_correctly() { + let num_approval_distro_workers = 4; + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + // 1. Check Signals are correctly forwarded to the workers. + let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + Hash::random(), + 1, + ))); + overseer_signal(&mut overseer, signal.clone()).await; + let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap(); + assert_matches!(approval_voting_receives, FromOrchestra::Signal(_)); + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + let approval_distribution_receives = + rx_approval_distribution_worker.next().await.unwrap(); + assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_)); + } + + let (test_tx, _rx) = oneshot::channel(); + let test_hash = Hash::random(); + let test_block_nr = 2; + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::ApprovedAncestor(test_hash, test_block_nr, test_tx), + ) + .await; + assert_matches!( + approval_voting_work_provider.recv().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalVotingMessage::ApprovedAncestor(hash, block_nr, _) + } => { + assert_eq!(hash, test_hash); + assert_eq!(block_nr, test_block_nr); + } + ); + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + + // 2. Check GetApprovalSignaturesForCandidate is correctly forwarded to the workers. + let (test_tx, _rx) = oneshot::channel(); + let test_hash = CandidateHash(Hash::random()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate( + test_hash, test_tx, + ), + ) + .await; + + assert_matches!( + approval_voting_work_provider.recv().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalVotingMessage::GetApprovalSignaturesForCandidate(hash, _) + } => { + assert_eq!(hash, test_hash); + } + ); + + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + + // 3. Check NewBlocks is correctly forwarded to the workers. + overseer_message(&mut overseer, ApprovalVotingParallelMessage::NewBlocks(vec![])).await; + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NewBlocks(blocks) + } => { + assert!(blocks.is_empty()); + } + ); + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 4. Check DistributeAssignment is correctly forwarded to the workers. 
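+            // With 4 approval-distribution workers the expected recipient is
+            // `validator_index % 4`: ValidatorIndex(17) maps to worker 1 here and
+            // ValidatorIndex(26) maps to worker 2 in the DistributeApproval check below, while
+            // every other worker must stay silent.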
+ let validator_index = ValidatorIndex(17); + let assignment = + fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeAssignment(cert, bitfield) + } => { + assert_eq!(cert, assignment); + assert_eq!(bitfield, 1.into()); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 5. Check DistributeApproval is correctly forwarded to the workers. + let validator_index = ValidatorIndex(26); + let expected_vote = IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }; + + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeApproval(expected_vote.clone()), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeApproval(vote) + } => { + assert_eq!(vote, expected_vote); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + + // 6. Check NetworkBridgeUpdate::PeerMessage is correctly forwarded just to one of the + // workers. 
+ let approvals = vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 2.into(), + validator: validator_index, + signature: dummy_signature(), + }, + ]; + let expected_msg = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + PeerId::random(), + expected_msg.clone(), + ), + ), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + _, + msg, + ), + ) + } => { + assert_eq!(msg, expected_msg); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 7. Check NetworkBridgeUpdate::PeerConnected is correctly forwarded to all workers. + let expected_peer_id = PeerId::random(); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerConnected( + expected_peer_id, + ObservedRole::Authority, + ValidationVersion::V3.into(), + None, + ), + ), + ) + .await; + + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerConnected( + peer_id, + role, + version, + authority_id, + ), + ) + } => { + assert_eq!(peer_id, expected_peer_id); + assert_eq!(role, ObservedRole::Authority); + assert_eq!(version, ValidationVersion::V3.into()); + assert_eq!(authority_id, None); + } + ); + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 8. Check ApprovalCheckingLagUpdate is correctly forwarded to all workers. + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(7), + ) + .await; + + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::ApprovalCheckingLagUpdate( + lag + ) + } => { + assert_eq!(lag, 7); + } + ); + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + + overseer + }, + ); +} + +/// Test GetApprovalSignatures correctly gatheres the signatures from all workers. 
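+///
+/// Each of the four mocked workers replies with a disjoint set of validator signatures, and
+/// the subsystem must merge them into a single map before answering on the result oneshot.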
+#[test] +fn test_handle_get_approval_signatures() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let (tx, rx) = oneshot::channel(); + let first_block = Hash::random(); + let second_block = Hash::random(); + let expected_candidates: HashSet<_> = + vec![(first_block, 2), (second_block, 3)].into_iter().collect(); + + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::GetApprovalSignatures( + expected_candidates.clone(), + tx, + ), + ) + .await; + + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + let mut all_votes = HashMap::new(); + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::GetApprovalSignatures( + candidates, tx + ) + } => { + assert_eq!(candidates, expected_candidates); + let to_send: HashMap<_, _> = {0..10}.into_iter().map(|validator| { + let validator_index = ValidatorIndex(validator as u32 * num_approval_distro_workers as u32 + index as u32); + (validator_index, (first_block, vec![2, 4], dummy_signature())) + }).collect(); + tx.send(to_send.clone()).unwrap(); + all_votes.extend(to_send.clone()); + + } + ); + } + + let received_votes = rx.await.unwrap(); + assert_eq!(received_votes, all_votes); + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + + overseer + }, + ) +} + +/// Test subsystem exits with error when approval_voting_work_provider exits. +#[test] +fn test_subsystem_exits_with_error_if_approval_voting_worker_errors() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + false, + |overseer, approval_voting_work_provider, _rx_approval_distribution_workers| async move { + // Drop the approval_voting_work_provider to simulate an error. + std::mem::drop(approval_voting_work_provider); + + overseer + }, + ) +} + +/// Test subsystem exits with error when approval_distribution_workers exits. +#[test] +fn test_subsystem_exits_with_error_if_approval_distribution_worker_errors() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + false, + |overseer, _approval_voting_work_provider, rx_approval_distribution_workers| async move { + // Drop the approval_distribution_workers to simulate an error. + std::mem::drop(rx_approval_distribution_workers.into_iter().next().unwrap()); + overseer + }, + ) +} + +/// Test signals sent before messages are processed in order. 
+#[test] +fn test_signal_before_message_keeps_receive_order() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + Hash::random(), + 1, + ))); + overseer_signal(&mut overseer, signal.clone()).await; + + let validator_index = ValidatorIndex(17); + let assignment = + fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()), + ) + .await; + + let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap(); + assert_matches!(approval_voting_receives, FromOrchestra::Signal(_)); + let rx_approval_distribution_worker = rx_approval_distribution_workers + .get_mut(validator_index.0 as usize % num_approval_distro_workers) + .unwrap(); + let approval_distribution_receives = + rx_approval_distribution_worker.next().await.unwrap(); + assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_)); + assert_matches!( + rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeAssignment(_, _) + } + ); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + overseer + }, + ) +} + +/// Test signals sent after messages are processed with the highest priority. +#[test] +fn test_signal_is_prioritized_when_unread_messages_in_the_queue() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let validator_index = ValidatorIndex(17); + let assignment = + fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()), + ) + .await; + + let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + Hash::random(), + 1, + ))); + overseer_signal(&mut overseer, signal.clone()).await; + + let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap(); + assert_matches!(approval_voting_receives, FromOrchestra::Signal(_)); + let rx_approval_distribution_worker = rx_approval_distribution_workers + .get_mut(validator_index.0 as usize % num_approval_distro_workers) + .unwrap(); + let approval_distribution_receives = + rx_approval_distribution_worker.next().await.unwrap(); + assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_)); + assert_matches!( + rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeAssignment(_, _) + } + ); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + overseer + }, + ) +} + +/// Test peer view updates have higher priority than normal messages. 
+#[test] +fn test_peer_view_is_prioritized_when_unread_messages_in_the_queue() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let validator_index = ValidatorIndex(17); + let approvals = vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 2.into(), + validator: validator_index, + signature: dummy_signature(), + }, + ]; + let expected_msg = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + PeerId::random(), + expected_msg.clone(), + ), + ), + ) + .await; + + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerViewChange( + PeerId::random(), + View::default(), + ), + ), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerViewChange( + _, + _, + ), + ) + } => { + } + ); + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + _, + msg, + ), + ) + } => { + assert_eq!(msg, expected_msg); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + overseer + }, + ) +} + +// Test validator_index_for_msg with empty messages. 
+#[test] +fn test_validator_index_with_empty_message() { + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); +} + +// Test validator_index_for_msg when all the messages are originating from the same validator. +#[test] +fn test_validator_index_with_all_messages_from_the_same_validator() { + let validator_index = ValidatorIndex(3); + let v1_assignment = polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments(vec![ + (fake_assignment_cert(H256::random(), validator_index), 1), + (fake_assignment_cert(H256::random(), validator_index), 3), + ]), + ); + let result = validator_index_for_msg(v1_assignment.clone()); + + assert_eq!(result, (Some((validator_index, v1_assignment)), None)); + + let v1_approval = polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals(vec![ + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + signature: dummy_signature(), + }, + ]), + ); + let result = validator_index_for_msg(v1_approval.clone()); + + assert_eq!(result, (Some((validator_index, v1_approval)), None)); + + let validator_index = ValidatorIndex(3); + let v2_assignment = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments(vec![ + (fake_assignment_cert(H256::random(), validator_index), 1), + (fake_assignment_cert(H256::random(), validator_index), 3), + ]), + ); + let result = validator_index_for_msg(v2_assignment.clone()); + + assert_eq!(result, (Some((validator_index, v2_assignment)), None)); + + let v2_approval = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals(vec![ + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + 
signature: dummy_signature(), + }, + ]), + ); + let result = validator_index_for_msg(v2_approval.clone()); + + assert_eq!(result, (Some((validator_index, v2_approval)), None)); + + let validator_index = ValidatorIndex(3); + let v3_assignment = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(vec![ + ( + fake_assignment_cert_v2(H256::random(), validator_index, CoreIndex(1).into()), + 1.into(), + ), + ( + fake_assignment_cert_v2(H256::random(), validator_index, CoreIndex(3).into()), + 3.into(), + ), + ]), + ); + let result = validator_index_for_msg(v3_assignment.clone()); + + assert_eq!(result, (Some((validator_index, v3_assignment)), None)); + + let v3_approval = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + ]), + ); + let result = validator_index_for_msg(v3_approval.clone()); + + assert_eq!(result, (Some((validator_index, v3_approval)), None)); +} + +// Test validator_index_for_msg when all the messages are originating from different validators, +// so the function should split them by validator index, so we can forward them separately to the +// worker they are assigned to. +#[test] +fn test_validator_index_with_messages_from_different_validators() { + let first_validator_index = ValidatorIndex(3); + let second_validator_index = ValidatorIndex(4); + let assignments = vec![ + (fake_assignment_cert(H256::random(), first_validator_index), 1), + (fake_assignment_cert(H256::random(), second_validator_index), 3), + ]; + let v1_assignment = polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments( + assignments.clone(), + ), + ); + let result = validator_index_for_msg(v1_assignment.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), assignments.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, assignments[index].0.validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments( + assignments.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let v2_assignment = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments( + assignments.clone(), + ), + ); + let result = validator_index_for_msg(v2_assignment.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), assignments.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, assignments[index].0.validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments( + assignments.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let 
first_validator_index = ValidatorIndex(3); + let second_validator_index = ValidatorIndex(4); + let v2_assignments = vec![ + ( + fake_assignment_cert_v2(H256::random(), first_validator_index, CoreIndex(1).into()), + 1.into(), + ), + ( + fake_assignment_cert_v2(H256::random(), second_validator_index, CoreIndex(3).into()), + 3.into(), + ), + ]; + + let approvals = vec![ + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: first_validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 2, + validator: second_validator_index, + signature: dummy_signature(), + }, + ]; + let v2_approvals = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + let result = validator_index_for_msg(v2_approvals.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), approvals.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, approvals[index].validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals( + approvals.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let v3_assignment = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments( + v2_assignments.clone(), + ), + ); + let result = validator_index_for_msg(v3_assignment.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), v2_assignments.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, v2_assignments[index].0.validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments( + v2_assignments.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let approvals = vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: first_validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 2.into(), + validator: second_validator_index, + signature: dummy_signature(), + }, + ]; + let v3_approvals = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + let result = validator_index_for_msg(v3_approvals.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), approvals.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, approvals[index].validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.get(index).into_iter().cloned().collect(), + ), + ) + ); + } +} diff --git a/polkadot/node/core/approval-voting/Cargo.toml 
b/polkadot/node/core/approval-voting/Cargo.toml index e678118440f5..2c3db866566c 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -29,7 +29,6 @@ polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } sc-keystore = { workspace = true } sp-consensus = { workspace = true } diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 0b03f1127ee8..e202d1ee229d 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -53,6 +53,7 @@ fn main() -> Result<(), String> { stop_when_approved: false, workdir_prefix: "/tmp".to_string(), num_no_shows_per_candidate: 0, + approval_voting_parallel_enabled: true, }; println!("Benchmarking..."); @@ -82,8 +83,9 @@ fn main() -> Result<(), String> { ("Sent to peers", 63995.2200, 0.01), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("approval-distribution", 12.2736, 0.1), - ("approval-voting", 2.7174, 0.1), + ("approval-distribution", 0.1, 0.1), + ("approval-voting", 0.1, 0.1), + ("approval-voting-parallel", 18.0758, 0.1), ])); if messages.is_empty() { diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index bf6ea0c98149..e50a2f911489 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -28,7 +28,6 @@ //! //! We maintain a rolling window of session indices. 
This starts as empty -use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ self as approval_types, @@ -320,7 +319,6 @@ pub struct BlockImportedCandidates { pub block_hash: Hash, pub block_number: BlockNumber, pub block_tick: Tick, - pub no_show_duration: Tick, pub imported_candidates: Vec<(CandidateHash, CandidateEntry)>, } @@ -349,13 +347,6 @@ pub(crate) async fn handle_new_head< finalized_number: &Option, ) -> SubsystemResult> { const MAX_HEADS_LOOK_BACK: BlockNumber = MAX_FINALITY_LAG; - let _handle_new_head_span = state - .spans - .get(&head) - .map(|span| span.child("handle-new-head")) - .unwrap_or_else(|| jaeger::Span::new(head, "handle-new-head")) - .with_string_tag("head", format!("{:?}", head)) - .with_stage(jaeger::Stage::ApprovalChecking); let header = { let (h_tx, h_rx) = oneshot::channel(); @@ -469,14 +460,7 @@ pub(crate) async fn handle_new_head< None => return Ok(Vec::new()), }; - let (block_tick, no_show_duration) = { - let block_tick = slot_number_to_tick(state.slot_duration_millis, slot); - let no_show_duration = slot_number_to_tick( - state.slot_duration_millis, - Slot::from(u64::from(session_info.no_show_slots)), - ); - (block_tick, no_show_duration) - }; + let block_tick = slot_number_to_tick(state.slot_duration_millis, slot); let needed_approvals = session_info.needed_approvals; let validator_group_lens: Vec = @@ -595,7 +579,6 @@ pub(crate) async fn handle_new_head< block_hash, block_number: block_header.number, block_tick, - no_show_duration, imported_candidates: candidate_entries .into_iter() .map(|(h, e)| (h, e.into())) @@ -675,7 +658,6 @@ pub(crate) mod tests { slot_duration_millis: 6_000, clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::default()), - spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 2149ce81fa80..835098293050 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -21,9 +21,6 @@ //! of others. It uses this information to determine when candidates and blocks have //! been sufficiently approved to finalize. -use itertools::Itertools; -use jaeger::{hash_to_trace_identifier, PerLeafSpan}; -use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ v1::{BlockApprovalMeta, DelayTranche}, @@ -634,11 +631,7 @@ impl Wakeups { self.wakeups.entry(tick).or_default().push((block_hash, candidate_hash)); } - fn prune_finalized_wakeups( - &mut self, - finalized_number: BlockNumber, - spans: &mut HashMap, - ) { + fn prune_finalized_wakeups(&mut self, finalized_number: BlockNumber) { let after = self.block_numbers.split_off(&(finalized_number + 1)); let pruned_blocks: HashSet<_> = std::mem::replace(&mut self.block_numbers, after) .into_iter() @@ -662,9 +655,6 @@ impl Wakeups { } } } - - // Remove all spans that are associated with pruned blocks. - spans.retain(|h, _| !pruned_blocks.contains(h)); } // Get the wakeup for a particular block/candidate combo, if any. @@ -841,7 +831,6 @@ struct State { slot_duration_millis: u64, clock: Arc, assignment_criteria: Box, - spans: HashMap, // Per block, candidate records about how long we take until we gather enough // assignments, this is relevant because it gives us a good idea about how many // tranches we trigger and why. 
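For reference, here is a minimal standalone sketch (not the subsystem's real types; `prune_up_to` and the u32/&str keys and values are illustrative stand-ins) of the pruning pattern that `prune_finalized_wakeups` keeps after this change: `BTreeMap::split_off` retains everything newer than the finalized block number and hands back the finalized entries, which is all that remains now that the span map no longer has to be pruned alongside it.

use std::collections::{BTreeMap, HashSet};

// Illustrative stand-in for `Wakeups::block_numbers`: block number -> block hashes.
fn prune_up_to(
    block_numbers: &mut BTreeMap<u32, HashSet<&'static str>>,
    finalized_number: u32,
) -> HashSet<&'static str> {
    // `split_off` returns the entries with keys >= finalized_number + 1,
    // i.e. the still-unfinalized blocks that should be kept.
    let keep = block_numbers.split_off(&(finalized_number + 1));
    // Everything left behind is finalized: swap in the kept part and flatten the
    // pruned block hashes into one set, mirroring the pattern in the hunk above.
    std::mem::replace(block_numbers, keep).into_values().flatten().collect()
}

fn main() {
    let mut block_numbers = BTreeMap::new();
    block_numbers.insert(10, HashSet::from(["hash-a"]));
    block_numbers.insert(11, HashSet::from(["hash-b"]));
    block_numbers.insert(12, HashSet::from(["hash-c"]));

    let pruned = prune_up_to(&mut block_numbers, 11);
    assert!(pruned.contains("hash-a") && pruned.contains("hash-b"));
    assert_eq!(block_numbers.len(), 1); // only block 12 survives
}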
@@ -1203,7 +1192,6 @@ where slot_duration_millis: subsystem.slot_duration_millis, clock: subsystem.clock, assignment_criteria, - spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), @@ -1525,18 +1513,8 @@ async fn handle_actions< continue } - let mut launch_approval_span = state - .spans - .get(&relay_block_hash) - .map(|span| span.child("launch-approval")) - .unwrap_or_else(|| jaeger::Span::new(candidate_hash, "launch-approval")) - .with_trace_id(candidate_hash) - .with_candidate(candidate_hash) - .with_stage(jaeger::Stage::ApprovalChecking); - metrics.on_assignment_produced(assignment_tranche); let block_hash = indirect_cert.block_hash; - launch_approval_span.add_string_tag("block-hash", format!("{:?}", block_hash)); let validator_index = indirect_cert.validator; if distribute_assignment { @@ -1580,7 +1558,6 @@ async fn handle_actions< backing_group, executor_params, core_index, - &launch_approval_span, ) .await }, @@ -1591,15 +1568,6 @@ async fn handle_actions< } }, Action::NoteApprovedInChainSelection(block_hash) => { - let _span = state - .spans - .get(&block_hash) - .map(|span| span.child("note-approved-in-chain-selection")) - .unwrap_or_else(|| { - jaeger::Span::new(block_hash, "note-approved-in-chain-selection") - }) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_stage(jaeger::Stage::ApprovalChecking); sender.send_message(ChainSelectionMessage::Approved(block_hash)).await; }, Action::BecomeActive => { @@ -1704,15 +1672,6 @@ async fn distribution_messages_for_activation b, None => { @@ -1722,9 +1681,6 @@ async fn distribution_messages_for_activation c, None => { @@ -1936,9 +1890,6 @@ async fn handle_from_overseer< let mut actions = Vec::new(); if let Some(activated) = update.activated { let head = activated.hash; - let approval_voting_span = - jaeger::PerLeafSpan::new(activated.span, "approval-voting"); - state.spans.insert(head, approval_voting_span); match import::handle_new_head( sender, approval_voting_sender, @@ -2009,7 +1960,7 @@ async fn handle_from_overseer< // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans // accordingly. - wakeups.prune_finalized_wakeups(block_number, &mut state.spans); + wakeups.prune_finalized_wakeups(block_number); state.cleanup_assignments_gathering_timestamp(block_number); // // `prune_finalized_wakeups` prunes all finalized block hashes. 
We prune spans @@ -2051,23 +2002,8 @@ async fn handle_from_overseer< result.0 }, ApprovalVotingMessage::ApprovedAncestor(target, lower_bound, res) => { - let mut approved_ancestor_span = state - .spans - .get(&target) - .map(|span| span.child("approved-ancestor")) - .unwrap_or_else(|| jaeger::Span::new(target, "approved-ancestor")) - .with_stage(jaeger::Stage::ApprovalChecking) - .with_string_tag("leaf", format!("{:?}", target)); - match handle_approved_ancestor( - sender, - db, - target, - lower_bound, - wakeups, - &mut approved_ancestor_span, - &metrics, - ) - .await + match handle_approved_ancestor(sender, db, target, lower_bound, wakeups, &metrics) + .await { Ok(v) => { let _ = res.send(v); @@ -2260,15 +2196,11 @@ async fn handle_approved_ancestor>( target: Hash, lower_bound: BlockNumber, wakeups: &Wakeups, - span: &mut jaeger::Span, metrics: &Metrics, ) -> SubsystemResult> { const MAX_TRACING_WINDOW: usize = 200; const ABNORMAL_DEPTH_THRESHOLD: usize = 5; const LOGGING_DEPTH_THRESHOLD: usize = 10; - let mut span = span - .child("handle-approved-ancestor") - .with_stage(jaeger::Stage::ApprovalChecking); let mut all_approved_max = None; @@ -2284,8 +2216,6 @@ async fn handle_approved_ancestor>( } }; - span.add_uint_tag("leaf-number", target_number as u64); - span.add_uint_tag("lower-bound", lower_bound as u64); if target_number <= lower_bound { return Ok(None) } @@ -2317,9 +2247,6 @@ async fn handle_approved_ancestor>( let mut bits: BitVec = Default::default(); for (i, block_hash) in std::iter::once(target).chain(ancestry).enumerate() { - let mut entry_span = - span.child("load-block-entry").with_stage(jaeger::Stage::ApprovalChecking); - entry_span.add_string_tag("block-hash", format!("{:?}", block_hash)); // Block entries should be present as the assumption is that // nothing here is finalized. If we encounter any missing block // entries we can fail. @@ -2386,7 +2313,6 @@ async fn handle_approved_ancestor>( ) } metrics.on_unapproved_candidates_in_unfinalized_chain(unapproved.len()); - entry_span.add_uint_tag("unapproved-candidates", unapproved.len() as u64); for candidate_hash in unapproved { match db.load_candidate_entry(&candidate_hash)? { None => { @@ -2507,15 +2433,6 @@ async fn handle_approved_ancestor>( number: block_number, descriptions: block_descriptions, }); - match all_approved_max { - Some(HighestApprovedAncestorBlock { ref hash, ref number, .. }) => { - span.add_uint_tag("highest-approved-number", *number as u64); - span.add_string_fmt_debug_tag("highest-approved-hash", hash); - }, - None => { - span.add_string_tag("reached-lower-bound", "true"); - }, - } Ok(all_approved_max) } @@ -2548,7 +2465,12 @@ fn schedule_wakeup_action( last_assignment_tick.map(|l| l + APPROVAL_DELAY).filter(|t| t > &tick_now), next_no_show, ) - .map(|tick| Action::ScheduleWakeup { block_hash, block_number, candidate_hash, tick }) + .map(|tick| Action::ScheduleWakeup { + block_hash, + block_number, + candidate_hash, + tick, + }) }, RequiredTranches::Pending { considered, next_no_show, clock_drift, .. 
} => { // select the minimum of `next_no_show`, or the tick of the next non-empty tranche @@ -2617,17 +2539,6 @@ where let assignment = checked_assignment.assignment(); let candidate_indices = checked_assignment.candidate_indices(); let tranche = checked_assignment.tranche(); - let mut import_assignment_span = state - .spans - .get(&assignment.block_hash) - .map(|span| span.child("import-assignment")) - .unwrap_or_else(|| jaeger::Span::new(assignment.block_hash, "import-assignment")) - .with_relay_parent(assignment.block_hash) - .with_stage(jaeger::Stage::ApprovalChecking); - - for candidate_index in candidate_indices.iter_ones() { - import_assignment_span.add_uint_tag("candidate-index", candidate_index as u64); - } let block_entry = match db.load_block_entry(&assignment.block_hash)? { Some(b) => b, @@ -2707,13 +2618,6 @@ where )), // no candidate at core. }; - import_assignment_span - .add_string_tag("candidate-hash", format!("{:?}", assigned_candidate_hash)); - import_assignment_span.add_string_tag( - "traceID", - format!("{:?}", jaeger::hash_to_trace_identifier(assigned_candidate_hash.0)), - ); - if candidate_entry.approval_entry_mut(&assignment.block_hash).is_none() { return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::Internal( @@ -2771,7 +2675,6 @@ where }; is_duplicate &= approval_entry.is_assigned(assignment.validator); approval_entry.import_assignment(tranche, assignment.validator, tick_now); - import_assignment_span.add_uint_tag("tranche", tranche as u64); // We've imported a new assignment, so we need to schedule a wake-up for when that might // no-show. @@ -2845,14 +2748,6 @@ where return Ok((Vec::new(), $e)) }}; } - let mut span = state - .spans - .get(&approval.block_hash) - .map(|span| span.child("import-approval")) - .unwrap_or_else(|| jaeger::Span::new(approval.block_hash, "import-approval")) - .with_string_fmt_debug_tag("candidate-index", approval.candidate_indices.clone()) - .with_relay_parent(approval.block_hash) - .with_stage(jaeger::Stage::ApprovalChecking); let block_entry = match db.load_block_entry(&approval.block_hash)? 
{ Some(b) => b, @@ -2882,20 +2777,6 @@ where }, }; - span.add_string_tag("candidate-hashes", format!("{:?}", approved_candidates_info)); - span.add_string_tag( - "traceIDs", - format!( - "{:?}", - approved_candidates_info - .iter() - .map(|(_, approved_candidate_hash)| hash_to_trace_identifier( - approved_candidate_hash.0 - )) - .collect_vec() - ), - ); - gum::trace!( target: LOG_TARGET, "Received approval for num_candidates {:}", @@ -3250,16 +3131,6 @@ async fn process_wakeup>( metrics: &Metrics, wakeups: &Wakeups, ) -> SubsystemResult> { - let mut span = state - .spans - .get(&relay_block) - .map(|span| span.child("process-wakeup")) - .unwrap_or_else(|| jaeger::Span::new(candidate_hash, "process-wakeup")) - .with_trace_id(candidate_hash) - .with_relay_parent(relay_block) - .with_candidate(candidate_hash) - .with_stage(jaeger::Stage::ApprovalChecking); - let block_entry = db.load_block_entry(&relay_block)?; let candidate_entry = db.load_candidate_entry(&candidate_hash)?; @@ -3288,7 +3159,7 @@ async fn process_wakeup>( Slot::from(u64::from(session_info.no_show_slots)), ); let tranche_now = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); - span.add_uint_tag("tranche", tranche_now as u64); + gum::trace!( target: LOG_TARGET, tranche = tranche_now, @@ -3451,7 +3322,6 @@ async fn launch_approval< backing_group: GroupIndex, executor_params: ExecutorParams, core_index: Option, - span: &jaeger::Span, ) -> SubsystemResult> { let (a_tx, a_rx) = oneshot::channel(); let (code_tx, code_rx) = oneshot::channel(); @@ -3485,13 +3355,6 @@ async fn launch_approval< let para_id = candidate.descriptor.para_id; gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data."); - let request_validation_data_span = span - .child("request-validation-data") - .with_trace_id(candidate_hash) - .with_candidate(candidate_hash) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_stage(jaeger::Stage::ApprovalChecking); - let timer = metrics.time_recover_and_approve(); sender .send_message(AvailabilityRecoveryMessage::RecoverAvailableData( @@ -3503,13 +3366,6 @@ async fn launch_approval< )) .await; - let request_validation_result_span = span - .child("request-validation-result") - .with_trace_id(candidate_hash) - .with_candidate(candidate_hash) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_stage(jaeger::Stage::ApprovalChecking); - sender .send_message(RuntimeApiMessage::Request( block_hash, @@ -3573,7 +3429,6 @@ async fn launch_approval< return ApprovalState::failed(validator_index, candidate_hash) }, }; - drop(request_validation_data_span); let validation_code = match code_rx.await { Err(_) => return ApprovalState::failed(validator_index, candidate_hash), @@ -3645,7 +3500,6 @@ async fn launch_approval< "Failed to validate candidate due to internal error", ); metrics_guard.take().on_approval_error(); - drop(request_validation_result_span); return ApprovalState::failed(validator_index, candidate_hash) }, } @@ -3673,17 +3527,6 @@ async fn issue_approval< ApprovalVoteRequest { validator_index, block_hash }: ApprovalVoteRequest, wakeups: &Wakeups, ) -> SubsystemResult> { - let mut issue_approval_span = state - .spans - .get(&block_hash) - .map(|span| span.child("issue-approval")) - .unwrap_or_else(|| jaeger::Span::new(block_hash, "issue-approval")) - .with_trace_id(candidate_hash) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_candidate(candidate_hash) - .with_validator_index(validator_index) - 
.with_stage(jaeger::Stage::ApprovalChecking); - let mut block_entry = match db.load_block_entry(&block_hash)? { Some(b) => b, None => { @@ -3708,7 +3551,6 @@ async fn issue_approval< }, Some(idx) => idx, }; - issue_approval_span.add_int_tag("candidate_index", candidate_index as i64); let candidate_hash = match block_entry.candidate(candidate_index as usize) { Some((_, h)) => *h, diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 8aa78df54953..db5ffd441c0d 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -17,6 +17,7 @@ use self::test_helpers::mock::new_leaf; use super::*; use crate::backend::V1ReadBackend; +use itertools::Itertools; use overseer::prometheus::{ prometheus::{IntCounter, IntCounterVec}, Histogram, HistogramOpts, HistogramVec, Opts, @@ -253,7 +254,8 @@ where _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, _backing_groups: Vec, - ) -> Result { + ) -> Result + { self.1(validator_index) } } @@ -4921,7 +4923,6 @@ fn test_gathering_assignments_statements() { slot_duration_millis: 6_000, clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))), - spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), @@ -5016,7 +5017,6 @@ fn test_observe_assignment_gathering_status() { slot_duration_millis: 6_000, clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))), - spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 5096fe5e6891..9cf9047b7273 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -34,8 +34,9 @@ use polkadot_node_primitives::{ }; use polkadot_node_subsystem::{ messages::{ - ApprovalVotingMessage, BlockDescription, ChainSelectionMessage, DisputeCoordinatorMessage, - DisputeDistributionMessage, ImportStatementsResult, + ApprovalVotingMessage, ApprovalVotingParallelMessage, BlockDescription, + ChainSelectionMessage, DisputeCoordinatorMessage, DisputeDistributionMessage, + ImportStatementsResult, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, }; @@ -117,6 +118,7 @@ pub(crate) struct Initialized { /// `CHAIN_IMPORT_MAX_BATCH_SIZE` and put the rest here for later processing. 
chain_import_backlog: VecDeque, metrics: Metrics, + approval_voting_parallel_enabled: bool, } #[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] @@ -130,7 +132,13 @@ impl Initialized { highest_session_seen: SessionIndex, gaps_in_cache: bool, ) -> Self { - let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics } = subsystem; + let DisputeCoordinatorSubsystem { + config: _, + store: _, + keystore, + metrics, + approval_voting_parallel_enabled, + } = subsystem; let (participation_sender, participation_receiver) = mpsc::channel(1); let participation = Participation::new(participation_sender, metrics.clone()); @@ -148,6 +156,7 @@ impl Initialized { participation_receiver, chain_import_backlog: VecDeque::new(), metrics, + approval_voting_parallel_enabled, } } @@ -1059,9 +1068,21 @@ impl Initialized { // 4. We are waiting (and blocking the whole subsystem) on a response right after - // therefore even with all else failing we will never have more than // one message in flight at any given time. - ctx.send_unbounded_message( - ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate_hash, tx), - ); + if self.approval_voting_parallel_enabled { + ctx.send_unbounded_message( + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate( + candidate_hash, + tx, + ), + ); + } else { + ctx.send_unbounded_message( + ApprovalVotingMessage::GetApprovalSignaturesForCandidate( + candidate_hash, + tx, + ), + ); + } match rx.await { Err(_) => { gum::warn!( diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index 34d9ddf3a97c..84408eb96305 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -122,6 +122,7 @@ pub struct DisputeCoordinatorSubsystem { store: Arc, keystore: Arc, metrics: Metrics, + approval_voting_parallel_enabled: bool, } /// Configuration for the dispute coordinator subsystem. @@ -164,8 +165,9 @@ impl DisputeCoordinatorSubsystem { config: Config, keystore: Arc, metrics: Metrics, + approval_voting_parallel_enabled: bool, ) -> Self { - Self { store, config, keystore, metrics } + Self { store, config, keystore, metrics, approval_voting_parallel_enabled } } /// Initialize and afterwards run `Initialized::run`. 
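A rough self-contained sketch of the routing decision the dispute coordinator now makes with the new `approval_voting_parallel_enabled` flag; the enum and function below are illustrative stand-ins, not the real `polkadot_node_subsystem::messages` types.

// Stand-ins for the two real message enums used in the hunk above.
#[derive(Debug, PartialEq)]
enum SignatureQuery {
    ApprovalVoting(&'static str),
    ApprovalVotingParallel(&'static str),
}

// Mirrors the branch added above: with the parallel subsystem enabled, the
// signature request goes to the parallel subsystem, otherwise to the classic one.
fn route_signature_request(approval_voting_parallel_enabled: bool) -> SignatureQuery {
    if approval_voting_parallel_enabled {
        SignatureQuery::ApprovalVotingParallel("GetApprovalSignaturesForCandidate")
    } else {
        SignatureQuery::ApprovalVoting("GetApprovalSignaturesForCandidate")
    }
}

fn main() {
    assert_eq!(
        route_signature_request(true),
        SignatureQuery::ApprovalVotingParallel("GetApprovalSignaturesForCandidate")
    );
    assert_eq!(
        route_signature_request(false),
        SignatureQuery::ApprovalVoting("GetApprovalSignaturesForCandidate")
    );
}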
diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index f97a625a9528..b41cdb94b4d2 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -580,6 +580,7 @@ impl TestState { self.config, self.subsystem_keystore.clone(), Metrics::default(), + false, ); let backend = DbBackend::new(self.db.clone(), self.config.column_config(), Metrics::default()); diff --git a/polkadot/node/core/pvf/build.rs b/polkadot/node/core/pvf/build.rs index e01cc6deecc2..e46f2dc5f55a 100644 --- a/polkadot/node/core/pvf/build.rs +++ b/polkadot/node/core/pvf/build.rs @@ -16,6 +16,6 @@ fn main() { if let Ok(profile) = std::env::var("PROFILE") { - println!(r#"cargo:rustc-cfg=build_type="{}""#, profile); + println!(r#"cargo:rustc-cfg=build_profile="{}""#, profile); } } diff --git a/polkadot/node/core/pvf/common/src/worker/security/change_root.rs b/polkadot/node/core/pvf/common/src/worker/security/change_root.rs index 9ec66906819f..fcfaf6541c29 100644 --- a/polkadot/node/core/pvf/common/src/worker/security/change_root.rs +++ b/polkadot/node/core/pvf/common/src/worker/security/change_root.rs @@ -124,7 +124,8 @@ fn try_restrict(worker_info: &WorkerInfo) -> Result<()> { libc::MS_BIND | libc::MS_REC | libc::MS_NOEXEC | libc::MS_NODEV | libc::MS_NOSUID | - libc::MS_NOATIME | additional_flags, + libc::MS_NOATIME | + additional_flags, ptr::null(), // ignored when MS_BIND is used ) < 0 { diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 119af34082a9..1126a0c90c8c 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -56,7 +56,7 @@ use crate::{host::PrecheckResultSender, worker_interface::WORKER_DIR_PREFIX}; use always_assert::always; -use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; +use polkadot_node_core_pvf_common::{error::PrepareError, pvf::PvfPrepData}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParamsPrepHash; use std::{ @@ -144,8 +144,6 @@ pub enum ArtifactState { last_time_needed: SystemTime, /// Size in bytes size: u64, - /// Stats produced by successful preparation. - prepare_stats: PrepareStats, }, /// A task to prepare this artifact is scheduled. Preparing { @@ -269,15 +267,11 @@ impl Artifacts { path: PathBuf, last_time_needed: SystemTime, size: u64, - prepare_stats: PrepareStats, ) { // See the precondition. 
always!(self .inner - .insert( - artifact_id, - ArtifactState::Prepared { path, last_time_needed, size, prepare_stats } - ) + .insert(artifact_id, ArtifactState::Prepared { path, last_time_needed, size }) .is_none()); } @@ -384,21 +378,18 @@ mod tests { path1.clone(), mock_now - Duration::from_secs(5), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id2.clone(), path2.clone(), mock_now - Duration::from_secs(10), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id3.clone(), path3.clone(), mock_now - Duration::from_secs(15), 1024, - PrepareStats::default(), ); let pruned = artifacts.prune(&cleanup_config); @@ -432,21 +423,18 @@ mod tests { path1.clone(), mock_now - Duration::from_secs(5), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id2.clone(), path2.clone(), mock_now - Duration::from_secs(10), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id3.clone(), path3.clone(), mock_now - Duration::from_secs(15), 1024, - PrepareStats::default(), ); let pruned = artifacts.prune(&cleanup_config); diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 44a4cba2fbf8..22943a06c43c 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -813,12 +813,8 @@ async fn handle_prepare_done( } *state = match result { - Ok(PrepareSuccess { path, stats: prepare_stats, size }) => ArtifactState::Prepared { - path, - last_time_needed: SystemTime::now(), - size, - prepare_stats, - }, + Ok(PrepareSuccess { path, size, .. }) => + ArtifactState::Prepared { path, last_time_needed: SystemTime::now(), size }, Err(error) => { let last_time_failed = SystemTime::now(); let num_failures = *num_failures + 1; @@ -976,7 +972,6 @@ pub(crate) mod tests { use crate::{artifacts::generate_artifact_path, testing::artifact_id, PossiblyInvalidError}; use assert_matches::assert_matches; use futures::future::BoxFuture; - use polkadot_node_core_pvf_common::prepare::PrepareStats; use polkadot_node_primitives::BlockData; use sp_core::H256; @@ -1196,20 +1191,8 @@ pub(crate) mod tests { builder.cleanup_config = ArtifactsCleanupConfig::new(1024, Duration::from_secs(0)); let path1 = generate_artifact_path(cache_path); let path2 = generate_artifact_path(cache_path); - builder.artifacts.insert_prepared( - artifact_id(1), - path1.clone(), - mock_now, - 1024, - PrepareStats::default(), - ); - builder.artifacts.insert_prepared( - artifact_id(2), - path2.clone(), - mock_now, - 1024, - PrepareStats::default(), - ); + builder.artifacts.insert_prepared(artifact_id(1), path1.clone(), mock_now, 1024); + builder.artifacts.insert_prepared(artifact_id(2), path2.clone(), mock_now, 1024); let mut test = builder.build(); let mut host = test.host_handle(); diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 4e11f977c9e7..67cd71812e52 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -343,14 +343,13 @@ fn handle_mux( ), // Return `Concluded`, but do not kill the worker since the error was on the host // side. 
- Outcome::RenameTmpFile { worker: idle, result: _, err, src, dest } => - handle_concluded_no_rip( - from_pool, - spawned, - worker, - idle, - Err(PrepareError::RenameTmpFile { err, src, dest }), - ), + Outcome::RenameTmpFile { worker: idle, err, src, dest } => handle_concluded_no_rip( + from_pool, + spawned, + worker, + idle, + Err(PrepareError::RenameTmpFile { err, src, dest }), + ), // Could not clear worker cache. Kill the worker so other jobs can't see the data. Outcome::ClearWorkerDir { err } => { if attempt_retire(metrics, spawned, worker) { diff --git a/polkadot/node/core/pvf/src/prepare/worker_interface.rs b/polkadot/node/core/pvf/src/prepare/worker_interface.rs index d29d2717c4b6..718416e8be76 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_interface.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_interface.rs @@ -81,7 +81,6 @@ pub enum Outcome { /// final destination location. RenameTmpFile { worker: IdleWorker, - result: PrepareWorkerResult, err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. @@ -287,7 +286,6 @@ async fn handle_response( ); Outcome::RenameTmpFile { worker, - result, err: format!("{:?}", err), src: tmp_file.to_str().map(String::from), dest: artifact_path.to_str().map(String::from), diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 8c75dafa69c2..9a4004f39037 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -72,7 +72,7 @@ pub fn build_workers_and_get_paths() -> (PathBuf, PathBuf) { "--bin=polkadot-execute-worker", ]; - if cfg!(build_type = "release") { + if cfg!(build_profile = "release") { build_args.push("--release"); } diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 51478dfa4a4f..8d674a733470 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -16,7 +16,6 @@ polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } itertools = { workspace = true } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 971b6de5f8f6..876cc59b9c28 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -27,7 +27,6 @@ use self::metrics::Metrics; use futures::{select, FutureExt as _}; use itertools::Itertools; use net_protocol::peer_set::{ProtocolVersion, ValidationVersion}; -use polkadot_node_jaeger as jaeger; use polkadot_node_network_protocol::{ self as net_protocol, filter_by_peer_version, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, @@ -73,7 +72,8 @@ use std::{ time::Duration, }; -mod metrics; +/// Approval distribution metrics. 
+pub mod metrics; #[cfg(test)] mod tests; @@ -99,7 +99,7 @@ const MAX_BITFIELD_SIZE: usize = 500; pub struct ApprovalDistribution { metrics: Metrics, slot_duration_millis: u64, - clock: Box, + clock: Arc, assignment_criteria: Arc, } @@ -358,9 +358,6 @@ pub struct State { /// Tracks recently finalized blocks. recent_outdated_blocks: RecentlyOutdated, - /// HashMap from active leaves to spans - spans: HashMap, - /// Aggression configuration. aggression_config: AggressionConfig, @@ -871,18 +868,9 @@ impl State { ); for meta in metas { - let mut span = self - .spans - .get(&meta.hash) - .map(|span| span.child(&"handle-new-blocks")) - .unwrap_or_else(|| jaeger::Span::new(meta.hash, &"handle-new-blocks")) - .with_string_tag("block-hash", format!("{:?}", meta.hash)) - .with_stage(jaeger::Stage::ApprovalDistribution); - match self.blocks.entry(meta.hash) { hash_map::Entry::Vacant(entry) => { let candidates_count = meta.candidates.len(); - span.add_uint_tag("candidates-count", candidates_count as u64); let mut candidates = Vec::with_capacity(candidates_count); candidates.resize_with(candidates_count, Default::default); @@ -1329,7 +1317,6 @@ impl State { if let Some(block_entry) = self.blocks.remove(relay_block) { self.topologies.dec_session_refs(block_entry.session); } - self.spans.remove(&relay_block); }); // If a block was finalized, this means we may need to move our aggression @@ -1356,21 +1343,6 @@ impl State { RA: overseer::SubsystemSender, R: CryptoRng + Rng, { - let _span = self - .spans - .get(&assignment.block_hash) - .map(|span| { - span.child(if source.peer_id().is_some() { - "peer-import-and-distribute-assignment" - } else { - "local-import-and-distribute-assignment" - }) - }) - .unwrap_or_else(|| jaeger::Span::new(&assignment.block_hash, "distribute-assignment")) - .with_string_tag("block-hash", format!("{:?}", assignment.block_hash)) - .with_optional_peer_id(source.peer_id().as_ref()) - .with_stage(jaeger::Stage::ApprovalDistribution); - let block_hash = assignment.block_hash; let validator_index = assignment.validator; @@ -1836,21 +1808,6 @@ impl State { vote: IndirectSignedApprovalVoteV2, session_info_provider: &mut RuntimeInfo, ) { - let _span = self - .spans - .get(&vote.block_hash) - .map(|span| { - span.child(if source.peer_id().is_some() { - "peer-import-and-distribute-approval" - } else { - "local-import-and-distribute-approval" - }) - }) - .unwrap_or_else(|| jaeger::Span::new(&vote.block_hash, "distribute-approval")) - .with_string_tag("block-hash", format!("{:?}", vote.block_hash)) - .with_optional_peer_id(source.peer_id().as_ref()) - .with_stage(jaeger::Stage::ApprovalDistribution); - let block_hash = vote.block_hash; let validator_index = vote.validator; let candidate_indices = &vote.candidate_indices; @@ -2090,14 +2047,6 @@ impl State { ) -> HashMap, ValidatorSignature)> { let mut all_sigs = HashMap::new(); for (hash, index) in indices { - let _span = self - .spans - .get(&hash) - .map(|span| span.child("get-approval-signatures")) - .unwrap_or_else(|| jaeger::Span::new(&hash, "get-approval-signatures")) - .with_string_tag("block-hash", format!("{:?}", hash)) - .with_stage(jaeger::Stage::ApprovalDistribution); - let block_entry = match self.blocks.get(&hash) { None => { gum::debug!( @@ -2668,7 +2617,7 @@ impl ApprovalDistribution { Self::new_with_clock( metrics, slot_duration_millis, - Box::new(SystemClock), + Arc::new(SystemClock), assignment_criteria, ) } @@ -2677,7 +2626,7 @@ impl ApprovalDistribution { pub fn new_with_clock( metrics: Metrics, slot_duration_millis: 
u64, - clock: Box, + clock: Arc, assignment_criteria: Arc, ) -> Self { Self { metrics, slot_duration_millis, clock, assignment_criteria } @@ -2775,18 +2724,12 @@ impl ApprovalDistribution { session_info_provider, ) .await, - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(_update)) => { gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)"); // the relay chain blocks relevant to the approval subsystems // are those that are available, but not finalized yet // activated and deactivated heads hence are irrelevant to this subsystem, other // than for tracing purposes. - if let Some(activated) = update.activated { - let head = activated.hash; - let approval_distribution_span = - jaeger::PerLeafSpan::new(activated.span, "approval-distribution"); - state.spans.insert(head, approval_distribution_span); - } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(_hash, number)) => { gum::trace!(target: LOG_TARGET, number = %number, "finalized signal"); @@ -2845,14 +2788,6 @@ impl ApprovalDistribution { .await; }, ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { - let _span = state - .spans - .get(&cert.block_hash) - .map(|span| span.child("import-and-distribute-assignment")) - .unwrap_or_else(|| jaeger::Span::new(&cert.block_hash, "distribute-assignment")) - .with_string_tag("block-hash", format!("{:?}", cert.block_hash)) - .with_stage(jaeger::Stage::ApprovalDistribution); - gum::debug!( target: LOG_TARGET, ?candidate_indices, diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 10553c352966..2f677ba415e4 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -79,31 +79,19 @@ impl Metrics { .map(|metrics| metrics.time_import_pending_now_known.start_timer()) } - pub fn on_approval_already_known(&self) { - if let Some(metrics) = &self.0 { - metrics.approvals_received_result.with_label_values(&["known"]).inc() - } - } - - pub fn on_approval_entry_not_found(&self) { - if let Some(metrics) = &self.0 { - metrics.approvals_received_result.with_label_values(&["noapprovalentry"]).inc() - } - } - - pub fn on_approval_recent_outdated(&self) { + pub(crate) fn on_approval_recent_outdated(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["outdated"]).inc() } } - pub fn on_approval_invalid_block(&self) { + pub(crate) fn on_approval_invalid_block(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["invalidblock"]).inc() } } - pub fn on_approval_unknown_assignment(&self) { + pub(crate) fn on_approval_unknown_assignment(&self) { if let Some(metrics) = &self.0 { metrics .approvals_received_result @@ -112,94 +100,73 @@ impl Metrics { } } - pub fn on_approval_duplicate(&self) { + pub(crate) fn on_approval_duplicate(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["duplicate"]).inc() } } - pub fn on_approval_out_of_view(&self) { + pub(crate) fn on_approval_out_of_view(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["outofview"]).inc() } } - pub fn on_approval_good_known(&self) { + pub(crate) fn on_approval_good_known(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["goodknown"]).inc() } } - pub fn 
on_approval_bad(&self) { + pub(crate) fn on_approval_bad(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["bad"]).inc() } } - pub fn on_approval_unexpected(&self) { - if let Some(metrics) = &self.0 { - metrics.approvals_received_result.with_label_values(&["unexpected"]).inc() - } - } - - pub fn on_approval_bug(&self) { + pub(crate) fn on_approval_bug(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["bug"]).inc() } } - pub fn on_assignment_already_known(&self) { - if let Some(metrics) = &self.0 { - metrics.assignments_received_result.with_label_values(&["known"]).inc() - } - } - - pub fn on_assignment_recent_outdated(&self) { + pub(crate) fn on_assignment_recent_outdated(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["outdated"]).inc() } } - pub fn on_assignment_invalid_block(&self) { + pub(crate) fn on_assignment_invalid_block(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["invalidblock"]).inc() } } - pub fn on_assignment_duplicate(&self) { + pub(crate) fn on_assignment_duplicate(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["duplicate"]).inc() } } - pub fn on_assignment_out_of_view(&self) { + pub(crate) fn on_assignment_out_of_view(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["outofview"]).inc() } } - pub fn on_assignment_good_known(&self) { + pub(crate) fn on_assignment_good_known(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["goodknown"]).inc() } } - pub fn on_assignment_bad(&self) { + pub(crate) fn on_assignment_bad(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["bad"]).inc() } } - pub fn on_assignment_duplicatevoting(&self) { - if let Some(metrics) = &self.0 { - metrics - .assignments_received_result - .with_label_values(&["duplicatevoting"]) - .inc() - } - } - - pub fn on_assignment_far(&self) { + pub(crate) fn on_assignment_far(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["far"]).inc() } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 4ee9320e0e45..063e71f2f528 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -54,7 +54,7 @@ type VirtualOverseer = fn test_harness>( assignment_criteria: Arc, - clock: Box, + clock: Arc, mut state: State, test_fn: impl FnOnce(VirtualOverseer) -> T, ) -> State { @@ -535,7 +535,8 @@ impl AssignmentCriteria for MockAssignmentCriteria { _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, _backing_groups: Vec, - ) -> Result { + ) -> Result + { self.tranche } } @@ -555,16 +556,15 @@ fn try_import_the_same_assignment() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; - // setup peers + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; 
setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; - // Set up a gossip topology, where a, b, c and d are topology neighbors to the node // under testing. let peers_with_optional_peer_id = peers .iter() @@ -661,7 +661,7 @@ fn try_import_the_same_assignment_v2() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -772,7 +772,7 @@ fn delay_reputation_change() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_with_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -845,7 +845,7 @@ fn spam_attack_results_in_negative_reputation_change() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -942,7 +942,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1043,7 +1043,7 @@ fn import_approval_happy_path_v1_v2_peers() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1183,7 +1183,7 @@ fn import_approval_happy_path_v2() { let candidate_hash_second = polkadot_primitives::CandidateHash(Hash::repeat_byte(0xCC)); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1314,7 +1314,7 @@ fn multiple_assignments_covered_with_one_approval_vote() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1524,7 +1524,7 @@ fn unify_with_peer_multiple_assignments_covered_with_one_approval_vote() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1723,7 +1723,7 @@ fn import_approval_bad() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1810,7 +1810,7 @@ fn update_our_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1858,7 +1858,7 @@ fn update_our_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut 
virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1877,7 +1877,7 @@ fn update_our_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1905,7 +1905,7 @@ fn update_peer_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2004,7 +2004,7 @@ fn update_peer_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2064,7 +2064,7 @@ fn update_peer_view() { let finalized_number = 4_000_000_000; let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2106,7 +2106,7 @@ fn update_peer_authority_id() { let _state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2287,7 +2287,7 @@ fn import_remotely_then_locally() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2393,7 +2393,7 @@ fn sends_assignments_even_when_state_is_approved() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2499,7 +2499,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2625,7 +2625,7 @@ fn race_condition_in_local_vs_remote_view_update() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2711,7 +2711,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2841,7 +2841,7 @@ fn propagates_assignments_along_unshared_dimension() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3000,7 +3000,7 @@ fn propagates_to_required_after_connect() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut 
virtual_overseer; @@ -3165,7 +3165,7 @@ fn sends_to_more_peers_after_getting_topology() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3303,7 +3303,7 @@ fn originator_aggression_l1() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3484,7 +3484,7 @@ fn non_originator_aggression_l1() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3609,7 +3609,7 @@ fn non_originator_aggression_l2() { let aggression_l2_threshold = state.aggression_config.l2_threshold.unwrap(); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3794,7 +3794,7 @@ fn resends_messages_periodically() { state.aggression_config.resend_unfinalized_period = Some(2); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3958,7 +3958,7 @@ fn import_versioned_approval() { let candidate_hash = polkadot_primitives::CandidateHash(Hash::repeat_byte(0xBB)); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4131,7 +4131,7 @@ fn batch_test_round(message_count: usize) { let subsystem = ApprovalDistribution::new_with_clock( Default::default(), Default::default(), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), ); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(12345); @@ -4318,7 +4318,7 @@ fn subsystem_rejects_assignment_in_future() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(89) }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4384,7 +4384,7 @@ fn subsystem_rejects_bad_assignments() { Arc::new(MockAssignmentCriteria { tranche: Err(InvalidAssignment(criteria::InvalidAssignmentReason::NullAssignment)), }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4447,7 +4447,7 @@ fn subsystem_rejects_wrong_claimed_assignments() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4531,7 +4531,7 @@ fn subsystem_accepts_tranche0_duplicate_assignments() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; diff --git 
a/polkadot/node/network/availability-distribution/src/tests/mod.rs b/polkadot/node/network/availability-distribution/src/tests/mod.rs index 3320871bceb5..078220607c37 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mod.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mod.rs @@ -45,7 +45,7 @@ fn test_harness>( let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); - let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::< + let (pov_req_receiver, _pov_req_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, >(&req_protocol_names); @@ -65,13 +65,8 @@ fn test_harness>( ); let subsystem = subsystem.run(context); - let test_fut = test_fx(TestHarness { - virtual_overseer, - pov_req_cfg, - chunk_req_v1_cfg, - chunk_req_v2_cfg, - pool, - }); + let test_fut = + test_fx(TestHarness { virtual_overseer, chunk_req_v1_cfg, chunk_req_v2_cfg, pool }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 97e616f79fb7..53d6fd2c530f 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -60,7 +60,6 @@ type VirtualOverseer = polkadot_node_subsystem_test_helpers::TestSubsystemContex >; pub struct TestHarness { pub virtual_overseer: VirtualOverseer, - pub pov_req_cfg: RequestResponseConfig, pub chunk_req_v1_cfg: RequestResponseConfig, pub chunk_req_v2_cfg: RequestResponseConfig, pub pool: TaskExecutor, diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs index b6376a5b543e..6b34538b6266 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs @@ -107,9 +107,10 @@ impl FetchChunks { state: &mut State, common_params: &RecoveryParams, ) -> Result { - let recovery_duration = common_params - .metrics - .time_erasure_recovery(RecoveryStrategy::::strategy_type(self)); + let recovery_duration = + common_params + .metrics + .time_erasure_recovery(RecoveryStrategy::::strategy_type(self)); // Send request to reconstruct available data from chunks. let (avilable_data_tx, available_data_rx) = oneshot::channel(); @@ -136,18 +137,16 @@ impl FetchChunks { // Attempt post-recovery check. Ok(data) => do_post_recovery_check(common_params, data) .await - .map_err(|e| { + .inspect_err(|_| { recovery_duration.map(|rd| rd.stop_and_discard()); - e }) - .map(|data| { + .inspect(|_| { gum::trace!( target: LOG_TARGET, candidate_hash = ?common_params.candidate_hash, erasure_root = ?common_params.erasure_root, "Data recovery from chunks complete", ); - data }), Err(err) => { recovery_duration.map(|rd| rd.stop_and_discard()); diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs index 677bc2d1375a..8b8cff549912 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs @@ -125,18 +125,16 @@ impl FetchSystematicChunks { // Attempt post-recovery check. 
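The recovery-path hunks here drop closures that only log and then hand the value or error back, in favour of `Result::inspect` / `Result::inspect_err`. A self-contained sketch of the equivalence, with toy values rather than project code:

```rust
// Minimal sketch: `inspect`/`inspect_err` (stable since Rust 1.76) run a side effect
// and pass the value or error through unchanged, replacing the
// `.map(|data| { ...; data })` / `.map_err(|e| { ...; e })` pattern removed above.
fn recover(input: &str) -> Result<u32, std::num::ParseIntError> {
    input.trim().parse::<u32>()
}

fn main() {
    for input in ["42", "not-a-number"] {
        let _ = recover(input)
            .inspect(|n| println!("recovery complete: {n}"))
            .inspect_err(|e| eprintln!("recovery failed: {e}"));
    }
}
```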
do_post_recovery_check(common_params, data) .await - .map_err(|e| { + .inspect_err(|_| { recovery_duration.map(|rd| rd.stop_and_discard()); - e }) - .map(|data| { + .inspect(|_| { gum::trace!( target: LOG_TARGET, candidate_hash = ?common_params.candidate_hash, erasure_root = ?common_params.erasure_root, "Data recovery from systematic chunks complete", ); - data }) }, Err(err) => { diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 7745c42f78a1..9a2357f02d8b 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -45,8 +45,9 @@ use polkadot_node_subsystem::{ errors::SubsystemError, messages::{ network_bridge_event::NewGossipTopology, ApprovalDistributionMessage, - BitfieldDistributionMessage, CollatorProtocolMessage, GossipSupportMessage, - NetworkBridgeEvent, NetworkBridgeRxMessage, StatementDistributionMessage, + ApprovalVotingParallelMessage, BitfieldDistributionMessage, CollatorProtocolMessage, + GossipSupportMessage, NetworkBridgeEvent, NetworkBridgeRxMessage, + StatementDistributionMessage, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, }; @@ -89,6 +90,7 @@ pub struct NetworkBridgeRx { validation_service: Box, collation_service: Box, notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, } impl NetworkBridgeRx { @@ -105,6 +107,7 @@ impl NetworkBridgeRx { peerset_protocol_names: PeerSetProtocolNames, mut notification_services: HashMap>, notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, ) -> Self { let shared = Shared::default(); @@ -125,6 +128,7 @@ impl NetworkBridgeRx { validation_service, collation_service, notification_sinks, + approval_voting_parallel_enabled, } } } @@ -156,6 +160,7 @@ async fn handle_validation_message( peerset_protocol_names: &PeerSetProtocolNames, notification_service: &mut Box, notification_sinks: &mut Arc>>>, + approval_voting_parallel_enabled: bool, ) where AD: validator_discovery::AuthorityDiscovery + Send, { @@ -276,6 +281,7 @@ async fn handle_validation_message( ], sender, &metrics, + approval_voting_parallel_enabled, ) .await; @@ -329,6 +335,7 @@ async fn handle_validation_message( NetworkBridgeEvent::PeerDisconnected(peer), sender, &metrics, + approval_voting_parallel_enabled, ) .await; } @@ -398,7 +405,13 @@ async fn handle_validation_message( network_service.report_peer(peer, report.into()); } - dispatch_validation_events_to_all(events, sender, &metrics).await; + dispatch_validation_events_to_all( + events, + sender, + &metrics, + approval_voting_parallel_enabled, + ) + .await; }, } } @@ -652,6 +665,7 @@ async fn handle_network_messages( mut validation_service: Box, mut collation_service: Box, mut notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, ) -> Result<(), Error> where AD: validator_discovery::AuthorityDiscovery + Send, @@ -669,6 +683,7 @@ where &peerset_protocol_names, &mut validation_service, &mut notification_sinks, + approval_voting_parallel_enabled, ).await, None => return Err(Error::EventStreamConcluded), }, @@ -727,6 +742,7 @@ async fn run_incoming_orchestra_signals( sync_oracle: Box, metrics: Metrics, notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, ) -> Result<(), Error> where AD: validator_discovery::AuthorityDiscovery + Clone, @@ -766,6 +782,7 @@ where local_index, }), ctx.sender(), + approval_voting_parallel_enabled, ); }, FromOrchestra::Communication { @@ -787,6 +804,7 @@ where 
dispatch_validation_event_to_all_unbounded( NetworkBridgeEvent::UpdatedAuthorityIds(peer_id, authority_ids), ctx.sender(), + approval_voting_parallel_enabled, ); }, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), @@ -826,6 +844,7 @@ where finalized_number, &metrics, ¬ification_sinks, + approval_voting_parallel_enabled, ); note_peers_count(&metrics, &shared); } @@ -875,6 +894,7 @@ where validation_service, collation_service, notification_sinks, + approval_voting_parallel_enabled, } = bridge; let (task, network_event_handler) = handle_network_messages( @@ -887,6 +907,7 @@ where validation_service, collation_service, notification_sinks.clone(), + approval_voting_parallel_enabled, ) .remote_handle(); @@ -900,6 +921,7 @@ where sync_oracle, metrics, notification_sinks, + approval_voting_parallel_enabled, ); futures::pin_mut!(orchestra_signal_handler); @@ -926,6 +948,7 @@ fn update_our_view( finalized_number: BlockNumber, metrics: &Metrics, notification_sinks: &Arc>>>, + approval_voting_parallel_enabled: bool, ) { let new_view = construct_view(live_heads.iter().map(|v| v.hash), finalized_number); @@ -970,6 +993,7 @@ fn update_our_view( dispatch_validation_event_to_all_unbounded( NetworkBridgeEvent::OurViewChange(our_view.clone()), ctx.sender(), + approval_voting_parallel_enabled, ); dispatch_collation_event_to_all_unbounded( @@ -1081,8 +1105,15 @@ async fn dispatch_validation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl overseer::NetworkBridgeRxSenderTrait, metrics: &Metrics, + approval_voting_parallel_enabled: bool, ) { - dispatch_validation_events_to_all(std::iter::once(event), ctx, metrics).await + dispatch_validation_events_to_all( + std::iter::once(event), + ctx, + metrics, + approval_voting_parallel_enabled, + ) + .await } async fn dispatch_collation_event_to_all( @@ -1095,6 +1126,7 @@ async fn dispatch_collation_event_to_all( fn dispatch_validation_event_to_all_unbounded( event: NetworkBridgeEvent, sender: &mut impl overseer::NetworkBridgeRxSenderTrait, + approval_voting_parallel_enabled: bool, ) { event .focus() @@ -1106,11 +1138,20 @@ fn dispatch_validation_event_to_all_unbounded( .ok() .map(BitfieldDistributionMessage::from) .and_then(|msg| Some(sender.send_unbounded_message(msg))); - event - .focus() - .ok() - .map(ApprovalDistributionMessage::from) - .and_then(|msg| Some(sender.send_unbounded_message(msg))); + + if approval_voting_parallel_enabled { + event + .focus() + .ok() + .map(ApprovalVotingParallelMessage::from) + .and_then(|msg| Some(sender.send_unbounded_message(msg))); + } else { + event + .focus() + .ok() + .map(ApprovalDistributionMessage::from) + .and_then(|msg| Some(sender.send_unbounded_message(msg))); + } event .focus() .ok() @@ -1131,6 +1172,7 @@ async fn dispatch_validation_events_to_all( events: I, sender: &mut impl overseer::NetworkBridgeRxSenderTrait, _metrics: &Metrics, + approval_voting_parallel_enabled: bool, ) where I: IntoIterator>, I::IntoIter: Send, @@ -1160,7 +1202,11 @@ async fn dispatch_validation_events_to_all( for event in events { send_message!(event, StatementDistributionMessage); send_message!(event, BitfieldDistributionMessage); - send_message!(event, ApprovalDistributionMessage); + if approval_voting_parallel_enabled { + send_message!(event, ApprovalVotingParallelMessage); + } else { + send_message!(event, ApprovalDistributionMessage); + } send_message!(event, GossipSupportMessage); } } diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index 
601dca5cb8a3..a96817eb2546 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -529,6 +529,7 @@ fn test_harness>( validation_service, collation_service, notification_sinks, + approval_voting_parallel_enabled: false, }; let network_bridge = run_network_in(bridge, context) diff --git a/polkadot/node/overseer/src/dummy.rs b/polkadot/node/overseer/src/dummy.rs index fc5f0070773b..6f9cd9d00403 100644 --- a/polkadot/node/overseer/src/dummy.rs +++ b/polkadot/node/overseer/src/dummy.rs @@ -88,6 +88,7 @@ pub fn dummy_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, SubsystemError, > @@ -131,6 +132,7 @@ pub fn one_for_all_overseer_builder( Sub, Sub, Sub, + Sub, >, SubsystemError, > @@ -155,6 +157,7 @@ where + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> + + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> @@ -183,6 +186,7 @@ where .statement_distribution(subsystem.clone()) .approval_distribution(subsystem.clone()) .approval_voting(subsystem.clone()) + .approval_voting_parallel(subsystem.clone()) .gossip_support(subsystem.clone()) .dispute_coordinator(subsystem.clone()) .dispute_distribution(subsystem.clone()) diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 23adf4f4d8a4..e30ed69f9e37 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -60,6 +60,7 @@ // unused dependencies can not work for test and examples at the same time // yielding false positives #![warn(missing_docs)] +#![allow(dead_code)] // TODO https://github.com/paritytech/polkadot-sdk/issues/5793 use std::{ collections::{hash_map, HashMap}, @@ -76,13 +77,13 @@ use sc_client_api::{BlockImportNotification, BlockchainEvents, FinalityNotificat use self::messages::{BitfieldSigningMessage, PvfCheckerMessage}; use polkadot_node_subsystem_types::messages::{ - ApprovalDistributionMessage, ApprovalVotingMessage, AvailabilityDistributionMessage, - AvailabilityRecoveryMessage, AvailabilityStoreMessage, BitfieldDistributionMessage, - CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, - CollationGenerationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, - DisputeDistributionMessage, GossipSupportMessage, NetworkBridgeRxMessage, - NetworkBridgeTxMessage, ProspectiveParachainsMessage, ProvisionerMessage, RuntimeApiMessage, - StatementDistributionMessage, + ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage, + AvailabilityDistributionMessage, AvailabilityRecoveryMessage, AvailabilityStoreMessage, + BitfieldDistributionMessage, CandidateBackingMessage, CandidateValidationMessage, + ChainApiMessage, ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage, + DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage, + NetworkBridgeRxMessage, NetworkBridgeTxMessage, ProspectiveParachainsMessage, + ProvisionerMessage, RuntimeApiMessage, StatementDistributionMessage, }; pub use polkadot_node_subsystem_types::{ @@ -550,6 +551,7 @@ pub struct Overseer { BitfieldDistributionMessage, StatementDistributionMessage, ApprovalDistributionMessage, + ApprovalVotingParallelMessage, GossipSupportMessage, DisputeDistributionMessage, CollationGenerationMessage, @@ -595,7 +597,19 @@ pub struct Overseer { RuntimeApiMessage, ])] approval_voting: ApprovalVoting, - + 
#[subsystem(blocking, message_capacity: 64000, ApprovalVotingParallelMessage, sends: [ + AvailabilityRecoveryMessage, + CandidateValidationMessage, + ChainApiMessage, + ChainSelectionMessage, + DisputeCoordinatorMessage, + RuntimeApiMessage, + NetworkBridgeTxMessage, + ApprovalVotingMessage, + ApprovalDistributionMessage, + ApprovalVotingParallelMessage, + ])] + approval_voting_parallel: ApprovalVotingParallel, #[subsystem(GossipSupportMessage, sends: [ NetworkBridgeTxMessage, NetworkBridgeRxMessage, // TODO @@ -613,6 +627,7 @@ pub struct Overseer { AvailabilityStoreMessage, AvailabilityRecoveryMessage, ChainSelectionMessage, + ApprovalVotingParallelMessage, ])] dispute_coordinator: DisputeCoordinator, diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 8e78d8fc8921..cb0add03e2e7 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -950,7 +950,7 @@ fn test_prospective_parachains_msg() -> ProspectiveParachainsMessage { // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly. #[test] fn overseer_all_subsystems_receive_signals_and_messages() { - const NUM_SUBSYSTEMS: usize = 23; + const NUM_SUBSYSTEMS: usize = 24; // -4 for BitfieldSigning, GossipSupport, AvailabilityDistribution and PvfCheckerSubsystem. const NUM_SUBSYSTEMS_MESSAGED: usize = NUM_SUBSYSTEMS - 4; @@ -1028,6 +1028,11 @@ fn overseer_all_subsystems_receive_signals_and_messages() { handle .send_msg_anon(AllMessages::ApprovalDistribution(test_approval_distribution_msg())) .await; + handle + .send_msg_anon(AllMessages::ApprovalVotingParallel( + test_approval_distribution_msg().into(), + )) + .await; handle .send_msg_anon(AllMessages::ApprovalVoting(test_approval_voting_msg())) .await; @@ -1101,6 +1106,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (chain_selection_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (pvf_checker_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (prospective_parachains_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); + let (approval_voting_parallel_tx, _) = metered::channel(CHANNEL_CAPACITY); let (candidate_validation_unbounded_tx, _) = metered::unbounded(); let (candidate_backing_unbounded_tx, _) = metered::unbounded(); @@ -1125,6 +1131,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (chain_selection_unbounded_tx, _) = metered::unbounded(); let (pvf_checker_unbounded_tx, _) = metered::unbounded(); let (prospective_parachains_unbounded_tx, _) = metered::unbounded(); + let (approval_voting_parallel_unbounded_tx, _) = metered::unbounded(); let channels_out = ChannelsOut { candidate_validation: candidate_validation_bounded_tx.clone(), @@ -1150,6 +1157,7 @@ fn context_holds_onto_message_until_enough_signals_received() { chain_selection: chain_selection_bounded_tx.clone(), pvf_checker: pvf_checker_bounded_tx.clone(), prospective_parachains: prospective_parachains_bounded_tx.clone(), + approval_voting_parallel: approval_voting_parallel_tx.clone(), candidate_validation_unbounded: candidate_validation_unbounded_tx.clone(), candidate_backing_unbounded: candidate_backing_unbounded_tx.clone(), @@ -1174,6 +1182,7 @@ fn context_holds_onto_message_until_enough_signals_received() { chain_selection_unbounded: chain_selection_unbounded_tx.clone(), pvf_checker_unbounded: pvf_checker_unbounded_tx.clone(), prospective_parachains_unbounded: prospective_parachains_unbounded_tx.clone(), + approval_voting_parallel_unbounded: 
approval_voting_parallel_unbounded_tx.clone(), }; let (mut signal_tx, signal_rx) = metered::channel(CHANNEL_CAPACITY); diff --git a/polkadot/node/primitives/src/approval/mod.rs b/polkadot/node/primitives/src/approval/mod.rs index 79f4cfa9e0be..42342f9889a9 100644 --- a/polkadot/node/primitives/src/approval/mod.rs +++ b/polkadot/node/primitives/src/approval/mod.rs @@ -124,7 +124,7 @@ pub mod v1 { } /// Metadata about a block which is now live in the approval protocol. - #[derive(Debug)] + #[derive(Debug, Clone)] pub struct BlockApprovalMeta { /// The hash of the block. pub hash: Hash, diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 89f8212bf9d8..8d50b54b2fdc 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -116,6 +116,7 @@ polkadot-gossip-support = { optional = true, workspace = true, default-features polkadot-network-bridge = { optional = true, workspace = true, default-features = true } polkadot-node-collation-generation = { optional = true, workspace = true, default-features = true } polkadot-node-core-approval-voting = { optional = true, workspace = true, default-features = true } +polkadot-node-core-approval-voting-parallel = { optional = true, workspace = true, default-features = true } polkadot-node-core-av-store = { optional = true, workspace = true, default-features = true } polkadot-node-core-backing = { optional = true, workspace = true, default-features = true } polkadot-node-core-bitfield-signing = { optional = true, workspace = true, default-features = true } @@ -160,6 +161,7 @@ full-node = [ "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 1f2efdbbb5b3..d8f147a9cf7b 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -53,6 +53,7 @@ sp_api::decl_runtime_apis! { } } +#[allow(dead_code)] struct Runtime; sp_api::impl_runtime_apis! { diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 794243568804..d029f3be53eb 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -660,6 +660,8 @@ pub struct NewFullParams { #[allow(dead_code)] pub malus_finality_delay: Option, pub hwbench: Option, + /// Enable approval voting processing in parallel. + pub enable_approval_voting_parallel: bool, } #[cfg(feature = "full-node")] @@ -753,6 +755,7 @@ pub fn new_full< execute_workers_max_num, prepare_workers_soft_max_num, prepare_workers_hard_max_num, + enable_approval_voting_parallel, }: NewFullParams, ) -> Result { use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD; @@ -784,6 +787,14 @@ pub fn new_full< Some(backoff) }; + // Running approval voting in parallel is enabled by default on all networks except Polkadot and + // Kusama, unless explicitly enabled by the commandline option. + // This is meant to be temporary until we have enough confidence in the new system to enable it + // by default on all networks. 
+ let enable_approval_voting_parallel = (!config.chain_spec.is_kusama() && + !config.chain_spec.is_polkadot()) || + enable_approval_voting_parallel; + let disable_grandpa = config.disable_grandpa; let name = config.network.node_name.clone(); @@ -806,6 +817,7 @@ pub fn new_full< overseer_handle.clone(), metrics, Some(basics.task_manager.spawn_handle()), + enable_approval_voting_parallel, ) } else { SelectRelayChain::new_longest_chain(basics.backend.clone()) @@ -1016,6 +1028,7 @@ pub fn new_full< dispute_coordinator_config, chain_selection_config, fetch_chunks_threshold, + enable_approval_voting_parallel, }) }; diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 3c071e34fe11..a98b6bcb3083 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -58,6 +58,9 @@ pub use polkadot_network_bridge::{ }; pub use polkadot_node_collation_generation::CollationGenerationSubsystem; pub use polkadot_node_core_approval_voting::ApprovalVotingSubsystem; +pub use polkadot_node_core_approval_voting_parallel::{ + ApprovalVotingParallelSubsystem, Metrics as ApprovalVotingParallelMetrics, +}; pub use polkadot_node_core_av_store::AvailabilityStoreSubsystem; pub use polkadot_node_core_backing::CandidateBackingSubsystem; pub use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem; @@ -139,9 +142,16 @@ pub struct ExtendedOverseerGenArgs { /// than the value put in here we always try to recovery availability from backers. /// The presence of this parameter here is needed to have different values per chain. pub fetch_chunks_threshold: Option, + /// Enable approval-voting-parallel subsystem and disable the standalone approval-voting and + /// approval-distribution subsystems. + pub enable_approval_voting_parallel: bool, } /// Obtain a prepared validator `Overseer`, that is initialized with all default values. +/// +/// The difference between this function and `validator_with_parallel_overseer_builder` is that this +/// function enables the standalone approval-voting and approval-distribution subsystems +/// and disables the approval-voting-parallel subsystem. 
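The gating expression introduced above reads like a small truth table; a sketch that mirrors it (the free function and argument names are hypothetical, only the boolean logic is taken from the hunk):

```rust
// Parallel approval voting is on by default on every network except Polkadot and
// Kusama, where the command-line option has to opt in explicitly.
fn approval_voting_parallel_enabled(is_polkadot: bool, is_kusama: bool, cli_opt_in: bool) -> bool {
    (!is_kusama && !is_polkadot) || cli_opt_in
}

fn main() {
    assert!(approval_voting_parallel_enabled(false, false, false)); // other networks: on by default
    assert!(!approval_voting_parallel_enabled(true, false, false)); // Polkadot: off unless opted in
    assert!(approval_voting_parallel_enabled(false, true, true)); // Kusama with opt-in: on
    println!("gating matches the comment above");
}
```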
pub fn validator_overseer_builder( OverseerGenArgs { runtime_client, @@ -174,6 +184,7 @@ pub fn validator_overseer_builder( dispute_coordinator_config, chain_selection_config, fetch_chunks_threshold, + enable_approval_voting_parallel, }: ExtendedOverseerGenArgs, ) -> Result< InitializedOverseerBuilder< @@ -203,6 +214,7 @@ pub fn validator_overseer_builder( CollatorProtocolSubsystem, ApprovalDistributionSubsystem, ApprovalVotingSubsystem, + DummySubsystem, GossipSupportSubsystem, DisputeCoordinatorSubsystem, DisputeDistributionSubsystem, @@ -223,7 +235,8 @@ where let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - + let approval_voting_parallel_metrics: ApprovalVotingParallelMetrics = + Metrics::register(registry)?; let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -241,6 +254,7 @@ where peerset_protocol_names, notification_services, notification_sinks, + enable_approval_voting_parallel, )) .availability_distribution(AvailabilityDistributionSubsystem::new( keystore.clone(), @@ -310,18 +324,19 @@ where rand::rngs::StdRng::from_entropy(), )) .approval_distribution(ApprovalDistributionSubsystem::new( - Metrics::register(registry)?, + approval_voting_parallel_metrics.approval_distribution_metrics(), approval_voting_config.slot_duration_millis, Arc::new(RealAssignmentCriteria {}), )) .approval_voting(ApprovalVotingSubsystem::with_config( - approval_voting_config, + approval_voting_config.clone(), parachains_db.clone(), keystore.clone(), Box::new(sync_service.clone()), - Metrics::register(registry)?, + approval_voting_parallel_metrics.approval_voting_metrics(), Arc::new(spawner.clone()), )) + .approval_voting_parallel(DummySubsystem) .gossip_support(GossipSupportSubsystem::new( keystore.clone(), authority_discovery_service.clone(), @@ -332,6 +347,229 @@ where dispute_coordinator_config, keystore.clone(), Metrics::register(registry)?, + enable_approval_voting_parallel, + )) + .dispute_distribution(DisputeDistributionSubsystem::new( + keystore.clone(), + dispute_req_receiver, + authority_discovery_service.clone(), + Metrics::register(registry)?, + )) + .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) + .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) + .activation_external_listeners(Default::default()) + .span_per_active_leaf(Default::default()) + .active_leaves(Default::default()) + .supports_parachains(runtime_client) + .metrics(metrics) + .spawner(spawner); + + let builder = if let Some(capacity) = overseer_message_channel_capacity_override { + builder.message_channel_capacity(capacity) + } else { + builder + }; + Ok(builder) +} + +/// Obtain a prepared validator `Overseer`, that is initialized with all default values. +/// +/// The difference between this function and `validator_overseer_builder` is that this +/// function enables the approval-voting-parallel subsystem and disables the standalone +/// approval-voting and approval-distribution subsystems. 
+pub fn validator_with_parallel_overseer_builder( + OverseerGenArgs { + runtime_client, + network_service, + sync_service, + authority_discovery_service, + collation_req_v1_receiver: _, + collation_req_v2_receiver: _, + available_data_req_receiver, + registry, + spawner, + is_parachain_node, + overseer_message_channel_capacity_override, + req_protocol_names, + peerset_protocol_names, + notification_services, + }: OverseerGenArgs, + ExtendedOverseerGenArgs { + keystore, + parachains_db, + candidate_validation_config, + availability_config, + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + statement_req_receiver, + candidate_req_v2_receiver, + approval_voting_config, + dispute_req_receiver, + dispute_coordinator_config, + chain_selection_config, + fetch_chunks_threshold, + enable_approval_voting_parallel, + }: ExtendedOverseerGenArgs, +) -> Result< + InitializedOverseerBuilder< + SpawnGlue, + Arc, + CandidateValidationSubsystem, + PvfCheckerSubsystem, + CandidateBackingSubsystem, + StatementDistributionSubsystem, + AvailabilityDistributionSubsystem, + AvailabilityRecoverySubsystem, + BitfieldSigningSubsystem, + BitfieldDistributionSubsystem, + ProvisionerSubsystem, + RuntimeApiSubsystem, + AvailabilityStoreSubsystem, + NetworkBridgeRxSubsystem< + Arc, + AuthorityDiscoveryService, + >, + NetworkBridgeTxSubsystem< + Arc, + AuthorityDiscoveryService, + >, + ChainApiSubsystem, + CollationGenerationSubsystem, + CollatorProtocolSubsystem, + DummySubsystem, + DummySubsystem, + ApprovalVotingParallelSubsystem, + GossipSupportSubsystem, + DisputeCoordinatorSubsystem, + DisputeDistributionSubsystem, + ChainSelectionSubsystem, + ProspectiveParachainsSubsystem, + >, + Error, +> +where + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, + Spawner: 'static + SpawnNamed + Clone + Unpin, +{ + use polkadot_node_subsystem_util::metrics::Metrics; + + let metrics = ::register(registry)?; + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); + + let spawner = SpawnGlue(spawner); + + let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; + let approval_voting_parallel_metrics: ApprovalVotingParallelMetrics = + Metrics::register(registry)?; + let builder = Overseer::builder() + .network_bridge_tx(NetworkBridgeTxSubsystem::new( + network_service.clone(), + authority_discovery_service.clone(), + network_bridge_metrics.clone(), + req_protocol_names.clone(), + peerset_protocol_names.clone(), + notification_sinks.clone(), + )) + .network_bridge_rx(NetworkBridgeRxSubsystem::new( + network_service.clone(), + authority_discovery_service.clone(), + Box::new(sync_service.clone()), + network_bridge_metrics, + peerset_protocol_names, + notification_services, + notification_sinks, + enable_approval_voting_parallel, + )) + .availability_distribution(AvailabilityDistributionSubsystem::new( + keystore.clone(), + IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + }, + req_protocol_names.clone(), + Metrics::register(registry)?, + )) + .availability_recovery(AvailabilityRecoverySubsystem::for_validator( + fetch_chunks_threshold, + available_data_req_receiver, + &req_protocol_names, + Metrics::register(registry)?, + )) + .availability_store(AvailabilityStoreSubsystem::new( + parachains_db.clone(), + availability_config, + Box::new(sync_service.clone()), + Metrics::register(registry)?, + )) + .bitfield_distribution(BitfieldDistributionSubsystem::new(Metrics::register(registry)?)) + 
.bitfield_signing(BitfieldSigningSubsystem::new( + keystore.clone(), + Metrics::register(registry)?, + )) + .candidate_backing(CandidateBackingSubsystem::new( + keystore.clone(), + Metrics::register(registry)?, + )) + .candidate_validation(CandidateValidationSubsystem::with_config( + candidate_validation_config, + keystore.clone(), + Metrics::register(registry)?, // candidate-validation metrics + Metrics::register(registry)?, // validation host metrics + )) + .pvf_checker(PvfCheckerSubsystem::new(keystore.clone(), Metrics::register(registry)?)) + .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) + .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) + .collator_protocol({ + let side = match is_parachain_node { + IsParachainNode::Collator(_) | IsParachainNode::FullNode => + return Err(Error::Overseer(SubsystemError::Context( + "build validator overseer for parachain node".to_owned(), + ))), + IsParachainNode::No => ProtocolSide::Validator { + keystore: keystore.clone(), + eviction_policy: Default::default(), + metrics: Metrics::register(registry)?, + }, + }; + CollatorProtocolSubsystem::new(side) + }) + .provisioner(ProvisionerSubsystem::new(Metrics::register(registry)?)) + .runtime_api(RuntimeApiSubsystem::new( + runtime_client.clone(), + Metrics::register(registry)?, + spawner.clone(), + )) + .statement_distribution(StatementDistributionSubsystem::new( + keystore.clone(), + statement_req_receiver, + candidate_req_v2_receiver, + Metrics::register(registry)?, + rand::rngs::StdRng::from_entropy(), + )) + .approval_distribution(DummySubsystem) + .approval_voting(DummySubsystem) + .approval_voting_parallel(ApprovalVotingParallelSubsystem::with_config( + approval_voting_config, + parachains_db.clone(), + keystore.clone(), + Box::new(sync_service.clone()), + approval_voting_parallel_metrics, + spawner.clone(), + overseer_message_channel_capacity_override, + )) + .gossip_support(GossipSupportSubsystem::new( + keystore.clone(), + authority_discovery_service.clone(), + Metrics::register(registry)?, + )) + .dispute_coordinator(DisputeCoordinatorSubsystem::new( + parachains_db.clone(), + dispute_coordinator_config, + keystore.clone(), + Metrics::register(registry)?, + enable_approval_voting_parallel, )) .dispute_distribution(DisputeDistributionSubsystem::new( keystore.clone(), @@ -407,6 +645,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, Error, > @@ -439,6 +678,7 @@ where peerset_protocol_names, notification_services, notification_sinks, + false, )) .availability_distribution(DummySubsystem) .availability_recovery(AvailabilityRecoverySubsystem::for_collator( @@ -481,6 +721,7 @@ where .statement_distribution(DummySubsystem) .approval_distribution(DummySubsystem) .approval_voting(DummySubsystem) + .approval_voting_parallel(DummySubsystem) .gossip_support(DummySubsystem) .dispute_coordinator(DummySubsystem) .dispute_distribution(DummySubsystem) @@ -537,9 +778,15 @@ impl OverseerGen for ValidatorOverseerGen { "create validator overseer as mandatory extended arguments were not provided" .to_owned(), )))?; - validator_overseer_builder(args, ext_args)? - .build_with_connector(connector) - .map_err(|e| e.into()) + if ext_args.enable_approval_voting_parallel { + validator_with_parallel_overseer_builder(args, ext_args)? + .build_with_connector(connector) + .map_err(|e| e.into()) + } else { + validator_overseer_builder(args, ext_args)? 
+ .build_with_connector(connector) + .map_err(|e| e.into()) + } } } diff --git a/polkadot/node/service/src/parachains_db/mod.rs b/polkadot/node/service/src/parachains_db/mod.rs index 59af30dceeb9..887db80a3034 100644 --- a/polkadot/node/service/src/parachains_db/mod.rs +++ b/polkadot/node/service/src/parachains_db/mod.rs @@ -100,18 +100,11 @@ pub struct CacheSizes { pub availability_meta: usize, /// Cache used by approval data. pub approval_data: usize, - /// Cache used by session window data - pub session_data: usize, } impl Default for CacheSizes { fn default() -> Self { - CacheSizes { - availability_data: 25, - availability_meta: 1, - approval_data: 5, - session_data: 1, - } + CacheSizes { availability_data: 25, availability_meta: 1, approval_data: 5 } } } diff --git a/polkadot/node/service/src/relay_chain_selection.rs b/polkadot/node/service/src/relay_chain_selection.rs index c0b1ce8b0ebe..e48874f01ca6 100644 --- a/polkadot/node/service/src/relay_chain_selection.rs +++ b/polkadot/node/service/src/relay_chain_selection.rs @@ -39,8 +39,8 @@ use super::{HeaderProvider, HeaderProviderProvider}; use futures::channel::oneshot; use polkadot_node_primitives::MAX_FINALITY_LAG as PRIMITIVES_MAX_FINALITY_LAG; use polkadot_node_subsystem::messages::{ - ApprovalDistributionMessage, ApprovalVotingMessage, ChainSelectionMessage, - DisputeCoordinatorMessage, HighestApprovedAncestorBlock, + ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage, + ChainSelectionMessage, DisputeCoordinatorMessage, HighestApprovedAncestorBlock, }; use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_overseer::{AllMessages, Handle}; @@ -169,6 +169,7 @@ where overseer: Handle, metrics: Metrics, spawn_handle: Option, + approval_voting_parallel_enabled: bool, ) -> Self { gum::debug!(target: LOG_TARGET, "Using dispute aware relay-chain selection algorithm",); @@ -179,6 +180,7 @@ where overseer, metrics, spawn_handle, + approval_voting_parallel_enabled, )), } } @@ -230,6 +232,7 @@ pub struct SelectRelayChainInner { overseer: OH, metrics: Metrics, spawn_handle: Option, + approval_voting_parallel_enabled: bool, } impl SelectRelayChainInner @@ -244,8 +247,15 @@ where overseer: OH, metrics: Metrics, spawn_handle: Option, + approval_voting_parallel_enabled: bool, ) -> Self { - SelectRelayChainInner { backend, overseer, metrics, spawn_handle } + SelectRelayChainInner { + backend, + overseer, + metrics, + spawn_handle, + approval_voting_parallel_enabled, + } } fn block_header(&self, hash: Hash) -> Result { @@ -284,6 +294,7 @@ where overseer: self.overseer.clone(), metrics: self.metrics.clone(), spawn_handle: self.spawn_handle.clone(), + approval_voting_parallel_enabled: self.approval_voting_parallel_enabled, } } } @@ -448,13 +459,25 @@ where // 2. Constrain according to `ApprovedAncestor`. 
let (subchain_head, subchain_number, subchain_block_descriptions) = { let (tx, rx) = oneshot::channel(); - overseer - .send_msg( - ApprovalVotingMessage::ApprovedAncestor(subchain_head, target_number, tx), - std::any::type_name::(), - ) - .await; - + if self.approval_voting_parallel_enabled { + overseer + .send_msg( + ApprovalVotingParallelMessage::ApprovedAncestor( + subchain_head, + target_number, + tx, + ), + std::any::type_name::(), + ) + .await; + } else { + overseer + .send_msg( + ApprovalVotingMessage::ApprovedAncestor(subchain_head, target_number, tx), + std::any::type_name::(), + ) + .await; + } match rx .await .map_err(Error::ApprovedAncestorCanceled) @@ -476,13 +499,23 @@ where // task for sending the message to not block here and delay finality. if let Some(spawn_handle) = &self.spawn_handle { let mut overseer_handle = self.overseer.clone(); + let approval_voting_parallel_enabled = self.approval_voting_parallel_enabled; let lag_update_task = async move { - overseer_handle - .send_msg( - ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag), - std::any::type_name::(), - ) - .await; + if approval_voting_parallel_enabled { + overseer_handle + .send_msg( + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag), + std::any::type_name::(), + ) + .await; + } else { + overseer_handle + .send_msg( + ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag), + std::any::type_name::(), + ) + .await; + } }; spawn_handle.spawn( diff --git a/polkadot/node/service/src/tests.rs b/polkadot/node/service/src/tests.rs index 195432bcb75d..78bbfcd5444f 100644 --- a/polkadot/node/service/src/tests.rs +++ b/polkadot/node/service/src/tests.rs @@ -63,9 +63,6 @@ struct TestHarness { finality_target_rx: Receiver>, } -#[derive(Default)] -struct HarnessConfig; - fn test_harness>( case_vars: CaseVars, test: impl FnOnce(TestHarness) -> T, @@ -83,6 +80,7 @@ fn test_harness>( context.sender().clone(), Default::default(), None, + false, ); let target_hash = case_vars.target_block; diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index ae798cf2640a..293df9f6e6d5 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -80,6 +80,7 @@ serde_yaml = { workspace = true } serde_json = { workspace = true } polkadot-node-core-approval-voting = { workspace = true, default-features = true } +polkadot-node-core-approval-voting-parallel = { workspace = true, default-features = true } polkadot-approval-distribution = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-runtime = { workspace = true } diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml index cae1a30914da..1423d324df3f 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml @@ -9,6 +9,7 @@ TestConfiguration: coalesce_tranche_diff: 12 num_no_shows_per_candidate: 10 workdir_prefix: "/tmp/" + approval_voting_parallel_enabled: false n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml index 7edb48e302a4..87c6103a5d0a 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml @@ -9,6 +9,7 @@ 
TestConfiguration: coalesce_tranche_diff: 12 num_no_shows_per_candidate: 0 workdir_prefix: "/tmp" + approval_voting_parallel_enabled: true n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml index 7c24f50e6af5..5e2ea3817d17 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml @@ -8,6 +8,7 @@ TestConfiguration: stop_when_approved: true coalesce_tranche_diff: 12 num_no_shows_per_candidate: 0 + approval_voting_parallel_enabled: false workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 diff --git a/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs index 4b2b91696824..a3a475ac6b98 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs @@ -21,8 +21,11 @@ use polkadot_node_network_protocol::{ View, }; use polkadot_node_primitives::approval::time::{Clock, SystemClock, Tick}; +use polkadot_node_subsystem::messages::{ + ApprovalDistributionMessage, ApprovalVotingParallelMessage, +}; use polkadot_node_subsystem_types::messages::{ - network_bridge_event::NewGossipTopology, ApprovalDistributionMessage, NetworkBridgeEvent, + network_bridge_event::NewGossipTopology, NetworkBridgeEvent, }; use polkadot_overseer::AllMessages; use polkadot_primitives::{ @@ -121,6 +124,7 @@ pub fn generate_topology(test_authorities: &TestAuthorities) -> SessionGridTopol pub fn generate_new_session_topology( test_authorities: &TestAuthorities, test_node: ValidatorIndex, + approval_voting_parallel_enabled: bool, ) -> Vec { let topology = generate_topology(test_authorities); @@ -129,14 +133,29 @@ pub fn generate_new_session_topology( topology, local_index: Some(test_node), }); - vec![AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(event))] + vec![if approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel(ApprovalVotingParallelMessage::NetworkBridgeUpdate( + event, + )) + } else { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(event)) + }] } /// Generates a peer view change for the passed `block_hash` -pub fn generate_peer_view_change_for(block_hash: Hash, peer_id: PeerId) -> AllMessages { +pub fn generate_peer_view_change_for( + block_hash: Hash, + peer_id: PeerId, + approval_voting_parallel_enabled: bool, +) -> AllMessages { let network = NetworkBridgeEvent::PeerViewChange(peer_id, View::new([block_hash], 0)); - - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(network)) + if approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel(ApprovalVotingParallelMessage::NetworkBridgeUpdate( + network, + )) + } else { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(network)) + } } /// Helper function to create a a signature for the block header. 
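`generate_peer_view_change_for` and `generate_new_session_topology` above follow the same routing pattern used in the network bridge and relay-chain selection: wrap the same payload for the parallel subsystem when the flag is on, otherwise for approval-distribution. A stand-alone sketch with placeholder message types (not the real `AllMessages` / overseer enums):

```rust
// Stand-in types only: the same network event is wrapped either for the combined
// approval-voting-parallel subsystem or for the legacy approval-distribution
// subsystem, depending on the runtime flag.
#[derive(Debug)]
enum Routed {
    ApprovalDistribution(&'static str),
    ApprovalVotingParallel(&'static str),
}

fn route_network_event(event: &'static str, approval_voting_parallel_enabled: bool) -> Routed {
    if approval_voting_parallel_enabled {
        Routed::ApprovalVotingParallel(event)
    } else {
        Routed::ApprovalDistribution(event)
    }
}

fn main() {
    println!("{:?}", route_network_event("peer-view-change", true));
    println!("{:?}", route_network_event("peer-view-change", false));
}
```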
diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index da25a3bf3b79..807afb0438c9 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -401,7 +401,7 @@ impl PeerMessagesGenerator { /// We can not sample every time for all the messages because that would be too expensive to /// perform, so pre-generate a list of samples for a given network size. /// - result[i] give us as a list of random nodes that would send a given message to the node under -/// test. +/// test. fn random_samplings_to_node( node_under_test: ValidatorIndex, num_validators: usize, @@ -474,8 +474,7 @@ fn issue_approvals( coalesce_approvals_len(options.coalesce_mean, options.coalesce_std_dev, rand_chacha); let result = assignments .iter() - .enumerate() - .map(|(_index, message)| match &message.msg { + .map(|message| match &message.msg { protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => { let mut approvals_to_create = Vec::new(); diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 9d85039b8880..29ebc4a419ae 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -49,20 +49,21 @@ use itertools::Itertools; use orchestra::TimeoutExt; use overseer::{metrics::Metrics as OverseerMetrics, MetricsTrait}; use polkadot_approval_distribution::ApprovalDistribution; +use polkadot_node_core_approval_voting_parallel::ApprovalVotingParallelSubsystem; use polkadot_node_primitives::approval::time::{ slot_number_to_tick, tick_to_slot_number, Clock, ClockExt, SystemClock, }; use polkadot_node_core_approval_voting::{ - ApprovalVotingSubsystem, Config as ApprovalVotingConfig, Metrics as ApprovalVotingMetrics, - RealAssignmentCriteria, + ApprovalVotingSubsystem, Config as ApprovalVotingConfig, RealAssignmentCriteria, }; use polkadot_node_network_protocol::v3 as protocol_v3; use polkadot_node_primitives::approval::{self, v1::RelayVRFStory}; -use polkadot_node_subsystem::{overseer, AllMessages, Overseer, OverseerConnector, SpawnGlue}; +use polkadot_node_subsystem::{ + messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage}, + overseer, AllMessages, Overseer, OverseerConnector, SpawnGlue, +}; use polkadot_node_subsystem_test_helpers::mock::new_block_import_info; -use polkadot_node_subsystem_types::messages::{ApprovalDistributionMessage, ApprovalVotingMessage}; -use polkadot_node_subsystem_util::metrics::Metrics; use polkadot_overseer::Handle as OverseerHandleReal; use polkadot_primitives::{ BlockNumber, CandidateEvent, CandidateIndex, CandidateReceipt, Hash, Header, Slot, ValidatorId, @@ -138,6 +139,9 @@ pub struct ApprovalsOptions { /// The number of no shows per candidate #[clap(short, long, default_value_t = 0)] pub num_no_shows_per_candidate: u32, + /// Enable approval voting parallel. + #[clap(short, long, default_value_t = true)] + pub approval_voting_parallel_enabled: bool, } impl ApprovalsOptions { @@ -272,7 +276,7 @@ pub struct ApprovalTestState { /// Total unique sent messages. total_unique_messages: Arc, /// Approval voting metrics. 
- approval_voting_metrics: ApprovalVotingMetrics, + approval_voting_parallel_metrics: polkadot_node_core_approval_voting_parallel::Metrics, /// The delta ticks from the tick the messages were generated to the the time we start this /// message. delta_tick_from_generated: Arc, @@ -330,7 +334,10 @@ impl ApprovalTestState { total_sent_messages_from_node: Arc::new(AtomicU64::new(0)), total_unique_messages: Arc::new(AtomicU64::new(0)), options, - approval_voting_metrics: ApprovalVotingMetrics::try_register(&dependencies.registry) + approval_voting_parallel_metrics: + polkadot_node_core_approval_voting_parallel::Metrics::try_register( + &dependencies.registry, + ) .unwrap(), delta_tick_from_generated: Arc::new(AtomicU64::new(630720000)), configuration: configuration.clone(), @@ -456,6 +463,14 @@ impl ApprovalTestState { }) .collect() } + + fn subsystem_name(&self) -> &'static str { + if self.options.approval_voting_parallel_enabled { + "approval-voting-parallel-subsystem" + } else { + "approval-distribution-subsystem" + } + } } impl ApprovalTestState { @@ -597,13 +612,16 @@ impl PeerMessageProducer { // so when the approval-distribution answered to it, we know it doesn't have anything // else to process. let (tx, rx) = oneshot::channel(); - let msg = ApprovalDistributionMessage::GetApprovalSignatures(HashSet::new(), tx); - self.send_overseer_message( - AllMessages::ApprovalDistribution(msg), - ValidatorIndex(0), - None, - ) - .await; + let msg = if self.options.approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel( + ApprovalVotingParallelMessage::GetApprovalSignatures(HashSet::new(), tx), + ) + } else { + AllMessages::ApprovalDistribution( + ApprovalDistributionMessage::GetApprovalSignatures(HashSet::new(), tx), + ) + }; + self.send_overseer_message(msg, ValidatorIndex(0), None).await; rx.await.expect("Failed to get signatures"); self.notify_done.send(()).expect("Failed to notify main loop"); gum::info!("All messages processed "); @@ -743,7 +761,11 @@ impl PeerMessageProducer { for validator in 1..self.state.test_authorities.validator_authority_id.len() as u32 { let peer_id = self.state.test_authorities.peer_ids.get(validator as usize).unwrap(); let validator = ValidatorIndex(validator); - let view_update = generate_peer_view_change_for(block_info.hash, *peer_id); + let view_update = generate_peer_view_change_for( + block_info.hash, + *peer_id, + self.state.options.approval_voting_parallel_enabled, + ); self.send_overseer_message(view_update, validator, None).await; } @@ -808,24 +830,12 @@ fn build_overseer( let system_clock = PastSystemClock::new(SystemClock {}, state.delta_tick_from_generated.clone()); - let approval_voting = ApprovalVotingSubsystem::with_config_and_clock( - TEST_CONFIG, - Arc::new(db), - Arc::new(keystore), - Box::new(TestSyncOracle {}), - state.approval_voting_metrics.clone(), - Arc::new(system_clock.clone()), - Arc::new(SpawnGlue(spawn_task_handle.clone())), - ); + let keystore = Arc::new(keystore); + let db = Arc::new(db); - let approval_distribution = ApprovalDistribution::new_with_clock( - Metrics::register(Some(&dependencies.registry)).unwrap(), - SLOT_DURATION_MILLIS, - Box::new(system_clock.clone()), - Arc::new(RealAssignmentCriteria {}), - ); let mock_chain_api = MockChainApi::new(state.build_chain_api_state()); - let mock_chain_selection = MockChainSelection { state: state.clone(), clock: system_clock }; + let mock_chain_selection = + MockChainSelection { state: state.clone(), clock: system_clock.clone() }; let mock_runtime_api = 
MockRuntimeApi::new( config.clone(), state.test_authorities.clone(), @@ -840,11 +850,14 @@ fn build_overseer( network_interface.subsystem_sender(), state.test_authorities.clone(), ); - let mock_rx_bridge = MockNetworkBridgeRx::new(network_receiver, None); + let mock_rx_bridge = MockNetworkBridgeRx::new( + network_receiver, + None, + state.options.approval_voting_parallel_enabled, + ); let overseer_metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap(); - let dummy = dummy_builder!(spawn_task_handle, overseer_metrics) - .replace_approval_distribution(|_| approval_distribution) - .replace_approval_voting(|_| approval_voting) + let task_handle = spawn_task_handle.clone(); + let dummy = dummy_builder!(task_handle, overseer_metrics) .replace_chain_api(|_| mock_chain_api) .replace_chain_selection(|_| mock_chain_selection) .replace_runtime_api(|_| mock_runtime_api) @@ -853,8 +866,45 @@ fn build_overseer( .replace_availability_recovery(|_| MockAvailabilityRecovery::new()) .replace_candidate_validation(|_| MockCandidateValidation::new()); - let (overseer, raw_handle) = - dummy.build_with_connector(overseer_connector).expect("Should not fail"); + let (overseer, raw_handle) = if state.options.approval_voting_parallel_enabled { + let approval_voting_parallel = ApprovalVotingParallelSubsystem::with_config_and_clock( + TEST_CONFIG, + db.clone(), + keystore.clone(), + Box::new(TestSyncOracle {}), + state.approval_voting_parallel_metrics.clone(), + Arc::new(system_clock.clone()), + SpawnGlue(spawn_task_handle.clone()), + None, + ); + dummy + .replace_approval_voting_parallel(|_| approval_voting_parallel) + .build_with_connector(overseer_connector) + .expect("Should not fail") + } else { + let approval_voting = ApprovalVotingSubsystem::with_config_and_clock( + TEST_CONFIG, + db.clone(), + keystore.clone(), + Box::new(TestSyncOracle {}), + state.approval_voting_parallel_metrics.approval_voting_metrics(), + Arc::new(system_clock.clone()), + Arc::new(SpawnGlue(spawn_task_handle.clone())), + ); + + let approval_distribution = ApprovalDistribution::new_with_clock( + state.approval_voting_parallel_metrics.approval_distribution_metrics(), + TEST_CONFIG.slot_duration_millis, + Arc::new(system_clock.clone()), + Arc::new(RealAssignmentCriteria {}), + ); + + dummy + .replace_approval_voting(|_| approval_voting) + .replace_approval_distribution(|_| approval_distribution) + .build_with_connector(overseer_connector) + .expect("Should not fail") + }; let overseer_handle = OverseerHandleReal::new(raw_handle); (overseer, overseer_handle) @@ -943,11 +993,18 @@ pub async fn bench_approvals_run( // First create the initialization messages that make sure that then node under // tests receives notifications about the topology used and the connected peers. 
let mut initialization_messages = env.network().generate_peer_connected(|e| { - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(e)) + if state.options.approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel(ApprovalVotingParallelMessage::NetworkBridgeUpdate( + e, + )) + } else { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(e)) + } }); initialization_messages.extend(generate_new_session_topology( &state.test_authorities, ValidatorIndex(NODE_UNDER_TEST), + state.options.approval_voting_parallel_enabled, )); for message in initialization_messages { env.send_message(message).await; @@ -1012,7 +1069,14 @@ pub async fn bench_approvals_run( state.total_sent_messages_to_node.load(std::sync::atomic::Ordering::SeqCst) as usize; env.wait_until_metric( "polkadot_parachain_subsystem_bounded_received", - Some(("subsystem_name", "approval-distribution-subsystem")), + Some(( + "subsystem_name", + if state.options.approval_voting_parallel_enabled { + "approval-voting-parallel-subsystem" + } else { + "approval-distribution-subsystem" + }, + )), |value| { gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 @@ -1029,11 +1093,22 @@ pub async fn bench_approvals_run( CandidateEvent::CandidateIncluded(receipt_fetch, _head, _, _) => { let (tx, rx) = oneshot::channel(); - let msg = ApprovalVotingMessage::GetApprovalSignaturesForCandidate( - receipt_fetch.hash(), - tx, - ); - env.send_message(AllMessages::ApprovalVoting(msg)).await; + let msg = if state.options.approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel( + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate( + receipt_fetch.hash(), + tx, + ), + ) + } else { + AllMessages::ApprovalVoting( + ApprovalVotingMessage::GetApprovalSignaturesForCandidate( + receipt_fetch.hash(), + tx, + ), + ) + }; + env.send_message(msg).await; let result = rx.await.unwrap(); @@ -1057,7 +1132,7 @@ pub async fn bench_approvals_run( state.total_sent_messages_to_node.load(std::sync::atomic::Ordering::SeqCst) as usize; env.wait_until_metric( "polkadot_parachain_subsystem_bounded_received", - Some(("subsystem_name", "approval-distribution-subsystem")), + Some(("subsystem_name", state.subsystem_name())), |value| { gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 @@ -1098,5 +1173,8 @@ pub async fn bench_approvals_run( state.total_unique_messages.load(std::sync::atomic::Ordering::SeqCst) ); - env.collect_resource_usage(&["approval-distribution", "approval-voting"]) + env.collect_resource_usage( + &["approval-distribution", "approval-voting", "approval-voting-parallel"], + true, + ) } diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index 32dc8ae2c8dc..f28adff315f8 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -210,7 +210,7 @@ pub fn prepare_test( state.test_authorities.clone(), ); let network_bridge_rx = - network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_v2_cfg)); + network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_v2_cfg), false); let runtime_api = MockRuntimeApi::new( state.config.clone(), @@ -372,7 +372,7 @@ pub async fn benchmark_availability_read( ); env.stop().await; - 
env.collect_resource_usage(&["availability-recovery"]) + env.collect_resource_usage(&["availability-recovery"], false) } pub async fn benchmark_availability_write( @@ -506,9 +506,8 @@ pub async fn benchmark_availability_write( ); env.stop().await; - env.collect_resource_usage(&[ - "availability-distribution", - "bitfield-distribution", - "availability-store", - ]) + env.collect_resource_usage( + &["availability-distribution", "bitfield-distribution", "availability-store"], + false, + ) } diff --git a/polkadot/node/subsystem-bench/src/lib/display.rs b/polkadot/node/subsystem-bench/src/lib/display.rs index b153d54a7c36..c47dd9a07900 100644 --- a/polkadot/node/subsystem-bench/src/lib/display.rs +++ b/polkadot/node/subsystem-bench/src/lib/display.rs @@ -96,6 +96,23 @@ pub struct TestMetric { value: f64, } +impl TestMetric { + pub fn name(&self) -> &str { + &self.name + } + + pub fn value(&self) -> f64 { + self.value + } + + pub fn label_value(&self, label_name: &str) -> Option<&str> { + self.label_names + .iter() + .position(|name| name == label_name) + .and_then(|index| self.label_values.get(index).map(|s| s.as_str())) + } +} + impl Display for TestMetric { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index a63f90da50b3..4de683ad6487 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -351,10 +351,14 @@ impl TestEnvironment { } } - pub fn collect_resource_usage(&self, subsystems_under_test: &[&str]) -> BenchmarkUsage { + pub fn collect_resource_usage( + &self, + subsystems_under_test: &[&str], + break_down_cpu_usage_per_task: bool, + ) -> BenchmarkUsage { BenchmarkUsage { network_usage: self.network_usage(), - cpu_usage: self.cpu_usage(subsystems_under_test), + cpu_usage: self.cpu_usage(subsystems_under_test, break_down_cpu_usage_per_task), } } @@ -378,7 +382,11 @@ impl TestEnvironment { ] } - fn cpu_usage(&self, subsystems_under_test: &[&str]) -> Vec { + fn cpu_usage( + &self, + subsystems_under_test: &[&str], + break_down_per_task: bool, + ) -> Vec { let test_metrics = super::display::parse_metrics(self.registry()); let mut usage = vec![]; let num_blocks = self.config().num_blocks as f64; @@ -392,6 +400,22 @@ impl TestEnvironment { total: total_cpu, per_block: total_cpu / num_blocks, }); + + if break_down_per_task { + for metric in subsystem_cpu_metrics.all() { + if metric.name() != "substrate_tasks_polling_duration_sum" { + continue; + } + + if let Some(task_name) = metric.label_value("task_name") { + usage.push(ResourceUsage { + resource_name: format!("{}/{}", subsystem, task_name), + total: metric.value(), + per_block: metric.value() / num_blocks, + }); + } + } + } } let test_env_cpu_metrics = diff --git a/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs index 8783b35f1c04..092a8fc5f4c1 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs @@ -96,5 +96,6 @@ mock!(NetworkBridgeTx); mock!(ChainApi); mock!(ChainSelection); mock!(ApprovalVoting); +mock!(ApprovalVotingParallel); mock!(ApprovalDistribution); mock!(RuntimeApi); diff --git a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs index da4ac05e33b7..2ca47d9fc082 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs +++ 
b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -47,6 +47,7 @@ macro_rules! dummy_builder { // All subsystem except approval_voting and approval_distribution are mock subsystems. Overseer::builder() .approval_voting(MockApprovalVoting {}) + .approval_voting_parallel(MockApprovalVotingParallel {}) .approval_distribution(MockApprovalDistribution {}) .availability_recovery(MockAvailabilityRecovery {}) .candidate_validation(MockCandidateValidation {}) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index d70953926d13..f5474a61e3dc 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -24,13 +24,13 @@ use crate::{ use futures::{channel::mpsc::UnboundedSender, FutureExt, StreamExt}; use polkadot_node_network_protocol::Versioned; use polkadot_node_subsystem::{ - messages::NetworkBridgeTxMessage, overseer, SpawnedSubsystem, SubsystemError, -}; -use polkadot_node_subsystem_types::{ messages::{ - ApprovalDistributionMessage, BitfieldDistributionMessage, NetworkBridgeEvent, - StatementDistributionMessage, + ApprovalDistributionMessage, ApprovalVotingParallelMessage, NetworkBridgeTxMessage, }, + overseer, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_types::{ + messages::{BitfieldDistributionMessage, NetworkBridgeEvent, StatementDistributionMessage}, OverseerSignal, }; use sc_network::{request_responses::ProtocolConfig, RequestFailure}; @@ -57,6 +57,8 @@ pub struct MockNetworkBridgeRx { network_receiver: NetworkInterfaceReceiver, /// Chunk request sender chunk_request_sender: Option, + /// Approval voting parallel enabled. + approval_voting_parallel_enabled: bool, } impl MockNetworkBridgeTx { @@ -73,8 +75,9 @@ impl MockNetworkBridgeRx { pub fn new( network_receiver: NetworkInterfaceReceiver, chunk_request_sender: Option, + approval_voting_parallel_enabled: bool, ) -> MockNetworkBridgeRx { - Self { network_receiver, chunk_request_sender } + Self { network_receiver, chunk_request_sender, approval_voting_parallel_enabled } } } @@ -199,9 +202,15 @@ impl MockNetworkBridgeRx { Versioned::V3( polkadot_node_network_protocol::v3::ValidationProtocol::ApprovalDistribution(msg) ) => { - ctx.send_message( - ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) - ).await; + if self.approval_voting_parallel_enabled { + ctx.send_message( + ApprovalVotingParallelMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) + ).await; + } else { + ctx.send_message( + ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) + ).await; + } } Versioned::V3( polkadot_node_network_protocol::v3::ValidationProtocol::StatementDistribution(msg) diff --git a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs index bd47505f56ae..dd7095d3b00c 100644 --- a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs @@ -114,14 +114,14 @@ fn build_overseer( state.pvd.clone(), state.own_backing_group.clone(), ); - let (statement_req_receiver, statement_req_cfg) = IncomingRequest::get_config_receiver::< - Block, - sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, 
None)); - let (candidate_req_receiver, candidate_req_cfg) = IncomingRequest::get_config_receiver::< - Block, - sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); + let (statement_req_receiver, statement_req_cfg) = + IncomingRequest::get_config_receiver::>( + &ReqProtocolNames::new(GENESIS_HASH, None), + ); + let (candidate_req_receiver, candidate_req_cfg) = + IncomingRequest::get_config_receiver::>( + &ReqProtocolNames::new(GENESIS_HASH, None), + ); let keystore = make_keystore(); let subsystem = StatementDistributionSubsystem::new( keystore.clone(), @@ -135,7 +135,8 @@ fn build_overseer( network_interface.subsystem_sender(), state.test_authorities.clone(), ); - let network_bridge_rx = MockNetworkBridgeRx::new(network_receiver, Some(candidate_req_cfg)); + let network_bridge_rx = + MockNetworkBridgeRx::new(network_receiver, Some(candidate_req_cfg), false); let dummy = dummy_builder!(spawn_task_handle, overseer_metrics) .replace_runtime_api(|_| mock_runtime_api) @@ -445,5 +446,5 @@ pub async fn benchmark_statement_distribution( ); env.stop().await; - env.collect_resource_usage(&["statement-distribution"]) + env.collect_resource_usage(&["statement-distribution"], false) } diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index 883e9aa7ad0a..5f691ae2db39 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -32,14 +32,14 @@ impl std::fmt::Display for BenchmarkUsage { write!( f, "\n{}\n{}\n\n{}\n{}\n", - format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), + format!("{:<64}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), self.network_usage .iter() .map(|v| v.to_string()) .sorted() .collect::>() .join("\n"), - format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), + format!("{:<64}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), self.cpu_usage .iter() .map(|v| v.to_string()) @@ -134,7 +134,7 @@ pub struct ResourceUsage { impl std::fmt::Display for ResourceUsage { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.4}{:>12.4}", self.resource_name.cyan(), self.total, self.per_block) + write!(f, "{:<64}{:>12.4}{:>12.4}", self.resource_name.cyan(), self.total, self.per_block) } } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 854a9da158be..fafc700e739f 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -955,6 +955,103 @@ pub struct BlockDescription { pub candidates: Vec, } +/// Message to the approval voting parallel subsystem running both approval-distribution and +/// approval-voting logic in parallel. This is a combination of all the messages ApprovalVoting and +/// ApprovalDistribution subsystems can receive. +/// +/// The reason this exists is, so that we can keep both modes of running in the same polkadot +/// binary, based on the value of `--approval-voting-parallel-enabled`, we decide if we run with two +/// different subsystems for approval-distribution and approval-voting or run the approval-voting +/// parallel which has several parallel workers for the approval-distribution and a worker for +/// approval-voting. +/// +/// This is meant to be a temporary state until we can safely remove running the two subsystems +/// individually. 
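+///
+/// As an illustration only (this example is not compiled and simply assumes the message types
+/// of this module are in scope), the conversions defined below this enum can be used to split
+/// a parallel message between the two legacy message types:
+///
+/// ```ignore
+/// // Lag updates are an approval-distribution concern, so this conversion succeeds, while
+/// // converting the same variant into `ApprovalVotingMessage` would return `Err(())`,
+/// // because approval-voting only handles `ApprovedAncestor` and
+/// // `GetApprovalSignaturesForCandidate`.
+/// let msg = ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(42);
+/// let dist: ApprovalDistributionMessage = msg.try_into().expect("a distribution message");
+/// ```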
+#[derive(Debug, derive_more::From)] +pub enum ApprovalVotingParallelMessage { + /// Gets mapped into `ApprovalVotingMessage::ApprovedAncestor` + ApprovedAncestor(Hash, BlockNumber, oneshot::Sender>), + + /// Gets mapped into `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` + GetApprovalSignaturesForCandidate( + CandidateHash, + oneshot::Sender, ValidatorSignature)>>, + ), + /// Gets mapped into `ApprovalDistributionMessage::NewBlocks` + NewBlocks(Vec), + /// Gets mapped into `ApprovalDistributionMessage::DistributeAssignment` + DistributeAssignment(IndirectAssignmentCertV2, CandidateBitfield), + /// Gets mapped into `ApprovalDistributionMessage::DistributeApproval` + DistributeApproval(IndirectSignedApprovalVoteV2), + /// An update from the network bridge, gets mapped into + /// `ApprovalDistributionMessage::NetworkBridgeUpdate` + #[from] + NetworkBridgeUpdate(NetworkBridgeEvent), + + /// Gets mapped into `ApprovalDistributionMessage::GetApprovalSignatures` + GetApprovalSignatures( + HashSet<(Hash, CandidateIndex)>, + oneshot::Sender, ValidatorSignature)>>, + ), + /// Gets mapped into `ApprovalDistributionMessage::ApprovalCheckingLagUpdate` + ApprovalCheckingLagUpdate(BlockNumber), +} + +impl TryFrom for ApprovalVotingMessage { + type Error = (); + + fn try_from(msg: ApprovalVotingParallelMessage) -> Result { + match msg { + ApprovalVotingParallelMessage::ApprovedAncestor(hash, number, tx) => + Ok(ApprovalVotingMessage::ApprovedAncestor(hash, number, tx)), + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate(candidate, tx) => + Ok(ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate, tx)), + _ => Err(()), + } + } +} + +impl TryFrom for ApprovalDistributionMessage { + type Error = (); + + fn try_from(msg: ApprovalVotingParallelMessage) -> Result { + match msg { + ApprovalVotingParallelMessage::NewBlocks(blocks) => + Ok(ApprovalDistributionMessage::NewBlocks(blocks)), + ApprovalVotingParallelMessage::DistributeAssignment(assignment, claimed_cores) => + Ok(ApprovalDistributionMessage::DistributeAssignment(assignment, claimed_cores)), + ApprovalVotingParallelMessage::DistributeApproval(vote) => + Ok(ApprovalDistributionMessage::DistributeApproval(vote)), + ApprovalVotingParallelMessage::NetworkBridgeUpdate(msg) => + Ok(ApprovalDistributionMessage::NetworkBridgeUpdate(msg)), + ApprovalVotingParallelMessage::GetApprovalSignatures(candidate_indicies, tx) => + Ok(ApprovalDistributionMessage::GetApprovalSignatures(candidate_indicies, tx)), + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag) => + Ok(ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag)), + _ => Err(()), + } + } +} + +impl From for ApprovalVotingParallelMessage { + fn from(msg: ApprovalDistributionMessage) -> Self { + match msg { + ApprovalDistributionMessage::NewBlocks(blocks) => + ApprovalVotingParallelMessage::NewBlocks(blocks), + ApprovalDistributionMessage::DistributeAssignment(cert, bitfield) => + ApprovalVotingParallelMessage::DistributeAssignment(cert, bitfield), + ApprovalDistributionMessage::DistributeApproval(vote) => + ApprovalVotingParallelMessage::DistributeApproval(vote), + ApprovalDistributionMessage::NetworkBridgeUpdate(msg) => + ApprovalVotingParallelMessage::NetworkBridgeUpdate(msg), + ApprovalDistributionMessage::GetApprovalSignatures(candidate_indicies, tx) => + ApprovalVotingParallelMessage::GetApprovalSignatures(candidate_indicies, tx), + ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag) => + 
ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag), + } + } +} + /// Response type to `ApprovalVotingMessage::ApprovedAncestor`. #[derive(Clone, Debug)] pub struct HighestApprovedAncestorBlock { diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7938223df23b..a8af8b7996f9 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -665,7 +665,8 @@ where fn number( &self, hash: Block::Hash, - ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> + { self.client.number(hash) } diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index b12387884861..f879aa93df9f 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -101,6 +101,7 @@ pub fn new_full( execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ), sc_network::config::NetworkBackendType::Litep2p => @@ -123,6 +124,7 @@ pub fn new_full( execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ), } diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index e8588274df27..4660b4d38f7c 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -98,6 +98,7 @@ fn main() -> Result<()> { execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ) .map_err(|e| e.to_string())?; diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index 7198a831a477..3dfa714e6d16 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -100,6 +100,7 @@ fn main() -> Result<()> { execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ) .map_err(|e| e.to_string())?; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index ddebe99e6214..3c90c050baed 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -36,7 +36,7 @@ //! //! Let's see a quick example: //! -//! ```rust(ignore) +//! ```nocompile //! sp_api::decl_runtime_apis! { //! #[api_version(2)] //! pub trait MyApi { diff --git a/polkadot/primitives/src/v8/metrics.rs b/polkadot/primitives/src/v8/metrics.rs index 1d66c9848a7c..409efc86bc9b 100644 --- a/polkadot/primitives/src/v8/metrics.rs +++ b/polkadot/primitives/src/v8/metrics.rs @@ -91,18 +91,6 @@ pub type RuntimeMetricLabelValue = RuntimeMetricLabel; /// A set of metric label values. pub type RuntimeMetricLabelValues = RuntimeMetricLabels; -/// Trait for converting Vec to `&str`. -pub trait AsStr { - /// Return a str reference. 
- fn as_str(&self) -> Option<&str>; -} - -impl AsStr for RuntimeMetricLabel { - fn as_str(&self) -> Option<&str> { - alloc::str::from_utf8(&self.0).ok() - } -} - impl From<&'static str> for RuntimeMetricLabel { fn from(s: &'static str) -> Self { Self(s.as_bytes().to_vec()) diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting-parallel.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting-parallel.md new file mode 100644 index 000000000000..84661b7bf9b3 --- /dev/null +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting-parallel.md @@ -0,0 +1,30 @@ +# Approval voting parallel + +The approval-voting-parallel subsystem acts as an orchestrator for the tasks handled by the [Approval Voting](approval-voting.md) +and [Approval Distribution](approval-distribution.md) subsystems. Initially, these two subsystems operated separately and interacted +with each other and with other subsystems through orchestra. + +With approval-voting-parallel, we have a single subsystem that creates two types of workers: +- Four approval-distribution workers that operate in parallel, each handling tasks based on the validator_index of the message + originator. +- One approval-voting worker that performs the tasks previously managed by the standalone approval-voting subsystem. + +This subsystem does not maintain any state. Instead, it functions as an orchestrator that: +- Spawns and initializes each worker. +- Forwards each message and signal to the appropriate worker. +- Aggregates results for messages that require input from more than one worker, such as `GetApprovalSignatures`. + +## Forwarding logic + +The messages and signals received by approval-voting-parallel are forwarded according to the following rules: +- Signals, which need to be forwarded to all workers. +- Messages that only the `approval-voting` worker needs to handle: `ApprovalVotingParallelMessage::ApprovedAncestor` + and `ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate`. +- Control messages that all `approval-distribution` workers need to receive: `ApprovalVotingParallelMessage::NewBlocks`, + `ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate`, and all network bridge variants of `ApprovalVotingParallelMessage::NetworkBridgeUpdate` + except `ApprovalVotingParallelMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage)`. +- Data messages, i.e. `ApprovalVotingParallelMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage)`, which need to be sent + to just a single `approval-distribution` worker based on the `ValidatorIndex`, as illustrated in the sketch below.
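+  As an illustration only, here is a minimal, self-contained sketch of this routing; the
+  `assign_worker` helper and the plain integer types are assumptions made for the example,
+  not the subsystem's real API:
+  ```rust
+  /// Pick which approval-distribution worker handles a peer message, based on the
+  /// validator index of the message originator.
+  fn assign_worker(validator_index: u32, number_of_workers: u32) -> u32 {
+      validator_index % number_of_workers
+  }
+
+  fn main() {
+      // With four approval-distribution workers, a message originating from
+      // validator 5 is routed to worker 1.
+      assert_eq!(assign_worker(5, 4), 1);
+  }
+  ```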
The logic for assigning the work is: + ``` + assigned_worker_index = validator_index % number_of_workers; + ``` diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index b149404b41b8..220543f00ec3 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -945,7 +945,7 @@ impl Pallet { outgoing_paras.len() as u32 )) .saturating_add(::WeightInfo::force_process_hrmp_close( - outgoing_paras.len() as u32 + outgoing_paras.len() as u32, )) } diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index e014529ea11a..cc333e3b20f8 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -991,9 +991,8 @@ impl Pallet { .iter() .filter_map(|d| { BoundedSlice::try_from(&d[..]) - .map_err(|e| { + .inspect_err(|_| { defensive!("Accepted candidate contains too long msg, len=", d.len()); - e }) .ok() }) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 84d8299cd29c..70c4ba72d58e 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1216,18 +1216,19 @@ fn filter_backed_statements_from_disabled_validators< // Get relay parent block number of the candidate. We need this to get the group index // assigned to this core at this block number - let relay_parent_block_number = - match allowed_relay_parents.acquire_info(bc.descriptor().relay_parent(), None) { - Some((_, block_num)) => block_num, - None => { - log::debug!( - target: LOG_TARGET, - "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", - bc.descriptor().relay_parent() - ); - return false - }, - }; + let relay_parent_block_number = match allowed_relay_parents + .acquire_info(bc.descriptor().relay_parent(), None) + { + Some((_, block_num)) => block_num, + None => { + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate is not in the allowed relay parents. 
Dropping the candidate.", + bc.descriptor().relay_parent() + ); + return false + }, + }; // Get the group index for the core let group_idx = match scheduler::Pallet::::group_assigned_to_core( diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index d914bf8b6661..91571859ecf0 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -462,10 +462,11 @@ fn verify_relay_dispatch_queue_size_is_externally_accessible() { fn assert_queue_size(para: ParaId, count: u32, size: u32) { #[allow(deprecated)] - let raw_queue_size = sp_io::storage::get(&well_known_keys::relay_dispatch_queue_size(para)).expect( - "enqueuing a message should create the dispatch queue\ + let raw_queue_size = sp_io::storage::get(&well_known_keys::relay_dispatch_queue_size(para)) + .expect( + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", - ); + ); let (c, s) = <(u32, u32)>::decode(&mut &raw_queue_size[..]) .expect("the dispatch queue size should be decodable into (u32, u32)"); assert_eq!((c, s), (count, size)); diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d6ed11d07875..576e297df31c 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1110,7 +1110,8 @@ impl InstanceFilter for ProxyType { matches!( c, RuntimeCall::Staking(..) | - RuntimeCall::Session(..) | RuntimeCall::Utility(..) | + RuntimeCall::Session(..) | + RuntimeCall::Utility(..) | RuntimeCall::FastUnstake(..) | RuntimeCall::VoterList(..) | RuntimeCall::NominationPools(..) @@ -2761,49 +2762,3 @@ sp_api::impl_runtime_apis! { } } } - -mod clean_state_migration { - use super::Runtime; - #[cfg(feature = "try-runtime")] - use super::Vec; - use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade}; - use pallet_state_trie_migration::MigrationLimits; - - #[storage_alias] - type AutoLimits = StorageValue, ValueQuery>; - - // Actual type of value is `MigrationTask`, putting a dummy - // one to avoid the trait constraint on T. - // Since we only use `kill` it is fine. - #[storage_alias] - type MigrationProcess = StorageValue; - - #[storage_alias] - type SignedMigrationMaxLimits = StorageValue; - - /// Initialize an automatic migration process. 
- pub struct CleanMigrate; - - impl OnRuntimeUpgrade for CleanMigrate { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - Ok(Default::default()) - } - - fn on_runtime_upgrade() -> frame_support::weights::Weight { - MigrationProcess::kill(); - AutoLimits::kill(); - SignedMigrationMaxLimits::kill(); - ::DbWeight::get().writes(3) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - frame_support::ensure!( - !AutoLimits::exists() && !SignedMigrationMaxLimits::exists(), - "State migration clean.", - ); - Ok(()) - } - } -} diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 1e7b01bc472b..ae11f6ccba41 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -33,7 +33,6 @@ pub mod pallet_nomination_pools; pub mod pallet_parameters; pub mod pallet_preimage; pub mod pallet_proxy; -pub mod pallet_referenda_fellowship_referenda; pub mod pallet_referenda_referenda; pub mod pallet_scheduler; pub mod pallet_session; diff --git a/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs deleted file mode 100644 index a4ac06679116..000000000000 --- a/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Autogenerated weights for `pallet_referenda` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=kusama-dev -// --steps=50 -// --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_referenda -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_referenda`. 
-pub struct WeightInfo(PhantomData); -impl pallet_referenda::WeightInfo for WeightInfo { - /// Storage: FellowshipCollective Members (r:1 w:0) - /// Proof: FellowshipCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumCount (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumInfoFor (r:0 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn submit() -> Weight { - // Proof Size summary in bytes: - // Measured: `327` - // Estimated: `42428` - // Minimum execution time: 28_969_000 picoseconds. - Weight::from_parts(30_902_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `404` - // Estimated: `83866` - // Minimum execution time: 53_500_000 picoseconds. - Weight::from_parts(54_447_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `2042` - // Estimated: `42428` - // Minimum execution time: 114_321_000 picoseconds. 
- Weight::from_parts(122_607_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `2083` - // Estimated: `42428` - // Minimum execution time: 113_476_000 picoseconds. - Weight::from_parts(120_078_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `774` - // Estimated: `83866` - // Minimum execution time: 194_798_000 picoseconds. - Weight::from_parts(208_378_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `639` - // Estimated: `83866` - // Minimum execution time: 69_502_000 picoseconds. 
- Weight::from_parts(71_500_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn refund_decision_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `317` - // Estimated: `4365` - // Minimum execution time: 30_561_000 picoseconds. - Weight::from_parts(31_427_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn refund_submission_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `4365` - // Minimum execution time: 14_535_000 picoseconds. - Weight::from_parts(14_999_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn cancel() -> Weight { - // Proof Size summary in bytes: - // Measured: `349` - // Estimated: `83866` - // Minimum execution time: 38_532_000 picoseconds. - Weight::from_parts(39_361_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda MetadataOf (r:1 w:0) - /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn kill() -> Weight { - // Proof Size summary in bytes: - // Measured: `450` - // Estimated: `83866` - // Minimum execution time: 78_956_000 picoseconds. - Weight::from_parts(80_594_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda TrackQueue (r:1 w:0) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - fn one_fewer_deciding_queue_empty() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `4277` - // Minimum execution time: 9_450_000 picoseconds. 
- Weight::from_parts(9_881_000, 0) - .saturating_add(Weight::from_parts(0, 4277)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn one_fewer_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `2376` - // Estimated: `42428` - // Minimum execution time: 98_126_000 picoseconds. - Weight::from_parts(102_511_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn one_fewer_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `2362` - // Estimated: `42428` - // Minimum execution time: 99_398_000 picoseconds. - Weight::from_parts(104_045_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_requeued_insertion() -> Weight { - // Proof Size summary in bytes: - // Measured: `1807` - // Estimated: `4365` - // Minimum execution time: 43_734_000 picoseconds. 
- Weight::from_parts(46_962_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_requeued_slide() -> Weight { - // Proof Size summary in bytes: - // Measured: `1774` - // Estimated: `4365` - // Minimum execution time: 42_863_000 picoseconds. - Weight::from_parts(46_241_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1790` - // Estimated: `4365` - // Minimum execution time: 57_511_000 picoseconds. - Weight::from_parts(64_027_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1831` - // Estimated: `4365` - // Minimum execution time: 56_726_000 picoseconds. - Weight::from_parts(61_962_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_no_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `42428` - // Minimum execution time: 24_870_000 picoseconds. 
- Weight::from_parts(25_837_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `349` - // Estimated: `42428` - // Minimum execution time: 25_297_000 picoseconds. - Weight::from_parts(26_086_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn nudge_referendum_timed_out() -> Weight { - // Proof Size summary in bytes: - // Measured: `208` - // Estimated: `4365` - // Minimum execution time: 16_776_000 picoseconds. - Weight::from_parts(17_396_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_begin_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `584` - // Estimated: `42428` - // Minimum execution time: 37_780_000 picoseconds. - Weight::from_parts(38_626_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_begin_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `719` - // Estimated: `42428` - // Minimum execution time: 85_265_000 picoseconds. 
- Weight::from_parts(89_986_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_begin_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `42428` - // Minimum execution time: 143_283_000 picoseconds. - Weight::from_parts(158_540_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_end_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `755` - // Estimated: `42428` - // Minimum execution time: 143_736_000 picoseconds. - Weight::from_parts(162_755_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_continue_not_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `42428` - // Minimum execution time: 139_021_000 picoseconds. - Weight::from_parts(157_398_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_continue_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `776` - // Estimated: `42428` - // Minimum execution time: 78_530_000 picoseconds. 
- Weight::from_parts(83_556_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - fn nudge_referendum_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `776` - // Estimated: `83866` - // Minimum execution time: 174_165_000 picoseconds. - Weight::from_parts(188_496_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_rejected() -> Weight { - // Proof Size summary in bytes: - // Measured: `772` - // Estimated: `42428` - // Minimum execution time: 142_964_000 picoseconds. - Weight::from_parts(157_257_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda MetadataOf (r:0 w:1) - /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn set_some_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `352` - // Estimated: `4365` - // Minimum execution time: 20_126_000 picoseconds. - Weight::from_parts(20_635_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda MetadataOf (r:1 w:1) - /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `4365` - // Minimum execution time: 17_716_000 picoseconds. 
- Weight::from_parts(18_324_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/polkadot/scripts/list-syscalls/execute-worker-syscalls b/polkadot/scripts/list-syscalls/execute-worker-syscalls index 349af783cf1a..4b22f6787864 100644 --- a/polkadot/scripts/list-syscalls/execute-worker-syscalls +++ b/polkadot/scripts/list-syscalls/execute-worker-syscalls @@ -20,6 +20,7 @@ 24 (sched_yield) 25 (mremap) 28 (madvise) +34 (pause) 39 (getpid) 41 (socket) 42 (connect) diff --git a/polkadot/scripts/list-syscalls/prepare-worker-syscalls b/polkadot/scripts/list-syscalls/prepare-worker-syscalls index 05281b61591a..fd46a788537d 100644 --- a/polkadot/scripts/list-syscalls/prepare-worker-syscalls +++ b/polkadot/scripts/list-syscalls/prepare-worker-syscalls @@ -20,6 +20,7 @@ 24 (sched_yield) 25 (mremap) 28 (madvise) +34 (pause) 39 (getpid) 41 (socket) 42 (connect) diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 1e90338a0f18..e3c470fcdeec 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -245,7 +245,8 @@ impl CandidateData { pub fn attested( &self, validity_threshold: usize, - ) -> Option> { + ) -> Option> + { let valid_votes = self.validity_votes.len(); if valid_votes < validity_threshold { return None @@ -321,7 +322,8 @@ impl Table { digest: &Ctx::Digest, context: &Ctx, minimum_backing_votes: u32, - ) -> Option> { + ) -> Option> + { self.candidate_votes.get(digest).and_then(|data| { let v_threshold = context.get_group_size(&data.group_id).map_or(usize::MAX, |len| { effective_minimum_backing_votes(len, minimum_backing_votes) diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs index 99f17693093e..7cb230f6e006 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs @@ -152,7 +152,7 @@ impl pallet_xcm::Config for Runtime { // We turn off sending for these tests type SendXcmOrigin = EnsureXcmOrigin; type XcmRouter = super::super::network::ParachainXcmRouter; // Provided by xcm-simulator - // Anyone can execute XCM programs + // Anyone can execute XCM programs type ExecuteXcmOrigin = EnsureXcmOrigin; // We execute any type of program type XcmExecuteFilter = Everything; diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs index 987bb3f9ab66..a31e664d8216 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs @@ -125,7 +125,7 @@ impl pallet_xcm::Config for Runtime { // No one can call `send` type SendXcmOrigin = EnsureXcmOrigin; type XcmRouter = super::super::network::RelayChainXcmRouter; // Provided by xcm-simulator - // Anyone can execute XCM programs + // Anyone can execute XCM programs type ExecuteXcmOrigin = EnsureXcmOrigin; // We execute any type of program type XcmExecuteFilter = Everything; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 4a12bb7f47c6..210b8f656377 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ 
b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -72,7 +72,7 @@ pub fn generate_holding_assets(max_assets: u32) -> Assets { let fungibles_amount: u128 = 100; let holding_fungibles = max_assets / 2; let holding_non_fungibles = max_assets - holding_fungibles - 1; // -1 because of adding `Here` asset - // add count of `holding_fungibles` + // add count of `holding_fungibles` (0..holding_fungibles) .map(|i| { Asset { diff --git a/polkadot/xcm/procedural/tests/ui.rs b/polkadot/xcm/procedural/tests/ui.rs index b3469b520eb7..4d0c8af45005 100644 --- a/polkadot/xcm/procedural/tests/ui.rs +++ b/polkadot/xcm/procedural/tests/ui.rs @@ -16,7 +16,6 @@ //! UI tests for XCM procedural macros -#[cfg(not(feature = "disable-ui-tests"))] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. diff --git a/polkadot/xcm/xcm-builder/src/asset_conversion.rs b/polkadot/xcm/xcm-builder/src/asset_conversion.rs index 16ae05c20795..6d090b04886c 100644 --- a/polkadot/xcm/xcm-builder/src/asset_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/asset_conversion.rs @@ -137,7 +137,13 @@ impl< ConvertClassId: MaybeEquivalence, ConvertInstanceId: MaybeEquivalence, > MatchesNonFungibles - for MatchedConvertedConcreteId + for MatchedConvertedConcreteId< + ClassId, + InstanceId, + MatchClassId, + ConvertClassId, + ConvertInstanceId, + > { fn matches_nonfungibles(a: &Asset) -> result::Result<(ClassId, InstanceId), MatchError> { let (instance, class) = match (&a.fun, &a.id) { diff --git a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs index b111a05a4f1f..006c28954bce 100644 --- a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs @@ -270,7 +270,14 @@ impl< CheckAsset: AssetChecking, CheckingAccount: Get>, > TransactAsset - for NonFungiblesAdapter + for NonFungiblesAdapter< + Assets, + Matcher, + AccountIdConverter, + AccountId, + CheckAsset, + CheckingAccount, + > { fn can_check_in(origin: &Location, what: &Asset, context: &XcmContext) -> XcmResult { NonFungiblesMutateAdapter::< diff --git a/polkadot/xcm/xcm-executor/src/traits/weight.rs b/polkadot/xcm/xcm-executor/src/traits/weight.rs index 72de3e0f433b..61545c330621 100644 --- a/polkadot/xcm/xcm-executor/src/traits/weight.rs +++ b/polkadot/xcm/xcm-executor/src/traits/weight.rs @@ -29,13 +29,6 @@ pub trait WeightBounds { fn instr_weight(instruction: &Instruction) -> Result; } -/// A means of getting approximate weight consumption for a given destination message executor and a -/// message. -pub trait UniversalWeigher { - /// Get the upper limit of weight required for `dest` to execute `message`. - fn weigh(dest: impl Into, message: Xcm<()>) -> Result; -} - /// Charge for weight in order to execute XCM. /// /// A `WeightTrader` may also be put into a tuple, in which case the default behavior of diff --git a/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs b/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs index c51a4a5376a3..f0a70b0dacfe 100644 --- a/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs @@ -57,7 +57,12 @@ sp_api::decl_runtime_apis! { /// Calls or XCMs might fail when executed, this doesn't mean the result of these calls will be an `Err`. /// In those cases, there might still be a valid result, with the execution error inside it. /// The only reasons why these calls might return an error are listed in the [`Error`] enum. 
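// Editor's sketch, not part of this patch: the doc paragraph above separates two failure
// layers. Only the outer `Err` maps to the [`Error`] enum it mentions; a call that would
// fail on-chain still comes back as `Ok`, with the failure carried inside the returned
// effects. The generic helper below only illustrates that shape; the concrete effects
// type and its accessor are placeholders, not the crate's actual API.
fn classify_dry_run<Effects, ApiError, DispatchError>(
	result: Result<Effects, ApiError>,
	execution_result_of: impl Fn(&Effects) -> Result<(), DispatchError>,
) -> &'static str {
	match result {
		// The dry run itself could not be performed (one of the `Error` variants above).
		Err(_) => "dry-run API error",
		Ok(effects) => match execution_result_of(&effects) {
			// The call would succeed; emitted events and forwarded XCMs can be inspected.
			Ok(()) => "call would succeed on-chain",
			// The call would fail on-chain, yet the dry run still returned its effects.
			Err(_) => "call would fail on-chain, effects still available",
		},
	}
}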
- pub trait DryRunApi { + pub trait DryRunApi + where + Call: Encode, + Event: Decode, + OriginCaller: Encode + { /// Dry run call. fn dry_run_call(origin: OriginCaller, call: Call) -> Result, Error>; diff --git a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index 889a50a2bab9..2d14b4e571c6 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -197,7 +197,7 @@ fn fee_estimation_for_teleport() { fn dry_run_reserve_asset_transfer() { sp_tracing::init_for_tests(); let who = 1; // AccountId = u64. - // Native token used for fees. + // Native token used for fees. let balances = vec![(who, DeliveryFees::get() + ExistentialDeposit::get())]; // Relay token is the one we want to transfer. let assets = vec![(1, who, 100)]; // id, account_id, balance. diff --git a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml index 19c7015403d7..113de0e73aa1 100644 --- a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml +++ b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml @@ -18,7 +18,7 @@ requests = { memory = "2G", cpu = "1" } [[relaychain.node_groups]] name = "alice" - args = [ "-lparachain=trace,runtime=debug" ] + args = [ "-lparachain=debug,runtime=debug" ] count = 13 [[parachains]] diff --git a/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.toml b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.toml new file mode 100644 index 000000000000..c035e23639c1 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.toml @@ -0,0 +1,120 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + relay_vrf_modulo_samples = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "alice" + args = ["-lparachain=debug,runtime=debug", "--enable-approval-voting-parallel"] + count = 8 + + [[relaychain.node_groups]] + name = "bob" + args = ["-lparachain=debug,runtime=debug"] + count = 7 + +[[parachains]] +id = 2000 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=1" + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=1", "--parachain-id=2000"] + +[[parachains]] +id = 2001 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=10" + + [parachains.collator] + name = "collator02" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2001", "--pvf-complexity=10"] + +[[parachains]] +id = 2002 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=100" + + [parachains.collator] + name = "collator03" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", 
"--parachain-id=2002", "--pvf-complexity=100"] + +[[parachains]] +id = 2003 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=300" + + [parachains.collator] + name = "collator04" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--parachain-id=2003", "--pvf-complexity=300"] + +[[parachains]] +id = 2004 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator05" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2004", "--pvf-complexity=300"] + +[[parachains]] +id = 2005 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=400" + + [parachains.collator] + name = "collator06" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--pvf-complexity=400", "--parachain-id=2005"] + +[[parachains]] +id = 2006 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator07" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2006"] + +[[parachains]] +id = 2007 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator08" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2007"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.zndsl b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.zndsl new file mode 100644 index 000000000000..d70707747474 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.zndsl @@ -0,0 +1,35 @@ +Description: Check finality works with approval voting parallel enabled +Network: ./0016-approval-voting-parallel.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 + +# Ensure parachains are registered. +alice: parachain 2000 is registered within 60 seconds +alice: parachain 2001 is registered within 60 seconds +alice: parachain 2002 is registered within 60 seconds +alice: parachain 2003 is registered within 60 seconds +alice: parachain 2004 is registered within 60 seconds +alice: parachain 2005 is registered within 60 seconds +alice: parachain 2006 is registered within 60 seconds +alice: parachain 2007 is registered within 60 seconds + +# Ensure parachains made progress. 
+alice: parachain 2000 block height is at least 10 within 300 seconds +alice: parachain 2001 block height is at least 10 within 300 seconds +alice: parachain 2002 block height is at least 10 within 300 seconds +alice: parachain 2003 block height is at least 10 within 300 seconds +alice: parachain 2004 block height is at least 10 within 300 seconds +alice: parachain 2005 block height is at least 10 within 300 seconds +alice: parachain 2006 block height is at least 10 within 300 seconds +alice: parachain 2007 block height is at least 10 within 300 seconds + +alice: reports substrate_block_height{status="finalized"} is at least 30 within 180 seconds +bob: reports substrate_block_height{status="finalized"} is at least 30 within 180 seconds + +alice: reports polkadot_parachain_approval_checking_finality_lag < 3 +bob: reports polkadot_parachain_approval_checking_finality_lag < 3 + +alice: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds +bob: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds diff --git a/prdoc/pr_4849.prdoc b/prdoc/pr_4849.prdoc new file mode 100644 index 000000000000..185295151068 --- /dev/null +++ b/prdoc/pr_4849.prdoc @@ -0,0 +1,47 @@ +title: Introduce approval-voting-parallel subsystem + +doc: + - audience: Node Dev + description: | + This introduces a new subsystem called approval-voting-parallel. It combines the tasks + previously handled by the approval-voting and approval-distribution subsystems. + + The new subsystem is enabled by default on all test networks. On production networks + like Polkadot and Kusama, the legacy system with two separate subsystems is still in use. + However, there is a CLI option --enable-approval-voting-parallel to gradually roll out + the new subsystem on specific nodes. Once we are confident that it works as expected, + it will be enabled by default on all networks. + + The approval-voting-parallel subsystem coordinates two groups of workers: + - Four approval-distribution workers that operate in parallel, each handling tasks based + on the validator_index of the message originator. + - One approval-voting worker that performs the tasks previously managed by the standalone + approval-voting subsystem. + +crates: + - name: polkadot-overseer + bump: major + - name: polkadot-node-primitives + bump: major + - name: polkadot-node-subsystem-types + bump: major + - name: polkadot-service + bump: major + - name: polkadot-approval-distribution + bump: major + - name: polkadot-node-core-approval-voting + bump: major + - name: polkadot-node-core-approval-voting-parallel + bump: major + - name: polkadot-network-bridge + bump: major + - name: polkadot-node-core-dispute-coordinator + bump: major + - name: cumulus-relay-chain-inprocess-interface + bump: major + - name: polkadot-cli + bump: major + - name: polkadot + bump: major + - name: polkadot-sdk + bump: minor diff --git a/prdoc/pr_4851.prdoc b/prdoc/pr_4851.prdoc index 923ca4bfff5d..2110a68d401c 100644 --- a/prdoc/pr_4851.prdoc +++ b/prdoc/pr_4851.prdoc @@ -5,8 +5,8 @@ title: Add support for deprecation metadata in `RuntimeMetadataIr` entries. doc: - audience: - - Runtime dev - - Runtime user + - Runtime Dev + - Runtime User description: | Changes introduced are listed below. Adds `DeprecationStatusIR` enum to sp_metadata_ir. 
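The worker split described in prdoc/pr_4849.prdoc above can be summarised with a minimal sketch. The types and field names below are hypothetical stand-ins, not the actual polkadot-node-core-approval-voting-parallel API, and the simple modulo assignment is an illustrative assumption; the point is only the routing rule: every incoming message is assigned to one of the four approval-distribution workers by the validator_index of its originator, so all traffic from a given validator is handled by the same worker, while a single approval-voting worker does the work of the former standalone subsystem.

use std::sync::mpsc::Sender;

/// Hypothetical, simplified message type; only the originator's index matters here.
struct ApprovalMessage {
	validator_index: u32,
}

/// Hypothetical coordinator mirroring the description above: four parallel
/// approval-distribution workers plus one approval-voting worker.
struct ApprovalVotingParallelSketch {
	distribution_workers: Vec<Sender<ApprovalMessage>>,
	approval_voting_worker: Sender<ApprovalMessage>,
}

impl ApprovalVotingParallelSketch {
	/// Route by originator so messages from the same validator always reach the
	/// same worker, which is what makes the four workers safe to run in parallel.
	fn route_distribution(&self, msg: ApprovalMessage) {
		let worker = msg.validator_index as usize % self.distribution_workers.len();
		let _ = self.distribution_workers[worker].send(msg);
	}
}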
diff --git a/prdoc/pr_5676.prdoc b/prdoc/pr_5676.prdoc new file mode 100644 index 000000000000..dfe23e120b4b --- /dev/null +++ b/prdoc/pr_5676.prdoc @@ -0,0 +1,174 @@ +title: '[ci] Update CI image with rust 1.81.0 and 2024-09-11' +doc: +- audience: [Runtime Dev, Node Dev, Node Operator] + description: |- + cc https://github.com/paritytech/ci_cd/issues/1035 + + close https://github.com/paritytech/ci_cd/issues/1023 +crates: +- name: pallet-xcm-bridge-hub + bump: patch +- name: snowbridge-router-primitives + bump: patch +- name: snowbridge-runtime-common + bump: patch +- name: cumulus-pallet-parachain-system + bump: patch +- name: asset-hub-rococo-runtime + bump: patch +- name: asset-hub-westend-runtime + bump: patch +- name: asset-test-utils + bump: patch +- name: bridge-hub-test-utils + bump: patch +- name: cumulus-primitives-utility + bump: patch +- name: polkadot-node-core-approval-voting + bump: patch +- name: polkadot-node-core-pvf-common + bump: patch +- name: polkadot-approval-distribution + bump: patch +- name: polkadot-availability-recovery + bump: patch +- name: polkadot-node-subsystem-types + bump: patch +- name: polkadot-runtime-parachains + bump: patch +- name: westend-runtime + bump: patch +- name: polkadot-statement-table + bump: patch +- name: pallet-xcm-benchmarks + bump: patch +- name: staging-xcm-builder + bump: patch +- name: xcm-runtime-apis + bump: patch +- name: sc-cli + bump: patch +- name: sc-consensus-grandpa + bump: patch +- name: sc-network + bump: patch +- name: sc-network-sync + bump: patch +- name: sc-rpc-spec-v2 + bump: patch +- name: pallet-bags-list + bump: patch +- name: pallet-balances + bump: patch +- name: pallet-bounties + bump: patch +- name: pallet-child-bounties + bump: patch +- name: pallet-nis + bump: patch +- name: pallet-referenda + bump: patch +- name: pallet-revive-proc-macro + bump: patch +- name: pallet-society + bump: patch +- name: pallet-staking + bump: patch +- name: frame-support-procedural + bump: patch +- name: frame-support + bump: patch +- name: pallet-transaction-payment + bump: patch +- name: pallet-utility + bump: patch +- name: pallet-vesting + bump: patch +- name: substrate-wasm-builder + bump: patch +- name: snowbridge-outbound-queue-merkle-tree + bump: patch +- name: shell-runtime + bump: patch +- name: polkadot-parachain-lib + bump: patch +- name: polkadot-cli + bump: patch +- name: polkadot-node-core-pvf + bump: patch +- name: polkadot-service + bump: patch +- name: polkadot-primitives + bump: patch +- name: staging-xcm-executor + bump: patch +- name: sc-consensus-beefy + bump: patch +- name: sc-consensus-slots + bump: patch +- name: frame-benchmarking-pallet-pov + bump: patch +- name: pallet-contracts + bump: patch +- name: frame-election-provider-support + bump: patch +- name: pallet-revive-mock-network + bump: patch +- name: frame-benchmarking-cli + bump: patch +- name: sc-utils + bump: patch +- name: pallet-beefy-mmr + bump: patch +- name: sp-state-machine + bump: patch +- name: fork-tree + bump: patch +- name: sc-transaction-pool + bump: patch +- name: pallet-delegated-staking + bump: patch +- name: sc-executor-wasmtime + bump: patch +- name: cumulus-pallet-xcmp-queue + bump: patch +- name: xcm-procedural + bump: patch +- name: sp-application-crypto + bump: patch +- name: sp-core + bump: patch +- name: sp-keyring + bump: patch +- name: polkadot-availability-distribution + bump: patch +- name: sp-runtime + bump: patch +- name: sc-authority-discovery + bump: patch +- name: frame-system + bump: patch +- name: 
sc-network-gossip + bump: patch +- name: pallet-authorship + bump: patch +- name: pallet-election-provider-multi-phase + bump: patch +- name: sp-runtime-interface + bump: patch +- name: pallet-bridge-grandpa + bump: patch +- name: pallet-elections-phragmen + bump: patch +- name: frame-executive + bump: patch +- name: bp-header-chain + bump: patch +- name: polkadot-overseer + bump: patch +- name: polkadot + bump: patch +- name: bridge-hub-westend-runtime + bump: major +- name: bp-messages + bump: patch diff --git a/prdoc/pr_5678.prdoc b/prdoc/pr_5678.prdoc index af1fac31c560..ebb5e5a0d79f 100644 --- a/prdoc/pr_5678.prdoc +++ b/prdoc/pr_5678.prdoc @@ -1,6 +1,6 @@ title: 'rpc server: fix deny unsafe on RpcMethods::Auto' doc: -- audience: Node User +- audience: Node Operator description: |- Close #5677 diff --git a/prdoc/pr_5684.prdoc b/prdoc/pr_5684.prdoc index a17bacd2fb94..9800c85de2ae 100644 --- a/prdoc/pr_5684.prdoc +++ b/prdoc/pr_5684.prdoc @@ -4,7 +4,7 @@ title: "[pallet-revive]" doc: - - audience: Runtime Devs + - audience: Runtime Dev description: | Update xcm runtime api, and fix pallet-revive xcm tests diff --git a/prdoc/pr_5830.prdoc b/prdoc/pr_5830.prdoc new file mode 100644 index 000000000000..10b586e4a4af --- /dev/null +++ b/prdoc/pr_5830.prdoc @@ -0,0 +1,13 @@ +title: "Remove jaeger from approval-voting and approval-distribution" + +doc: + - audience: Node Dev + description: | + Jaeger was remove from approval-voting and approval-distribution because + it did not prove to improve the debugging and it wasted precious cpu cycles. + +crates: + - name: polkadot-approval-distribution + bump: none + - name: polkadot-node-core-approval-voting + bump: none diff --git a/scripts/update-ui-tests.sh b/scripts/update-ui-tests.sh index a1f380c4712d..c25b22fa7f75 100755 --- a/scripts/update-ui-tests.sh +++ b/scripts/update-ui-tests.sh @@ -32,10 +32,12 @@ export RUN_UI_TESTS=1 export SKIP_WASM_BUILD=1 # Let trybuild overwrite the .stderr files export TRYBUILD=overwrite +# Warnings are part of our UI and the CI also sets this. +export RUSTFLAGS="-C debug-assertions -D warnings" # ./substrate -$RUSTUP_RUN cargo test --manifest-path substrate/primitives/runtime-interface/Cargo.toml ui -$RUSTUP_RUN cargo test -p sp-api-test ui -$RUSTUP_RUN cargo test -p frame-election-provider-solution-type ui -$RUSTUP_RUN cargo test -p frame-support-test --features=no-metadata-docs,try-runtime,experimental ui -$RUSTUP_RUN cargo test -p xcm-procedural ui \ No newline at end of file +$RUSTUP_RUN cargo test -q --locked --manifest-path substrate/primitives/runtime-interface/Cargo.toml ui +$RUSTUP_RUN cargo test -q --locked -p sp-api-test ui +$RUSTUP_RUN cargo test -q --locked -p frame-election-provider-solution-type ui +$RUSTUP_RUN cargo test -q --locked -p frame-support-test --features=no-metadata-docs,try-runtime,experimental ui +$RUSTUP_RUN cargo test -q --locked -p xcm-procedural ui diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 9f82338b4fb0..b49af4c1055c 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -188,135 +188,3 @@ fn transaction_fee_is_correct() { assert_eq!(Balances::total_balance(&alice()), balance_alice); }); } - -#[test] -#[should_panic] -#[cfg(feature = "stress-test")] -fn block_weight_capacity_report() { - // Just report how many transfer calls you could fit into a block. The number should at least - // be a few hundred (250 at the time of writing but can change over time). Runs until panic. 
- use node_primitives::Nonce; - - // execution ext. - let mut t = new_test_ext(compact_code_unwrap()); - // setup ext. - let mut tt = new_test_ext(compact_code_unwrap()); - - let factor = 50; - let mut time = 10; - let mut nonce: Nonce = 0; - let mut block_number = 1; - let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); - - loop { - let num_transfers = block_number * factor; - let mut xts = (0..num_transfers) - .map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Nonce, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { - dest: bob().into(), - value: 0, - }), - }) - .collect::>(); - - xts.insert( - 0, - CheckedExtrinsic { - signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), - }, - ); - - // NOTE: this is super slow. Can probably be improved. - let block = construct_block( - &mut tt, - block_number, - previous_hash, - xts, - (time * 1000 / SLOT_DURATION).into(), - ); - - let len = block.0.len(); - print!( - "++ Executing block with {} transfers. Block size = {} bytes / {} kb / {} mb", - num_transfers, - len, - len / 1024, - len / 1024 / 1024, - ); - - let r = executor_call(&mut t, "Core_execute_block", &block.0).0; - - println!(" || Result = {:?}", r); - assert!(r.is_ok()); - - previous_hash = block.1; - nonce += num_transfers; - time += 10; - block_number += 1; - } -} - -#[test] -#[should_panic] -#[cfg(feature = "stress-test")] -fn block_length_capacity_report() { - // Just report how big a block can get. Executes until panic. Should be ignored unless if - // manually inspected. The number should at least be a few megabytes (5 at the time of - // writing but can change over time). - use node_primitives::Nonce; - - // execution ext. - let mut t = new_test_ext(compact_code_unwrap()); - // setup ext. - let mut tt = new_test_ext(compact_code_unwrap()); - - let factor = 256 * 1024; - let mut time = 10; - let mut nonce: Nonce = 0; - let mut block_number = 1; - let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); - - loop { - // NOTE: this is super slow. Can probably be improved. - let block = construct_block( - &mut tt, - block_number, - previous_hash, - vec![ - CheckedExtrinsic { - signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { - now: time * 1000, - }), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce, 0))), - function: RuntimeCall::System(frame_system::Call::remark { - remark: vec![0u8; (block_number * factor) as usize], - }), - }, - ], - (time * 1000 / SLOT_DURATION).into(), - ); - - let len = block.0.len(); - print!( - "++ Executing block with big remark. Block size = {} bytes / {} kb / {} mb", - len, - len / 1024, - len / 1024 / 1024, - ); - - let r = executor_call(&mut t, "Core_execute_block", &block.0).0; - - println!(" || Result = {:?}", r); - assert!(r.is_ok()); - - previous_hash = block.1; - nonce += 1; - time += 10; - block_number += 1; - } -} diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index b49615382b8a..d71a85db8b81 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -117,10 +117,10 @@ sp_api::mock_impl_runtime_apis! 
{ #[derive(Debug)] pub enum TestNetworkEvent { - GetCalled(KademliaKey), - PutCalled(KademliaKey, Vec), - PutToCalled(Record, HashSet, bool), - StoreRecordCalled(KademliaKey, Vec, Option, Option), + GetCalled, + PutCalled, + PutToCalled, + StoreRecordCalled, } pub struct TestNetwork { @@ -190,17 +190,11 @@ impl NetworkSigner for TestNetwork { impl NetworkDHTProvider for TestNetwork { fn put_value(&self, key: KademliaKey, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); - self.event_sender - .clone() - .unbounded_send(TestNetworkEvent::PutCalled(key, value)) - .unwrap(); + self.event_sender.clone().unbounded_send(TestNetworkEvent::PutCalled).unwrap(); } fn get_value(&self, key: &KademliaKey) { self.get_value_call.lock().unwrap().push(key.clone()); - self.event_sender - .clone() - .unbounded_send(TestNetworkEvent::GetCalled(key.clone())) - .unwrap(); + self.event_sender.clone().unbounded_send(TestNetworkEvent::GetCalled).unwrap(); } fn put_record_to( @@ -214,10 +208,7 @@ impl NetworkDHTProvider for TestNetwork { peers.clone(), update_local_storage, )); - self.event_sender - .clone() - .unbounded_send(TestNetworkEvent::PutToCalled(record, peers, update_local_storage)) - .unwrap(); + self.event_sender.clone().unbounded_send(TestNetworkEvent::PutToCalled).unwrap(); } fn store_record( @@ -235,7 +226,7 @@ impl NetworkDHTProvider for TestNetwork { )); self.event_sender .clone() - .unbounded_send(TestNetworkEvent::StoreRecordCalled(key, value, publisher, expires)) + .unbounded_send(TestNetworkEvent::StoreRecordCalled) .unwrap(); } } @@ -536,7 +527,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { pool.run_until(async { // Assert worker to trigger a lookup for the one and only authority. - assert!(matches!(network_events.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(network_events.next().await, Some(TestNetworkEvent::GetCalled))); // Send an event that should generate an error dht_event_tx @@ -1137,7 +1128,7 @@ fn lookup_throttling() { async { // Assert worker to trigger MAX_IN_FLIGHT_LOOKUPS lookups. for _ in 0..MAX_IN_FLIGHT_LOOKUPS { - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled))); } assert_eq!( metrics.requests_pending.get(), @@ -1168,7 +1159,7 @@ fn lookup_throttling() { } // Assert worker to trigger another lookup. - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled))); assert_eq!( metrics.requests_pending.get(), (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 1) as u64 @@ -1181,7 +1172,7 @@ fn lookup_throttling() { dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); // Assert worker to trigger another lookup. 
- assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled))); assert_eq!( metrics.requests_pending.get(), (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 2) as u64 diff --git a/substrate/client/cli/src/commands/import_blocks_cmd.rs b/substrate/client/cli/src/commands/import_blocks_cmd.rs index 815c6ab18aa6..6bd607901e38 100644 --- a/substrate/client/cli/src/commands/import_blocks_cmd.rs +++ b/substrate/client/cli/src/commands/import_blocks_cmd.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::Block as BlockT; use std::{ fmt::Debug, fs, - io::{self, Read, Seek}, + io::{self, Read}, path::PathBuf, sync::Arc, }; @@ -58,11 +58,6 @@ pub struct ImportBlocksCmd { pub import_params: ImportParams, } -/// Internal trait used to cast to a dynamic type that implements Read and Seek. -trait ReadPlusSeek: Read + Seek {} - -impl ReadPlusSeek for T {} - impl ImportBlocksCmd { /// Run the import-blocks command pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> diff --git a/substrate/client/cli/src/commands/vanity.rs b/substrate/client/cli/src/commands/vanity.rs index 330a59493efc..9acacb4b15b2 100644 --- a/substrate/client/cli/src/commands/vanity.rs +++ b/substrate/client/cli/src/commands/vanity.rs @@ -166,8 +166,6 @@ mod tests { crypto::{default_ss58_version, Ss58AddressFormatRegistry, Ss58Codec}, sr25519, Pair, }; - #[cfg(feature = "bench")] - use test::Bencher; #[test] fn vanity() { @@ -225,16 +223,4 @@ mod tests { 0 ); } - - #[cfg(feature = "bench")] - #[bench] - fn bench_paranoiac(b: &mut Bencher) { - b.iter(|| generate_key("polk")); - } - - #[cfg(feature = "bench")] - #[bench] - fn bench_not_paranoiac(b: &mut Bencher) { - b.iter(|| generate_key("polk")); - } } diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index cdd637888114..70671bff8c05 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -116,8 +116,8 @@ impl NodeKeyParams { .clone() .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)); if !self.unsafe_force_node_key_generation && - role.is_authority() && !is_dev && - !key_path.exists() + role.is_authority() && + !is_dev && !key_path.exists() { return Err(Error::NetworkKeyNotFound(key_path)) } diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs index faa4d34eff5a..2b2683b35f0a 100644 --- a/substrate/client/consensus/beefy/src/fisherman.rs +++ b/substrate/client/consensus/beefy/src/fisherman.rs @@ -32,9 +32,8 @@ use sp_runtime::{ }; use std::{marker::PhantomData, sync::Arc}; -/// Helper struct containing the id and the key ownership proof for a validator. -pub struct ProvedValidator<'a, AuthorityId: AuthorityIdBound> { - pub id: &'a AuthorityId, +/// Helper struct containing the key ownership proof for a validator. 
+pub struct ProvedValidator { pub key_owner_proof: OpaqueKeyOwnershipProof, } @@ -66,7 +65,7 @@ where at: BlockId, offender_ids: impl Iterator, validator_set_id: ValidatorSetId, - ) -> Result>, Error> { + ) -> Result, Error> { let hash = match at { BlockId::Hash(hash) => hash, BlockId::Number(number) => self @@ -91,7 +90,7 @@ where offender_id.clone(), ) { Ok(Some(key_owner_proof)) => { - proved_offenders.push(ProvedValidator { id: offender_id, key_owner_proof }); + proved_offenders.push(ProvedValidator { key_owner_proof }); }, Ok(None) => { debug!( diff --git a/substrate/client/consensus/grandpa/src/aux_schema.rs b/substrate/client/consensus/grandpa/src/aux_schema.rs index 8ec882591be9..c42310dcd72c 100644 --- a/substrate/client/consensus/grandpa/src/aux_schema.rs +++ b/substrate/client/consensus/grandpa/src/aux_schema.rs @@ -743,7 +743,9 @@ mod test { substrate_test_runtime_client::runtime::Block, _, _, - >(&client, H256::random(), 0, || unreachable!()) + >( + &client, H256::random(), 0, || unreachable!() + ) .unwrap(); assert_eq!( diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs index c1d3cd2fbd6a..6072f1895fd0 100644 --- a/substrate/client/consensus/grandpa/src/voting_rule.rs +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -82,7 +82,7 @@ where /// /// In the best case our vote is exactly N blocks /// behind the best block, but if there is a scenario where either -/// >34% of validators run without this rule or the fork-choice rule +/// \>34% of validators run without this rule or the fork-choice rule /// can prioritize shorter chains over longer ones, the vote may be /// closer to the best block than N. #[derive(Clone)] diff --git a/substrate/client/consensus/slots/build.rs b/substrate/client/consensus/slots/build.rs index a68cb706e8fb..c63f0b8b6674 100644 --- a/substrate/client/consensus/slots/build.rs +++ b/substrate/client/consensus/slots/build.rs @@ -20,6 +20,6 @@ use std::env; fn main() { if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + println!("cargo:rustc-cfg=build_profile=\"{}\"", profile); } } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index 06e0756fc968..4f7e85541777 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -227,7 +227,7 @@ pub trait SimpleSlotWorker { "⌛️ Discarding proposal for slot {}; block production took too long", slot, ); // If the node was compiled with debug, tell the user to use release optimizations. 
- #[cfg(build_type = "debug")] + #[cfg(build_profile = "debug")] info!( target: log_target, "👉 Recompile your node in `--release` mode to mitigate this problem.", diff --git a/substrate/client/executor/wasmtime/build.rs b/substrate/client/executor/wasmtime/build.rs index a68cb706e8fb..c63f0b8b6674 100644 --- a/substrate/client/executor/wasmtime/build.rs +++ b/substrate/client/executor/wasmtime/build.rs @@ -20,6 +20,6 @@ use std::env; fn main() { if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + println!("cargo:rustc-cfg=build_profile=\"{}\"", profile); } } diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index f86a42757694..abf2b9509c2b 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -455,7 +455,7 @@ fn test_max_memory_pages( // This test takes quite a while to execute in a debug build (over 6 minutes on a TR 3970x) // so it's ignored by default unless it was compiled with `--release`. -#[cfg_attr(build_type = "debug", ignore)] +#[cfg_attr(build_profile = "debug", ignore)] #[test] fn test_instances_without_reuse_are_not_leaked() { let runtime = crate::create_runtime::( diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index 414da9b2a589..a4bd922a76d5 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -377,9 +377,6 @@ mod tests { #[derive(Clone, Default)] struct TestNetwork {} - #[derive(Clone, Default)] - struct TestNetworkInner {} - #[async_trait::async_trait] impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index 9a6324dafd37..5ecbec52d507 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -76,8 +76,6 @@ pub enum BehaviourOut { /// /// This event is generated for statistics purposes. InboundRequest { - /// Peer which sent us a request. - peer: PeerId, /// Protocol name of the request. protocol: ProtocolName, /// If `Ok`, contains the time elapsed between when we received the request and when we @@ -89,8 +87,6 @@ pub enum BehaviourOut { /// /// This event is generated for statistics purposes. RequestFinished { - /// Peer that we send a request to. - peer: PeerId, /// Name of the protocol in question. protocol: ProtocolName, /// Duration the request took. @@ -350,10 +346,10 @@ impl From for BehaviourOut { impl From for BehaviourOut { fn from(event: request_responses::Event) -> Self { match event { - request_responses::Event::InboundRequest { peer, protocol, result } => - BehaviourOut::InboundRequest { peer, protocol, result }, - request_responses::Event::RequestFinished { peer, protocol, duration, result } => - BehaviourOut::RequestFinished { peer, protocol, duration, result }, + request_responses::Event::InboundRequest { protocol, result, .. } => + BehaviourOut::InboundRequest { protocol, result }, + request_responses::Event::RequestFinished { protocol, duration, result, .. 
} => + BehaviourOut::RequestFinished { protocol, duration, result }, request_responses::Event::ReputationChanges { peer, changes } => BehaviourOut::ReputationChanges { peer, changes }, } diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index bf2005df34d7..5fe944cadc0b 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -95,15 +95,6 @@ pub enum DiscoveryEvent { /// Peer ID. peer: PeerId, - /// Identify protocol version. - protocol_version: Option, - - /// Identify user agent version. - user_agent: Option, - - /// Observed address. - observed_address: Multiaddr, - /// Listen addresses. listen_addresses: Vec, @@ -561,11 +552,10 @@ impl Stream for Discovery { Poll::Ready(None) => return Poll::Ready(None), Poll::Ready(Some(IdentifyEvent::PeerIdentified { peer, - protocol_version, - user_agent, listen_addresses, supported_protocols, observed_address, + .. })) => { if this.is_new_external_address(&observed_address, peer) { this.pending_events.push_back(DiscoveryEvent::ExternalAddressDiscovered { @@ -575,10 +565,7 @@ impl Stream for Discovery { return Poll::Ready(Some(DiscoveryEvent::Identified { peer, - protocol_version, - user_agent, listen_addresses, - observed_address, supported_protocols, })); }, diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index cb4f089995e3..a562546145c8 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -33,7 +33,7 @@ use bytes::BytesMut; use fnv::FnvHashMap; use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ - core::{ConnectedPoint, Endpoint, Multiaddr}, + core::{Endpoint, Multiaddr}, swarm::{ behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, PollParameters, @@ -362,8 +362,6 @@ pub enum NotificationsOut { received_handshake: Vec, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, - /// Is the connection inbound. 
- inbound: bool, }, /// The [`NotificationsSink`] object used to send notifications with the given peer must be @@ -1223,33 +1221,20 @@ impl NetworkBehaviour for Notifications { &mut self, _connection_id: ConnectionId, peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(NotifsHandler::new( - peer, - ConnectedPoint::Listener { - local_addr: local_addr.clone(), - send_back_addr: remote_addr.clone(), - }, - self.notif_protocols.clone(), - Some(self.metrics.clone()), - )) + Ok(NotifsHandler::new(peer, self.notif_protocols.clone(), Some(self.metrics.clone()))) } fn handle_established_outbound_connection( &mut self, _connection_id: ConnectionId, peer: PeerId, - addr: &Multiaddr, - role_override: Endpoint, + _addr: &Multiaddr, + _role_override: Endpoint, ) -> Result, ConnectionDenied> { - Ok(NotifsHandler::new( - peer, - ConnectedPoint::Dialer { address: addr.clone(), role_override }, - self.notif_protocols.clone(), - Some(self.metrics.clone()), - )) + Ok(NotifsHandler::new(peer, self.notif_protocols.clone(), Some(self.metrics.clone()))) } fn on_swarm_event(&mut self, event: FromSwarm) { @@ -2061,7 +2046,6 @@ impl NetworkBehaviour for Notifications { let event = NotificationsOut::CustomProtocolOpen { peer_id, set_id, - inbound, direction: if inbound { Direction::Inbound } else { @@ -2383,6 +2367,7 @@ mod tests { protocol::notifications::handler::tests::*, protocol_controller::{IncomingIndex, ProtoSetConfig, ProtocolController}, }; + use libp2p::core::ConnectedPoint; use sc_utils::mpsc::tracing_unbounded; use std::{collections::HashSet, iter}; @@ -2413,7 +2398,8 @@ mod tests { } fn development_notifs( - ) -> (Notifications, ProtocolController, Box) { + ) -> (Notifications, ProtocolController, Box) + { let (protocol_handle_pair, notif_service) = crate::protocol::notifications::service::notification_service("/proto/1".into()); let (to_notifications, from_controller) = @@ -2668,7 +2654,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -2868,7 +2854,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3021,7 +3007,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3065,7 +3051,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3135,7 +3121,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3188,7 +3174,7 @@ mod tests { assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. 
}))); // open new substream - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); @@ -3261,7 +3247,7 @@ mod tests { notif.on_connection_handler_event( peer, *conn, - conn_yielder.open_substream(peer, 0, connected.clone(), vec![1, 2, 3, 4]), + conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]), ); } @@ -3283,7 +3269,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3354,7 +3340,7 @@ mod tests { notif.on_connection_handler_event( peer, conn, - conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]), + conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]), ); if let Some(PeerState::Enabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) { @@ -3409,7 +3395,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3483,7 +3469,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3546,7 +3532,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3560,7 +3546,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3614,7 +3600,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3672,7 +3658,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3733,7 +3719,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3786,7 +3772,7 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - conn_yielder.open_substream(peer, 0, connected.clone(), vec![1, 2, 3, 4]), + conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]), ); if let Some(PeerState::Enabled { ref connections, .. 
}) = notif.peers.get(&(peer, set_id)) { @@ -3802,7 +3788,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3843,7 +3829,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -3966,7 +3952,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4015,10 +4001,6 @@ mod tests { let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); - let connected = ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }; let mut conn_yielder = ConnectionYielder::new(); // move the peer to `Enabled` state @@ -4052,7 +4034,7 @@ mod tests { notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); @@ -4167,7 +4149,7 @@ mod tests { notif.peerset_report_connect(peer, set_id); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); @@ -4280,7 +4262,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4521,7 +4503,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4623,7 +4605,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4681,7 +4663,7 @@ mod tests { notif.peerset_report_connect(peer, set_id); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. 
}))); @@ -4705,7 +4687,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &endpoint.clone(), - handler: NotifsHandler::new(peer, endpoint, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4822,7 +4804,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4857,7 +4839,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4908,7 +4890,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -4955,7 +4937,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -5005,7 +4987,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -5048,7 +5030,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -5059,7 +5041,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + handler: NotifsHandler::new(peer, vec![], None), remaining_established: 0usize, }, )); @@ -5071,16 +5053,12 @@ mod tests { fn open_result_ok_non_existent_peer() { let (mut notif, _controller, _notif_service) = development_notifs(); let conn = ConnectionId::new_unchecked(0); - let connected = ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }; let mut conn_yielder = ConnectionYielder::new(); notif.on_connection_handler_event( PeerId::random(), conn, - conn_yielder.open_substream(PeerId::random(), 0, connected, vec![1, 2, 3, 4]), + conn_yielder.open_substream(PeerId::random(), 0, vec![1, 2, 3, 4]), ); } } diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index 967ef614c556..bff60ba1125f 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -73,7 +73,6 @@ use futures::{ prelude::*, }; use libp2p::{ - core::ConnectedPoint, swarm::{ handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream, SubstreamProtocol, @@ -117,9 +116,6 @@ pub struct NotifsHandler { /// When the connection with the remote has been successfully established. when_connection_open: Instant, - /// Whether we are the connection dialer or listener. - endpoint: ConnectedPoint, - /// Remote we are connected to. 
peer_id: PeerId, @@ -136,7 +132,6 @@ impl NotifsHandler { /// Creates new [`NotifsHandler`]. pub fn new( peer_id: PeerId, - endpoint: ConnectedPoint, protocols: Vec, metrics: Option, ) -> Self { @@ -154,7 +149,6 @@ impl NotifsHandler { }) .collect(), peer_id, - endpoint, when_connection_open: Instant::now(), events_queue: VecDeque::with_capacity(16), metrics: metrics.map_or(None, |metrics| Some(Arc::new(metrics))), @@ -281,8 +275,6 @@ pub enum NotifsHandlerOut { protocol_index: usize, /// Name of the protocol that was actually negotiated, if the default one wasn't available. negotiated_fallback: Option, - /// The endpoint of the connection that is open for custom protocols. - endpoint: ConnectedPoint, /// Handshake that was sent to us. /// This is normally a "Status" message, but this out of the concern of this code. received_handshake: Vec, @@ -590,7 +582,6 @@ impl ConnectionHandler for NotifsHandler { NotifsHandlerOut::OpenResultOk { protocol_index, negotiated_fallback: new_open.negotiated_fallback, - endpoint: self.endpoint.clone(), received_handshake: new_open.handshake, notifications_sink, inbound, @@ -889,7 +880,6 @@ pub mod tests { use libp2p::{ core::muxing::SubstreamBox, swarm::handler::{self, StreamUpgradeError}, - Multiaddr, Stream, }; use multistream_select::{dialer_select_proto, listener_select_proto, Negotiated, Version}; use std::{ @@ -925,7 +915,6 @@ pub mod tests { &mut self, peer: PeerId, protocol_index: usize, - endpoint: ConnectedPoint, received_handshake: Vec, ) -> NotifsHandlerOut { let (async_tx, async_rx) = @@ -954,7 +943,6 @@ pub mod tests { NotifsHandlerOut::OpenResultOk { protocol_index, negotiated_fallback: None, - endpoint, received_handshake, notifications_sink, inbound: false, @@ -1110,10 +1098,6 @@ pub mod tests { NotifsHandler { protocols: vec![proto], when_connection_open: Instant::now(), - endpoint: ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }, peer_id: PeerId::random(), events_queue: VecDeque::new(), metrics: None, @@ -1131,7 +1115,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1158,7 +1141,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1191,7 +1173,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1225,7 +1206,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1265,7 +1245,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1316,7 +1295,6 @@ pub mod tests { codec.set_max_len(usize::MAX); let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), 
NotificationsInSubstreamHandshake::NotSent, @@ -1355,7 +1333,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1415,7 +1392,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1452,7 +1428,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1498,7 +1473,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1547,7 +1521,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1583,7 +1556,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1658,7 +1630,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::PendingSend(vec![1, 2, 3, 4]), diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs index 4f6d32ae3b35..a7eb31fc5795 100644 --- a/substrate/client/network/src/protocol/notifications/service/mod.rs +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -89,9 +89,8 @@ impl MessageSink for NotificationSink { .await .map_err(|_| error::Error::ConnectionClosed)?; - permit.send(notification).map_err(|_| error::Error::ChannelClosed).map(|res| { + permit.send(notification).map_err(|_| error::Error::ChannelClosed).inspect(|_| { metrics::register_notification_sent(sink.0.metrics(), &sink.1, notification_len); - res }) } } @@ -263,13 +262,12 @@ impl NotificationService for NotificationHandle { .map_err(|_| error::Error::ConnectionClosed)? 
.send(notification) .map_err(|_| error::Error::ChannelClosed) - .map(|res| { + .inspect(|_| { metrics::register_notification_sent( sink.metrics(), &self.protocol, notification_len, ); - res }) } diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs index a8a9e453a7bb..e01bcbe0bad7 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -151,7 +151,7 @@ where type Future = Pin> + Send>>; type Error = NotificationsHandshakeError; - fn upgrade_inbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { + fn upgrade_inbound(self, mut socket: TSubstream, _negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; if handshake_len > MAX_HANDSHAKE_SIZE { @@ -174,15 +174,7 @@ where handshake: NotificationsInSubstreamHandshake::NotSent, }; - Ok(NotificationsInOpen { - handshake, - negotiated_fallback: if negotiated_name == self.protocol_names[0] { - None - } else { - Some(negotiated_name) - }, - substream, - }) + Ok(NotificationsInOpen { handshake, substream }) }) } } @@ -191,9 +183,6 @@ where pub struct NotificationsInOpen { /// Handshake sent by the remote. pub handshake: Vec, - /// If the negotiated name is not the "main" protocol name but a fallback, contains the - /// name of the negotiated fallback. - pub negotiated_fallback: Option, /// Implementation of `Stream` that allows receives messages from the substream. pub substream: NotificationsInSubstream, } diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs index 202dc7b2ed69..e48c53953ff8 100644 --- a/substrate/client/network/src/service/metrics.rs +++ b/substrate/client/network/src/service/metrics.rs @@ -72,7 +72,6 @@ pub struct Metrics { pub distinct_peers_connections_opened_total: Counter, pub incoming_connections_errors_total: CounterVec, pub incoming_connections_total: Counter, - pub issued_light_requests: Counter, pub kademlia_query_duration: HistogramVec, pub kademlia_random_queries_total: Counter, pub kademlia_records_count: Gauge, @@ -126,10 +125,6 @@ impl Metrics { "substrate_sub_libp2p_incoming_connections_total", "Total number of incoming connections on the listening sockets" )?, registry)?, - issued_light_requests: prometheus::register(Counter::new( - "substrate_issued_light_requests", - "Number of light client requests that our node has issued.", - )?, registry)?, kademlia_query_duration: prometheus::register(HistogramVec::new( HistogramOpts { common_opts: Opts::new( diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 96c1750b3116..dceea9954c6e 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -812,7 +812,8 @@ where } if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && - info.inbound && info.info.roles.is_full() + info.inbound && + info.info.roles.is_full() { match self.num_in_peers.checked_sub(1) { Some(value) => { diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index ca7280edba5f..c458c7a5da49 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -26,7 +26,6 @@ mod block_announce_validator; mod 
futures_stream; mod justification_requests; mod pending_responses; -mod request_metrics; mod schema; pub mod types; diff --git a/substrate/client/network/sync/src/request_metrics.rs b/substrate/client/network/sync/src/request_metrics.rs deleted file mode 100644 index 455f57ec3933..000000000000 --- a/substrate/client/network/sync/src/request_metrics.rs +++ /dev/null @@ -1,25 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -#[derive(Debug)] -pub struct Metrics { - pub pending_requests: u32, - pub active_requests: u32, - pub importing_requests: u32, - pub failed_requests: u32, -} diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 0f73e3194baa..06e243342fb2 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -628,9 +628,8 @@ struct VerifierAdapter { impl Verifier for VerifierAdapter { async fn verify(&self, block: BlockImportParams) -> Result, String> { let hash = block.header.hash(); - self.verifier.lock().await.verify(block).await.map_err(|e| { + self.verifier.lock().await.verify(block).await.inspect_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); - e }) } } diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index 4244c49bf7fb..91307d869281 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -749,24 +749,6 @@ async fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { } } -/// Waits for some time until the validation is successful. 
-struct DeferredBlockAnnounceValidator; - -impl BlockAnnounceValidator for DeferredBlockAnnounceValidator { - fn validate( - &mut self, - _: &Header, - _: &[u8], - ) -> Pin>> + Send>> - { - async { - futures_timer::Delay::new(std::time::Duration::from_millis(500)).await; - Ok(Validation::Success { is_new_best: false }) - } - .boxed() - } -} - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn wait_until_deferred_block_announce_validation_is_ready() { sp_tracing::try_init_simple(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index 073ee34a79f3..fa10fde388f9 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -343,7 +343,8 @@ where fn number( &self, hash: Block::Hash, - ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> + { self.client.number(hash) } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 13e63962fe8f..55bbfcdd8594 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -1748,11 +1748,9 @@ fn respects_block_rules() { } #[test] -#[cfg(disable_flaky)] -#[allow(dead_code)] -// FIXME: https://github.com/paritytech/substrate/issues/11321 +// FIXME: https://github.com/paritytech/polkadot-sdk/issues/48 fn returns_status_for_pruned_blocks() { - use sc_consensus::BlockStatus; + use sp_consensus::BlockStatus; sp_tracing::try_init_simple(); let tmp = tempfile::tempdir().unwrap(); diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 47ad22603e46..44c2c738ab1b 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -119,10 +119,9 @@ where let new_bytes = val.size(); self.bytes.fetch_add(new_bytes as isize, AtomicOrdering::Relaxed); self.length.fetch_add(1, AtomicOrdering::Relaxed); - self.inner_guard.insert(key, val).map(|old_val| { + self.inner_guard.insert(key, val).inspect(|old_val| { self.bytes.fetch_sub(old_val.size() as isize, AtomicOrdering::Relaxed); self.length.fetch_sub(1, AtomicOrdering::Relaxed); - old_val }) } diff --git a/substrate/client/utils/src/mpsc.rs b/substrate/client/utils/src/mpsc.rs index 91db7e1e7b01..051cb5b387ca 100644 --- a/substrate/client/utils/src/mpsc.rs +++ b/substrate/client/utils/src/mpsc.rs @@ -103,7 +103,7 @@ impl TracingUnboundedSender { /// Proxy function to `async_channel::Sender::try_send`. pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { - self.inner.try_send(msg).map(|s| { + self.inner.try_send(msg).inspect(|_| { UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, SENT_LABEL]).inc(); UNBOUNDED_CHANNELS_SIZE .with_label_values(&[self.name]) @@ -124,8 +124,6 @@ impl TracingUnboundedSender { Backtrace::force_capture(), ); } - - s }) } @@ -144,12 +142,11 @@ impl TracingUnboundedReceiver { /// Proxy function to [`async_channel::Receiver`] /// that discounts the messages taken out. 
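Several hunks in this patch make the same mechanical change: a `map`/`map_err` whose closure only performs a side effect and then returns the value untouched becomes `Result::inspect`/`inspect_err` (or `Option::inspect`), stabilized in Rust 1.76. Below is a minimal, self-contained sketch of the equivalence, using placeholder values rather than the actual channel, metrics, or pallet types touched in this diff:

```rust
fn main() {
    // Before: a `map` whose closure only logs and must hand the value back.
    let before: Result<u32, &str> = Ok(3);
    let _ = before.map(|v| {
        println!("sent {v}");
        v
    });

    // After: `inspect` borrows the Ok value for the side effect and returns the
    // result unchanged, so the trailing pass-through of `v` disappears.
    let after: Result<u32, &str> = Ok(3);
    let _ = after.inspect(|v| println!("sent {v}"));

    // Error-path counterpart (`map_err` -> `inspect_err`), used the same way in
    // the election-provider, executive and balances hunks further down.
    let failed: Result<u32, &str> = Err("boom");
    let _ = failed.inspect_err(|e| eprintln!("failed: {e}"));
}
```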
pub fn try_recv(&mut self) -> Result { - self.inner.try_recv().map(|s| { + self.inner.try_recv().inspect(|_| { UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, RECEIVED_LABEL]).inc(); UNBOUNDED_CHANNELS_SIZE .with_label_values(&[self.name]) .set(self.inner.len().saturated_into()); - s }) } diff --git a/substrate/frame/authorship/src/lib.rs b/substrate/frame/authorship/src/lib.rs index a0cca806e786..1de2262a2014 100644 --- a/substrate/frame/authorship/src/lib.rs +++ b/substrate/frame/authorship/src/lib.rs @@ -84,9 +84,8 @@ impl Pallet { let digest = >::digest(); let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); - T::FindAuthor::find_author(pre_runtime_digests).map(|a| { + T::FindAuthor::find_author(pre_runtime_digests).inspect(|a| { >::put(&a); - a }) } } diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs index f6af1da5e7b7..ee36a3a3ebd8 100644 --- a/substrate/frame/bags-list/src/lib.rs +++ b/substrate/frame/bags-list/src/lib.rs @@ -491,7 +491,7 @@ impl, I: 'static> ScoreProvider for Pallet { Node::::get(id).map(|node| node.score()).unwrap_or_default() } - frame_election_provider_support::runtime_benchmarks_fuzz_or_std_enabled! { + frame_election_provider_support::runtime_benchmarks_or_std_enabled! { fn set_score_of(id: &T::AccountId, new_score: T::Score) { ListNodes::::mutate(id, |maybe_node| { if let Some(node) = maybe_node.as_mut() { diff --git a/substrate/frame/bags-list/src/list/tests.rs b/substrate/frame/bags-list/src/list/tests.rs index e5fff76d75c7..fc4c4fbd088b 100644 --- a/substrate/frame/bags-list/src/list/tests.rs +++ b/substrate/frame/bags-list/src/list/tests.rs @@ -778,7 +778,7 @@ mod bags { assert_eq!(bag_1000.iter().count(), 3); bag_1000.insert_node_unchecked(node(4, None, None, bag_1000.bag_upper)); // panics in debug assert_eq!(bag_1000.iter().count(), 3); // in release we expect it to silently ignore the - // request. + // request. }); } diff --git a/substrate/frame/bags-list/src/mock.rs b/substrate/frame/bags-list/src/mock.rs index ea677cb9e73e..3690a876f62d 100644 --- a/substrate/frame/bags-list/src/mock.rs +++ b/substrate/frame/bags-list/src/mock.rs @@ -41,7 +41,7 @@ impl frame_election_provider_support::ScoreProvider for StakingMock { *NextVoteWeightMap::get().get(id).unwrap_or(&NextVoteWeight::get()) } - frame_election_provider_support::runtime_benchmarks_fuzz_or_std_enabled! { + frame_election_provider_support::runtime_benchmarks_or_std_enabled! { fn set_score_of(id: &AccountId, weight: Self::Score) { NEXT_VOTE_WEIGHT_MAP.with(|m| m.borrow_mut().insert(*id, weight)); } diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 87d2029d488e..f6858347f24e 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -1031,7 +1031,7 @@ pub mod pallet { } if did_provide && !does_provide { // This could reap the account so must go last. - frame_system::Pallet::::dec_providers(who).map_err(|r| { + frame_system::Pallet::::dec_providers(who).inspect_err(|_| { // best-effort revert consumer change. 
if did_consume && !does_consume { let _ = frame_system::Pallet::::inc_consumers(who).defensive(); @@ -1039,7 +1039,6 @@ pub mod pallet { if !did_consume && does_consume { let _ = frame_system::Pallet::::dec_consumers(who); } - r })?; } diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 2243859458be..a4984b34f6db 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -1017,7 +1017,7 @@ fn slash_consumed_slash_full_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { Balances::make_free_balance_be(&1, 1_000); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full + // Slashed completed in full assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); @@ -1029,7 +1029,7 @@ fn slash_consumed_slash_over_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { Balances::make_free_balance_be(&1, 1_000); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full + // Slashed completed in full assert_eq!(Balances::slash(&1, 1_000), (NegativeImbalance::new(900), 100)); // Account is still alive assert!(System::account_exists(&1)); @@ -1041,7 +1041,7 @@ fn slash_consumed_slash_partial_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { Balances::make_free_balance_be(&1, 1_000); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full + // Slashed completed in full assert_eq!(Balances::slash(&1, 800), (NegativeImbalance::new(800), 0)); // Account is still alive assert!(System::account_exists(&1)); diff --git a/substrate/frame/beefy-mmr/src/benchmarking.rs b/substrate/frame/beefy-mmr/src/benchmarking.rs index 135f95eabb99..fea6a2078f0f 100644 --- a/substrate/frame/beefy-mmr/src/benchmarking.rs +++ b/substrate/frame/beefy-mmr/src/benchmarking.rs @@ -51,9 +51,7 @@ mod benchmarks { #[benchmark] fn extract_validation_context() { - if !cfg!(feature = "test") { - pallet_mmr::UseLocalStorage::::set(true); - } + pallet_mmr::UseLocalStorage::::set(true); init_block::(1); let header = System::::finalize(); @@ -71,9 +69,7 @@ mod benchmarks { #[benchmark] fn read_peak() { - if !cfg!(feature = "test") { - pallet_mmr::UseLocalStorage::::set(true); - } + pallet_mmr::UseLocalStorage::::set(true); init_block::(1); @@ -91,9 +87,7 @@ mod benchmarks { /// the verification. We need to account for the peaks separately. #[benchmark] fn n_items_proof_is_non_canonical(n: Linear<2, 512>) { - if !cfg!(feature = "test") { - pallet_mmr::UseLocalStorage::::set(true); - } + pallet_mmr::UseLocalStorage::::set(true); for block_num in 1..=n { init_block::(block_num); diff --git a/substrate/frame/benchmarking/pov/src/weights.rs b/substrate/frame/benchmarking/pov/src/weights.rs index c4fc03d1dd93..1f20d5f0b515 100644 --- a/substrate/frame/benchmarking/pov/src/weights.rs +++ b/substrate/frame/benchmarking/pov/src/weights.rs @@ -45,6 +45,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; /// Weight functions needed for `frame_benchmarking_pallet_pov`. 
+#[allow(dead_code)] pub trait WeightInfo { fn storage_single_value_read() -> Weight; fn storage_single_value_ignored_read() -> Weight; @@ -79,6 +80,7 @@ pub trait WeightInfo { } /// Weights for `frame_benchmarking_pallet_pov` using the Substrate node and recommended hardware. +#[allow(dead_code)] pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `Pov::Value` (r:1 w:0) diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index 7b89a6e3e76f..e30d6fa2d143 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -459,12 +459,12 @@ pub mod pallet { Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - let slash_curator = |curator: &T::AccountId, - curator_deposit: &mut BalanceOf| { - let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; - T::OnSlash::on_unbalanced(imbalance); - *curator_deposit = Zero::zero(); - }; + let slash_curator = + |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; match bounty.status { BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs index 911fd4c4c49f..660a30ca5d26 100644 --- a/substrate/frame/child-bounties/src/lib.rs +++ b/substrate/frame/child-bounties/src/lib.rs @@ -473,12 +473,13 @@ pub mod pallet { let child_bounty = maybe_child_bounty.as_mut().ok_or(BountiesError::::InvalidIndex)?; - let slash_curator = |curator: &T::AccountId, - curator_deposit: &mut BalanceOf| { - let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; - T::OnSlash::on_unbalanced(imbalance); - *curator_deposit = Zero::zero(); - }; + let slash_curator = + |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = + T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; match child_bounty.status { ChildBountyStatus::Added => { diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index 1473022b5537..b5918a5e182d 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -114,7 +114,6 @@ pub struct ImportedFunction { pub struct WasmModule { pub code: Vec, pub hash: ::Output, - pub memory: Option, } impl From for WasmModule { @@ -233,7 +232,7 @@ impl From for WasmModule { let code = contract.build().into_bytes().unwrap(); let hash = T::Hashing::hash(&code); - Self { code: code.into(), hash, memory: def.memory } + Self { code: code.into(), hash } } } diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 31e0bf50b73e..046affe32d96 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -454,9 +454,6 @@ pub trait Executable: Sized { /// The code hash of the executable. fn code_hash(&self) -> &CodeHash; - /// Size of the contract code in bytes. - fn code_len(&self) -> u32; - /// The code does not contain any instructions which could lead to indeterminism. 
fn is_deterministic(&self) -> bool; } @@ -1838,10 +1835,6 @@ mod tests { &self.code_info } - fn code_len(&self) -> u32 { - 0 - } - fn is_deterministic(&self) -> bool { true } diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index f4ee76459c4e..c9786fa1516b 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -488,10 +488,6 @@ impl Executable for WasmBlob { &self.code_info } - fn code_len(&self) -> u32 { - self.code.len() as u32 - } - fn is_deterministic(&self) -> bool { matches!(self.code_info.determinism, Determinism::Enforced) } diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 7b8d14b0a611..1d181eb29cab 100644 --- a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -71,8 +71,8 @@ //! - Migrate a `Nominator` account to an `agent` account. See [`Pallet::migrate_to_agent`]. //! Explained in more detail in the `Migration` section. //! - Migrate unclaimed delegated funds from `agent` to delegator. When a nominator migrates to an -//! agent, the funds are held in a proxy account. This function allows the delegator to claim their -//! share of the funds from the proxy account. See [`Pallet::migrate_delegation`]. +//! agent, the funds are held in a proxy account. This function allows the delegator to claim +//! their share of the funds from the proxy account. See [`Pallet::migrate_delegation`]. //! //! ## Lazy Slashing //! One of the reasons why direct nominators on staking pallet cannot scale well is because all diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 09248e77848b..072cfe176b61 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -1208,9 +1208,8 @@ pub mod pallet { } let _ = Self::unsigned_pre_dispatch_checks(raw_solution) - .map_err(|err| { + .inspect_err(|err| { log!(debug, "unsigned transaction validation failed due to {:?}", err); - err }) .map_err(dispatch_error_to_invalid)?; diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index 394f58a38442..cb3249e388a3 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -687,7 +687,7 @@ sp_core::generate_feature_enabled_macro!( ); sp_core::generate_feature_enabled_macro!( - runtime_benchmarks_fuzz_or_std_enabled, - any(feature = "runtime-benchmarks", feature = "fuzzing", feature = "std"), + runtime_benchmarks_or_std_enabled, + any(feature = "runtime-benchmarks", feature = "std"), $ ); diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index 6d91448fd185..a1c5f69e1b65 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -829,7 +829,7 @@ impl Pallet { T::Currency::unreserve(who, removed.deposit); } - let maybe_next_best = RunnersUp::::mutate(|r| r.pop()).map(|next_best| { + let maybe_next_best = RunnersUp::::mutate(|r| r.pop()).inspect(|next_best| { // defensive-only: Members and runners-up are disjoint. This will always be err and // give us an index to insert. 
if let Err(index) = members.binary_search_by(|m| m.who.cmp(&next_best.who)) { @@ -839,7 +839,6 @@ impl Pallet { // is already a member, so not much more to do. log::error!(target: LOG_TARGET, "A member seems to also be a runner-up."); } - next_best }); Ok(maybe_next_best) })?; diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index b665cbbb62ae..741adbe6d26a 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -266,11 +266,12 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { { assert_eq!(body, price_payload); - let signature_valid = - ::Public, - frame_system::pallet_prelude::BlockNumberFor, - > as SignedPayload>::verify::(&price_payload, signature); + let signature_valid = ::Public, + frame_system::pallet_prelude::BlockNumberFor, + > as SignedPayload>::verify::( + &price_payload, signature + ); assert!(signature_valid); } @@ -320,11 +321,12 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { { assert_eq!(body, price_payload); - let signature_valid = - ::Public, - frame_system::pallet_prelude::BlockNumberFor, - > as SignedPayload>::verify::(&price_payload, signature); + let signature_valid = ::Public, + frame_system::pallet_prelude::BlockNumberFor, + > as SignedPayload>::verify::( + &price_payload, signature + ); assert!(signature_valid); } diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs index 55cf7cef9a7a..922c03afdd1e 100644 --- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -60,7 +60,7 @@ impl UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1 { /// /// - If the value doesn't exist, there is nothing to do. /// - If the value exists, it is read and then written back to storage inside a - /// [`crate::CurrentAndPreviousValue`]. + /// [`crate::CurrentAndPreviousValue`]. fn on_runtime_upgrade() -> frame_support::weights::Weight { // Read the old value from storage if let Some(old_value) = v0::Value::::take() { diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 1e7bac64e18f..fe702e1fc395 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -382,9 +382,8 @@ where , >>::try_state(*header.number(), select.clone()) - .map_err(|e| { + .inspect_err(|e| { log::error!(target: LOG_TARGET, "failure: {:?}", e); - e })?; if select.any() { let res = AllPalletsWithSystem::try_decode_entire_state(); diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index 016daa4cb78b..87e2276e768d 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -756,15 +756,13 @@ pub mod pallet { .map(|_| ()) // We ignore this error as it just means the amount we're trying to deposit is // dust and the beneficiary account doesn't exist. 
- .or_else( - |e| { - if e == TokenError::CannotCreate.into() { - Ok(()) - } else { - Err(e) - } - }, - )?; + .or_else(|e| { + if e == TokenError::CannotCreate.into() { + Ok(()) + } else { + Err(e) + } + })?; summary.receipts_on_hold.saturating_reduce(on_hold); } T::Currency::release(&HoldReason::NftReceipt.into(), &who, amount, Exact)?; diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index bf0fa4e1a12e..c96a50af8658 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -24,7 +24,6 @@ use frame_support::{ assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling, - SortedMembers, }, weights::Weight, }; @@ -98,14 +97,6 @@ ord_parameter_types! { pub const Five: u64 = 5; pub const Six: u64 = 6; } -pub struct OneToFive; -impl SortedMembers for OneToFive { - fn sorted_members() -> Vec { - vec![1, 2, 3, 4, 5] - } - #[cfg(feature = "runtime-benchmarks")] - fn add(_m: &u64) {} -} pub struct TestTracksInfo; impl TracksInfo for TestTracksInfo { diff --git a/substrate/frame/referenda/src/types.rs b/substrate/frame/referenda/src/types.rs index 1039b288b2ae..e83f28b472cd 100644 --- a/substrate/frame/referenda/src/types.rs +++ b/substrate/frame/referenda/src/types.rs @@ -258,7 +258,8 @@ impl< Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, ScheduleAddress: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - > ReferendumInfo + > + ReferendumInfo { /// Take the Decision Deposit from `self`, if there is one. Returns an `Err` if `self` is not /// in a valid state for the Decision Deposit to be refunded. diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 95f4110a2d76..012b4bfab9a9 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -349,18 +349,19 @@ where let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - let size = - if ident == "i8" || - ident == "i16" || ident == "i32" || - ident == "u8" || ident == "u16" || - ident == "u32" - { - 1 - } else if ident == "i64" || ident == "u64" { - 2 - } else { - panic!("Pass by value only supports primitives"); - }; + let size = if ident == "i8" || + ident == "i16" || + ident == "i32" || + ident == "u8" || + ident == "u16" || + ident == "u32" + { + 1 + } else if ident == "i64" || ident == "u64" { + 2 + } else { + panic!("Pass by value only supports primitives"); + }; registers_used += size; if registers_used > ALLOWED_REGISTERS { return quote! { diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index b4c5c88af3d6..04879cd87091 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -1387,18 +1387,6 @@ impl_ensure_origin_with_arg_ignoring_arg! 
{ {} } -struct InputFromRng<'a, T>(&'a mut T); -impl<'a, T: RngCore> codec::Input for InputFromRng<'a, T> { - fn remaining_len(&mut self) -> Result, codec::Error> { - return Ok(None) - } - - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { - self.0.fill_bytes(into); - Ok(()) - } -} - pub enum Period { Voting { elapsed: BlockNumber, more: BlockNumber }, Claim { elapsed: BlockNumber, more: BlockNumber }, diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index df8e844cdad9..2a13f99855b5 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -281,7 +281,7 @@ fn bidding_works() { // No more candidates satisfy the requirements assert_eq!(candidacies(), vec![]); assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - // Next period + // Next period run_to_block(16); // Same members assert_eq!(members(), vec![10, 30, 40, 50]); diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index ab2c00ca9ccc..dd178a95bec5 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -7995,7 +7995,7 @@ mod ledger_recovery { assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // OK assert_eq!(Bonded::::get(&333), Some(444)); // OK assert!(Payee::::get(&333).is_some()); // OK - // however, ledger associated with its controller was killed. + // however, ledger associated with its controller was killed. assert!(Ledger::::get(&444).is_none()); // NOK // side effects on 444 - ledger, bonded, payee, lock should be completely removed. diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 3e38adcc3c59..729a803a302e 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -592,8 +592,6 @@ pub struct Pallet { pub cfg_pattern: Vec, /// The doc literals pub docs: Vec, - /// attributes - pub attrs: Vec, } impl Pallet { @@ -764,7 +762,6 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?; diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index 69117c026816..5d39c2707def 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -304,6 +304,7 @@ fn test_derive_impl_attr_args_parsing() { #[test] fn test_runtime_type_with_doc() { + #[allow(dead_code)] trait TestTrait { type Test; } diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index d40a571c9eab..a2c1e6eec7f7 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -321,9 +321,10 @@ pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { /// This behaviour is useful to prevent bloating the runtime WASM blob from unneeded code. 
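The rewrite of `derive_runtime_debug_no_bound` just below stops branching on `cfg!(any(feature = "std", feature = "try-runtime"))` inside the proc macro, where `cfg!` only sees the proc-macro crate's own features, and instead emits both variants wrapped in the new `try_runtime_or_std_enabled!`/`try_runtime_and_std_not_enabled!` helpers (generated in `frame_support` further down via `generate_feature_enabled_macro!`), so feature selection happens in the crate that uses the derive. A rough sketch of the net effect on a hypothetical type; the feature names come from this patch, while the `Example` type and the stub string are illustrative only:

```rust
// Hypothetical type standing in for anything deriving `RuntimeDebugNoBound`.
struct Example;

// With `std` or `try-runtime` enabled in the consuming crate, the full
// `Debug` formatting is kept.
#[cfg(any(feature = "try-runtime", feature = "std"))]
impl core::fmt::Debug for Example {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Example").finish()
    }
}

// In a bare Wasm build both features are off and the impl collapses to a stub,
// keeping the runtime blob free of formatting code.
#[cfg(all(not(feature = "try-runtime"), not(feature = "std")))]
impl core::fmt::Debug for Example {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("<stripped>") // placeholder text, not the macro's exact output
    }
}
```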
#[proc_macro_derive(RuntimeDebugNoBound)] pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { - if cfg!(any(feature = "std", feature = "try-runtime")) { - no_bound::debug::derive_debug_no_bound(input) - } else { + let try_runtime_or_std_impl: proc_macro2::TokenStream = + no_bound::debug::derive_debug_no_bound(input.clone()).into(); + + let stripped_impl = { let input = syn::parse_macro_input!(input as syn::DeriveInput); let name = &input.ident; @@ -338,8 +339,22 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { } }; ) - .into() - } + }; + + let frame_support = match generate_access_from_frame_or_crate("frame-support") { + Ok(frame_support) => frame_support, + Err(e) => return e.to_compile_error().into(), + }; + + quote::quote!( + #frame_support::try_runtime_or_std_enabled! { + #try_runtime_or_std_impl + } + #frame_support::try_runtime_and_std_not_enabled! { + #stripped_impl + } + ) + .into() } /// Derive [`PartialEq`] but do not bound any generic. diff --git a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs index 248e83469435..b71aed680dc8 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -35,7 +35,7 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let where_clause = &genesis_build.where_clause; quote::quote_spanned!(genesis_build.attr_span => - #[cfg(feature = "std")] + #frame_support::std_enabled! { impl<#type_impl_gen> #frame_support::sp_runtime::BuildStorage for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause { fn assimilate_storage(&self, storage: &mut #frame_support::sp_runtime::Storage) -> std::result::Result<(), std::string::String> { @@ -45,5 +45,6 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { }) } } + } ) } diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 1b0c09c4e365..c31ddd8a47ba 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -324,15 +324,13 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { Self as #frame_support::traits::Hooks< #frame_system::pallet_prelude::BlockNumberFor:: > - >::try_state(n).map_err(|err| { + >::try_state(n).inspect_err(|err| { #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, "❌ {:?} try_state checks failed: {:?}", #pallet_name, err ); - - err }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 346dff46f12e..68ced1bc0ed3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -400,18 +400,19 @@ impl CallDef { } for (feeless_arg, arg) in feeless_check.inputs.iter().skip(1).zip(args.iter()) { - let feeless_arg_type = - if let syn::Pat::Type(syn::PatType { ty, .. }) = feeless_arg.clone() { - if let syn::Type::Reference(pat) = *ty { - pat.elem.clone() - } else { - let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; - return Err(syn::Error::new(ty.span(), msg)); - } + let feeless_arg_type = if let syn::Pat::Type(syn::PatType { ty, .. 
}) = + feeless_arg.clone() + { + if let syn::Type::Reference(pat) = *ty { + pat.elem.clone() } else { - let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; - return Err(syn::Error::new(feeless_arg.span(), msg)); - }; + let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; + return Err(syn::Error::new(ty.span(), msg)); + } + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; + return Err(syn::Error::new(feeless_arg.span(), msg)); + }; if feeless_arg_type != arg.2 { let msg = diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index de1efa267c89..52f57cd2cd8b 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -91,7 +91,6 @@ impl Pallet { cfg_pattern, pallet_parts, docs, - attrs: item.attrs.clone(), }) } } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 28283f2a5a06..269867e38c51 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2523,6 +2523,8 @@ pub use frame_support_procedural::register_default_impl; sp_core::generate_feature_enabled_macro!(std_enabled, feature = "std", $); // Generate a macro that will enable/disable code based on `try-runtime` feature being active. sp_core::generate_feature_enabled_macro!(try_runtime_enabled, feature = "try-runtime", $); +sp_core::generate_feature_enabled_macro!(try_runtime_or_std_enabled, any(feature = "try-runtime", feature = "std"), $); +sp_core::generate_feature_enabled_macro!(try_runtime_and_std_not_enabled, all(not(feature = "try-runtime"), not(feature = "std")), $); // Helper for implementing GenesisBuilder runtime API pub mod genesis_builder_helper; diff --git a/substrate/frame/support/src/storage/generator/double_map.rs b/substrate/frame/support/src/storage/generator/double_map.rs index b68f3fa495ff..a9116f1f66bd 100644 --- a/substrate/frame/support/src/storage/generator/double_map.rs +++ b/substrate/frame/support/src/storage/generator/double_map.rs @@ -346,9 +346,8 @@ where final_key }; - unhashed::take(old_key.as_ref()).map(|value| { + unhashed::take(old_key.as_ref()).inspect(|value| { unhashed::put(Self::storage_double_map_final_key(key1, key2).as_ref(), &value); - value }) } } diff --git a/substrate/frame/support/src/storage/generator/map.rs b/substrate/frame/support/src/storage/generator/map.rs index e905df41a5a6..2d1f6c9f73a2 100644 --- a/substrate/frame/support/src/storage/generator/map.rs +++ b/substrate/frame/support/src/storage/generator/map.rs @@ -311,9 +311,8 @@ impl> storage::StorageMap final_key }; - unhashed::take(old_key.as_ref()).map(|value| { + unhashed::take(old_key.as_ref()).inspect(|value| { unhashed::put(Self::storage_map_final_key(key).as_ref(), &value); - value }) } } diff --git a/substrate/frame/support/src/storage/generator/nmap.rs b/substrate/frame/support/src/storage/generator/nmap.rs index 0466583a2795..9083aba9d32c 100755 --- a/substrate/frame/support/src/storage/generator/nmap.rs +++ b/substrate/frame/support/src/storage/generator/nmap.rs @@ -305,9 +305,8 @@ where final_key }; - unhashed::take(old_key.as_ref()).map(|value| { + unhashed::take(old_key.as_ref()).inspect(|value| { unhashed::put(Self::storage_n_map_final_key::(key).as_ref(), &value); - value }) } } diff --git 
a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index c70d9de54467..24aad3de0b33 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -129,7 +129,8 @@ impl OnEmpty, MaxValues, >, - > where + > +where Prefix: StorageInstance, Hasher1: crate::hash::StorageHasher, Hasher2: crate::hash::StorageHasher, diff --git a/substrate/frame/support/src/tests/mod.rs b/substrate/frame/support/src/tests/mod.rs index 5e1bcc777df4..7c90a12d4167 100644 --- a/substrate/frame/support/src/tests/mod.rs +++ b/substrate/frame/support/src/tests/mod.rs @@ -769,5 +769,6 @@ fn derive_partial_eq_no_bound_core_mod() { crate::DefaultNoBound, crate::EqNoBound, )] + #[allow(dead_code)] struct Test; } diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 492475d6f63c..4d3b122daf67 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -488,7 +488,7 @@ pub trait DefensiveMin { /// assert_eq!(4, 4_u32.defensive_min(4_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMin; /// // min(4, 3) panics. /// 4_u32.defensive_min(3_u32); @@ -505,7 +505,7 @@ pub trait DefensiveMin { /// assert_eq!(3, 3_u32.defensive_strict_min(4_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMin; /// // min(4, 4) panics. /// 4_u32.defensive_strict_min(4_u32); @@ -552,7 +552,7 @@ pub trait DefensiveMax { /// assert_eq!(4, 4_u32.defensive_max(4_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMax; /// // max(4, 5) panics. /// 4_u32.defensive_max(5_u32); @@ -569,7 +569,7 @@ pub trait DefensiveMax { /// assert_eq!(4, 4_u32.defensive_strict_max(3_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMax; /// // max(4, 4) panics. 
/// 4_u32.defensive_strict_max(4_u32); diff --git a/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs index 8dbeecd8e860..a7465c87fb27 100644 --- a/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs +++ b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs @@ -197,7 +197,8 @@ impl TryDecodeEntireS QueryKind, OnEmpty, MaxValues, - > where + > +where Prefix: CountedStorageMapInstance, Hasher: StorageHasher, Key: FullCodec, @@ -229,7 +230,8 @@ impl QueryKind, OnEmpty, MaxValues, - > where + > +where Prefix: StorageInstance, Hasher1: StorageHasher, Key1: FullCodec, diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr b/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr index 7e0a02be649b..04203e4b684b 100644 --- a/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr @@ -8,3 +8,7 @@ error[E0277]: the `?` operator can only be used in a function that returns `Resu | ^ cannot use the `?` operator in a function that returns `()` | = help: the trait `FromResidual>` is not implemented for `()` +help: consider adding return type + | +31 | fn bench() -> Result<(), Box> { + | +++++++++++++++++++++++++++++++++++++++++ diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index b28cae2ddefa..59e36775d464 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -53,7 +53,15 @@ note: required by a bound in `frame_system::Event` | ^^^^^^ required by this bound in `Event` = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | ^ the trait `Config` is not implemented for `Runtime` + | + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -63,9 +71,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... 
| 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `TryFrom` is not implemented for `RawOrigin<_>` | -note: required because it appears within the type `RuntimeEvent` + = help: the trait `TryFrom` is implemented for `RawOrigin<::AccountId>` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -75,15 +86,25 @@ note: required because it appears within the type `RuntimeEvent` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Clone` - --> $RUST/core/src/clone.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` | - | pub trait Clone: Sized { - | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `Callable` is implemented for `Pallet` + = note: required for `Pallet` to implement `Callable` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:26:3 + | +26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ the trait `Config` is not implemented for `Runtime` + | +note: required by a bound in `GenesisConfig` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub struct GenesisConfig { + | ^^^^^^ required by this bound in `GenesisConfig` -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -93,9 +114,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required because it appears within the type `RuntimeEvent` +note: required by a bound in `frame_system::Event` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub enum Event { + | ^^^^^^ required by this bound in `Event` + = note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0369]: binary operation `==` cannot be applied to type `&frame_system::Event` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! 
{ @@ -106,14 +134,21 @@ note: required because it appears within the type `RuntimeEvent` 27 | | } 28 | | } | |_^ -note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs | - | pub trait EncodeLike: Sized + Encode {} - | ^^^^^ required by this bound in `EncodeLike` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +note: an implementation of `Config` might be missing for `Runtime` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where + | |______________________^ must implement `Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the derive macro `PartialEq` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -123,9 +158,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required because it appears within the type `RuntimeEvent` +note: required by a bound in `frame_system::Event` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub enum Event { + | ^^^^^^ required by this bound in `Event` + = note: this error originates in the derive macro `Eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `frame_system::Event: Encode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -135,15 +177,12 @@ note: required because it appears within the type `RuntimeEvent` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.12/src/codec.rs + | |_^ the trait `Encode` is not implemented for `frame_system::Event` | - | pub trait Decode: Sized { - | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `Encode` is implemented for `frame_system::Event` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -153,21 +192,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste ... 
| 27 | | } 28 | | } - | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required because it appears within the type `frame_system::Event` +note: required by a bound in `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { - | ^^^^^ -note: required by a bound in `From` - --> $RUST/core/src/convert/mod.rs - | - | pub trait From: Sized { - | ^ required by this bound in `From` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + | ^^^^^^ required by this bound in `Event` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` +error[E0277]: the trait bound `frame_system::Event: Decode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -177,29 +211,40 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste ... | 27 | | } 28 | | } - | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` + | |_^ the trait `Decode` is not implemented for `frame_system::Event` | -note: required because it appears within the type `frame_system::Event` + = help: the trait `Decode` is implemented for `frame_system::Event` + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:26:11 + | +26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^^^^^^^ the trait `Config` is not implemented for `Runtime` + | +note: required by a bound in `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { - | ^^^^^ -note: required by a bound in `TryInto` - --> $RUST/core/src/convert/mod.rs - | - | pub trait TryInto: Sized { - | ^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + | ^^^^^^ required by this bound in `Event` error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | construct_runtime! { - | ^ the trait `Config` is not implemented for `Runtime` +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: std::fmt::Debug` | - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `std::fmt::Debug` is implemented for `frame_system::Event` + = note: required for `frame_system::Event` to implement `std::fmt::Debug` + = note: required for the cast from `&frame_system::Event` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satisfied +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -209,10 +254,12 @@ error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satis ... | 27 | | } 28 | | } - | |_^ the trait `TryFrom` is not implemented for `RawOrigin<_>` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Error: std::fmt::Debug` | - = help: the trait `TryFrom` is implemented for `RawOrigin<::AccountId>` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `std::fmt::Debug` is implemented for `frame_system::Error` + = note: required for `frame_system::Error` to implement `std::fmt::Debug` + = note: required for the cast from `&frame_system::Error` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -224,11 +271,15 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` + | |_^ the trait `Config` is not implemented for `Runtime` | - = help: the trait `Callable` is implemented for `Pallet` - = note: required for `Pallet` to implement `Callable` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:21:13 + | +21 | pub struct Runtime where + | ^^^^^^^ the trait `Config` is not implemented for `Runtime` error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -240,10 +291,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... 
| 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RawOrigin<_>: Into<_>` | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = note: required for `RawOrigin<_>` to implement `Into` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -253,13 +306,9 @@ note: required because it appears within the type `RuntimeCall` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Clone` - --> $RUST/core/src/clone.rs + | |_^ the trait `Config` is not implemented for `Runtime` | - | pub trait Clone: Sized { - | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PartialEq` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -271,10 +320,17 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:26:11 + | +26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^^^^^^^ the trait `Config` is not implemented for `Runtime` + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -284,12 +340,10 @@ note: required because it appears within the type `RuntimeCall` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: PalletInfoAccess` | - | pub trait EncodeLike: Sized + Encode {} - | ^^^^^ required by this bound in `EncodeLike` + = help: the trait `PalletInfoAccess` is implemented for `Pallet` + = note: required for `Pallet` to implement `PalletInfoAccess` = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied @@ -302,10 +356,13 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... 
| 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` | + = help: the trait `Callable` is implemented for `Pallet` = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -315,15 +372,13 @@ note: required because it appears within the type `RuntimeCall` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.12/src/codec.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` | - | pub trait Decode: Sized { - | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `Callable` is implemented for `Pallet` + = note: required for `Pallet` to implement `Callable` + = note: this error originates in the derive macro `PartialEq` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied +error[E0369]: binary operation `==` cannot be applied to type `&frame_system::Call` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -333,10 +388,22 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` +note: an implementation of `Config` might be missing for `Runtime` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where + | |______________________^ must implement `Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the derive macro `PartialEq` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `frame_system::Call: Encode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -346,27 +413,31 @@ note: required because it appears within the type `RuntimeCall` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | |_^ the trait `Encode` is not implemented for `frame_system::Call` | - | type Config; - | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `Encode` is implemented for `frame_system::Call` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:26:3 + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, - | ^^^^^^ the trait `Config` is not implemented for `Runtime` +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required by a bound in `GenesisConfig` +note: required by a bound in `frame_system::Call` --> $WORKSPACE/substrate/frame/system/src/lib.rs | - | pub struct GenesisConfig { - | ^^^^^^ required by this bound in `GenesisConfig` + | #[pallet::call] + | ^^^^ required by this bound in `Call` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied +error[E0277]: the trait bound `frame_system::Call: Decode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -376,10 +447,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Decode` is not implemented for `frame_system::Call` | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = help: the trait `Decode` is implemented for `frame_system::Call` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -389,15 +462,256 @@ note: required because it appears within the type `RuntimeCall` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` + | + = help: the trait `Callable` is implemented for `Pallet` + = note: required for `Pallet` to implement `Callable` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the method `get_dispatch_info` exists for reference `&Call`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ method cannot be called on `&Call` due to unsatisfied trait bounds + | + ::: $WORKSPACE/substrate/frame/system/src/lib.rs + | + | #[pallet::call] + | ---- doesn't satisfy `frame_system::Call: GetDispatchInfo` + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` + which is required by `frame_system::Call: GetDispatchInfo` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the method `is_feeless` exists for reference `&Call`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ method cannot be called on `&Call` due to unsatisfied trait bounds + | + ::: $WORKSPACE/substrate/frame/system/src/lib.rs + | + | #[pallet::call] + | ---- doesn't satisfy `frame_system::Call: CheckIfFeeless` + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` + which is required by `frame_system::Call: CheckIfFeeless` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs | - | type Call; - | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0599]: the method `get_call_name` exists for reference `&Call`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |__^ method cannot be called on `&Call` due to unsatisfied trait bounds + | + ::: $WORKSPACE/substrate/frame/system/src/lib.rs + | + | #[pallet::call] + | ---- doesn't satisfy `frame_system::Call: GetCallName` + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` + which is required by `frame_system::Call: GetCallName` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `storage_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `call_functions` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the variant or associated item `event_metadata` exists for enum `Event`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |__^ variant or associated item cannot be called on `Event` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `pallet_constants_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `error_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `pallet_documentation_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! 
{ @@ -407,9 +721,21 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` - | -note: required because it appears within the type `RuntimeEvent` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `GenesisConfig: Serialize` + | + = help: the trait `Serialize` is implemented for `GenesisConfig` + = note: required for `GenesisConfig` to implement `Serialize` +note: required by a bound in `frame_support::sp_runtime::serde::ser::SerializeStruct::serialize_field` + --> $CARGO/serde-1.0.210/src/ser/mod.rs + | + | fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + | --------------- required by a bound in this associated function + | where + | T: ?Sized + Serialize; + | ^^^^^^^^^ required by this bound in `SerializeStruct::serialize_field` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -419,15 +745,16 @@ note: required because it appears within the type `RuntimeEvent` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Result` - --> $RUST/core/src/result.rs + | |_^ the trait `Config` is not implemented for `Runtime` | - | pub enum Result { - | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +note: required by a bound in `GenesisConfig` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub struct GenesisConfig { + | ^^^^^^ required by this bound in `GenesisConfig` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -437,9 +764,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required because it appears within the type `RuntimeEvent` +note: required by a bound in `GenesisConfig` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub struct GenesisConfig { + | ^^^^^^ required by this bound in `GenesisConfig` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::serde::Deserialize` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -449,13 +783,11 @@ note: required because it appears within the type `RuntimeEvent` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `TryInto` - --> $RUST/core/src/convert/mod.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `GenesisConfig: std::default::Default` | - | pub trait TryInto: Sized { - | ^^^^^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `std::default::Default` is implemented for `GenesisConfig` + = note: required for `GenesisConfig` to implement `std::default::Default` + = note: this error originates in the derive macro `Default` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -467,10 +799,24 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` - | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `(Pallet,): OnGenesis` + | + = help: the following other types implement trait `OnGenesis`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and $N others + = note: required for `Pallet` to implement `OnGenesis` + = note: 1 redundant requirement hidden + = note: required for `(Pallet,)` to implement `OnGenesis` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0282]: type annotations needed --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -480,10 +826,8 @@ note: required because it appears within the type `RuntimeCall` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `Result` - --> $RUST/core/src/result.rs + | |_^ cannot infer type | - | pub enum Result { - | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: internal compiler error: compiler/rustc_middle/src/ty/normalize_erasing_regions.rs:168:90: Failed to normalize std::rc::Rc::RuntimeCall,)>), bound_vars: [Region(BrAnon)] }, Binder { value: Projection(Output = bool), bound_vars: [Region(BrAnon)] }] + '{erased}, std::alloc::Global>, std::alloc::Global>, maybe try to call `try_normalize_erasing_regions` instead diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 0f7afb2b9901..c50cba71d4e7 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -28,7 +28,7 @@ error[E0412]: cannot find type `Event` in module `pallet` | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items +help: consider importing one of these enums | 18 + use frame_support_test::Event; | @@ -48,7 +48,7 @@ error[E0433]: failed to resolve: could not find `Event` in `pallet` | |_^ could not find `Event` in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items +help: consider importing one of these enums | 18 + use frame_support_test::Event; | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 10093b26f5a8..2aa794edc3c9 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -28,7 +28,7 @@ error[E0412]: cannot find type `GenesisConfig` in module `pallet` | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items +help: consider importing one of these structs | 18 + use frame_system::GenesisConfig; | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 30005c07cb63..d8dc7bd45bc6 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -54,8 +54,8 @@ error[E0599]: no function or associated item named `is_inherent` found for struc | = help: items from traits can only be used if the trait is implemented and in scope = 
note: the following traits define an item `is_inherent`, perhaps you need to implement one of them: - candidate #1: `ProvideInherent` - candidate #2: `IsInherent` + candidate #1: `IsInherent` + candidate #2: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index d0f4b44ab0d5..58c42311b87b 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -28,7 +28,7 @@ error[E0412]: cannot find type `Origin` in module `pallet` | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items +help: consider importing one of these type aliases | 18 + use frame_support_test::Origin; | diff --git a/substrate/frame/support/test/tests/derive_impl.rs b/substrate/frame/support/test/tests/derive_impl.rs index 675e85f4bfce..3514593c8568 100644 --- a/substrate/frame/support/test/tests/derive_impl.rs +++ b/substrate/frame/support/test/tests/derive_impl.rs @@ -25,15 +25,9 @@ struct SomeRectangle {} #[frame_support::register_default_impl(SomeRectangle)] impl Shape for SomeRectangle { - #[cfg(not(feature = "feature-frame-testing"))] fn area(&self) -> u32 { 10 } - - #[cfg(feature = "feature-frame-testing")] - fn area(&self) -> u32 { - 0 - } } struct SomeSquare {} @@ -44,9 +38,5 @@ impl Shape for SomeSquare {} #[test] fn test_feature_parsing() { let square = SomeSquare {}; - #[cfg(not(feature = "feature-frame-testing"))] assert_eq!(square.area(), 10); - - #[cfg(feature = "feature-frame-testing")] - assert_eq!(square.area(), 0); } diff --git a/substrate/frame/support/test/tests/derive_no_bound.rs b/substrate/frame/support/test/tests/derive_no_bound.rs index b19147078051..6fc4ea12c513 100644 --- a/substrate/frame/support/test/tests/derive_no_bound.rs +++ b/substrate/frame/support/test/tests/derive_no_bound.rs @@ -159,6 +159,7 @@ fn test_struct_unnamed() { PartialOrdNoBound, OrdNoBound, )] +#[allow(dead_code)] struct StructNoGenerics { field1: u32, field2: u64, diff --git a/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr b/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr index db8a50796077..8bf82bff7809 100644 --- a/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr +++ b/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr @@ -23,3 +23,13 @@ note: required by a bound in `std::cmp::Eq` | | pub trait Eq: PartialEq { | ^^^^^^^^^^^^^^^ required by this bound in `Eq` + +error[E0599]: `::C` is not an iterator + --> tests/derive_no_bound_ui/ord.rs:24:2 + | +24 | c: T::C, + | ^ `::C` is not an iterator + | + = note: the following trait bounds were not satisfied: + `::C: Iterator` + which is required by `&mut ::C: Iterator` diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 7f1ce0556eab..3d6aa1d83749 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -2431,9 +2431,10 @@ fn 
post_runtime_upgrade_detects_storage_version_issues() { // any storage version "enabled". assert!( ExecutiveWithUpgradePallet4::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost) - .unwrap_err() == "On chain storage version set, while the pallet \ + .unwrap_err() == + "On chain storage version set, while the pallet \ doesn't have the `#[pallet::storage_version(VERSION)]` attribute." - .into() + .into() ); }); } diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 2a4ceecd8fa4..1f91f7740238 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -33,3 +33,12 @@ error[E0369]: binary operation `==` cannot be applied to type `&, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ + +error: unused variable: `origin` + --> tests/pallet_ui/call_argument_invalid_bound.rs:38:14 + | +38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^^^^ help: if this is intentional, prefix it with an underscore: `_origin` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index fc993e9ff68f..4657c0a0c601 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -34,7 +34,7 @@ error[E0369]: binary operation `==` cannot be applied to type `&, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ -error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied +error[E0277]: the trait bound `::Bar: Encode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:36 | 18 | #[frame_support::pallet] @@ -45,10 +45,19 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is | = note: required for `::Bar` to implement `Encode` -error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied +error[E0277]: the trait bound `::Bar: Decode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:42 | 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar`, which is required by `::Bar: Decode` | = note: required for `::Bar` to implement `Decode` + +error: unused variable: `origin` + --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:14 + | +38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^^^^ help: if this is intentional, prefix it with an underscore: `_origin` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index d6486a490794..f829baeb4c11 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -27,3 +27,12 @@ help: consider annotating `Bar` with `#[derive(Debug)]` 34 + 
#[derive(Debug)] 35 | struct Bar; | + +error: unused variable: `origin` + --> tests/pallet_ui/call_argument_invalid_bound_3.rs:40:14 + | +40 | pub fn foo(origin: OriginFor, _bar: Bar) -> DispatchResultWithPostInfo { + | ^^^^^^ help: if this is intentional, prefix it with an underscore: `_origin` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr index 3256e69528a2..8049c07648ca 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr @@ -5,3 +5,9 @@ error[E0369]: binary operation `!=` cannot be applied to type `NoStorageVersionS | ------------------------------- ^^ -------------------------------- StorageVersion | | | NoStorageVersionSet + | +note: the foreign item type `NoStorageVersionSet` doesn't implement `PartialEq` + --> $WORKSPACE/substrate/frame/support/src/traits/metadata.rs + | + | pub struct NoStorageVersionSet; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not implement `PartialEq` diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 629fefebbe2c..2fcc33282140 100644 --- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -38,13 +38,13 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied | |__________________^ the trait `MaxEncodedLen` is not implemented for `Vec`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr index 44d8d3fcadbf..92fb5b9cb38d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr @@ -5,12 +5,12 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied | ^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError` | = help: the following other types implement trait `PalletError`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + 
(TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others diff --git a/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index b7327943ee20..c04499dbbd14 100644 --- a/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -13,3 +13,9 @@ help: add missing generic argument | 29 | impl Hooks for Pallet {} | +++++++++++++ + +error[E0277]: the trait bound `pallet::Pallet: Hooks<<<::Block as frame_support::sp_runtime::traits::Block>::Header as frame_support::sp_runtime::traits::Header>::Number>` is not satisfied + --> tests/pallet_ui/hooks_invalid_item.rs:28:12 + | +28 | #[pallet::hooks] + | ^^^^^ the trait `Hooks<<<::Block as frame_support::sp_runtime::traits::Block>::Header as frame_support::sp_runtime::traits::Header>::Number>` is not implemented for `pallet::Pallet` diff --git a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr index 5ea3be470a06..516bddd2c61b 100644 --- a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr @@ -6,6 +6,6 @@ error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERE | = help: implement the missing item: `type Call = /* Type */;` = help: implement the missing item: `type Error = /* Type */;` - = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = value;` + = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = [42; 8];` = help: implement the missing item: `fn create_inherent(_: &InherentData) -> std::option::Option<::Call> { todo!() }` = help: implement the missing item: `fn is_inherent(_: &::Call) -> bool { todo!() }` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index c8c41e805014..fa6b7284d889 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -12,10 +12,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -34,14 +34,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -84,14 +84,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: - bool - char - i8 - i16 - i32 - i64 - i128 - u8 + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) and $N others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -105,10 +105,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -122,14 +122,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + 
`&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -144,14 +144,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -167,10 +167,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` @@ -184,14 +184,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -206,14 +206,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 08b35eb8ed15..944b194b7bcf 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ 
b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -12,10 +12,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -34,14 +34,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -84,14 +84,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: - bool - char - i8 - i16 - i32 - i64 - i128 - u8 + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) and $N others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -105,10 +105,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: 
required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -122,14 +122,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -144,14 +144,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -167,10 +167,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` @@ -184,14 +184,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -206,14 +206,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other 
types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 042a6f67fd31..95ec76e29c0b 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -12,13 +12,13 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 9f57b85f3a8a..8351dd92d594 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -12,14 +12,14 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others = note: required for `NMapKey` to implement `KeyGeneratorMaxEncodedLen` = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr 
b/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr index 41dcd273d962..0b13dcff90c6 100644 --- a/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr @@ -3,3 +3,9 @@ error[E0599]: no function or associated item named `new` found for type `u32` in | 37 | u32::new() | ^^^ function or associated item not found in `u32` + | +help: there is a method `ne` with a similar name, but with different arguments + --> $RUST/core/src/cmp.rs + | + | fn ne(&self, other: &Rhs) -> bool { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index 534ba1e863fc..aa1094e3fe48 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -848,6 +848,7 @@ pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispa #[docify::export] #[test] fn last_runtime_upgrade_spec_version_usage() { + #[allow(dead_code)] struct Migration; impl OnRuntimeUpgrade for Migration { diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index 35d5322a6f33..bac89967d6af 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -273,8 +273,10 @@ fn signed_ext_length_fee_is_also_updated_per_congestion() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - assert_ok!(ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(3, 0)), len)); + assert_ok!( + ChargeTransactionPayment::::from(10) // tipped + .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(3, 0)), len) + ); assert_eq!( Balances::free_balance(1), 100 // original diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs index ed5544fe55ca..a4f66298f3fa 100644 --- a/substrate/frame/utility/src/lib.rs +++ b/substrate/frame/utility/src/lib.rs @@ -134,8 +134,8 @@ pub mod pallet { fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = ((core::mem::size_of::<::RuntimeCall>() as u32 + - CALL_ALIGN - 1) / CALL_ALIGN) * - CALL_ALIGN; + CALL_ALIGN - 1) / + CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; diff --git a/substrate/frame/vesting/src/tests.rs b/substrate/frame/vesting/src/tests.rs index 004da0dfbfa1..57cb59f27a4d 100644 --- a/substrate/frame/vesting/src/tests.rs +++ b/substrate/frame/vesting/src/tests.rs @@ -182,7 +182,7 @@ fn unvested_balance_should_not_transfer() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); // Account 1 cannot send more than vested amount... 
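For context on the `batched_calls_limit` hunk above: the reformatted expression rounds the encoded call size up to the next multiple of `CALL_ALIGN` before the limit is derived from the maximum allocation. A minimal standalone sketch of that ceil-to-alignment arithmetic; the `CALL_ALIGN` value here is assumed for illustration and is not taken from the pallet:

```rust
/// Assumed alignment for the sketch; the pallet defines its own constant.
const CALL_ALIGN: u32 = 1024;

/// Round `size` up to the next multiple of `CALL_ALIGN` (classic ceiling division).
fn aligned_call_size(size: u32) -> u32 {
    ((size + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN
}

fn main() {
    // One byte still occupies a full aligned slot...
    assert_eq!(aligned_call_size(1), 1024);
    // ...one byte past a slot boundary spills into the next slot...
    assert_eq!(aligned_call_size(1025), 2048);
    // ...and an exact multiple is left unchanged.
    assert_eq!(aligned_call_size(2048), 2048);
}
```

Rounding up first keeps the limit conservative: a call that overflows an aligned slot by a single byte is charged for the whole slot.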
assert_noop!(Balances::transfer_allow_death(Some(1).into(), 2, 56), TokenError::Frozen); @@ -194,7 +194,7 @@ fn vested_balance_should_transfer() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest(Some(1).into())); assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 55)); @@ -232,7 +232,7 @@ fn vested_balance_should_transfer_using_vest_other() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest_other(Some(2).into(), 1)); assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 55)); @@ -286,7 +286,7 @@ fn extra_balance_should_transfer() { assert_eq!(Vesting::vesting_balance(&2), Some(200)); assert_ok!(Vesting::vest(Some(2).into())); assert_ok!(Balances::transfer_allow_death(Some(2).into(), 3, 100)); // Account 2 can send extra - // units gained + // units gained }); } @@ -296,7 +296,7 @@ fn liquid_funds_should_transfer_with_delayed_vesting() { let user12_free_balance = Balances::free_balance(&12); assert_eq!(user12_free_balance, 2560); // Account 12 has free balance - // Account 12 has liquid funds + // Account 12 has liquid funds assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); // Account 12 has delayed vesting diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 121ce6b99938..1d21f23eb804 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -41,3 +41,4 @@ harness = false [features] "enable-staging-api" = [] +disable-ui-tests = [] diff --git a/substrate/primitives/api/test/tests/trybuild.rs b/substrate/primitives/api/test/tests/trybuild.rs index b0a334eb7a22..b13e5df9d6f8 100644 --- a/substrate/primitives/api/test/tests/trybuild.rs +++ b/substrate/primitives/api/test/tests/trybuild.rs @@ -15,18 +15,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::env; - #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. - if env::var("RUN_UI_TESTS").is_err() { + if std::env::var("RUN_UI_TESTS").is_err() { return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("SKIP_WASM_BUILD", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); + + // Warnings are part of our UI. 
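The `RUSTFLAGS` line that follows is what makes compiler warnings part of the recorded UI output, and the new `unused variable` blocks in the `.stderr` snapshots further down are a direct consequence. As a purely hypothetical illustration (this file does not exist in the repository), a `tests/ui/*.rs` case like the one below now fails under `--deny warnings`, so its warning text lands in the committed snapshot:

```rust
// Hypothetical tests/ui/unused_param.rs, illustrative only: with
// RUSTFLAGS="--deny warnings" exported to the trybuild child process,
// the unused parameter is promoted from a warning to a hard error and
// trybuild records it in the expected unused_param.stderr.
fn helper(data: u64) {} // error: unused variable: `data` (via `-D warnings`)

fn main() {
    helper(42);
}
```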
+ std::env::set_var("RUSTFLAGS", "--deny warnings"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/substrate/primitives/api/test/tests/ui/deprecation_info.stderr b/substrate/primitives/api/test/tests/ui/deprecation_info.stderr index 2466c3ea5d50..78c687e876de 100644 --- a/substrate/primitives/api/test/tests/ui/deprecation_info.stderr +++ b/substrate/primitives/api/test/tests/ui/deprecation_info.stderr @@ -12,6 +12,21 @@ error: Invalid deprecation attribute: missing `note` 20 | #[deprecated(unknown_kw = "test")] | ^ +error: malformed `deprecated` attribute input + --> tests/ui/deprecation_info.rs:24:3 + | +24 | #[deprecated = 5] + | ^^^^^^^^^^^^^^^^^ + | +help: the following are the possible correct uses + | +24 | #[deprecated = "reason"] + | +24 | #[deprecated(/*opt*/ since = "version", /*opt*/ note = "reason")] + | +24 | #[deprecated] + | + error[E0541]: unknown meta item 'unknown_kw' --> tests/ui/deprecation_info.rs:20:16 | diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 535bbb178d5f..d625020fe4d3 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -22,10 +22,7 @@ error[E0053]: method `test` has an incompatible type for trait --> tests/ui/impl_incorrect_method_signature.rs:33:17 | 33 | fn test(data: String) {} - | ^^^^^^ - | | - | expected `u64`, found `std::string::String` - | help: change the parameter type to match the trait: `u64` + | ^^^^^^ expected `u64`, found `std::string::String` | note: type in trait --> tests/ui/impl_incorrect_method_signature.rs:27:17 @@ -34,6 +31,10 @@ note: type in trait | ^^^ = note: expected signature `fn(u64)` found signature `fn(std::string::String)` +help: change the parameter type to match the trait + | +33 | fn test(data: u64) {} + | ~~~ error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:33:11 @@ -53,3 +54,12 @@ note: associated function defined here | 27 | fn test(data: u64); | ^^^^ + +error: unused variable: `data` + --> tests/ui/impl_incorrect_method_signature.rs:33:11 + | +33 | fn test(data: String) {} + | ^^^^ help: if this is intentional, prefix it with an underscore: `_data` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 845755771877..764a0bafaa4f 100644 --- a/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -21,8 +21,7 @@ error[E0050]: method `test` has 2 parameters but the declaration in trait `Api:: 29 | / sp_api::mock_impl_runtime_apis! { 30 | | impl Api for MockApi { 31 | | fn test(self, data: u64) {} -32 | | -33 | | fn test2(&mut self, data: u64) {} +... | 34 | | } 35 | | } | |_^ expected 3 parameters, found 2 @@ -41,8 +40,7 @@ error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api: 29 | / sp_api::mock_impl_runtime_apis! { 30 | | impl Api for MockApi { 31 | | fn test(self, data: u64) {} -32 | | -33 | | fn test2(&mut self, data: u64) {} +... 
| 34 | | } 35 | | } | |_^ expected 3 parameters, found 2 diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index f4e0f3b0afb0..26be311c02fa 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -22,10 +22,7 @@ error[E0053]: method `test` has an incompatible type for trait --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:17 | 33 | fn test(data: &u64) { - | ^^^^ - | | - | expected `u64`, found `&u64` - | help: change the parameter type to match the trait: `u64` + | ^^^^ expected `u64`, found `&u64` | note: type in trait --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:27:17 @@ -34,6 +31,10 @@ note: type in trait | ^^^ = note: expected signature `fn(_)` found signature `fn(&_)` +help: change the parameter type to match the trait + | +33 | fn test(data: u64) { + | ~~~ error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:11 @@ -57,3 +58,12 @@ help: consider removing the borrow | 33 | fn test(data: &u64) { | + +error: unused variable: `data` + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:11 + | +33 | fn test(data: &u64) { + | ^^^^ help: if this is intentional, prefix it with an underscore: `_data` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/primitives/runtime-interface/tests/ui.rs b/substrate/primitives/runtime-interface/tests/ui.rs index 821d0b73f268..408ddbc981ee 100644 --- a/substrate/primitives/runtime-interface/tests/ui.rs +++ b/substrate/primitives/runtime-interface/tests/ui.rs @@ -15,18 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::env; - #[rustversion::attr(not(stable), ignore)] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. - if env::var("RUN_UI_TESTS").is_err() { + if std::env::var("RUN_UI_TESTS").is_err() { return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
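The E0053 snapshots above changed shape because rustc now emits the parameter-type fix as a separate structured `help` suggestion instead of an inline label. A minimal reproduction of the same diagnostic, not taken from the repository and deliberately non-compiling:

```rust
// Minimal reproduction of the E0053 diagnostic whose rendering changed in the
// snapshots above: the impl takes `String` where the trait declares `u64`, so
// rustc reports "incompatible type for trait" and offers
// "change the parameter type to match the trait" as a separate help block.
trait Api {
    fn test(data: u64);
}

struct Runtime;

impl Api for Runtime {
    fn test(data: String) {} // error[E0053]: expected `u64`, found `String`
}

fn main() {}
```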
- env::set_var("SKIP_WASM_BUILD", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr index 10012ede793d..1c1649d011e6 100644 --- a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr +++ b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr @@ -9,9 +9,41 @@ note: found an item that was configured out | 25 | fn bar() {} | ^^^ - = note: the item is gated behind the `bar-feature` feature +note: the item is gated behind the `bar-feature` feature + --> tests/ui/no_feature_gated_method.rs:24:8 + | +24 | #[cfg(feature = "bar-feature")] + | ^^^^^^^^^^^^^^^^^^^^^^^ note: found an item that was configured out --> tests/ui/no_feature_gated_method.rs:25:5 | 25 | fn bar() {} | ^^^ +note: the item is gated here + --> tests/ui/no_feature_gated_method.rs:20:1 + | +20 | #[runtime_interface] + | ^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the attribute macro `runtime_interface` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: unexpected `cfg` condition value: `bar-feature` + --> tests/ui/no_feature_gated_method.rs:24:8 + | +24 | #[cfg(feature = "bar-feature")] + | ^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: expected values for `feature` are: `default`, `disable_target_static_assertions`, and `std` + = help: consider adding `bar-feature` as a feature in `Cargo.toml` + = note: see for more information about checking conditional configuration + = note: `-D unexpected-cfgs` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unexpected_cfgs)]` + +error: unexpected `cfg` condition value: `bar-feature` + --> tests/ui/no_feature_gated_method.rs:27:12 + | +27 | #[cfg(not(feature = "bar-feature"))] + | ^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: expected values for `feature` are: `default`, `disable_target_static_assertions`, and `std` + = help: consider adding `bar-feature` as a feature in `Cargo.toml` + = note: see for more information about checking conditional configuration diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index 25ef15eaf56e..fc63bc76decb 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -2342,8 +2342,6 @@ impl BlockNumberProvider for () { mod tests { use super::*; use crate::codec::{Decode, Encode, Input}; - #[cfg(feature = "bls-experimental")] - use sp_core::{bls377, bls381}; use sp_core::{ crypto::{Pair, UncheckedFrom}, ecdsa, ed25519, sr25519, @@ -2486,14 +2484,4 @@ mod tests { fn ecdsa_verify_works() { signature_verify_test!(ecdsa); } - - #[cfg(feature = "bls-experimental")] - fn bls377_verify_works() { - signature_verify_test!(bls377) - } - - #[cfg(feature = "bls-experimental")] - fn bls381_verify_works() { - signature_verify_test!(bls381) - } } diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 7a79c4e8a1f1..baad7e621bed 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -713,6 +713,7 @@ where } /// Implement `Encode` by forwarding the stored raw vec. 
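The `unexpected_cfgs` errors above and the custom `build_profile` / `build_opt_level` cfgs emitted by the build scripts further down are two sides of the same lint: under check-cfg, every custom cfg a crate reacts to should be declared. How this PR wires that up is not shown in this hunk; one common pattern, sketched here under the assumption of a Cargo recent enough to understand the `cargo::` directive syntax (the repository may instead rely on workspace `[lints]` configuration), is to declare the cfg in the same build script that emits it:

```rust
// build.rs sketch (assumption: not necessarily how this PR handles check-cfg).
use std::env;

fn main() {
    // Tell rustc's check-cfg which values `build_profile` may take, so
    // `cfg!(build_profile = "...")` call sites don't trip `unexpected_cfgs`.
    println!("cargo::rustc-check-cfg=cfg(build_profile, values(\"debug\", \"release\", \"unknown\"))");

    // Emit the cfg itself, falling back to "unknown" when PROFILE is unset,
    // mirroring the benchmarking-cli build script below.
    let profile = env::var("PROFILE").unwrap_or_else(|_| "unknown".into());
    println!("cargo:rustc-cfg=build_profile=\"{}\"", profile);
}
```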
+#[allow(dead_code)] struct EncodeOpaqueValue(Vec); impl Encode for EncodeOpaqueValue { diff --git a/substrate/test-utils/cli/build.rs b/substrate/test-utils/cli/build.rs index a68cb706e8fb..c63f0b8b6674 100644 --- a/substrate/test-utils/cli/build.rs +++ b/substrate/test-utils/cli/build.rs @@ -20,6 +20,6 @@ use std::env; fn main() { if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + println!("cargo:rustc-cfg=build_profile=\"{}\"", profile); } } diff --git a/substrate/test-utils/cli/src/lib.rs b/substrate/test-utils/cli/src/lib.rs index d77a89b4dbf4..70d68f6f1835 100644 --- a/substrate/test-utils/cli/src/lib.rs +++ b/substrate/test-utils/cli/src/lib.rs @@ -130,7 +130,7 @@ pub fn start_node() -> Child { /// build_substrate(&["--features=try-runtime"]); /// ``` pub fn build_substrate(args: &[&str]) { - let is_release_build = !cfg!(build_type = "debug"); + let is_release_build = !cfg!(build_profile = "debug"); // Get the root workspace directory from the CARGO_MANIFEST_DIR environment variable let mut cmd = Command::new("cargo"); diff --git a/substrate/utils/fork-tree/src/lib.rs b/substrate/utils/fork-tree/src/lib.rs index ff86467c85d5..fe349b6c29af 100644 --- a/substrate/utils/fork-tree/src/lib.rs +++ b/substrate/utils/fork-tree/src/lib.rs @@ -810,12 +810,11 @@ impl<'a, H, N, V> Iterator for ForkTreeIterator<'a, H, N, V> { type Item = &'a Node; fn next(&mut self) -> Option { - self.stack.pop().map(|node| { + self.stack.pop().inspect(|node| { // child nodes are stored ordered by max branch height (decreasing), // we want to keep this ordering while iterating but since we're // using a stack for iterator state we need to reverse it. self.stack.extend(node.children.iter().rev()); - node }) } } diff --git a/substrate/utils/frame/benchmarking-cli/build.rs b/substrate/utils/frame/benchmarking-cli/build.rs index 1545d1e0c21e..06cdb7973abd 100644 --- a/substrate/utils/frame/benchmarking-cli/build.rs +++ b/substrate/utils/frame/benchmarking-cli/build.rs @@ -24,8 +24,12 @@ use std::env; pub fn main() { if let Ok(opt_level) = env::var("OPT_LEVEL") { println!("cargo:rustc-cfg=build_opt_level={:?}", opt_level); + } else { + println!("cargo:rustc-cfg=build_opt_level={:?}", "unknown"); } if let Ok(profile) = env::var("PROFILE") { println!("cargo:rustc-cfg=build_profile={:?}", profile); + } else { + println!("cargo:rustc-cfg=build_profile={:?}", "unknown"); } } diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index a6eda078fde0..dcd5e6f1ade9 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -601,9 +601,10 @@ fn project_enabled_features( // We don't want to enable the `std`/`default` feature for the wasm build and // we need to check if the feature is enabled by checking the env variable. *f != "std" && - *f != "default" && env::var(format!("CARGO_FEATURE_{}", feature_env)) - .map(|v| v == "1") - .unwrap_or_default() + *f != "default" && + env::var(format!("CARGO_FEATURE_{feature_env}")) + .map(|v| v == "1") + .unwrap_or_default() }) .map(|d| d.0.clone()) .collect::>(); diff --git a/templates/parachain/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs index 5acad6e60dec..29572c3ff609 100644 --- a/templates/parachain/pallets/template/src/benchmarking.rs +++ b/templates/parachain/pallets/template/src/benchmarking.rs @@ -1,5 +1,4 @@ //! 
Benchmarking setup for pallet-template -#![cfg(feature = "runtime-benchmarks")] use super::*; use frame_benchmarking::v2::*; diff --git a/templates/solochain/pallets/template/src/benchmarking.rs b/templates/solochain/pallets/template/src/benchmarking.rs index d1a9554aed6d..8af5d246f761 100644 --- a/templates/solochain/pallets/template/src/benchmarking.rs +++ b/templates/solochain/pallets/template/src/benchmarking.rs @@ -1,5 +1,5 @@ //! Benchmarking setup for pallet-template -#![cfg(feature = "runtime-benchmarks")] + use super::*; #[allow(unused)] diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index b7c1c375094f..83cbebbc61cd 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -600,7 +600,7 @@ runtime = [ "sp-wasm-interface", "sp-weights", ] -node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-parachain-lib", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", 
"sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] +node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-parachain-lib", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", 
"sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] tuples-96 = [ "frame-support-procedural?/tuples-96", "frame-support?/tuples-96", @@ -1967,6 +1967,11 @@ path = "../polkadot/node/core/approval-voting" default-features = false optional = true +[dependencies.polkadot-node-core-approval-voting-parallel] +path = "../polkadot/node/core/approval-voting-parallel" +default-features = false +optional = true + [dependencies.polkadot-node-core-av-store] path = "../polkadot/node/core/av-store" default-features = false diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index b7b9c15fe588..4a653dab99b6 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -796,6 +796,10 @@ pub use polkadot_node_collation_generation; #[cfg(feature = "polkadot-node-core-approval-voting")] pub use polkadot_node_core_approval_voting; +/// Approval Voting Subsystem running approval work in parallel. +#[cfg(feature = "polkadot-node-core-approval-voting-parallel")] +pub use polkadot_node_core_approval_voting_parallel; + /// The Availability Store subsystem. Wrapper over the DB that stores availability data and /// chunks. #[cfg(feature = "polkadot-node-core-av-store")]