diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 2daa9a058a4..0a8021b7bdb 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.17.0", - "prover": "16.3.0", + "core": "24.18.0", + "prover": "16.4.0", "zk_toolbox": "0.1.1" } diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index a04e64ae3ea..85eefc86227 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -63,7 +63,11 @@ jobs: run: ci_run yarn l1-contracts test - name: Rust unit tests - run: ci_run zk test rust + run: | + ci_run zk test rust + # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible + # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. + ci_run zk f cargo test --release -p vm-benchmark --bench criterion --bench fill_bootloader loadtest: runs-on: [matterlabs-ci-runner] diff --git a/Cargo.lock b/Cargo.lock index 5dbaac90eca..c87269ce2d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1045,14 +1045,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba840a74f8d0b8b1334e93e4c87514a27c9be83d42d9f78d0c577572bb5f435" +checksum = "2593c02ad6b4b31ba63506c3f807f666133dd36bf47422f99b1d2947cf3c8dc1" dependencies = [ "derivative", "serde", - "zk_evm 0.150.0", - "zkevm_circuits 0.150.3", + "zk_evm 0.150.4", + "zkevm_circuits 0.150.4", ] [[package]] @@ -1112,12 +1112,12 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79f3177b2bcd4ef5da9d2ca6916f6de31fb1973dfece27907a8dc7c69233494d" +checksum = "42d1a86b9c2207f3bb2dff5f00d1af1cb95004b6d07e9bacb6519fe08f12c04b" dependencies = [ "bellman_ce", - "circuit_encodings 0.150.2", + "circuit_encodings 0.150.4", "derivative", "rayon", "serde", @@ -3147,15 +3147,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.12.0" @@ -4717,7 +4708,7 @@ checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" dependencies = [ "bytes", "heck 0.4.1", - "itertools 0.11.0", + "itertools 0.10.5", "log", "multimap", "once_cell", @@ -4738,7 +4729,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.10.5", "proc-macro2 1.0.86", "quote 1.0.36", "syn 2.0.72", @@ -7281,8 +7272,10 @@ version = "0.1.0" dependencies = [ "criterion", "iai", + "rand 0.8.5", "tokio", "vise", + "zksync_types", "zksync_vlog", "zksync_vm_benchmark_harness", ] @@ -7294,8 +7287,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3 dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.0", - "zkevm_opcode_defs 0.150.0", + "zk_evm_abstractions 0.150.4", + "zkevm_opcode_defs 0.150.4", ] [[package]] @@ -7871,9 +7864,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.0" +version = 
"0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5bf91304aa14827758afa3def8cf622f9a7f9fb65fe5d5099018dbacf0c5984" +checksum = "e2dbb0ed38d61fbd04bd7575755924d1303e129c04c909abba7f5bfcc6260bcf" dependencies = [ "anyhow", "lazy_static", @@ -7881,7 +7874,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.0", + "zk_evm_abstractions 0.150.4", ] [[package]] @@ -7912,15 +7905,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc313cea4ac9ef6b855264b1425cbe9de30dd8f009559dabcb6b2896122da5db" +checksum = "31460aacfe65b39ac484a2a2e0bbb02baf141f65264bf48e1e4f59ab375fe933" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.0", + "zkevm_opcode_defs 0.150.4", ] [[package]] @@ -7969,9 +7962,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.3" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d64bda28dec766324d2e5095a46fb141540d86a232106760dfb20ab4ae6e5c" +checksum = "abdfaa95dfe0878fda219dd17a6cc8c28711e2067785910c0e06d3ffdca78629" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7984,7 +7977,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.0", + "zkevm_opcode_defs 0.150.4", ] [[package]] @@ -8031,9 +8024,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3328c012d444bdbfadb754a72c01a56879eb66584efc71eac457e89e7843608" +checksum = "bb7c5c7b4481a646f8696b08cee64a8dec097509a6378d18242f81022f327f1e" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -8127,18 +8120,19 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.150.2", + "circuit_sequencer_api 0.150.4", "futures 0.3.28", "itertools 0.10.5", "num_cpus", "rand 0.8.5", + "serde", "serde_json", "tokio", "tracing", "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.0", + "zk_evm 0.150.4", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -8154,9 +8148,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a463106f37cfa589896e6a165b5bb0533013377990e19f10e8c4894346a62e8b" +checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" dependencies = [ "anyhow", "once_cell", @@ -8190,9 +8184,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1bed5bd7e219cc1429ae36732f6d943e4d98a1b4ddcbb60cff89a3a4d3bcd6" +checksum = "e22e3bfe96fa30a57313e774a5e8c74ffee884abff57ecacc10e8832315ee8a2" dependencies = [ "anyhow", "async-trait", @@ -8212,9 +8206,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f0883af373e9198fd27c0148e7e47b37f912cb4b444bec3f7eed0af0b0dfc69" +checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" dependencies = [ "anyhow", "blst", @@ -8236,9 +8230,9 @@ dependencies = [ [[package]] name = 
"zksync_consensus_executor" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d70afdfc07658d6bb309237c5da2cab40ab7efed95538c92fd0340b1b967818c" +checksum = "a7fcde1275970a6b8a33ea2ade5cc994d6392f95509ce374e0e7a26cde4cd6db" dependencies = [ "anyhow", "async-trait", @@ -8257,9 +8251,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82f6f2dbd122b60a199843bd70b9b979190e81458fe17180e23f930ea2194e1" +checksum = "e6ee48bee7dae8adb2769c7315adde1780832d05ecb6a77c08cdda53a315992a" dependencies = [ "anyhow", "async-trait", @@ -8292,9 +8286,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e426aa7c68a12dde702c3ec4ef49de24d9054ef908384232b7887e043ca3f2fe" +checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" dependencies = [ "anyhow", "bit-vec", @@ -8314,9 +8308,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8388c33fd5bc3725e58c26db2d3016538c6221c6448b3e92cf5df07f6074a028" +checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" dependencies = [ "anyhow", "async-trait", @@ -8334,9 +8328,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "612920e56dcb99f227bc23e1254f4dabc7cb4c5cd1a9ec400ceba0ec6fa77c1e" +checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" dependencies = [ "anyhow", "rand 0.8.5", @@ -8541,6 +8535,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_interface", ] [[package]] @@ -8674,7 +8669,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.17.0" +version = "24.18.0" dependencies = [ "anyhow", "assert_matches", @@ -8796,9 +8791,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b76d0e08b3e0970565f7a9a611278547f4f1dbd6184a250c8c5e743aed61c525" +checksum = "9949f48ea1a9f9a0e73242d4d1e87e681095181827486b3fcc2cf93e5aa03280" dependencies = [ "boojum", "derivative", @@ -8808,7 +8803,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.3", + "zkevm_circuits 0.150.4", ] [[package]] @@ -8828,6 +8823,18 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_logs_bloom_backfill" +version = "0.1.0" +dependencies = [ + "anyhow", + "tokio", + "tracing", + "zksync_dal", + "zksync_types", + "zksync_vm_interface", +] + [[package]] name = "zksync_mempool" version = "0.1.0" @@ -8920,7 +8927,7 @@ dependencies = [ "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.2", + "circuit_sequencer_api 0.150.4", "ethabi", "hex", "itertools 0.10.5", @@ -8935,7 +8942,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.0", + "zk_evm 0.150.4", "zksync_contracts", "zksync_eth_signer", "zksync_system_constants", @@ -9115,6 +9122,7 @@ dependencies = [ 
"zksync_external_proof_integration_api", "zksync_health_check", "zksync_house_keeper", + "zksync_logs_bloom_backfill", "zksync_metadata_calculator", "zksync_node_api_server", "zksync_node_consensus", @@ -9291,9 +9299,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d82fd63f27681b9c01f0e01e3060e71b72809db8e21d9130663ee92bd1e391" +checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" dependencies = [ "anyhow", "bit-vec", @@ -9312,9 +9320,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3c158ab4d211053886371d4a00514bdf8ebdf826d40ee03b98fee2e0d1605e" +checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" dependencies = [ "anyhow", "heck 0.5.0", @@ -9362,7 +9370,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.2", + "circuit_sequencer_api 0.150.4", "serde", "serde_json", "serde_with", @@ -9740,6 +9748,7 @@ dependencies = [ name = "zksync_vm_benchmark_harness" version = "0.1.0" dependencies = [ + "assert_matches", "once_cell", "zk_evm 0.133.0", "zksync_contracts", @@ -9761,7 +9770,6 @@ dependencies = [ "zksync_contracts", "zksync_system_constants", "zksync_types", - "zksync_utils", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6619fd26175..d4855a34b9d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ members = [ "core/node/tee_verifier_input_producer", "core/node/base_token_adjuster", "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", # Libraries "core/lib/db_connection", "core/lib/zksync_core_leftovers", @@ -206,30 +207,30 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.2" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.4" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.1.0" } -kzg = { package = "zksync_kzg", version = "=0.150.2" } +kzg = { package = "zksync_kzg", version = "=0.150.4" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133.0" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140.0" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.0" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } # New VM; pinned to a specific commit because of instability vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "9a38900d7af9b1d72b47ce3be980e77c1239a61d" } # Consensus dependencies. 
-zksync_concurrency = "=0.1.0-rc.10"
-zksync_consensus_bft = "=0.1.0-rc.10"
-zksync_consensus_crypto = "=0.1.0-rc.10"
-zksync_consensus_executor = "=0.1.0-rc.10"
-zksync_consensus_network = "=0.1.0-rc.10"
-zksync_consensus_roles = "=0.1.0-rc.10"
-zksync_consensus_storage = "=0.1.0-rc.10"
-zksync_consensus_utils = "=0.1.0-rc.10"
-zksync_protobuf = "=0.1.0-rc.10"
-zksync_protobuf_build = "=0.1.0-rc.10"
+zksync_concurrency = "=0.1.0-rc.11"
+zksync_consensus_bft = "=0.1.0-rc.11"
+zksync_consensus_crypto = "=0.1.0-rc.11"
+zksync_consensus_executor = "=0.1.0-rc.11"
+zksync_consensus_network = "=0.1.0-rc.11"
+zksync_consensus_roles = "=0.1.0-rc.11"
+zksync_consensus_storage = "=0.1.0-rc.11"
+zksync_consensus_utils = "=0.1.0-rc.11"
+zksync_protobuf = "=0.1.0-rc.11"
+zksync_protobuf_build = "=0.1.0-rc.11"
# "Local" dependencies
zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" }
@@ -300,3 +301,4 @@ zksync_contract_verification_server = { version = "0.1.0", path = "core/node/con
zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" }
zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" }
zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" }
+zksync_logs_bloom_backfill = { version = "0.1.0", path = "core/node/logs_bloom_backfill" }
diff --git a/bin/zkt b/bin/zkt
index 337ad5d7395..9447230486f 100755
--- a/bin/zkt
+++ b/bin/zkt
@@ -1,7 +1,13 @@
#!/usr/bin/env bash
cd $(dirname $0)
-cd ../zk_toolbox
-cargo install --path ./crates/zk_inception --force
-cargo install --path ./crates/zk_supervisor --force
+if which zkup >/dev/null; then
+  zkup -p .. --alias
+else
+  echo "zkup is not installed, please install it: https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup"
+  cd ../zk_toolbox
+  cargo install --path ./crates/zk_inception --force
+  cargo install --path ./crates/zk_supervisor --force
+fi
+
diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md
index 962113833f0..67fdc8cddc9 100644
--- a/core/CHANGELOG.md
+++ b/core/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## [24.18.0](https://github.com/matter-labs/zksync-era/compare/core-v24.17.0...core-v24.18.0) (2024-08-14)
+
+
+### Features
+
+* add logs bloom ([#2633](https://github.com/matter-labs/zksync-era/issues/2633)) ([1067462](https://github.com/matter-labs/zksync-era/commit/10674620d1a04333507ca17b9a34ab3cb58846cf))
+* **zk_toolbox:** Minting base token ([#2571](https://github.com/matter-labs/zksync-era/issues/2571)) ([ae2dd3b](https://github.com/matter-labs/zksync-era/commit/ae2dd3bbccdffc25b040313b2c7983a936f36aac))
+
## [24.17.0](https://github.com/matter-labs/zksync-era/compare/core-v24.16.0...core-v24.17.0) (2024-08-13)
diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml
index 68f7e8c29a4..5b7309a55a2 100644
--- a/core/bin/external_node/Cargo.toml
+++ b/core/bin/external_node/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "zksync_external_node"
description = "Non-validator ZKsync node"
-version = "24.17.0" # x-release-please-version
+version = "24.18.0" # x-release-please-version
edition.workspace = true
authors.workspace = true
homepage.workspace = true
diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs
index 0b150c9872a..c30cc1a432b 100644
--- a/core/bin/external_node/src/node_builder.rs
+++ b/core/bin/external_node/src/node_builder.rs
@@ -22,6 +22,7 @@ use zksync_node_framework::{
    consistency_checker::ConsistencyCheckerLayer,
healtcheck_server::HealthCheckLayer, l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, + logs_bloom_backfill::LogsBloomBackfillLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, metadata_calculator::MetadataCalculatorLayer, @@ -412,6 +413,11 @@ impl ExternalNodeBuilder { Ok(self) } + fn add_logs_bloom_backfill_layer(mut self) -> anyhow::Result { + self.node.add_layer(LogsBloomBackfillLayer); + Ok(self) + } + fn web3_api_optional_config(&self) -> Web3ServerOptionalConfig { // The refresh interval should be several times lower than the pruning removal delay, so that // soft-pruning will timely propagate to the API server. @@ -602,7 +608,8 @@ impl ExternalNodeBuilder { .add_pruning_layer()? .add_consistency_checker_layer()? .add_commitment_generator_layer()? - .add_batch_status_updater_layer()?; + .add_batch_status_updater_layer()? + .add_logs_bloom_backfill_layer()?; } } } diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index 89a3807422b..990dd672975 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -154,6 +154,7 @@ async fn create_l2_block( protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), }; conn.blocks_dal() diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 1c22ce5c41a..7e0ff0e4920 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -210,5 +210,6 @@ fn load_env_config() -> anyhow::Result { external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), experimental_vm_config: ExperimentalVmConfig::from_env().ok(), + prover_job_monitor_config: None, }) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index f4b3dbe9b40..7c4503876e9 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -37,6 +37,7 @@ use zksync_node_framework::{ house_keeper::HouseKeeperLayer, l1_batch_commitment_mode_validation::L1BatchCommitmentModeValidationLayer, l1_gas::L1GasLayer, + logs_bloom_backfill::LogsBloomBackfillLayer, metadata_calculator::MetadataCalculatorLayer, node_storage_init::{ main_node_strategy::MainNodeInitStrategyLayer, NodeStorageInitializerLayer, @@ -441,7 +442,7 @@ impl MainNodeBuilder { fn add_house_keeper_layer(mut self) -> anyhow::Result { let house_keeper_config = try_load_config!(self.configs.house_keeper_config); let fri_prover_config = try_load_config!(self.configs.prover_config); - let fri_witness_generator_config = try_load_config!(self.configs.witness_generator); + let fri_witness_generator_config = try_load_config!(self.configs.witness_generator_config); let fri_prover_group_config = try_load_config!(self.configs.prover_group_config); let fri_proof_compressor_config = try_load_config!(self.configs.proof_compressor_config); @@ -609,6 +610,12 @@ impl MainNodeBuilder { Ok(self) } + fn add_logs_bloom_backfill_layer(mut self) -> anyhow::Result { + self.node.add_layer(LogsBloomBackfillLayer); + + Ok(self) + } + /// This layer will make sure that the database is initialized correctly, /// e.g. genesis will be performed if it's required. /// @@ -679,7 +686,8 @@ impl MainNodeBuilder { self = self .add_l1_gas_layer()? .add_storage_initialization_layer(LayerKind::Task)? 
- .add_state_keeper_layer()?; + .add_state_keeper_layer()? + .add_logs_bloom_backfill_layer()?; } Component::HttpApi => { self = self diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index 9765435f097..5969cca6b8c 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -2,11 +2,19 @@ // TODO (PLA-773): Should be moved to the prover workspace. -use std::{convert::TryFrom, str::FromStr}; +use std::{ + collections::{hash_map::IntoIter, HashMap}, + convert::TryFrom, + iter::once, + str::FromStr, +}; use serde::{Deserialize, Serialize}; -use crate::protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}; +use crate::{ + protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + prover_dal::JobCountStatistics, +}; const BLOB_CHUNK_SIZE: usize = 31; const ELEMENTS_PER_4844_BLOCK: usize = 4096; @@ -127,6 +135,14 @@ impl From for AggregationRound { } impl AggregationRound { + pub const ALL_ROUNDS: [AggregationRound; 5] = [ + AggregationRound::BasicCircuits, + AggregationRound::LeafAggregation, + AggregationRound::NodeAggregation, + AggregationRound::RecursionTip, + AggregationRound::Scheduler, + ]; + pub fn next(&self) -> Option { match self { AggregationRound::BasicCircuits => Some(AggregationRound::LeafAggregation), @@ -187,6 +203,156 @@ impl TryFrom for AggregationRound { } } +/// Wrapper for mapping from protocol version to prover circuits job stats +#[derive(Debug)] +pub struct ProtocolVersionedCircuitProverStats { + protocol_versioned_circuit_stats: HashMap, +} + +impl FromIterator for ProtocolVersionedCircuitProverStats { + fn from_iter>(iter: I) -> Self { + let mut mapping = HashMap::new(); + for entry in iter { + let protocol_semantic_version = entry.protocol_semantic_version; + let circuit_prover_stats: &mut CircuitProverStats = + mapping.entry(protocol_semantic_version).or_default(); + circuit_prover_stats.add(entry.circuit_id_round_tuple, entry.job_count_statistics); + } + Self { + protocol_versioned_circuit_stats: mapping, + } + } +} + +impl IntoIterator for ProtocolVersionedCircuitProverStats { + type Item = (ProtocolSemanticVersion, CircuitProverStats); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.protocol_versioned_circuit_stats.into_iter() + } +} + +/// Wrapper for mapping between circuit/aggregation round to number of such jobs (queued and in progress) +#[derive(Debug)] +pub struct CircuitProverStats { + circuits_prover_stats: HashMap, +} + +impl IntoIterator for CircuitProverStats { + type Item = (CircuitIdRoundTuple, JobCountStatistics); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.circuits_prover_stats.into_iter() + } +} + +impl CircuitProverStats { + fn add( + &mut self, + circuit_id_round_tuple: CircuitIdRoundTuple, + job_count_statistics: JobCountStatistics, + ) { + let stats = self + .circuits_prover_stats + .entry(circuit_id_round_tuple) + .or_default(); + stats.queued += job_count_statistics.queued; + stats.in_progress += job_count_statistics.in_progress; + } +} + +impl Default for CircuitProverStats { + fn default() -> Self { + let mut stats = HashMap::new(); + for circuit in (1..=15).chain(once(255)) { + stats.insert( + CircuitIdRoundTuple::new(circuit, 0), + JobCountStatistics::default(), + ); + } + for circuit in 3..=18 { + stats.insert( + CircuitIdRoundTuple::new(circuit, 1), + JobCountStatistics::default(), + ); + } + stats.insert( + 
CircuitIdRoundTuple::new(2, 2), + JobCountStatistics::default(), + ); + stats.insert( + CircuitIdRoundTuple::new(255, 3), + JobCountStatistics::default(), + ); + stats.insert( + CircuitIdRoundTuple::new(1, 4), + JobCountStatistics::default(), + ); + Self { + circuits_prover_stats: stats, + } + } +} + +/// DTO for communication between DAL and prover_job_monitor. +/// Represents an entry -- count (queued & in progress) of jobs (circuit_id, aggregation_round) for a given protocol version. +#[derive(Debug)] +pub struct CircuitProverStatsEntry { + circuit_id_round_tuple: CircuitIdRoundTuple, + protocol_semantic_version: ProtocolSemanticVersion, + job_count_statistics: JobCountStatistics, +} + +impl CircuitProverStatsEntry { + pub fn new( + circuit_id: i16, + aggregation_round: i16, + protocol_version: i32, + protocol_version_patch: i32, + status: &str, + count: i64, + ) -> Self { + let mut queued = 0; + let mut in_progress = 0; + match status { + "queued" => queued = count as usize, + "in_progress" => in_progress = count as usize, + _ => unreachable!("received {:?}, expected only 'queued'/'in_progress' from DB as part of query filter", status), + }; + + let job_count_statistics = JobCountStatistics { + queued, + in_progress, + }; + let protocol_semantic_version = ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(protocol_version as u16) + .expect("received protocol version is broken"), + VersionPatch(protocol_version_patch as u32), + ); + + // BEWARE, HERE BE DRAGONS. + // In database, the `circuit_id` stored is the circuit for which the aggregation is done, + // not the circuit which is running. + // There is a single node level aggregation circuit, which is circuit 2. + // This can aggregate multiple leaf nodes (which may belong to different circuits). + // This "conversion" is a forced hacky way to use `circuit_id` 2 for nodes. + // A proper fix will be later provided to solve this once new auto-scaler is in place. + let circuit_id = if aggregation_round == 2 { + 2 + } else { + circuit_id as u8 + }; + let circuit_id_round_tuple = CircuitIdRoundTuple::new(circuit_id, aggregation_round as u8); + CircuitProverStatsEntry { + circuit_id_round_tuple, + protocol_semantic_version, + job_count_statistics, + } + } +} + #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, Hash)] pub struct JobIdentifiers { pub circuit_id: u8, diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 5633fa3e10d..6e73d9f5fac 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -15,7 +15,9 @@ use std::{ pub use ethabi::{ self, - ethereum_types::{Address, Bloom as H2048, H128, H160, H256, H512, H520, H64, U128, U256, U64}, + ethereum_types::{ + Address, Bloom, BloomInput, H128, H160, H256, H512, H520, H64, U128, U256, U64, + }, }; use serde::{de, Deserialize, Deserializer, Serialize}; diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index edaad3798e8..7eb67144860 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -52,6 +52,8 @@ pub struct StuckJobs { pub status: String, pub attempts: u64, pub circuit_id: Option, + pub picked_by: Option, + pub error: Option, } // TODO (PLA-774): Redundant structure, should be replaced with `std::net::SocketAddr`. 
@@ -260,6 +262,11 @@ pub struct ProverJobFriInfo { pub picked_by: Option, } +pub trait Stallable { + fn get_status(&self) -> WitnessJobStatus; + fn get_attempts(&self) -> u32; +} + #[derive(Debug, Clone)] pub struct BasicWitnessGeneratorJobInfo { pub l1_batch_number: L1BatchNumber, @@ -275,6 +282,16 @@ pub struct BasicWitnessGeneratorJobInfo { pub picked_by: Option, } +impl Stallable for BasicWitnessGeneratorJobInfo { + fn get_status(&self) -> WitnessJobStatus { + self.status.clone() + } + + fn get_attempts(&self) -> u32 { + self.attempts + } +} + #[derive(Debug, Clone)] pub struct LeafWitnessGeneratorJobInfo { pub id: u32, @@ -293,6 +310,16 @@ pub struct LeafWitnessGeneratorJobInfo { pub picked_by: Option, } +impl Stallable for LeafWitnessGeneratorJobInfo { + fn get_status(&self) -> WitnessJobStatus { + self.status.clone() + } + + fn get_attempts(&self) -> u32 { + self.attempts + } +} + #[derive(Debug, Clone)] pub struct NodeWitnessGeneratorJobInfo { pub id: u32, @@ -312,6 +339,16 @@ pub struct NodeWitnessGeneratorJobInfo { pub picked_by: Option, } +impl Stallable for NodeWitnessGeneratorJobInfo { + fn get_status(&self) -> WitnessJobStatus { + self.status.clone() + } + + fn get_attempts(&self) -> u32 { + self.attempts + } +} + #[derive(Debug, Clone)] pub struct RecursionTipWitnessGeneratorJobInfo { pub l1_batch_number: L1BatchNumber, @@ -327,6 +364,16 @@ pub struct RecursionTipWitnessGeneratorJobInfo { pub picked_by: Option, } +impl Stallable for RecursionTipWitnessGeneratorJobInfo { + fn get_status(&self) -> WitnessJobStatus { + self.status.clone() + } + + fn get_attempts(&self) -> u32 { + self.attempts + } +} + #[derive(Debug, Clone)] pub struct SchedulerWitnessGeneratorJobInfo { pub l1_batch_number: L1BatchNumber, @@ -342,6 +389,16 @@ pub struct SchedulerWitnessGeneratorJobInfo { pub picked_by: Option, } +impl Stallable for SchedulerWitnessGeneratorJobInfo { + fn get_status(&self) -> WitnessJobStatus { + self.status.clone() + } + + fn get_attempts(&self) -> u32 { + self.attempts + } +} + #[derive(Debug, EnumString, Display, Clone)] pub enum ProofCompressionJobStatus { #[strum(serialize = "queued")] diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index 9bc10c8ab36..ecbe73f785b 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -13,7 +13,7 @@ use serde::{ }; use serde_json::Value; -use crate::{H160, H2048, H256, U256, U64}; +use crate::{Bloom, H160, H256, U256, U64}; pub mod contract; #[cfg(test)] @@ -389,7 +389,7 @@ pub struct BlockHeader { pub extra_data: Bytes, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Timestamp pub timestamp: U256, /// Difficulty @@ -441,7 +441,7 @@ pub struct Block { pub extra_data: Bytes, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: Option, + pub logs_bloom: Option, /// Timestamp pub timestamp: U256, /// Difficulty @@ -727,7 +727,7 @@ pub struct TransactionReceipt { pub root: Option, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Transaction type, Some(1) for AccessList transaction, None for Legacy #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub transaction_type: Option, diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index e932cd9819b..7e6ef2244cb 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -24,7 +24,7 @@ impl 
EthConfig { Self { sender: Some(SenderConfig { aggregated_proof_sizes: vec![1], - wait_confirmations: Some(1), + wait_confirmations: Some(10), tx_poll_period: 1, aggregate_tx_poll_period: 1, max_txs_in_flight: 30, @@ -40,6 +40,8 @@ impl EthConfig { l1_batch_min_age_before_execute_seconds: None, max_acceptable_priority_fee_in_gwei: 100000000000, pubdata_sending_mode: PubdataSendingMode::Calldata, + tx_aggregation_paused: false, + tx_aggregation_only_prove_and_execute: false, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 1000000000, @@ -119,6 +121,12 @@ pub struct SenderConfig { /// The mode in which we send pubdata: Calldata, Blobs or Custom (DA layers, Object Store, etc.) pub pubdata_sending_mode: PubdataSendingMode, + /// special mode specifically for gateway migration to allow all inflight txs to be processed + #[serde(default = "SenderConfig::default_tx_aggregation_paused")] + pub tx_aggregation_paused: bool, + /// special mode specifically for gateway migration to decrease number of non-executed batches + #[serde(default = "SenderConfig::default_tx_aggregation_only_prove_and_execute")] + pub tx_aggregation_only_prove_and_execute: bool, } impl SenderConfig { @@ -153,6 +161,13 @@ impl SenderConfig { .ok() .map(|pk| pk.parse().unwrap()) } + + const fn default_tx_aggregation_paused() -> bool { + false + } + const fn default_tx_aggregation_only_prove_and_execute() -> bool { + false + } } #[derive(Debug, Deserialize, Copy, Clone, PartialEq, Default)] diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 3e6b05d8003..38ffd3d45fa 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -6,6 +6,7 @@ use crate::{ da_dispatcher::DADispatcherConfig, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, + prover_job_monitor::ProverJobMonitorConfig, pruning::PruningConfig, snapshot_recovery::SnapshotRecoveryConfig, vm_runner::{BasicWitnessInputProducerConfig, ProtectiveReadsWriterConfig}, @@ -33,7 +34,7 @@ pub struct GeneralConfig { pub prover_gateway: Option, pub witness_vector_generator: Option, pub prover_group_config: Option, - pub witness_generator: Option, + pub witness_generator_config: Option, pub prometheus_config: Option, pub proof_data_handler_config: Option, pub db_config: Option, @@ -52,4 +53,5 @@ pub struct GeneralConfig { pub consensus_config: Option, pub external_proof_integration_api_config: Option, pub experimental_vm_config: Option, + pub prover_job_monitor_config: Option, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 0ecd8ee0df9..b213060f7ce 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -22,6 +22,7 @@ pub use self::{ object_store::ObjectStoreConfig, observability::{ObservabilityConfig, OpentelemetryConfig}, proof_data_handler::ProofDataHandlerConfig, + prover_job_monitor::ProverJobMonitorConfig, pruning::PruningConfig, secrets::{DatabaseSecrets, L1Secrets, Secrets}, snapshot_recovery::SnapshotRecoveryConfig, @@ -57,6 +58,7 @@ pub mod house_keeper; pub mod object_store; pub mod observability; pub mod proof_data_handler; +pub mod prover_job_monitor; pub mod pruning; pub mod secrets; pub mod snapshot_recovery; diff --git a/core/lib/config/src/configs/prover_job_monitor.rs b/core/lib/config/src/configs/prover_job_monitor.rs new file mode 100644 index 00000000000..c16b1db81b7 --- /dev/null +++ b/core/lib/config/src/configs/prover_job_monitor.rs @@ -0,0 +1,185 @@ 
+use std::time::Duration;
+
+use serde::{Deserialize, Serialize};
+
+/// Config used for running ProverJobMonitor.
+/// It handles configuration for setup of the binary (like database connections, prometheus) and configuration for the jobs being run.
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct ProverJobMonitorConfig {
+    /// Port for prometheus metrics connection.
+    pub prometheus_port: u16,
+    /// Maximum number of database connections per pool.
+    /// In a balanced system it should match the number of Tasks run by ProverJobMonitor.
+    /// If lower, components will wait on one another for a connection.
+    /// If higher, the database will use more resources for idle connections (which drains DB resources needed for other components in Prover Subsystems).
+    pub max_db_connections: u32,
+    /// Amount of time ProverJobMonitor will wait for all its tasks to finish.
+    #[serde(default = "ProverJobMonitorConfig::default_graceful_shutdown_timeout_ms")]
+    pub graceful_shutdown_timeout_ms: u64,
+    /// The interval between runs for GPU Prover Archiver.
+    #[serde(default = "ProverJobMonitorConfig::default_gpu_prover_archiver_run_interval_ms")]
+    pub gpu_prover_archiver_run_interval_ms: u64,
+    /// The amount of time after which 'dead' provers can be archived.
+    #[serde(
+        default = "ProverJobMonitorConfig::default_gpu_prover_archiver_archive_prover_after_ms"
+    )]
+    pub gpu_prover_archiver_archive_prover_after_ms: u64,
+    /// The interval between runs for Prover Jobs Archiver.
+    #[serde(default = "ProverJobMonitorConfig::default_prover_jobs_archiver_run_interval_ms")]
+    pub prover_jobs_archiver_run_interval_ms: u64,
+    /// The amount of time after which completed jobs (that belong to completed batches) can be archived.
+    #[serde(
+        default = "ProverJobMonitorConfig::default_prover_jobs_archiver_archive_jobs_after_ms"
+    )]
+    pub prover_jobs_archiver_archive_jobs_after_ms: u64,
+    /// The interval between runs for Proof Compressor Job Requeuer.
+    #[serde(
+        default = "ProverJobMonitorConfig::default_proof_compressor_job_requeuer_run_interval_ms"
+    )]
+    pub proof_compressor_job_requeuer_run_interval_ms: u64,
+    /// The interval between runs for Prover Job Requeuer.
+    #[serde(default = "ProverJobMonitorConfig::default_prover_job_requeuer_run_interval_ms")]
+    pub prover_job_requeuer_run_interval_ms: u64,
+    /// The interval between runs for Witness Generator Job Requeuer.
+    #[serde(
+        default = "ProverJobMonitorConfig::default_witness_generator_job_requeuer_run_interval_ms"
+    )]
+    pub witness_generator_job_requeuer_run_interval_ms: u64,
+    /// The interval between runs for Proof Compressor Queue Reporter.
+    #[serde(
+        default = "ProverJobMonitorConfig::default_proof_compressor_queue_reporter_run_interval_ms"
+    )]
+    pub proof_compressor_queue_reporter_run_interval_ms: u64,
+    /// The interval between runs for Prover Queue Reporter.
+    #[serde(default = "ProverJobMonitorConfig::default_prover_queue_reporter_run_interval_ms")]
+    pub prover_queue_reporter_run_interval_ms: u64,
+    /// The interval between runs for Witness Generator Queue Reporter.
+    #[serde(
+        default = "ProverJobMonitorConfig::default_witness_generator_queue_reporter_run_interval_ms"
+    )]
+    pub witness_generator_queue_reporter_run_interval_ms: u64,
+    /// The interval between runs for Witness Job Queuer.
+    #[serde(default = "ProverJobMonitorConfig::default_witness_job_queuer_run_interval_ms")]
+    pub witness_job_queuer_run_interval_ms: u64,
+}
+
+impl ProverJobMonitorConfig {
+    /// Default graceful shutdown timeout -- 5 seconds
+    pub fn default_graceful_shutdown_timeout_ms() -> u64 {
+        5_000
+    }
+
+    /// Amount of time ProverJobMonitor will wait for all its tasks to finish.
+    pub fn graceful_shutdown_timeout(&self) -> Duration {
+        Duration::from_millis(self.graceful_shutdown_timeout_ms)
+    }
+
+    /// The interval between runs for GPU Prover Archiver.
+    pub fn gpu_prover_archiver_run_interval(&self) -> Duration {
+        Duration::from_millis(self.gpu_prover_archiver_run_interval_ms)
+    }
+
+    /// Default gpu_prover_archiver_run_interval_ms -- 1 day
+    pub fn default_gpu_prover_archiver_run_interval_ms() -> u64 {
+        86_400_000
+    }
+
+    /// The amount of time after which 'dead' provers can be archived.
+    pub fn archive_gpu_prover_duration(&self) -> Duration {
+        Duration::from_millis(self.gpu_prover_archiver_archive_prover_after_ms)
+    }
+
+    /// Default gpu_prover_archiver_archive_prover_after_ms -- 2 days
+    pub fn default_gpu_prover_archiver_archive_prover_after_ms() -> u64 {
+        172_800_000
+    }
+
+    /// The interval between runs for Prover Jobs Archiver.
+    pub fn prover_jobs_archiver_run_interval(&self) -> Duration {
+        Duration::from_millis(self.prover_jobs_archiver_run_interval_ms)
+    }
+
+    /// Default prover_jobs_archiver_run_interval_ms -- 30 minutes
+    pub fn default_prover_jobs_archiver_run_interval_ms() -> u64 {
+        1_800_000
+    }
+
+    /// The amount of time after which completed jobs (that belong to completed batches) can be archived.
+    pub fn archive_prover_jobs_duration(&self) -> Duration {
+        Duration::from_millis(self.prover_jobs_archiver_archive_jobs_after_ms)
+    }
+
+    /// Default prover_jobs_archiver_archive_jobs_after_ms -- 2 days
+    pub fn default_prover_jobs_archiver_archive_jobs_after_ms() -> u64 {
+        172_800_000
+    }
+
+    /// The interval between runs for Proof Compressor Job Requeuer.
+    pub fn proof_compressor_job_requeuer_run_interval(&self) -> Duration {
+        Duration::from_millis(self.proof_compressor_job_requeuer_run_interval_ms)
+    }
+
+    /// Default proof_compressor_job_requeuer_run_interval_ms -- 10 seconds
+    pub fn default_proof_compressor_job_requeuer_run_interval_ms() -> u64 {
+        10_000
+    }
+
+    /// The interval between runs for Prover Job Requeuer.
+    pub fn prover_job_requeuer_run_interval(&self) -> Duration {
+        Duration::from_millis(self.prover_job_requeuer_run_interval_ms)
+    }
+
+    /// Default prover_job_requeuer_run_interval_ms -- 10 seconds
+    pub fn default_prover_job_requeuer_run_interval_ms() -> u64 {
+        10_000
+    }
+
+    /// The interval between runs for Witness Generator Job Requeuer.
+    pub fn witness_generator_job_requeuer_run_interval(&self) -> Duration {
+        Duration::from_millis(self.witness_generator_job_requeuer_run_interval_ms)
+    }
+
+    /// Default witness_generator_job_requeuer_run_interval_ms -- 10 seconds
+    pub fn default_witness_generator_job_requeuer_run_interval_ms() -> u64 {
+        10_000
+    }
+
+    /// The interval between runs for Proof Compressor Queue Reporter.
+    pub fn proof_compressor_queue_reporter_run_interval(&self) -> Duration {
+        Duration::from_millis(self.proof_compressor_queue_reporter_run_interval_ms)
+    }
+
+    /// Default proof_compressor_queue_reporter_run_interval_ms -- 10 seconds
+    pub fn default_proof_compressor_queue_reporter_run_interval_ms() -> u64 {
+        10_000
+    }
+
+    /// The interval between runs for Prover Queue Reporter.
+ pub fn prover_queue_reporter_run_interval(&self) -> Duration { + Duration::from_millis(self.prover_queue_reporter_run_interval_ms) + } + + /// Default prover_queue_reporter_run_interval_ms -- 10 seconds + pub fn default_prover_queue_reporter_run_interval_ms() -> u64 { + 10_000 + } + + /// The interval between runs for Witness Generator Queue Reporter. + pub fn witness_generator_queue_reporter_run_interval(&self) -> Duration { + Duration::from_millis(self.witness_generator_queue_reporter_run_interval_ms) + } + + /// Default witness_generator_queue_reporter_run_interval_ms -- 10 seconds + pub fn default_witness_generator_queue_reporter_run_interval_ms() -> u64 { + 10_000 + } + + /// The interval between runs for Witness Job Queuer. + pub fn witness_job_queuer_run_interval(&self) -> Duration { + Duration::from_millis(self.witness_job_queuer_run_interval_ms) + } + + /// Default witness_job_queuer_run_interval_ms -- 10 seconds + pub fn default_witness_job_queuer_run_interval_ms() -> u64 { + 10_000 + } +} diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 3f548ac1c80..632030e8f1d 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -407,6 +407,8 @@ impl Distribution for EncodeDist { l1_batch_min_age_before_execute_seconds: self.sample(rng), max_acceptable_priority_fee_in_gwei: self.sample(rng), pubdata_sending_mode: PubdataSendingMode::Calldata, + tx_aggregation_paused: false, + tx_aggregation_only_prove_and_execute: false, } } } @@ -1055,6 +1057,30 @@ impl Distribution for EncodeDist { + fn sample( + &self, + rng: &mut R, + ) -> configs::prover_job_monitor::ProverJobMonitorConfig { + configs::prover_job_monitor::ProverJobMonitorConfig { + prometheus_port: self.sample(rng), + max_db_connections: self.sample(rng), + graceful_shutdown_timeout_ms: self.sample(rng), + gpu_prover_archiver_run_interval_ms: self.sample(rng), + gpu_prover_archiver_archive_prover_after_ms: self.sample(rng), + prover_jobs_archiver_run_interval_ms: self.sample(rng), + prover_jobs_archiver_archive_jobs_after_ms: self.sample(rng), + proof_compressor_job_requeuer_run_interval_ms: self.sample(rng), + prover_job_requeuer_run_interval_ms: self.sample(rng), + witness_generator_job_requeuer_run_interval_ms: self.sample(rng), + proof_compressor_queue_reporter_run_interval_ms: self.sample(rng), + prover_queue_reporter_run_interval_ms: self.sample(rng), + witness_generator_queue_reporter_run_interval_ms: self.sample(rng), + witness_job_queuer_run_interval_ms: self.sample(rng), + } + } +} + impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::GeneralConfig { configs::GeneralConfig { @@ -1071,7 +1097,7 @@ impl Distribution for EncodeDist { prover_gateway: self.sample(rng), witness_vector_generator: self.sample(rng), prover_group_config: self.sample(rng), - witness_generator: self.sample(rng), + witness_generator_config: self.sample(rng), prometheus_config: self.sample(rng), proof_data_handler_config: self.sample(rng), db_config: self.sample(rng), @@ -1090,6 +1116,7 @@ impl Distribution for EncodeDist { consensus_config: self.sample(rng), external_proof_integration_api_config: self.sample(rng), experimental_vm_config: self.sample(rng), + prover_job_monitor_config: self.sample(rng), } } } diff --git a/core/lib/dal/.sqlx/query-04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae.json b/core/lib/dal/.sqlx/query-04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae.json new file mode 100644 index 00000000000..160c20d3988 --- 
/dev/null +++ b/core/lib/dal/.sqlx/query-04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n (logs_bloom IS NOT NULL) AS \"logs_bloom_not_null!\"\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "logs_bloom_not_null!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "04e03884a6bb62b28b18f97007e921d3c9a2f3e156ed8415ffc67c274e773fae" +} diff --git a/core/lib/dal/.sqlx/query-2a2680234c38904e5c19df45193a8c13d04079683e09c65f7f4e76a9987e2ab4.json b/core/lib/dal/.sqlx/query-0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a.json similarity index 81% rename from core/lib/dal/.sqlx/query-2a2680234c38904e5c19df45193a8c13d04079683e09c65f7f4e76a9987e2ab4.json rename to core/lib/dal/.sqlx/query-0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a.json index 8b984f4939a..cdf425de713 100644 --- a/core/lib/dal/.sqlx/query-2a2680234c38904e5c19df45193a8c13d04079683e09c65f7f4e76a9987e2ab4.json +++ b/core/lib/dal/.sqlx/query-0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n eth_txs (\n raw_tx,\n nonce,\n tx_type,\n contract_address,\n predicted_gas_cost,\n created_at,\n updated_at,\n from_addr,\n blob_sidecar\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7)\n RETURNING\n *\n ", + "query": "\n INSERT INTO\n eth_txs (\n raw_tx,\n nonce,\n tx_type,\n contract_address,\n predicted_gas_cost,\n created_at,\n updated_at,\n from_addr,\n blob_sidecar,\n is_gateway\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, $8)\n RETURNING\n *\n ", "describe": { "columns": [ { @@ -72,6 +72,11 @@ "ordinal": 13, "name": "blob_sidecar", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "is_gateway", + "type_info": "Bool" } ], "parameters": { @@ -82,7 +87,8 @@ "Text", "Int8", "Bytea", - "Bytea" + "Bytea", + "Bool" ] }, "nullable": [ @@ -99,8 +105,9 @@ true, false, true, - true + true, + false ] }, - "hash": "2a2680234c38904e5c19df45193a8c13d04079683e09c65f7f4e76a9987e2ab4" + "hash": "0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a" } diff --git a/core/lib/dal/.sqlx/query-13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd.json b/core/lib/dal/.sqlx/query-13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd.json new file mode 100644 index 00000000000..45b58a1c833 --- /dev/null +++ b/core/lib/dal/.sqlx/query-13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd.json @@ -0,0 +1,53 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n miniblock_number\n FROM\n events\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ORDER BY\n miniblock_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "address", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "topic1", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "topic2", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "topic3", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "topic4", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "miniblock_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false + ] + }, + "hash": 
"13b09ea7749530884233add59dee9906a81580f252b2260bfdaa46a25f45d1cd" +} diff --git a/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json b/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json new file mode 100644 index 00000000000..7582e0f64e7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE miniblocks\n SET\n logs_bloom = data.logs_bloom\n FROM\n (\n SELECT\n UNNEST($1::BIGINT[]) AS number,\n UNNEST($2::BYTEA[]) AS logs_bloom\n ) AS data\n WHERE\n miniblocks.number = data.number\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array", + "ByteaArray" + ] + }, + "nullable": [] + }, + "hash": "29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f" +} diff --git a/core/lib/dal/.sqlx/query-f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318.json b/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json similarity index 87% rename from core/lib/dal/.sqlx/query-f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318.json rename to core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json index 8981f7e8a08..26a3458bff9 100644 --- a/core/lib/dal/.sqlx/query-f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318.json +++ b/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": "gas_limit", "type_info": "Int8" + }, + { + "ordinal": 16, + "name": "logs_bloom", + "type_info": "Bytea" } ], "parameters": { @@ -103,8 +108,9 @@ true, false, true, + true, true ] }, - "hash": "f35d539db19ed626a2e0b537468bcef1260bd43f7eee2710dfbdeed1a0228318" + "hash": "39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded" } diff --git a/core/lib/dal/.sqlx/query-8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18.json b/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json similarity index 88% rename from core/lib/dal/.sqlx/query-8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18.json rename to core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json index a8a811f2580..74a6187e644 100644 --- a/core/lib/dal/.sqlx/query-8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18.json +++ b/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json @@ -1,6 +1,6 @@ { 
"db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": "gas_limit", "type_info": "Int8" + }, + { + "ordinal": 16, + "name": "logs_bloom", + "type_info": "Bytea" } ], "parameters": { @@ -105,8 +110,9 @@ true, false, true, + true, true ] }, - "hash": "8998ddd82cf15feb671b0d4efc6f7496667ce703e30d1bf2e0fb5a66fb0a1d18" + "hash": "45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d" } diff --git a/core/lib/dal/.sqlx/query-482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e.json b/core/lib/dal/.sqlx/query-482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e.json new file mode 100644 index 00000000000..f0fca373443 --- /dev/null +++ b/core/lib/dal/.sqlx/query-482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE miniblocks\n SET\n logs_bloom = NULL\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "482bee9b383b17bddf819b977a012e49a65da26212e6d676abffb9d137aa3c2e" +} diff --git a/core/lib/dal/.sqlx/query-e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf.json b/core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json similarity index 63% rename from core/lib/dal/.sqlx/query-e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf.json rename to core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json index 580a5370c89..4ea4aea2ea6 100644 --- a/core/lib/dal/.sqlx/query-e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf.json +++ b/core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.hash AS \"block_hash\",\n miniblocks.number AS \"block_number\",\n prev_miniblock.hash AS \"parent_hash?\",\n miniblocks.timestamp AS \"block_timestamp\",\n miniblocks.base_fee_per_gas AS \"base_fee_per_gas\",\n miniblocks.gas_limit AS \"block_gas_limit?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"transaction_refunded_gas?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number > $1\n ORDER BY\n miniblocks.number ASC,\n transactions.index_in_block ASC\n ", + "query": "\n SELECT\n miniblocks.hash AS \"block_hash\",\n miniblocks.number AS \"block_number\",\n prev_miniblock.hash AS \"parent_hash?\",\n miniblocks.timestamp AS \"block_timestamp\",\n miniblocks.base_fee_per_gas AS 
\"base_fee_per_gas\",\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom AS \"block_logs_bloom?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"transaction_refunded_gas?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number > $1\n ORDER BY\n miniblocks.number ASC,\n transactions.index_in_block ASC\n ", "describe": { "columns": [ { @@ -35,11 +35,16 @@ }, { "ordinal": 6, + "name": "block_logs_bloom?", + "type_info": "Bytea" + }, + { + "ordinal": 7, "name": "transaction_gas_limit?", "type_info": "Numeric" }, { - "ordinal": 7, + "ordinal": 8, "name": "transaction_refunded_gas?", "type_info": "Int8" } @@ -57,8 +62,9 @@ false, true, true, + true, false ] }, - "hash": "e28e052dbd306ba408dd26e01c38176f2f9dbba45761a2117c185912d1e07bdf" + "hash": "4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd" } diff --git a/core/lib/dal/.sqlx/query-4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a.json b/core/lib/dal/.sqlx/query-4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a.json new file mode 100644 index 00000000000..e980f08b0da --- /dev/null +++ b/core/lib/dal/.sqlx/query-4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n miniblocks\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "4d15a3d05fb4819a6ba7532504ea342c80f54d844064121feaef9d7143e9ba7a" +} diff --git a/core/lib/dal/.sqlx/query-5b7d2612dd2dd064ea0095b40669754ed7219a77459ef40cd99d7d4d0749e538.json b/core/lib/dal/.sqlx/query-5b7d2612dd2dd064ea0095b40669754ed7219a77459ef40cd99d7d4d0749e538.json new file mode 100644 index 00000000000..88bac1a3602 --- /dev/null +++ b/core/lib/dal/.sqlx/query-5b7d2612dd2dd064ea0095b40669754ed7219a77459ef40cd99d7d4d0749e538.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COUNT(*)\n FROM\n eth_txs\n WHERE\n confirmed_eth_tx_history_id IS NULL\n AND is_gateway = FALSE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "5b7d2612dd2dd064ea0095b40669754ed7219a77459ef40cd99d7d4d0749e538" +} diff --git a/core/lib/dal/.sqlx/query-6692ff6c0fbb2fc94f5cd2837a43ce80f9b2b27758651ccfc09df61a4ae8a363.json b/core/lib/dal/.sqlx/query-6692ff6c0fbb2fc94f5cd2837a43ce80f9b2b27758651ccfc09df61a4ae8a363.json index 985f998b439..49578cd67be 100644 --- a/core/lib/dal/.sqlx/query-6692ff6c0fbb2fc94f5cd2837a43ce80f9b2b27758651ccfc09df61a4ae8a363.json +++ b/core/lib/dal/.sqlx/query-6692ff6c0fbb2fc94f5cd2837a43ce80f9b2b27758651ccfc09df61a4ae8a363.json @@ -72,6 +72,11 @@ "ordinal": 13, "name": "blob_sidecar", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "is_gateway", + "type_info": "Bool" } ], "parameters": { @@ -93,7 +98,8 @@ true, false, true, - true + true, + false ] }, "hash": "6692ff6c0fbb2fc94f5cd2837a43ce80f9b2b27758651ccfc09df61a4ae8a363" diff --git a/core/lib/dal/.sqlx/query-6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc.json b/core/lib/dal/.sqlx/query-a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0.json similarity 
index 70% rename from core/lib/dal/.sqlx/query-6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc.json rename to core/lib/dal/.sqlx/query-a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0.json index 71318c9a102..28058b9e42a 100644 --- a/core/lib/dal/.sqlx/query-6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc.json +++ b/core/lib/dal/.sqlx/query-a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL\n AND confirmed_eth_tx_history_id IS NULL\n AND id <= (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $1\n )\n ORDER BY\n id\n ", + "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL\n AND confirmed_eth_tx_history_id IS NULL\n AND is_gateway = $2\n AND id <= (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $1\n AND is_gateway = $2\n )\n ORDER BY\n id\n ", "describe": { "columns": [ { @@ -72,11 +72,17 @@ "ordinal": 13, "name": "blob_sidecar", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "is_gateway", + "type_info": "Bool" } ], "parameters": { "Left": [ - "Bytea" + "Bytea", + "Bool" ] }, "nullable": [ @@ -93,8 +99,9 @@ true, false, true, - true + true, + false ] }, - "hash": "6bb5eab89be2b08a08c00b5cd8d725208b0ecfe8065c8f893ff38c49072a21fc" + "hash": "a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0" } diff --git a/core/lib/dal/.sqlx/query-b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2.json b/core/lib/dal/.sqlx/query-b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2.json new file mode 100644 index 00000000000..30a22873196 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(number) AS \"max?\"\n FROM\n miniblocks\n WHERE\n logs_bloom IS NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "max?", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "b8d5c838533b8f8ce75c39b45995048f6d9a7817042dcbf64d040b6c916fe8f2" +} diff --git a/core/lib/dal/.sqlx/query-e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f.json b/core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json similarity index 56% rename from core/lib/dal/.sqlx/query-e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f.json rename to core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json index 4de23050455..9ae9d2e50cd 100644 --- a/core/lib/dal/.sqlx/query-e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f.json +++ b/core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n 
l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -20,10 +20,11 @@ "Int4", "Int8", "Int8", - "Int8" + "Int8", + "Bytea" ] }, "nullable": [] }, - "hash": "e296f3e9849910734196c91c2c35b0996a4274e793ed6a9b1bc7d687954f9a0f" + "hash": "c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046" } diff --git a/core/lib/dal/.sqlx/query-c988b8aa7708a4b76671a8454c3e9806a8cb4031f7c11c6890213d8693e7d385.json b/core/lib/dal/.sqlx/query-c988b8aa7708a4b76671a8454c3e9806a8cb4031f7c11c6890213d8693e7d385.json new file mode 100644 index 00000000000..c2d68b62c31 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c988b8aa7708a4b76671a8454c3e9806a8cb4031f7c11c6890213d8693e7d385.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n (logs_bloom IS NOT NULL) AS \"logs_bloom_not_null!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "logs_bloom_not_null!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "c988b8aa7708a4b76671a8454c3e9806a8cb4031f7c11c6890213d8693e7d385" +} diff --git a/core/lib/dal/.sqlx/query-d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a.json b/core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json similarity index 63% rename from core/lib/dal/.sqlx/query-d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a.json rename to core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json index c61299c0d21..36e56da404e 100644 --- a/core/lib/dal/.sqlx/query-d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a.json +++ b/core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.hash AS block_hash,\n miniblocks.number,\n miniblocks.l1_batch_number,\n miniblocks.timestamp,\n miniblocks.base_fee_per_gas,\n miniblocks.gas_limit AS \"block_gas_limit?\",\n prev_miniblock.hash AS \"parent_hash?\",\n l1_batches.timestamp AS \"l1_batch_timestamp?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"refunded_gas?\",\n transactions.hash AS \"tx_hash?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number = $1\n ORDER BY\n transactions.index_in_block ASC\n ", + "query": "\n SELECT\n miniblocks.hash AS block_hash,\n 
miniblocks.number,\n miniblocks.l1_batch_number,\n miniblocks.timestamp,\n miniblocks.base_fee_per_gas,\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom,\n prev_miniblock.hash AS \"parent_hash?\",\n l1_batches.timestamp AS \"l1_batch_timestamp?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"refunded_gas?\",\n transactions.hash AS \"tx_hash?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number = $1\n ORDER BY\n transactions.index_in_block ASC\n ", "describe": { "columns": [ { @@ -35,26 +35,31 @@ }, { "ordinal": 6, - "name": "parent_hash?", + "name": "logs_bloom", "type_info": "Bytea" }, { "ordinal": 7, + "name": "parent_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 8, "name": "l1_batch_timestamp?", "type_info": "Int8" }, { - "ordinal": 8, + "ordinal": 9, "name": "transaction_gas_limit?", "type_info": "Numeric" }, { - "ordinal": 9, + "ordinal": 10, "name": "refunded_gas?", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "tx_hash?", "type_info": "Bytea" } @@ -71,6 +76,7 @@ false, false, true, + true, false, false, true, @@ -78,5 +84,5 @@ false ] }, - "hash": "d3fa49388c38985d27a5f00bd6ff439cc6378a87a3ba999575dbdafca990cb9a" + "hash": "dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d" } diff --git a/core/lib/dal/.sqlx/query-4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed.json b/core/lib/dal/.sqlx/query-eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755.json similarity index 68% rename from core/lib/dal/.sqlx/query-4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed.json rename to core/lib/dal/.sqlx/query-eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755.json index 7297bcdcad2..fb6ea1d2d3e 100644 --- a/core/lib/dal/.sqlx/query-4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed.json +++ b/core/lib/dal/.sqlx/query-eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $2 -- can't just use equality as NULL != NULL\n AND id > (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $2\n )\n ORDER BY\n id\n LIMIT\n $1\n ", + "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $2 -- can't just use equality as NULL != NULL\n AND is_gateway = $3\n AND id > (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $2\n AND is_gateway = $3\n )\n ORDER BY\n id\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -72,12 +72,18 @@ "ordinal": 13, "name": "blob_sidecar", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "is_gateway", + "type_info": "Bool" } ], "parameters": { "Left": [ "Int8", - "Bytea" + "Bytea", + "Bool" ] }, "nullable": [ @@ -94,8 +100,9 @@ true, false, true, - true + true, + false ] }, - "hash": "4570e9ffd0b2973d0bc2986c391d0a59076dda4aa572ade2492f37e537fdf6ed" + "hash": 
"eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index c046b3d3b42..9c13eeb3014 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -14,6 +14,7 @@ links = "zksync_dal_proto" [dependencies] vise.workspace = true +zksync_vm_interface.workspace = true zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_contracts.workspace = true diff --git a/core/lib/dal/migrations/20240803083814_add_is_gateway_column_to_eth_txs.down.sql b/core/lib/dal/migrations/20240803083814_add_is_gateway_column_to_eth_txs.down.sql new file mode 100644 index 00000000000..02fbc8cb075 --- /dev/null +++ b/core/lib/dal/migrations/20240803083814_add_is_gateway_column_to_eth_txs.down.sql @@ -0,0 +1 @@ +ALTER TABLE eth_txs DROP COLUMN is_gateway; diff --git a/core/lib/dal/migrations/20240803083814_add_is_gateway_column_to_eth_txs.up.sql b/core/lib/dal/migrations/20240803083814_add_is_gateway_column_to_eth_txs.up.sql new file mode 100644 index 00000000000..af1ef835cf3 --- /dev/null +++ b/core/lib/dal/migrations/20240803083814_add_is_gateway_column_to_eth_txs.up.sql @@ -0,0 +1 @@ +ALTER TABLE eth_txs ADD COLUMN is_gateway BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.down.sql b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.down.sql new file mode 100644 index 00000000000..d6d67c3aa52 --- /dev/null +++ b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks + DROP COLUMN IF EXISTS logs_bloom; diff --git a/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.up.sql b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.up.sql new file mode 100644 index 00000000000..83eca63239f --- /dev/null +++ b/core/lib/dal/migrations/20240809130434_add-block-logs-bloom.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks + ADD COLUMN IF NOT EXISTS logs_bloom BYTEA; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 4f4b3e99ff7..1f4cc3b0b98 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -19,12 +19,12 @@ use zksync_types::{ BlockGasCount, L1BatchHeader, L1BatchStatistics, L1BatchTreeData, L2BlockHeader, StorageOracleInfo, }, - circuit::CircuitStatistic, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, l2_to_l1_log::UserL2ToL1Log, writes::TreeWrite, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, + Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, }; +use zksync_vm_interface::CircuitStatistic; pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptionalMetadata}; use crate::{ @@ -150,6 +150,22 @@ impl BlocksDal<'_, '_> { Ok(row.number.map(|num| L1BatchNumber(num as u32))) } + pub async fn get_earliest_l2_block_number(&mut self) -> DalResult> { + let row = sqlx::query!( + r#" + SELECT + MIN(number) AS "number" + FROM + miniblocks + "# + ) + .instrument("get_earliest_l2_block_number") + .fetch_one(self.storage) + .await?; + + Ok(row.number.map(|num| L2BlockNumber(num as u32))) + } + pub async fn get_last_l1_batch_number_with_tree_data( &mut self, ) -> DalResult> { @@ -691,6 +707,7 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, + logs_bloom, created_at, updated_at ) @@ -712,6 +729,7 @@ impl BlocksDal<'_, '_> { $14, $15, $16, + $17, NOW(), NOW() ) @@ -738,6 +756,7 @@ impl BlocksDal<'_, '_> { 
i64::from(l2_block_header.virtual_blocks), l2_block_header.batch_fee_input.fair_pubdata_price() as i64, l2_block_header.gas_limit as i64, + l2_block_header.logs_bloom.as_bytes(), ); instrumentation.with(query).execute(self.storage).await?; @@ -764,7 +783,8 @@ impl BlocksDal<'_, '_> { protocol_version, virtual_blocks, fair_pubdata_price, - gas_limit + gas_limit, + logs_bloom FROM miniblocks ORDER BY @@ -803,7 +823,8 @@ impl BlocksDal<'_, '_> { protocol_version, virtual_blocks, fair_pubdata_price, - gas_limit + gas_limit, + logs_bloom FROM miniblocks WHERE @@ -2334,6 +2355,103 @@ impl BlocksDal<'_, '_> { Ok(results.into_iter().map(L::from).collect()) } + + pub async fn has_l2_block_bloom(&mut self, l2_block_number: L2BlockNumber) -> DalResult { + let row = sqlx::query!( + r#" + SELECT + (logs_bloom IS NOT NULL) AS "logs_bloom_not_null!" + FROM + miniblocks + WHERE + number = $1 + "#, + i64::from(l2_block_number.0), + ) + .instrument("has_l2_block_bloom") + .fetch_optional(self.storage) + .await?; + + Ok(row.map(|row| row.logs_bloom_not_null).unwrap_or(false)) + } + + pub async fn has_last_l2_block_bloom(&mut self) -> DalResult { + let row = sqlx::query!( + r#" + SELECT + (logs_bloom IS NOT NULL) AS "logs_bloom_not_null!" + FROM + miniblocks + ORDER BY + number DESC + LIMIT + 1 + "#, + ) + .instrument("has_last_l2_block_bloom") + .fetch_optional(self.storage) + .await?; + + Ok(row.map(|row| row.logs_bloom_not_null).unwrap_or(false)) + } + + pub async fn get_max_l2_block_without_bloom(&mut self) -> DalResult> { + let row = sqlx::query!( + r#" + SELECT + MAX(number) AS "max?" + FROM + miniblocks + WHERE + logs_bloom IS NULL + "#, + ) + .instrument("get_max_l2_block_without_bloom") + .fetch_one(self.storage) + .await?; + + Ok(row.max.map(|n| L2BlockNumber(n as u32))) + } + + pub async fn range_update_logs_bloom( + &mut self, + from_l2_block: L2BlockNumber, + blooms: &[Bloom], + ) -> DalResult<()> { + if blooms.is_empty() { + return Ok(()); + } + + let to_l2_block = from_l2_block + (blooms.len() - 1) as u32; + let numbers: Vec<_> = (i64::from(from_l2_block.0)..=i64::from(to_l2_block.0)).collect(); + + let blooms = blooms + .iter() + .map(|blooms| blooms.as_bytes()) + .collect::>(); + sqlx::query!( + r#" + UPDATE miniblocks + SET + logs_bloom = data.logs_bloom + FROM + ( + SELECT + UNNEST($1::BIGINT[]) AS number, + UNNEST($2::BYTEA[]) AS logs_bloom + ) AS data + WHERE + miniblocks.number = data.number + "#, + &numbers, + &blooms as &[&[u8]], + ) + .instrument("range_update_logs_bloom") + .execute(self.storage) + .await?; + + Ok(()) + } } /// These methods should only be used for tests. 
@@ -2416,6 +2534,24 @@ impl BlocksDal<'_, '_> { .context("storage contains neither L2 blocks, nor snapshot recovery info")?; Ok(snapshot_recovery.protocol_version) } + + pub async fn drop_l2_block_bloom(&mut self, l2_block_number: L2BlockNumber) -> DalResult<()> { + sqlx::query!( + r#" + UPDATE miniblocks + SET + logs_bloom = NULL + WHERE + number = $1 + "#, + i64::from(l2_block_number.0) + ) + .instrument("drop_l2_block_bloom") + .with_arg("l2_block_number", &l2_block_number) + .execute(self.storage) + .await?; + Ok(()) + } } #[cfg(test)] @@ -2431,7 +2567,16 @@ mod tests { async fn save_mock_eth_tx(action_type: AggregatedActionType, conn: &mut Connection<'_, Core>) { conn.eth_sender_dal() - .save_eth_tx(1, vec![], action_type, Address::default(), 1, None, None) + .save_eth_tx( + 1, + vec![], + action_type, + Address::default(), + 1, + None, + None, + false, + ) .await .unwrap(); } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 13fa9070f82..36a4acc0a6d 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -7,11 +7,11 @@ use zksync_types::{ api, fee_model::BatchFeeInput, l2_to_l1_log::L2ToL1Log, - vm_trace::Call, web3::{BlockHeader, Bytes}, - L1BatchNumber, L2BlockNumber, ProtocolVersionId, H160, H2048, H256, U256, U64, + Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H160, H256, U256, U64, }; use zksync_utils::bigdecimal_to_u256; +use zksync_vm_interface::Call; use crate::{ models::{ @@ -44,6 +44,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.timestamp, miniblocks.base_fee_per_gas, miniblocks.gas_limit AS "block_gas_limit?", + miniblocks.logs_bloom, prev_miniblock.hash AS "parent_hash?", l1_batches.timestamp AS "l1_batch_timestamp?", transactions.gas_limit AS "transaction_gas_limit?", @@ -87,7 +88,10 @@ impl BlocksWeb3Dal<'_, '_> { .unwrap_or(i64::from(LEGACY_BLOCK_GAS_LIMIT)) as u64) .into(), - // TODO: include logs + logs_bloom: row + .logs_bloom + .map(|b| Bloom::from_slice(&b)) + .unwrap_or_default(), ..api::Block::default() } }); @@ -175,6 +179,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.timestamp AS "block_timestamp", miniblocks.base_fee_per_gas AS "base_fee_per_gas", miniblocks.gas_limit AS "block_gas_limit?", + miniblocks.logs_bloom AS "block_logs_bloom?", transactions.gas_limit AS "transaction_gas_limit?", transactions.refunded_gas AS "transaction_refunded_gas?" FROM @@ -219,7 +224,11 @@ impl BlocksWeb3Dal<'_, '_> { .into(), base_fee_per_gas: Some(bigdecimal_to_u256(row.base_fee_per_gas.clone())), extra_data: Bytes::default(), - logs_bloom: H2048::default(), + logs_bloom: row + .block_logs_bloom + .as_ref() + .map(|b| Bloom::from_slice(b)) + .unwrap_or_default(), timestamp: U256::from(row.block_timestamp), difficulty: U256::zero(), mix_hash: None, @@ -757,9 +766,9 @@ mod tests { use zksync_types::{ aggregated_operations::AggregatedActionType, block::{L2BlockHasher, L2BlockHeader}, - fee::TransactionExecutionMetrics, Address, L2BlockNumber, ProtocolVersion, ProtocolVersionId, }; + use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ @@ -968,6 +977,7 @@ mod tests { 0, None, None, + false, ) .await .unwrap(); diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index d8f28705421..8f05cb38177 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -454,7 +454,7 @@ impl ConsensusDal<'_, '_> { /// Gets a number of the last L1 batch that was inserted. 
It might have gaps before it, /// depending on the order in which votes have been collected over gossip by consensus. - pub async fn get_last_batch_certificate_number( + pub async fn last_batch_certificate_number( &mut self, ) -> anyhow::Result> { let row = sqlx::query!( @@ -465,7 +465,7 @@ impl ConsensusDal<'_, '_> { l1_batches_consensus "# ) - .instrument("get_last_batch_certificate_number") + .instrument("last_batch_certificate_number") .report_latency() .fetch_one(self.storage) .await?; @@ -480,7 +480,7 @@ impl ConsensusDal<'_, '_> { /// Number of L1 batch that the L2 block belongs to. /// None if the L2 block doesn't exist. - async fn batch_of_block( + pub async fn batch_of_block( &mut self, block: validator::BlockNumber, ) -> anyhow::Result> { @@ -535,9 +535,9 @@ impl ConsensusDal<'_, '_> { let Some(next_batch_to_attest) = async { // First batch that we don't have a certificate for. if let Some(last) = self - .get_last_batch_certificate_number() + .last_batch_certificate_number() .await - .context("get_last_batch_certificate_number()")? + .context("last_batch_certificate_number()")? { return Ok(Some(last + 1)); } @@ -669,7 +669,7 @@ mod tests { // Retrieve the latest certificate. let number = conn .consensus_dal() - .get_last_batch_certificate_number() + .last_batch_certificate_number() .await .unwrap() .unwrap(); diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs index 3045c84255e..194d85323b6 100644 --- a/core/lib/dal/src/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -12,10 +12,10 @@ use zksync_types::{ DeployContractCalldata, VerificationIncomingRequest, VerificationInfo, VerificationRequest, VerificationRequestStatus, }, - event::DEPLOY_EVENT_SIGNATURE, Address, CONTRACT_DEPLOYER_ADDRESS, }; use zksync_utils::address_to_h256; +use zksync_vm_interface::VmEvent; use crate::{models::storage_verification_request::StorageVerificationRequest, Core}; @@ -291,6 +291,7 @@ impl ContractVerificationDal<'_, '_> { address: Address, ) -> anyhow::Result, DeployContractCalldata)>> { let address_h256 = address_to_h256(&address); + let Some(row) = sqlx::query!( r#" SELECT @@ -323,7 +324,7 @@ impl ContractVerificationDal<'_, '_> { ) "#, CONTRACT_DEPLOYER_ADDRESS.as_bytes(), - DEPLOY_EVENT_SIGNATURE.as_bytes(), + VmEvent::DEPLOY_EVENT_SIGNATURE.as_bytes(), address_h256.as_bytes(), ) .fetch_optional(self.storage.conn()) diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index f1ff515f506..eb7e1cd642c 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -25,6 +25,7 @@ impl EthSenderDal<'_, '_> { pub async fn get_inflight_txs( &mut self, operator_address: Option
, + is_gateway: bool, ) -> sqlx::Result> { let txs = sqlx::query_as!( StorageEthTx, @@ -36,6 +37,7 @@ impl EthSenderDal<'_, '_> { WHERE from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL AND confirmed_eth_tx_history_id IS NULL + AND is_gateway = $2 AND id <= ( SELECT COALESCE(MAX(eth_tx_id), 0) @@ -45,17 +47,40 @@ impl EthSenderDal<'_, '_> { WHERE eth_txs_history.sent_at_block IS NOT NULL AND eth_txs.from_addr IS NOT DISTINCT FROM $1 + AND is_gateway = $2 ) ORDER BY id "#, operator_address.as_ref().map(|h160| h160.as_bytes()), + is_gateway ) .fetch_all(self.storage.conn()) .await?; Ok(txs.into_iter().map(|tx| tx.into()).collect()) } + pub async fn get_non_gateway_inflight_txs_count_for_gateway_migration( + &mut self, + ) -> sqlx::Result { + let count = sqlx::query!( + r#" + SELECT + COUNT(*) + FROM + eth_txs + WHERE + confirmed_eth_tx_history_id IS NULL + AND is_gateway = FALSE + "# + ) + .fetch_one(self.storage.conn()) + .await? + .count + .unwrap(); + Ok(count.try_into().unwrap()) + } + pub async fn get_eth_l1_batches(&mut self) -> sqlx::Result { struct EthTxRow { number: i64, @@ -132,6 +157,7 @@ impl EthSenderDal<'_, '_> { &mut self, limit: u64, operator_address: &Option
, + is_gateway: bool, ) -> sqlx::Result> { let txs = sqlx::query_as!( StorageEthTx, @@ -142,6 +168,7 @@ impl EthSenderDal<'_, '_> { eth_txs WHERE from_addr IS NOT DISTINCT FROM $2 -- can't just use equality as NULL != NULL + AND is_gateway = $3 AND id > ( SELECT COALESCE(MAX(eth_tx_id), 0) @@ -151,6 +178,7 @@ impl EthSenderDal<'_, '_> { WHERE eth_txs_history.sent_at_block IS NOT NULL AND eth_txs.from_addr IS NOT DISTINCT FROM $2 + AND is_gateway = $3 ) ORDER BY id @@ -159,6 +187,7 @@ impl EthSenderDal<'_, '_> { "#, limit as i64, operator_address.as_ref().map(|h160| h160.as_bytes()), + is_gateway ) .fetch_all(self.storage.conn()) .await?; @@ -202,6 +231,7 @@ impl EthSenderDal<'_, '_> { predicted_gas_cost: u32, from_address: Option
, blob_sidecar: Option, + is_gateway: bool, ) -> sqlx::Result { let address = format!("{:#x}", contract_address); let eth_tx = sqlx::query_as!( @@ -217,10 +247,11 @@ impl EthSenderDal<'_, '_> { created_at, updated_at, from_addr, - blob_sidecar + blob_sidecar, + is_gateway ) VALUES - ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7) + ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, $8) RETURNING * "#, @@ -232,6 +263,7 @@ impl EthSenderDal<'_, '_> { from_address.as_ref().map(Address::as_bytes), blob_sidecar.map(|sidecar| bincode::serialize(&sidecar) .expect("can always bincode serialize EthTxBlobSidecar; qed")), + is_gateway, ) .fetch_one(self.storage.conn()) .await?; diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index c2b296fc085..4050acf7135 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt}; +use std::{collections::HashMap, fmt, ops::RangeInclusive}; use sqlx::types::chrono::Utc; use zksync_db_connection::{ @@ -10,11 +10,11 @@ use zksync_db_connection::{ use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ api, - event::L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, tx::IncludedTxLocation, - Address, L1BatchNumber, L2BlockNumber, VmEvent, H256, + Address, L1BatchNumber, L2BlockNumber, H256, }; +use zksync_vm_interface::VmEvent; use crate::{ models::storage_event::{StorageL2ToL1Log, StorageWeb3Log}, @@ -278,7 +278,7 @@ impl EventsDal<'_, '_> { i64::from(from_l2_block.0), i64::from(to_l2_block.0), L1_MESSENGER_ADDRESS.as_bytes(), - L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE.as_bytes() + VmEvent::L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE.as_bytes() ) .instrument("get_l1_batch_raw_published_bytecode_hashes") .with_arg("from_l2_block", &from_l2_block) @@ -409,6 +409,47 @@ impl EventsDal<'_, '_> { .collect(); Ok(Some(events)) } + + pub async fn get_bloom_items_for_l2_blocks( + &mut self, + l2_block_range: RangeInclusive, + ) -> DalResult>>> { + let rows = sqlx::query!( + r#" + SELECT + address, + topic1, + topic2, + topic3, + topic4, + miniblock_number + FROM + events + WHERE + miniblock_number BETWEEN $1 AND $2 + ORDER BY + miniblock_number + "#, + i64::from(l2_block_range.start().0), + i64::from(l2_block_range.end().0), + ) + .instrument("get_bloom_items_for_l2_blocks") + .fetch_all(self.storage) + .await?; + + let mut items = HashMap::new(); + for row in rows { + let block = L2BlockNumber(row.miniblock_number as u32); + let vec: &mut Vec<_> = items.entry(block).or_default(); + + let iter = [row.address, row.topic1, row.topic2, row.topic3, row.topic4] + .into_iter() + .filter(|x| !x.is_empty()); + vec.extend(iter); + } + + Ok(items) + } } #[cfg(test)] diff --git a/core/lib/types/src/vm_trace.rs b/core/lib/dal/src/models/call.rs similarity index 55% rename from core/lib/types/src/vm_trace.rs rename to core/lib/dal/src/models/call.rs index 80a3eea92f6..3e81fbbeece 100644 --- a/core/lib/types/src/vm_trace.rs +++ b/core/lib/dal/src/models/call.rs @@ -1,24 +1,14 @@ -use std::fmt; +//! Legacy VM call representations. 
-use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use zksync_system_constants::BOOTLOADER_ADDRESS; - -use crate::{zk_evm_types::FarCallOpcode, Address, U256}; - -#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] -pub enum CallType { - #[serde(serialize_with = "far_call_type_to_u8")] - #[serde(deserialize_with = "far_call_type_from_u8")] - Call(FarCallOpcode), - Create, - NearCall, -} +use serde::{Deserialize, Serialize}; +use zksync_types::{Address, U256}; +use zksync_vm_interface::{Call, CallType}; /// Represents a call in the VM trace. /// This version of the call represents the call structure before the 1.5.0 protocol version, where /// all the gas-related fields were represented as `u32` instead of `u64`. #[derive(Clone, Serialize, Deserialize)] -pub struct LegacyCall { +pub(super) struct LegacyCall { /// Type of the call. pub r#type: CallType, /// Address of the caller. @@ -48,7 +38,7 @@ pub struct LegacyCall { /// Represents a call in the VM trace. /// This version has subcalls in the form of "new" calls. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct LegacyMixedCall { +pub(super) struct LegacyMixedCall { /// Type of the call. pub r#type: CallType, /// Address of the caller. @@ -75,44 +65,15 @@ pub struct LegacyMixedCall { pub calls: Vec, } -/// Represents a call in the VM trace. -#[derive(Clone, Serialize, Deserialize)] -pub struct Call { - /// Type of the call. - pub r#type: CallType, - /// Address of the caller. - pub from: Address, - /// Address of the callee. - pub to: Address, - /// Gas from the parent call. - pub parent_gas: u64, - /// Gas provided for the call. - pub gas: u64, - /// Gas used by the call. - pub gas_used: u64, - /// Value transferred. - pub value: U256, - /// Input data. - pub input: Vec, - /// Output data. - pub output: Vec, - /// Error message provided by vm or some unexpected errors. - pub error: Option, - /// Revert reason. - pub revert_reason: Option, - /// Subcalls. 
- pub calls: Vec, -} - impl From for Call { fn from(legacy_call: LegacyCall) -> Self { Self { r#type: legacy_call.r#type, from: legacy_call.from, to: legacy_call.to, - parent_gas: legacy_call.parent_gas as u64, - gas: legacy_call.gas as u64, - gas_used: legacy_call.gas_used as u64, + parent_gas: legacy_call.parent_gas.into(), + gas: legacy_call.gas.into(), + gas_used: legacy_call.gas_used.into(), value: legacy_call.value, input: legacy_call.input, output: legacy_call.output, @@ -129,9 +90,9 @@ impl From for Call { r#type: legacy_call.r#type, from: legacy_call.from, to: legacy_call.to, - parent_gas: legacy_call.parent_gas as u64, - gas: legacy_call.gas as u64, - gas_used: legacy_call.gas_used as u64, + parent_gas: legacy_call.parent_gas.into(), + gas: legacy_call.gas.into(), + gas_used: legacy_call.gas_used.into(), value: legacy_call.value, input: legacy_call.input, output: legacy_call.output, @@ -142,8 +103,8 @@ impl From for Call { } } -#[derive(Debug, Clone)] -pub struct LegacyCallConversionOverflowError; +#[derive(Debug)] +pub(super) struct LegacyCallConversionOverflowError; impl TryFrom for LegacyCall { type Error = LegacyCallConversionOverflowError; @@ -207,124 +168,6 @@ impl TryFrom for LegacyMixedCall { } } -impl Call { - pub fn new_high_level( - gas: u64, - gas_used: u64, - value: U256, - input: Vec, - output: Vec, - revert_reason: Option, - calls: Vec, - ) -> Self { - Self { - r#type: CallType::Call(FarCallOpcode::Normal), - from: Address::zero(), - to: BOOTLOADER_ADDRESS, - parent_gas: gas, - gas, - gas_used, - value, - input, - output, - error: None, - revert_reason, - calls, - } - } -} - -impl PartialEq for Call { - fn eq(&self, other: &Self) -> bool { - self.revert_reason == other.revert_reason - && self.input == other.input - && self.from == other.from - && self.to == other.to - && self.r#type == other.r#type - && self.value == other.value - && self.error == other.error - && self.output == other.output - && self.calls == other.calls - } -} - -fn far_call_type_from_u8<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let res = u8::deserialize(deserializer)?; - match res { - 0 => Ok(FarCallOpcode::Normal), - 1 => Ok(FarCallOpcode::Delegate), - 2 => Ok(FarCallOpcode::Mimic), - _ => Err(serde::de::Error::custom("Invalid FarCallOpcode")), - } -} - -fn far_call_type_to_u8(far_call_type: &FarCallOpcode, s: S) -> Result -where - S: Serializer, -{ - s.serialize_u8(*far_call_type as u8) -} - -impl Default for Call { - fn default() -> Self { - Self { - r#type: CallType::Call(FarCallOpcode::Normal), - from: Default::default(), - to: Default::default(), - parent_gas: 0, - gas: 0, - gas_used: 0, - value: Default::default(), - input: vec![], - output: vec![], - error: None, - revert_reason: None, - calls: vec![], - } - } -} - -impl fmt::Debug for Call { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Call") - .field("type", &self.r#type) - .field("to", &self.to) - .field("from", &self.from) - .field("parent_gas", &self.parent_gas) - .field("gas_used", &self.gas_used) - .field("gas", &self.gas) - .field("value", &self.value) - .field("input", &format_args!("{:?}", self.input)) - .field("output", &format_args!("{:?}", self.output)) - .field("error", &self.error) - .field("revert_reason", &format_args!("{:?}", self.revert_reason)) - .field("call_traces", &self.calls) - .finish() - } -} - -impl fmt::Debug for LegacyCall { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LegacyCall") - .field("type", &self.r#type) 
- .field("to", &self.to) - .field("from", &self.from) - .field("parent_gas", &self.parent_gas) - .field("gas_used", &self.gas_used) - .field("gas", &self.gas) - .field("value", &self.value) - .field("input", &format_args!("{:?}", self.input)) - .field("output", &format_args!("{:?}", self.output)) - .field("error", &self.error) - .field("revert_reason", &format_args!("{:?}", self.revert_reason)) - .field("call_traces", &self.calls) - .finish() - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index d22541620f2..a9690dcb799 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -3,6 +3,7 @@ use anyhow::Context as _; use zksync_db_connection::error::SqlxContext; use zksync_types::{ProtocolVersionId, H160, H256}; +mod call; pub mod storage_base_token_ratio; pub(crate) mod storage_data_availability; pub mod storage_eth_tx; diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index be8b4e4152b..34e14387ca6 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -10,7 +10,7 @@ use zksync_types::{ commitment::{L1BatchMetaParameters, L1BatchMetadata}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H2048, H256, + Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, }; /// This is the gas limit that was used inside blocks before we started saving block gas limit into the database. @@ -76,7 +76,7 @@ impl StorageL1BatchHeader { l2_to_l1_logs, l2_to_l1_messages: self.l2_to_l1_messages, - bloom: H2048::from_slice(&self.bloom), + bloom: Bloom::from_slice(&self.bloom), used_contract_hashes: serde_json::from_value(self.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), base_system_contracts_hashes: convert_base_system_contracts_hashes( @@ -171,7 +171,7 @@ impl StorageL1Batch { l2_to_l1_logs, l2_to_l1_messages: self.l2_to_l1_messages, - bloom: H2048::from_slice(&self.bloom), + bloom: Bloom::from_slice(&self.bloom), used_contract_hashes: serde_json::from_value(self.used_contract_hashes) .expect("invalid value for used_contract_hashes in the DB"), base_system_contracts_hashes: convert_base_system_contracts_hashes( @@ -433,6 +433,7 @@ pub(crate) struct StorageL2BlockHeader { /// The formal value of the gas limit for the miniblock. /// This value should bound the maximal amount of gas that can be spent by transactions in the miniblock. pub gas_limit: Option, + pub logs_bloom: Option>, } impl From for L2BlockHeader { @@ -475,6 +476,10 @@ impl From for L2BlockHeader { protocol_version, virtual_blocks: row.virtual_blocks as u32, gas_limit: row.gas_limit.unwrap_or(i64::from(LEGACY_BLOCK_GAS_LIMIT)) as u64, + logs_bloom: row + .logs_bloom + .map(|b| Bloom::from_slice(&b)) + .unwrap_or_default(), } } } diff --git a/core/lib/dal/src/models/storage_eth_tx.rs b/core/lib/dal/src/models/storage_eth_tx.rs index 2654ffe0e0a..c721f938838 100644 --- a/core/lib/dal/src/models/storage_eth_tx.rs +++ b/core/lib/dal/src/models/storage_eth_tx.rs @@ -29,6 +29,7 @@ pub struct StorageEthTx { // // Format a `bincode`-encoded `EthTxBlobSidecar` enum. 
pub blob_sidecar: Option>, + pub is_gateway: bool, } #[derive(Debug, Default)] @@ -83,6 +84,7 @@ impl From for EthTx { blob_sidecar: tx.blob_sidecar.map(|b| { bincode::deserialize(&b).expect("EthTxBlobSidecar is encoded correctly; qed") }), + is_gateway: tx.is_gateway, } } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 31a182a7eca..aca93ee8c5a 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -10,7 +10,6 @@ use zksync_types::{ l2::TransactionType, protocol_upgrade::ProtocolUpgradeTxCommonData, transaction_request::PaymasterParams, - vm_trace::{Call, LegacyCall, LegacyMixedCall}, web3::Bytes, Address, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, PackedEthSignature, PriorityOpId, ProtocolVersionId, Transaction, EIP_1559_TX_TYPE, @@ -18,7 +17,9 @@ use zksync_types::{ PROTOCOL_UPGRADE_TX_TYPE, U256, U64, }; use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; +use zksync_vm_interface::Call; +use super::call::{LegacyCall, LegacyMixedCall}; use crate::BigDecimal; #[derive(Debug, Clone, sqlx::FromRow)] diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs index 0999e2be164..4f94ff7f63d 100644 --- a/core/lib/dal/src/pruning_dal/tests.rs +++ b/core/lib/dal/src/pruning_dal/tests.rs @@ -2,10 +2,10 @@ use std::ops; use zksync_db_connection::connection::Connection; use zksync_types::{ - fee::TransactionExecutionMetrics, tx::IncludedTxLocation, AccountTreeId, Address, - L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, ProtocolVersionId, StorageKey, - StorageLog, H256, + tx::IncludedTxLocation, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersion, ProtocolVersionId, StorageKey, StorageLog, H256, }; +use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 898770c38f5..ec6ee0f9281 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -106,9 +106,9 @@ impl SyncDal<'_, '_> { mod tests { use zksync_types::{ block::{L1BatchHeader, L2BlockHeader}, - fee::TransactionExecutionMetrics, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, }; + use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 11f88ba8a70..c17e8c5d1fe 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -4,7 +4,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::connection_pool::ConnectionPool; use zksync_types::{ block::{L1BatchHeader, L2BlockHasher, L2BlockHeader}, - fee::{Fee, TransactionExecutionMetrics}, + fee::Fee, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, l1::{L1Tx, OpProcessingType, PriorityQueueType}, @@ -12,9 +12,12 @@ use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, snapshots::SnapshotRecoveryStatus, - tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, Execute, K256PrivateKey, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2BlockNumber, - L2ChainId, PriorityOpId, ProtocolVersion, ProtocolVersionId, VmEvent, H160, H256, U256, + L2ChainId, PriorityOpId, ProtocolVersion, ProtocolVersionId, H160, H256, U256, +}; +use 
zksync_vm_interface::{ + TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, + VmExecutionMetrics, }; use crate::{ @@ -48,6 +51,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { protocol_version: Some(protocol_version), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), } } pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { @@ -151,7 +155,7 @@ pub(crate) fn mock_execution_result(transaction: L2Tx) -> TransactionExecutionRe TransactionExecutionResult { hash: transaction.hash(), transaction: transaction.into(), - execution_info: ExecutionMetrics::default(), + execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, operator_suggested_refund: 0, diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index f76b61ec164..49791f776e0 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -10,17 +10,14 @@ use zksync_db_connection::{ utils::pg_interval_from_duration, }; use zksync_types::{ - block::L2BlockExecutionData, - fee::TransactionExecutionMetrics, - l1::L1Tx, - l2::L2Tx, - protocol_upgrade::ProtocolUpgradeTx, - tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult}, - vm_trace::Call, - Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, + block::L2BlockExecutionData, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, + ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; +use zksync_vm_interface::{ + Call, TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, +}; use crate::{ models::storage_transaction::{CallTrace, StorageTransaction}, diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index ff82664109d..f5a3c492f8a 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::{collections::HashMap, iter::once}; use anyhow::Context as _; use sqlx::types::chrono::NaiveDateTime; @@ -9,9 +9,10 @@ use zksync_db_connection::{ interpolate_query, match_query_as, }; use zksync_types::{ - api, api::TransactionReceipt, event::DEPLOY_EVENT_SIGNATURE, Address, L2BlockNumber, L2ChainId, - Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, + api, api::TransactionReceipt, block::build_bloom, Address, BloomInput, L2BlockNumber, + L2ChainId, Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; +use zksync_vm_interface::VmEvent; use crate::{ models::storage_transaction::{ @@ -40,6 +41,7 @@ impl TransactionsWeb3Dal<'_, '_> { hashes: &[H256], ) -> DalResult> { let hash_bytes: Vec<_> = hashes.iter().map(H256::as_bytes).collect(); + // Clarification for first part of the query(`WITH` clause): // Looking for `ContractDeployed` event in the events table // to find the address of deployed contract @@ -88,7 +90,7 @@ impl TransactionsWeb3Dal<'_, '_> { // ^ Filter out transactions with pruned data, which would lead to potentially incomplete / bogus // transaction info. 
CONTRACT_DEPLOYER_ADDRESS.as_bytes(), - DEPLOY_EVENT_SIGNATURE.as_bytes(), + VmEvent::DEPLOY_EVENT_SIGNATURE.as_bytes(), &hash_bytes as &[&[u8]], ) .instrument("get_transaction_receipts") @@ -118,6 +120,13 @@ impl TransactionsWeb3Dal<'_, '_> { let logs_for_tx = logs.remove(&receipt.transaction_hash); if let Some(logs) = logs_for_tx { + let iter = logs.iter().flat_map(|log| { + log.topics + .iter() + .map(|topic| BloomInput::Raw(topic.as_bytes())) + .chain(once(BloomInput::Raw(log.address.as_bytes()))) + }); + receipt.logs_bloom = build_bloom(iter); receipt.logs = logs .into_iter() .map(|mut log| { @@ -479,9 +488,8 @@ impl TransactionsWeb3Dal<'_, '_> { mod tests { use std::collections::HashMap; - use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, Nonce, ProtocolVersion, ProtocolVersionId, - }; + use zksync_types::{l2::L2Tx, Nonce, ProtocolVersion, ProtocolVersionId}; + use zksync_vm_interface::TransactionExecutionMetrics; use super::*; use crate::{ diff --git a/core/lib/db_connection/src/connection.rs b/core/lib/db_connection/src/connection.rs index 22a63765b3b..e178395b333 100644 --- a/core/lib/db_connection/src/connection.rs +++ b/core/lib/db_connection/src/connection.rs @@ -1,10 +1,11 @@ use std::{ collections::HashMap, fmt, io, + marker::PhantomData, panic::Location, sync::{ atomic::{AtomicUsize, Ordering}, - Mutex, + Arc, Mutex, Weak, }, time::{Instant, SystemTime}, }; @@ -98,14 +99,14 @@ impl TracedConnections { } } -struct PooledConnection<'a> { +struct PooledConnection { connection: PoolConnection, tags: Option, created_at: Instant, - traced: Option<(&'a TracedConnections, usize)>, + traced: (Weak, usize), } -impl fmt::Debug for PooledConnection<'_> { +impl fmt::Debug for PooledConnection { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter .debug_struct("PooledConnection") @@ -115,7 +116,7 @@ impl fmt::Debug for PooledConnection<'_> { } } -impl Drop for PooledConnection<'_> { +impl Drop for PooledConnection { fn drop(&mut self) { if let Some(tags) = &self.tags { let lifetime = self.created_at.elapsed(); @@ -132,15 +133,17 @@ impl Drop for PooledConnection<'_> { ); } } - if let Some((connections, id)) = self.traced { - connections.mark_as_dropped(id); + + let (traced_connections, id) = &self.traced; + if let Some(connections) = traced_connections.upgrade() { + connections.mark_as_dropped(*id); } } } #[derive(Debug)] enum ConnectionInner<'a> { - Pooled(PooledConnection<'a>), + Pooled(PooledConnection), Transaction { transaction: Transaction<'a, Postgres>, tags: Option<&'a ConnectionTags>, @@ -156,7 +159,7 @@ pub trait DbMarker: 'static + Send + Sync + Clone {} #[derive(Debug)] pub struct Connection<'a, DB: DbMarker> { inner: ConnectionInner<'a>, - _marker: std::marker::PhantomData, + _marker: PhantomData, } impl<'a, DB: DbMarker> Connection<'a, DB> { @@ -166,21 +169,23 @@ impl<'a, DB: DbMarker> Connection<'a, DB> { pub(crate) fn from_pool( connection: PoolConnection, tags: Option, - traced_connections: Option<&'a TracedConnections>, + traced_connections: Option<&Arc>, ) -> Self { let created_at = Instant::now(); let inner = ConnectionInner::Pooled(PooledConnection { connection, tags, created_at, - traced: traced_connections.map(|connections| { + traced: if let Some(connections) = traced_connections { let id = connections.acquire(tags, created_at); - (connections, id) - }), + (Arc::downgrade(connections), id) + } else { + (Weak::new(), 0) + }, }); Self { inner, - _marker: Default::default(), + _marker: PhantomData, } } @@ -196,7 +201,7 @@ 
impl<'a, DB: DbMarker> Connection<'a, DB> { }; Ok(Connection { inner, - _marker: Default::default(), + _marker: PhantomData, }) } diff --git a/core/lib/db_connection/src/connection_pool.rs b/core/lib/db_connection/src/connection_pool.rs index 78d9184222d..d262e374aef 100644 --- a/core/lib/db_connection/src/connection_pool.rs +++ b/core/lib/db_connection/src/connection_pool.rs @@ -158,6 +158,14 @@ impl TestTemplate { Ok(Self(db_url.parse()?)) } + pub fn prover_empty() -> anyhow::Result { + let db_url = env::var("TEST_DATABASE_PROVER_URL").context( + "TEST_DATABASE_PROVER_URL must be set. Normally, this is done by the 'zk' tool. \ + Make sure that you are running the tests with 'zk test rust' command or equivalent.", + )?; + Ok(Self(db_url.parse()?)) + } + /// Closes the connection pool, disallows connecting to the underlying db, /// so that the db can be used as a template. pub async fn freeze(pool: ConnectionPool) -> anyhow::Result { @@ -291,6 +299,11 @@ impl ConnectionPool { Self::constrained_test_pool(DEFAULT_CONNECTIONS).await } + pub async fn prover_test_pool() -> ConnectionPool { + const DEFAULT_CONNECTIONS: u32 = 100; // Expected to be enough for any unit test. + Self::constrained_prover_test_pool(DEFAULT_CONNECTIONS).await + } + /// Same as [`Self::test_pool()`], but with a configurable number of connections. This is useful to test /// behavior of components that rely on singleton / constrained pools in production. pub async fn constrained_test_pool(connections: u32) -> ConnectionPool { @@ -309,6 +322,22 @@ impl ConnectionPool { pool } + pub async fn constrained_prover_test_pool(connections: u32) -> ConnectionPool { + assert!(connections > 0, "Number of connections must be positive"); + let mut builder = TestTemplate::prover_empty() + .expect("failed creating test template") + .create_db(connections) + .await + .expect("failed creating database for tests"); + let mut pool = builder + .set_acquire_timeout(Some(Self::TEST_ACQUIRE_TIMEOUT)) + .build() + .await + .expect("cannot build connection pool"); + pool.traced_connections = Some(Arc::default()); + pool + } + /// Initializes a builder for connection pools. pub fn builder(database_url: SensitiveUrl, max_pool_size: u32) -> ConnectionPoolBuilder { ConnectionPoolBuilder { @@ -347,7 +376,7 @@ impl ConnectionPool { /// /// This method is intended to be used in crucial contexts, where the /// database access is must-have (e.g. block committer). 
- pub async fn connection(&self) -> DalResult> { + pub async fn connection(&self) -> DalResult> { self.connection_inner(None).await } @@ -361,7 +390,7 @@ impl ConnectionPool { pub fn connection_tagged( &self, requester: &'static str, - ) -> impl Future>> + '_ { + ) -> impl Future>> + '_ { let location = Location::caller(); async move { let tags = ConnectionTags { @@ -375,7 +404,7 @@ impl ConnectionPool { async fn connection_inner( &self, tags: Option, - ) -> DalResult> { + ) -> DalResult> { let acquire_latency = CONNECTION_METRICS.acquire.start(); let conn = self.acquire_connection_retried(tags.as_ref()).await?; let elapsed = acquire_latency.observe(); @@ -386,7 +415,7 @@ impl ConnectionPool { Ok(Connection::::from_pool( conn, tags, - self.traced_connections.as_deref(), + self.traced_connections.as_ref(), )) } diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index 18a661099b6..30a6ebf4f00 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -70,6 +70,8 @@ mod tests { l1_batch_min_age_before_execute_seconds: Some(1000), max_acceptable_priority_fee_in_gwei: 100_000_000_000, pubdata_sending_mode: PubdataSendingMode::Calldata, + tx_aggregation_only_prove_and_execute: false, + tx_aggregation_paused: false, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index fcb0f3625ea..8cfa7b58a31 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -26,6 +26,7 @@ mod da_dispatcher; mod external_price_api_client; mod external_proof_integration_api; mod genesis; +mod prover_job_monitor; #[cfg(test)] mod test_utils; mod vm_runner; diff --git a/core/lib/env_config/src/prover_job_monitor.rs b/core/lib/env_config/src/prover_job_monitor.rs new file mode 100644 index 00000000000..3a8f80473eb --- /dev/null +++ b/core/lib/env_config/src/prover_job_monitor.rs @@ -0,0 +1,89 @@ +use zksync_config::configs::ProverJobMonitorConfig; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for ProverJobMonitorConfig { + fn from_env() -> anyhow::Result { + envy_load("prover_job_monitor", "PROVER_JOB_MONITOR_") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_config() -> ProverJobMonitorConfig { + ProverJobMonitorConfig { + prometheus_port: 3317, + max_db_connections: 9, + graceful_shutdown_timeout_ms: 5000, + gpu_prover_archiver_run_interval_ms: 86400000, + gpu_prover_archiver_archive_prover_after_ms: 172800000, + prover_jobs_archiver_run_interval_ms: 1800000, + prover_jobs_archiver_archive_jobs_after_ms: 172800000, + proof_compressor_job_requeuer_run_interval_ms: 10000, + prover_job_requeuer_run_interval_ms: 10000, + witness_generator_job_requeuer_run_interval_ms: 10000, + proof_compressor_queue_reporter_run_interval_ms: 10000, + prover_queue_reporter_run_interval_ms: 10000, + witness_generator_queue_reporter_run_interval_ms: 10000, + witness_job_queuer_run_interval_ms: 10000, + } + } + + fn expected_changed_config() -> ProverJobMonitorConfig { + let mut config = expected_config(); + config.graceful_shutdown_timeout_ms += 1; + config.gpu_prover_archiver_run_interval_ms += 1; + config.gpu_prover_archiver_archive_prover_after_ms += 1; + config.prover_jobs_archiver_run_interval_ms += 1; + config.prover_jobs_archiver_archive_jobs_after_ms += 1; + config.proof_compressor_job_requeuer_run_interval_ms += 1; + 
config.prover_job_requeuer_run_interval_ms += 1; + config.witness_generator_job_requeuer_run_interval_ms += 1; + config.proof_compressor_queue_reporter_run_interval_ms += 1; + config.prover_queue_reporter_run_interval_ms += 1; + config.witness_generator_queue_reporter_run_interval_ms += 1; + config.witness_job_queuer_run_interval_ms += 1; + config + } + + #[test] + fn from_env_with_default() { + let config = r#" + PROVER_JOB_MONITOR_PROMETHEUS_PORT=3317 + PROVER_JOB_MONITOR_MAX_DB_CONNECTIONS=9 + "#; + let mut lock = MUTEX.lock(); + lock.set_env(config); + let actual = ProverJobMonitorConfig::from_env().unwrap(); + assert_eq!(actual, expected_config()); + } + + #[test] + fn from_env() { + let config = r#" + PROVER_JOB_MONITOR_PROMETHEUS_PORT=3317 + PROVER_JOB_MONITOR_MAX_DB_CONNECTIONS=9 + PROVER_JOB_MONITOR_GRACEFUL_SHUTDOWN_TIMEOUT_MS=5001 + PROVER_JOB_MONITOR_GPU_PROVER_ARCHIVER_RUN_INTERVAL_MS=86400001 + PROVER_JOB_MONITOR_GPU_PROVER_ARCHIVER_ARCHIVE_PROVER_AFTER_MS=172800001 + PROVER_JOB_MONITOR_PROVER_JOBS_ARCHIVER_RUN_INTERVAL_MS=1800001 + PROVER_JOB_MONITOR_PROVER_JOBS_ARCHIVER_ARCHIVE_JOBS_AFTER_MS=172800001 + PROVER_JOB_MONITOR_PROOF_COMPRESSOR_JOB_REQUEUER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_PROVER_JOB_REQUEUER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_WITNESS_GENERATOR_JOB_REQUEUER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_PROOF_COMPRESSOR_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_PROVER_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_WITNESS_GENERATOR_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_WITNESS_JOB_QUEUER_RUN_INTERVAL_MS=10001 + "#; + let mut lock = MUTEX.lock(); + lock.set_env(config); + let actual = ProverJobMonitorConfig::from_env().unwrap(); + assert_eq!(actual, expected_changed_config()); + } +} diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index 46ad5dc5310..b33554b6292 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -107,7 +107,7 @@ impl MockSettlementLayerInner { self.block_number += confirmations; let nonce = self.current_nonce; self.current_nonce += 1; - tracing::info!("Executing tx with hash {tx_hash:?}, success: {success}, current nonce: {}, confirmations: {confirmations}", self.current_nonce); + tracing::info!("Executing tx with hash {tx_hash:?} at block {}, success: {success}, current nonce: {}, confirmations: {confirmations}", self.block_number - confirmations, self.current_nonce); let tx_nonce = self.sent_txs[&tx_hash].nonce; if non_ordering_confirmations { diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 80d1ef8a294..2dc680ba77d 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -1,8 +1,8 @@ -use zksync_types::tx::tx_execution_info::TxExecutionStatus; - use crate::{ glue::{GlueFrom, GlueInto}, - interface::{ExecutionResult, Refunds, TxRevertReason, VmExecutionResultAndLogs}, + interface::{ + ExecutionResult, Refunds, TxExecutionStatus, TxRevertReason, VmExecutionResultAndLogs, + }, }; impl GlueFrom for VmExecutionResultAndLogs { diff --git a/core/lib/multivm/src/tracers/call_tracer/mod.rs b/core/lib/multivm/src/tracers/call_tracer/mod.rs index 4013be101e5..44f27487603 100644 --- a/core/lib/multivm/src/tracers/call_tracer/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/mod.rs @@ -1,9 +1,10 @@ use std::sync::Arc; 
use once_cell::sync::OnceCell; -use zksync_types::vm_trace::Call; -use crate::{glue::tracers::IntoOldVmTracer, tracers::call_tracer::metrics::CALL_METRICS}; +use crate::{ + glue::tracers::IntoOldVmTracer, interface::Call, tracers::call_tracer::metrics::CALL_METRICS, +}; mod metrics; pub mod vm_1_4_1; diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs index 10ea9ba250e..a48c9a75f62 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_1/mod.rs @@ -6,18 +6,14 @@ use zk_evm_1_4_1::{ }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - zk_evm_types::FarCallOpcode, - U256, -}; +use zksync_types::{zk_evm_types::FarCallOpcode, U256}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, tracer::VmExecutionStopReason, - VmRevertReason, + Call, CallType, VmRevertReason, }, tracers::{dynamic::vm_1_4_1::DynTracer, CallTracer}, vm_1_4_1::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs index 0464164a50a..3493a0511ea 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_1_4_2/mod.rs @@ -6,18 +6,14 @@ use zk_evm_1_4_1::{ }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - zk_evm_types::FarCallOpcode, - U256, -}; +use zksync_types::{zk_evm_types::FarCallOpcode, U256}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, tracer::VmExecutionStopReason, - VmRevertReason, + Call, CallType, VmRevertReason, }, tracers::{dynamic::vm_1_4_1::DynTracer, CallTracer}, vm_1_4_2::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs index a8d035e6c1c..75837211d32 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_boojum_integration/mod.rs @@ -6,18 +6,14 @@ use zk_evm_1_4_0::{ }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - zk_evm_types::FarCallOpcode, - U256, -}; +use zksync_types::{zk_evm_types::FarCallOpcode, U256}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, tracer::VmExecutionStopReason, - VmRevertReason, + Call, CallType, VmRevertReason, }, tracers::{dynamic::vm_1_4_0::DynTracer, CallTracer}, vm_boojum_integration::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs index 8b1ccfa5b7a..ed18a3eca47 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs @@ -6,18 +6,14 @@ use zk_evm_1_5_0::{ }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - zk_evm_types::FarCallOpcode, - U256, -}; +use zksync_types::{zk_evm_types::FarCallOpcode, U256}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, tracer::VmExecutionStopReason, - 
VmRevertReason, + Call, CallType, VmRevertReason, }, tracers::{dynamic::vm_1_5_0::DynTracer, CallTracer}, vm_latest::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs index 30a2effb9f5..ff341e50c45 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs @@ -6,18 +6,14 @@ use zk_evm_1_3_3::{ }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - zk_evm_types::FarCallOpcode, - U256, -}; +use zksync_types::{zk_evm_types::FarCallOpcode, U256}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, tracer::VmExecutionStopReason, - VmRevertReason, + Call, CallType, VmRevertReason, }, tracers::{dynamic::vm_1_3_3::DynTracer, CallTracer}, vm_refunds_enhancement::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs index 0e3bea139d6..41286ccd877 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs @@ -6,17 +6,13 @@ use zk_evm_1_3_3::{ }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - zk_evm_types::FarCallOpcode, - U256, -}; +use zksync_types::{zk_evm_types::FarCallOpcode, U256}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - VmExecutionResultAndLogs, VmRevertReason, + Call, CallType, VmExecutionResultAndLogs, VmRevertReason, }, tracers::{dynamic::vm_1_3_3::DynTracer, CallTracer}, vm_virtual_blocks::{ diff --git a/core/lib/multivm/src/tracers/old.rs b/core/lib/multivm/src/tracers/old.rs index 54e5e45aa2c..f0a0fae0f65 100644 --- a/core/lib/multivm/src/tracers/old.rs +++ b/core/lib/multivm/src/tracers/old.rs @@ -1,7 +1,8 @@ use std::sync::Arc; use once_cell::sync::OnceCell; -use zksync_types::vm_trace::Call; + +use crate::interface::Call; /// For backward compatibility with vm before vm with virtual blocks. /// These tracers are tightly coupled with the VM implementation and we have to pass only params for them and not tracers by itself. diff --git a/core/lib/multivm/src/utils/bytecode.rs b/core/lib/multivm/src/utils/bytecode.rs new file mode 100644 index 00000000000..260749b44f3 --- /dev/null +++ b/core/lib/multivm/src/utils/bytecode.rs @@ -0,0 +1,154 @@ +use std::collections::HashMap; + +use zksync_types::ethabi::{self, Token}; +use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}; + +use crate::interface::CompressedBytecodeInfo; + +#[derive(Debug, thiserror::Error)] +pub(crate) enum FailedToCompressBytecodeError { + #[error("Number of unique 8-bytes bytecode chunks exceed the limit of 2^16 - 1")] + DictionaryOverflow, + #[error("Bytecode is invalid: {0}")] + InvalidBytecode(#[from] InvalidBytecodeError), +} + +/// Implements, a simple compression algorithm for the bytecode. 
+fn compress_to_bytes(code: &[u8]) -> Result, FailedToCompressBytecodeError> { + validate_bytecode(code)?; + + // Statistic is a hash map of values (number of occurrences, first occurrence position), + // this is needed to ensure that the determinism during sorting of the statistic, i.e. + // each element will have unique first occurrence position + let mut statistic: HashMap = HashMap::new(); + let mut dictionary: HashMap = HashMap::new(); + let mut encoded_data: Vec = Vec::new(); + + // Split original bytecode into 8-byte chunks. + for (position, chunk_bytes) in code.chunks(8).enumerate() { + // It is safe to unwrap here, because each chunk is exactly 8 bytes, since + // valid bytecodes are divisible by 8. + let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); + + // Count the number of occurrences of each chunk. + statistic.entry(chunk).or_insert((0, position)).0 += 1; + } + + let mut statistic_sorted_by_value: Vec<_> = statistic.into_iter().collect::>(); + statistic_sorted_by_value.sort_by_key(|x| x.1); + + // The dictionary size is limited by 2^16 - 1, + if statistic_sorted_by_value.len() > u16::MAX.into() { + return Err(FailedToCompressBytecodeError::DictionaryOverflow); + } + + // Fill the dictionary with the most popular chunks. + // The most popular chunks will be encoded with the smallest indexes, so that + // the 255 most popular chunks will be encoded with one zero byte. + // And the encoded data will be filled with more zeros, so + // the calldata that will be sent to L1 will be cheaper. + for (chunk, _) in statistic_sorted_by_value.iter().rev() { + dictionary.insert(*chunk, dictionary.len() as u16); + } + + for chunk_bytes in code.chunks(8) { + // It is safe to unwrap here, because each chunk is exactly 8 bytes, since + // valid bytecodes are divisible by 8. + let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); + + // Add the index of the chunk to the encoded data. 
+ encoded_data.extend(dictionary.get(&chunk).unwrap().to_be_bytes()); + } + + // Prepare the raw compressed bytecode in the following format: + // - 2 bytes: the length of the dictionary (N) + // - N bytes: packed dictionary bytes + // - remaining bytes: packed encoded data bytes + + let mut compressed: Vec = Vec::new(); + compressed.extend((dictionary.len() as u16).to_be_bytes()); + + let mut entries: Vec<_> = dictionary.into_iter().map(|(k, v)| (v, k)).collect(); + entries.sort_unstable(); + for (_, chunk) in entries { + compressed.extend(chunk.to_be_bytes()); + } + compressed.extend(encoded_data); + Ok(compressed) +} + +pub(crate) fn compress( + bytecode: Vec, +) -> Result { + Ok(CompressedBytecodeInfo { + compressed: compress_to_bytes(&bytecode)?, + original: bytecode, + }) +} + +pub(crate) fn encode_call(bytecode: &CompressedBytecodeInfo) -> Vec { + let mut bytecode_hash = hash_bytecode(&bytecode.original).as_bytes().to_vec(); + let empty_cell = [0_u8; 32]; + bytecode_hash.extend_from_slice(&empty_cell); + + let bytes_encoded = ethabi::encode(&[ + Token::Bytes(bytecode.original.clone()), + Token::Bytes(bytecode.compressed.clone()), + ]); + bytecode_hash.extend_from_slice(&bytes_encoded); + bytecode_hash +} + +#[cfg(test)] +mod tests { + use super::*; + + fn decompress_bytecode(raw_compressed_bytecode: &[u8]) -> Vec { + let mut decompressed: Vec = Vec::new(); + let mut dictionary: Vec = Vec::new(); + + let dictionary_len = u16::from_be_bytes(raw_compressed_bytecode[0..2].try_into().unwrap()); + for index in 0..dictionary_len { + let chunk = u64::from_be_bytes( + raw_compressed_bytecode[2 + index as usize * 8..10 + index as usize * 8] + .try_into() + .unwrap(), + ); + dictionary.push(chunk); + } + + let encoded_data = &raw_compressed_bytecode[2 + dictionary_len as usize * 8..]; + for index_bytes in encoded_data.chunks(2) { + let index = u16::from_be_bytes(index_bytes.try_into().unwrap()); + + let chunk = dictionary[index as usize]; + decompressed.extend(chunk.to_be_bytes()); + } + + decompressed + } + + #[test] + fn bytecode_compression() { + let example_code = 
hex::decode("000200000000000200010000000103550000006001100270000000150010019d0000000101200190000000080000c13d0000000001000019004e00160000040f0000000101000039004e00160000040f0000001504000041000000150510009c000000000104801900000040011002100000000001310019000000150320009c0000000002048019000000600220021000000000012100190000004f0001042e000000000100001900000050000104300000008002000039000000400020043f0000000002000416000000000110004c000000240000613d000000000120004c0000004d0000c13d000000200100003900000100001004430000012000000443000001000100003900000040020000390000001d03000041004e000a0000040f000000000120004c0000004d0000c13d0000000001000031000000030110008c0000004d0000a13d0000000101000367000000000101043b0000001601100197000000170110009c0000004d0000c13d0000000101000039000000000101041a0000000202000039000000000202041a000000400300043d00000040043000390000001805200197000000000600041a0000000000540435000000180110019700000020043000390000000000140435000000a0012002700000001901100197000000600430003900000000001404350000001a012001980000001b010000410000000001006019000000b8022002700000001c02200197000000000121019f0000008002300039000000000012043500000018016001970000000000130435000000400100043d0000000002130049000000a0022000390000000003000019004e000a0000040f004e00140000040f0000004e000004320000004f0001042e000000500001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000000000008903573000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000ffffff0000000000008000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000000000000000000000000000000000000000000000000000007fffff00000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let compressed = compress_to_bytes(&example_code).unwrap(); + let decompressed = decompress_bytecode(&compressed); + + assert_eq!(example_code, decompressed); + } + + #[test] + fn bytecode_compression_statisticst() { + let example_code = + hex::decode("0000000000000000111111111111111111111111111111112222222222222222") + .unwrap(); + // The size of the dictionary should be `0x0003` + // The dictionary itself should put the most common chunk first, i.e. `0x1111111111111111` + // Then, the ordering does not matter, but the algorithm will return the one with the highest position, i.e. 
`0x2222222222222222` + let expected_encoding = + hex::decode("00031111111111111111222222222222222200000000000000000002000000000001") + .unwrap(); + + assert_eq!(expected_encoding, compress_to_bytes(&example_code).unwrap()); + } +} diff --git a/core/lib/types/src/storage_writes_deduplicator.rs b/core/lib/multivm/src/utils/deduplicator.rs similarity index 98% rename from core/lib/types/src/storage_writes_deduplicator.rs rename to core/lib/multivm/src/utils/deduplicator.rs index f9f3cc323b9..e9a870e6901 100644 --- a/core/lib/types/src/storage_writes_deduplicator.rs +++ b/core/lib/multivm/src/utils/deduplicator.rs @@ -1,13 +1,12 @@ use std::collections::HashMap; -use zksync_basic_types::H256; -use zksync_utils::h256_to_u256; - -use crate::{ - tx::tx_execution_info::DeduplicatedWritesMetrics, +use zksync_types::{ writes::compression::compress_with_best_strategy, StorageKey, StorageLogKind, - StorageLogWithPreviousValue, + StorageLogWithPreviousValue, H256, }; +use zksync_utils::h256_to_u256; + +use crate::interface::DeduplicatedWritesMetrics; #[derive(Debug, Clone, Copy, PartialEq, Default)] pub struct ModifiedSlot { @@ -212,11 +211,10 @@ impl StorageWritesDeduplicator { #[cfg(test)] mod tests { - use zksync_basic_types::{AccountTreeId, U256}; + use zksync_types::{AccountTreeId, StorageLog, H160, U256}; use zksync_utils::u256_to_h256; use super::*; - use crate::{StorageLog, H160}; fn storage_log( key: U256, diff --git a/core/lib/multivm/src/utils/events.rs b/core/lib/multivm/src/utils/events.rs new file mode 100644 index 00000000000..9720cb77914 --- /dev/null +++ b/core/lib/multivm/src/utils/events.rs @@ -0,0 +1,294 @@ +use zksync_system_constants::L1_MESSENGER_ADDRESS; +use zksync_types::{ + ethabi::{self, Token}, + l2_to_l1_log::L2ToL1Log, + Address, H256, U256, +}; +use zksync_utils::{u256_to_bytes_be, u256_to_h256}; + +use crate::interface::VmEvent; + +/// Corresponds to the following solidity event: +/// ```solidity +/// struct L2ToL1Log { +/// uint8 l2ShardId; +/// bool isService; +/// uint16 txNumberInBlock; +/// address sender; +/// bytes32 key; +/// bytes32 value; +/// } +/// ``` +#[derive(Debug, Default, Clone, PartialEq)] +pub(crate) struct L1MessengerL2ToL1Log { + pub l2_shard_id: u8, + pub is_service: bool, + pub tx_number_in_block: u16, + pub sender: Address, + pub key: U256, + pub value: U256, +} + +impl L1MessengerL2ToL1Log { + pub fn packed_encoding(&self) -> Vec { + let mut res: Vec = vec![]; + res.push(self.l2_shard_id); + res.push(self.is_service as u8); + res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); + res.extend_from_slice(self.sender.as_bytes()); + res.extend(u256_to_bytes_be(&self.key)); + res.extend(u256_to_bytes_be(&self.value)); + res + } +} + +impl From for L2ToL1Log { + fn from(log: L1MessengerL2ToL1Log) -> Self { + L2ToL1Log { + shard_id: log.l2_shard_id, + is_service: log.is_service, + tx_number_in_block: log.tx_number_in_block, + sender: log.sender, + key: u256_to_h256(log.key), + value: u256_to_h256(log.value), + } + } +} + +#[derive(Debug, PartialEq)] +pub(crate) struct L1MessengerBytecodePublicationRequest { + pub bytecode_hash: H256, +} + +/// Extracts all the `L2ToL1Logs` that were emitted by the `L1Messenger` contract. 
+pub fn extract_l2tol1logs_from_l1_messenger( + all_generated_events: &[VmEvent], +) -> Vec { + let params = &[ethabi::ParamType::Tuple(vec![ + ethabi::ParamType::Uint(8), + ethabi::ParamType::Bool, + ethabi::ParamType::Uint(16), + ethabi::ParamType::Address, + ethabi::ParamType::FixedBytes(32), + ethabi::ParamType::FixedBytes(32), + ])]; + + let l1_messenger_l2_to_l1_log_event_signature = ethabi::long_signature("L2ToL1LogSent", params); + + all_generated_events + .iter() + .filter(|event| { + // Filter events from the l1 messenger contract that match the expected signature. + event.address == L1_MESSENGER_ADDRESS + && !event.indexed_topics.is_empty() + && event.indexed_topics[0] == l1_messenger_l2_to_l1_log_event_signature + }) + .map(|event| { + let tuple = ethabi::decode(params, &event.value) + .expect("Failed to decode L2ToL1LogSent message") + .first() + .unwrap() + .clone(); + let Token::Tuple(tokens) = tuple else { + panic!("Tuple was expected, got: {}", tuple); + }; + let [ + Token::Uint(shard_id), + Token::Bool(is_service), + Token::Uint(tx_number_in_block), + Token::Address(sender), + Token::FixedBytes(key_bytes), + Token::FixedBytes(value_bytes), + ] = tokens.as_slice() else { + panic!("Invalid tuple types"); + }; + L1MessengerL2ToL1Log { + l2_shard_id: shard_id.low_u64() as u8, + is_service: *is_service, + tx_number_in_block: tx_number_in_block.low_u64() as u16, + sender: *sender, + key: U256::from_big_endian(key_bytes), + value: U256::from_big_endian(value_bytes), + } + }) + .collect() +} + +/// Extracts all the bytecode publication requests that were emitted by the L1Messenger contract. +pub(crate) fn extract_bytecode_publication_requests_from_l1_messenger( + all_generated_events: &[VmEvent], +) -> Vec { + all_generated_events + .iter() + .filter(|event| { + // Filter events from the l1 messenger contract that match the expected signature. 
+ event.address == L1_MESSENGER_ADDRESS + && !event.indexed_topics.is_empty() + && event.indexed_topics[0] + == VmEvent::L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE + }) + .map(|event| { + let mut tokens = ethabi::decode(&[ethabi::ParamType::FixedBytes(32)], &event.value) + .expect("Failed to decode BytecodeL1PublicationRequested message"); + L1MessengerBytecodePublicationRequest { + bytecode_hash: H256::from_slice(&tokens.remove(0).into_fixed_bytes().unwrap()), + } + }) + .collect() +} + +#[cfg(test)] +mod tests { + use zksync_system_constants::{ + BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, + }; + use zksync_types::L1BatchNumber; + + use super::*; + + fn create_l2_to_l1_log_vm_event( + from: Address, + tx_number: U256, + sender: Address, + key: U256, + value: U256, + ) -> VmEvent { + let l1_messenger_l2_to_l1_log_event_signature = ethabi::long_signature( + "L2ToL1LogSent", + &[ethabi::ParamType::Tuple(vec![ + ethabi::ParamType::Uint(8), + ethabi::ParamType::Bool, + ethabi::ParamType::Uint(16), + ethabi::ParamType::Address, + ethabi::ParamType::FixedBytes(32), + ethabi::ParamType::FixedBytes(32), + ])], + ); + + VmEvent { + location: (L1BatchNumber(1), 0u32), + address: from, + indexed_topics: vec![l1_messenger_l2_to_l1_log_event_signature], + value: create_l2_to_l1_log_sent_value(tx_number, sender, key, value), + } + } + + fn create_l2_to_l1_log_sent_value( + tx_number: U256, + sender: Address, + key: U256, + value: U256, + ) -> Vec { + let mut key_arr = [0u8; 32]; + key.to_big_endian(&mut key_arr); + + let mut val_arr = [0u8; 32]; + value.to_big_endian(&mut val_arr); + + let tokens = vec![ + /*`l2ShardId`*/ Token::Uint(U256::from(0)), + /*`isService`*/ Token::Bool(true), + /*`txNumberInBlock`*/ Token::Uint(tx_number), + /*sender*/ Token::Address(sender), + /*key*/ Token::FixedBytes(key_arr.to_vec()), + /*value*/ Token::FixedBytes(val_arr.to_vec()), + ]; + + ethabi::encode(&tokens) + } + + #[test] + fn test_extract_l2tol1logs_from_l1_messenger() { + let expected = vec![ + L1MessengerL2ToL1Log { + l2_shard_id: 0u8, + is_service: true, + tx_number_in_block: 5u16, + sender: KNOWN_CODES_STORAGE_ADDRESS, + key: U256::from(11), + value: U256::from(19), + }, + L1MessengerL2ToL1Log { + l2_shard_id: 0u8, + is_service: true, + tx_number_in_block: 7u16, + sender: L1_MESSENGER_ADDRESS, + key: U256::from(19), + value: U256::from(93), + }, + ]; + + let events = vec![ + create_l2_to_l1_log_vm_event( + L1_MESSENGER_ADDRESS, + U256::from(5), + KNOWN_CODES_STORAGE_ADDRESS, + U256::from(11), + U256::from(19), + ), + create_l2_to_l1_log_vm_event( + BOOTLOADER_ADDRESS, + U256::from(6), + L2_BASE_TOKEN_ADDRESS, + U256::from(6), + U256::from(8), + ), + create_l2_to_l1_log_vm_event( + L1_MESSENGER_ADDRESS, + U256::from(7), + L1_MESSENGER_ADDRESS, + U256::from(19), + U256::from(93), + ), + ]; + + let logs = extract_l2tol1logs_from_l1_messenger(&events); + + assert_eq!(expected, logs); + } + + fn create_byte_code_publication_req_value(hash: U256) -> Vec { + let mut hash_arr = [0u8; 32]; + hash.to_big_endian(&mut hash_arr); + + let tokens = vec![/*bytecode hash*/ Token::FixedBytes(hash_arr.to_vec())]; + + ethabi::encode(&tokens) + } + + fn create_bytecode_publication_vm_event(from: Address, hash: U256) -> VmEvent { + let bytecode_publication_event_signature = ethabi::long_signature( + "BytecodeL1PublicationRequested", + &[ethabi::ParamType::FixedBytes(32)], + ); + + VmEvent { + location: (L1BatchNumber(1), 0u32), + address: from, + indexed_topics: 
vec![bytecode_publication_event_signature], + value: create_byte_code_publication_req_value(hash), + } + } + + #[test] + fn test_extract_bytecode_publication_requests_from_l1_messenger() { + let expected = vec![ + L1MessengerBytecodePublicationRequest { + bytecode_hash: u256_to_h256(U256::from(1438284388)), + }, + L1MessengerBytecodePublicationRequest { + bytecode_hash: u256_to_h256(U256::from(1231014388)), + }, + ]; + + let events = vec![ + create_bytecode_publication_vm_event(L2_BASE_TOKEN_ADDRESS, U256::from(1337)), + create_bytecode_publication_vm_event(L1_MESSENGER_ADDRESS, U256::from(1438284388)), + create_bytecode_publication_vm_event(L1_MESSENGER_ADDRESS, U256::from(1231014388)), + ]; + + let logs = extract_bytecode_publication_requests_from_l1_messenger(&events); + + assert_eq!(expected, logs); + } +} diff --git a/core/lib/multivm/src/utils.rs b/core/lib/multivm/src/utils/mod.rs similarity index 96% rename from core/lib/multivm/src/utils.rs rename to core/lib/multivm/src/utils/mod.rs index 4ea613252d0..5d8fba7a2ac 100644 --- a/core/lib/multivm/src/utils.rs +++ b/core/lib/multivm/src/utils/mod.rs @@ -4,8 +4,13 @@ use zksync_types::{ U256, }; +pub use self::deduplicator::{ModifiedSlot, StorageWritesDeduplicator}; use crate::interface::L1BatchEnv; +pub(crate) mod bytecode; +mod deduplicator; +pub(crate) mod events; + /// Calculates the base fee and gas per pubdata for the given L1 gas price. pub fn derive_base_fee_and_gas_per_pubdata( batch_fee_input: BatchFeeInput, @@ -496,3 +501,21 @@ pub fn get_max_batch_base_layer_circuits(version: VmVersion) -> usize { } } } + +/// Holds information about number of cycles used per circuit type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub(crate) struct CircuitCycleStatistic { + pub main_vm_cycles: u32, + pub ram_permutation_cycles: u32, + pub storage_application_cycles: u32, + pub storage_sorter_cycles: u32, + pub code_decommitter_cycles: u32, + pub code_decommitter_sorter_cycles: u32, + pub log_demuxer_cycles: u32, + pub events_sorter_cycles: u32, + pub keccak256_cycles: u32, + pub ecrecover_cycles: u32, + pub sha256_cycles: u32, + pub secp256k1_verify_cycles: u32, + pub transient_storage_checker_cycles: u32, +} diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs index 8fe10f83367..6af546318af 100644 --- a/core/lib/multivm/src/versions/shadow.rs +++ b/core/lib/multivm/src/versions/shadow.rs @@ -5,14 +5,14 @@ use std::{ use anyhow::Context as _; use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_fast, }; diff --git a/core/lib/multivm/src/versions/shared.rs b/core/lib/multivm/src/versions/shared.rs index 97954043f42..fe7570fbb73 100644 --- a/core/lib/multivm/src/versions/shared.rs +++ b/core/lib/multivm/src/versions/shared.rs @@ -2,7 +2,9 @@ use std::collections::{HashMap, HashSet}; -use 
zksync_types::{vm_trace::Call, Address, U256}; +use zksync_types::{Address, U256}; + +use crate::interface::Call; #[derive(Debug, Clone, PartialEq)] pub enum VmTrace { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/events.rs b/core/lib/multivm/src/versions/vm_1_3_2/events.rs index 4870b92d351..7b1f03c8ac9 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/events.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_3_3::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub struct SolidityLikeEvent { pub shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs index a3d5f622286..f8674bbd77e 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs @@ -10,14 +10,11 @@ use zk_evm_1_3_3::{ }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - zk_evm_types::FarCallOpcode, - U256, -}; +use zksync_types::{zk_evm_types::FarCallOpcode, U256}; use crate::{ glue::GlueInto, + interface::{Call, CallType}, vm_1_3_2::{errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory}, }; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs index 9bf5a9b7d22..8ef1e2fb746 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs @@ -4,18 +4,20 @@ use zk_evm_1_3_3::{ }, vm_state::VmLocalState, }; -use zksync_types::vm_trace::Call; use super::utils::{computational_gas_price, print_debug_if_needed}; -use crate::vm_1_3_2::{ - history_recorder::HistoryMode, - memory::SimpleMemory, - oracles::tracer::{ - utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook}, - BootloaderTracer, CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, +use crate::{ + interface::Call, + vm_1_3_2::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook}, + BootloaderTracer, CallTracer, ExecutionEndTracer, PendingRefundTracer, + PubdataSpentTracer, StorageInvocationTracer, + }, + vm_instance::get_vm_hook_params, }, - vm_instance::get_vm_hook_params, }; /// Allows any opcodes, but tells the VM to end the execution once the tx is over. 
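Reviewer note on the new `core/lib/multivm/src/utils/bytecode.rs` introduced above: `compress_to_bytes` emits a 2-byte big-endian dictionary length, then the 8-byte dictionary entries ordered from most to least frequent (ties resolved toward the chunk with the later first occurrence), then one 2-byte dictionary index per original 8-byte chunk. A minimal sketch, not part of the diff, of how the expected vector in the second compression test above decomposes under that layout, assuming I am reading the test input as four chunks (one `0x00…`, two `0x11…`, one `0x22…`):

```rust
// Sketch only: rebuilds the statistics test's `expected_encoding` by hand.
fn expected_statistics_encoding() -> Vec<u8> {
    let mut out = Vec::new();
    out.extend(3u16.to_be_bytes()); // dictionary length: 3 unique chunks
    out.extend(0x1111_1111_1111_1111_u64.to_be_bytes()); // index 0: the most frequent chunk
    out.extend(0x2222_2222_2222_2222_u64.to_be_bytes()); // index 1: tie, later first occurrence
    out.extend(0x0000_0000_0000_0000_u64.to_be_bytes()); // index 2: tie, earlier first occurrence
    // The four original chunks map to dictionary indices 2, 0, 0, 1.
    for index in [2u16, 0, 0, 1] {
        out.extend(index.to_be_bytes());
    }
    out
}
```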
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs index c74e9bb862d..efad575f783 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs @@ -5,20 +5,23 @@ use zk_evm_1_3_3::{ vm_state::VmLocalState, zkevm_opcode_defs::FatPointer, }; -use zksync_types::{vm_trace, U256}; +use zksync_types::U256; -use crate::vm_1_3_2::{ - history_recorder::HistoryMode, - memory::SimpleMemory, - oracles::tracer::{ - utils::{ - gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, - read_pointer, VmHook, +use crate::{ + interface::Call, + vm_1_3_2::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, + read_pointer, VmHook, + }, + CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, }, - CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, + vm_instance::get_vm_hook_params, }, - vm_instance::get_vm_hook_params, }; #[derive(Debug)] @@ -45,7 +48,7 @@ impl TransactionResultTracer { call_tracer, } } - pub fn call_trace(&mut self) -> Option> { + pub fn call_trace(&mut self) -> Option> { self.call_tracer .as_mut() .map(|call_tracer| call_tracer.extract_calls()) diff --git a/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs index 2c16fc6129e..d88ee70991b 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs @@ -2,14 +2,11 @@ use std::collections::HashMap; use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zk_evm_1_3_3::aux_structures::Timestamp; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS, -}; +use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::{ - interface::storage::WriteStorage, + interface::{storage::WriteStorage, VmEvent}, vm_1_3_2::{history_recorder::HistoryMode, oracles::storage::storage_key_of_log, VmInstance}, }; @@ -26,12 +23,12 @@ impl VmInstance { .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * zk_evm_1_3_3::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) .sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| { bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 3bf5ae25e39..f86beb2d400 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -5,21 +5,19 @@ use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::{ - bytecode::{hash_bytecode, CompressedBytecodeInfo}, - 
h256_to_u256, u256_to_h256, -}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, + utils::bytecode, vm_1_3_2::{events::merge_events, VmInstance}, }; @@ -173,7 +171,7 @@ impl VmInterface for Vm { None } else { bytecode_hashes.push(bytecode_hash); - CompressedBytecodeInfo::from_original(bytecode.clone()).ok() + bytecode::compress(bytecode.clone()).ok() } }); let compressed_bytecodes: Vec<_> = filtered_deps.collect(); diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs index a2bc552e9ec..b82282f0a56 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs @@ -12,14 +12,12 @@ use zk_evm_1_3_3::{ }; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::tx_execution_info::TxExecutionStatus, - vm_trace::Call, - L1BatchNumber, VmEvent, H256, U256, + L1BatchNumber, H256, U256, }; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, Call, TxExecutionStatus, VmEvent, VmExecutionLogs}, versions::shared::{VmExecutionTrace, VmTrace}, vm_1_3_2::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index aef5b1dc78a..d1acdf7708e 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -19,14 +19,12 @@ use zksync_types::{ BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::{ - address_to_u256, - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, h256_to_u256, - misc::ceil_div, + address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, }; use crate::{ - interface::{storage::WriteStorage, L1BatchEnv}, + interface::{storage::WriteStorage, CompressedBytecodeInfo, L1BatchEnv}, + utils::bytecode, vm_1_3_2::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, @@ -448,7 +446,7 @@ pub fn get_bootloader_memory( let mut total_compressed_len_words = 0; for i in compressed_bytecodes.iter() { - total_compressed_len_words += i.encode_call().len() / 32; + total_compressed_len_words += bytecode::encode_call(i).len() / 32; } let memory_for_current_tx = get_bootloader_memory_for_tx( @@ -521,20 +519,13 @@ pub fn push_raw_transaction_to_bootloader_memory = compressed_bytecodes .into_iter() - .flat_map(|x| x.encode_call()) + .flat_map(|x| bytecode::encode_call(&x)) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs index 9a1a727aab3..22d7b2814cf 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs +++ 
b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_1_4_1::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs index f82f34a7b0e..4c6b6d3d061 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/tx.rs @@ -1,7 +1,6 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_1_4_1::types::internals::TransactionData; +use crate::{interface::CompressedBytecodeInfo, vm_1_4_1::types::internals::TransactionData}; /// Information about tx necessary for execution in bootloader. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs index d203542b16b..393eb043cb7 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_1_4_1::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs index cc03b53aa53..6e0e31d461d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_1_4_1::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/logs.rs 
b/core/lib/multivm/src/versions/vm_1_4_1/implementation/logs.rs index 3a2321d4d0e..4d365a8535c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/logs.rs @@ -1,13 +1,10 @@ use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, - l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, - VmEvent, -}; +use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, VmEvent, VmExecutionLogs}, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_1::{old_vm::utils::precompile_calls_count_after_timestamp, utils::logs, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs index dfdd42be718..71ae20d4406 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_1_4_1::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs index fc97b6f4a41..ffa4b4d50b8 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_4_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub(crate) struct SolidityLikeEvent { pub(crate) shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs index b93eb88a21b..a32328bbc18 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_4_1::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. 
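Since `CircuitCycleStatistic` now lives in `crate::utils` and derives `Default` (see the `utils/mod.rs` hunk earlier in this diff), the per-version circuit tracers can drop the bespoke constructor; the `circuits_tracer.rs` hunks switch to `Default` accordingly. A rough sketch of the resulting usage, with placeholder cycle costs only (the real per-opcode costs come from `circuits_capacity.rs`):

```rust
// Sketch, not part of the diff: counters start zeroed via `Default` and are bumped
// as the tracer observes opcodes. The literal costs below are illustrative.
let mut statistics = CircuitCycleStatistic::default();
statistics.main_vm_cycles += 1;         // every executed opcode
statistics.ram_permutation_cycles += 2; // placeholder cost for a memory-heavy opcode
statistics.storage_sorter_cycles += 1;  // placeholder cost for a storage log
```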
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs index 43a41897fdd..04842ab7bb6 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_4_1::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::CircuitCycleStatistic, vm_1_4_1::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -137,7 +137,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs index d32691ebdfb..238804bc7fc 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs @@ -5,23 +5,20 @@ use zk_evm_1_4_1::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{ - event::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, extract_long_l2_to_l1_messages, L1MessengerL2ToL1Log, - }, - writes::StateDiffRecord, - AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS, -}; +use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - L1BatchEnv, VmExecutionMode, + L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + }, vm_1_4_1::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, @@ -81,8 +78,7 @@ impl PubdataTracer { &self.l1_batch_env, Timestamp(0), ); - - extract_long_l2_to_l1_messages(&all_generated_events) + VmEvent::extract_long_l2_to_l1_messages(&all_generated_events) } // Packs part of L1 Messenger total pubdata that corresponds to diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs index 6de4b170eb1..2586d8d7f87 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs @@ -8,18 +8,14 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - l2_to_l1_log::L2ToL1Log, - L1BatchNumber, H256, U256, -}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, H256, U256}; use zksync_utils::{bytecode::bytecode_len_in_bytes, 
ceil_div_u256, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::TracerExecutionStatus, - L1BatchEnv, Refunds, + L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_4_1::DynTracer, vm_1_4_1::{ @@ -345,12 +341,12 @@ pub(crate) fn pubdata_published( .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) .sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) .sum(); diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs index 38489a6c8e9..d07732ae435 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs @@ -1,7 +1,6 @@ -use zksync_types::{ - event::L1MessengerL2ToL1Log, - writes::{compress_state_diffs, StateDiffRecord}, -}; +use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; + +use crate::utils::events::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/utils/logs.rs b/core/lib/multivm/src/versions/vm_1_4_1/utils/logs.rs index 48a1b49a460..41a13eeb118 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/utils/logs.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/utils/logs.rs @@ -1,9 +1,9 @@ use zk_evm_1_4_1::aux_structures::{LogQuery, Timestamp}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind, VmEvent}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, L1BatchEnv}, + interface::{storage::WriteStorage, L1BatchEnv, VmEvent}, vm_1_4_1::{ old_vm::{events::merge_events, history_recorder::HistoryMode}, types::internals::ZkSyncVmState, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index e37a8757ee1..8f20e8654d7 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -1,19 +1,19 @@ use circuit_sequencer_api_1_4_1::sort_storage_access::sort_storage_access_queries; use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_1::{ bootloader_state::BootloaderState, old_vm::events::merge_events, diff --git 
a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs index 059d2a93e27..e692c8a2640 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_1_4_2::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs index 2ec99c34ec3..f2c177ee684 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/tx.rs @@ -1,7 +1,6 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_1_4_2::types::internals::TransactionData; +use crate::{interface::CompressedBytecodeInfo, vm_1_4_2::types::internals::TransactionData}; /// Information about tx necessary for execution in bootloader. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 0da7502186b..600ab83bf48 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_1_4_2::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs index a4bd40110f2..54e69289521 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_1_4_2::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| 
CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/logs.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/logs.rs index 04acc26467d..20fb2124af7 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/logs.rs @@ -1,13 +1,10 @@ use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, - l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, - VmEvent, -}; +use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, VmEvent, VmExecutionLogs}, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_2::{old_vm::utils::precompile_calls_count_after_timestamp, utils::logs, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index 4d1675227fb..92a2eaa650c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_1_4_2::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs index fc97b6f4a41..ffa4b4d50b8 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_4_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub(crate) struct SolidityLikeEvent { pub(crate) shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs index 8cabd911cc6..974e0757721 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_4_2::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. 
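The refund and pubdata hunks in this diff (`pubdata_utils.rs` for vm_1_3_2 and `refunds.rs` for vm_1_4_1/vm_1_4_2) all follow the same pattern: the free helpers from `zksync_types::event` become associated functions on the `VmEvent` type exposed by `crate::interface`. A condensed sketch of that pattern, assuming `events`, `bytecode_len_in_bytes` and the pubdata constants are in scope as they are in those hunks:

```rust
// Sketch, not part of the diff: the two event-derived pubdata terms after the move.
let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events)
    .iter()
    .map(|message| message.len() as u32)
    .sum();
let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events)
    .iter()
    .map(|hash| bytecode_len_in_bytes(*hash) as u32 + PUBLISH_BYTECODE_OVERHEAD)
    .sum();
// Together with the per-log L1_MESSAGE_PUBDATA_BYTES cost (and, in these VM versions,
// the storage-write pubdata not shown here), these terms make up the pubdata charged.
```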
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs index b781ee186fd..04b6e532b2b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_4_1::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::CircuitCycleStatistic, vm_1_4_2::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -138,7 +138,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs index fab790ec572..ffe65b5e050 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs @@ -5,23 +5,20 @@ use zk_evm_1_4_1::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{ - event::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, extract_long_l2_to_l1_messages, L1MessengerL2ToL1Log, - }, - writes::StateDiffRecord, - AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS, -}; +use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - L1BatchEnv, VmExecutionMode, + L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + }, vm_1_4_2::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, @@ -101,7 +98,7 @@ impl PubdataTracer { Timestamp(0), ); - extract_long_l2_to_l1_messages(&all_generated_events) + VmEvent::extract_long_l2_to_l1_messages(&all_generated_events) } // Packs part of L1 Messenger total pubdata that corresponds to diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs index 6af3a34376c..0da5736bf95 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs @@ -8,18 +8,14 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - l2_to_l1_log::L2ToL1Log, - L1BatchNumber, H256, U256, -}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, H256, U256}; use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, 
u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::TracerExecutionStatus, - L1BatchEnv, Refunds, + L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_4_1::DynTracer, vm_1_4_2::{ @@ -345,12 +341,12 @@ pub(crate) fn pubdata_published( .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) .sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) .sum(); diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs index 38489a6c8e9..d07732ae435 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs @@ -1,7 +1,6 @@ -use zksync_types::{ - event::L1MessengerL2ToL1Log, - writes::{compress_state_diffs, StateDiffRecord}, -}; +use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; + +use crate::utils::events::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/utils/logs.rs b/core/lib/multivm/src/versions/vm_1_4_2/utils/logs.rs index 48832f0ecf2..003a806625f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/utils/logs.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/utils/logs.rs @@ -1,9 +1,9 @@ use zk_evm_1_4_1::aux_structures::{LogQuery, Timestamp}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind, VmEvent}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, L1BatchEnv}, + interface::{storage::WriteStorage, L1BatchEnv, VmEvent}, vm_1_4_2::{ old_vm::{events::merge_events, history_recorder::HistoryMode}, types::internals::ZkSyncVmState, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index 434e8ea1c42..e612885086d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -1,19 +1,19 @@ use circuit_sequencer_api_1_4_2::sort_storage_access::sort_storage_access_queries; use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_2::{ bootloader_state::BootloaderState, old_vm::events::merge_events, diff --git 
a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs index db13d2aace5..8a605978a1e 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_boojum_integration::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs index 3030427281b..7ae8f9612cd 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_boojum_integration::types::internals::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, vm_boojum_integration::types::internals::TransactionData, +}; /// Information about tx necessary for execution in bootloader. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index 77a8ed2ce9b..1a1c620c2b2 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_boojum_integration::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs index 00ff620727b..b7e702b7a95 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_boojum_integration::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| 
x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs index fa460089302..55630e5457d 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/logs.rs @@ -1,13 +1,10 @@ use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, - l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, - VmEvent, -}; +use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, VmEvent, VmExecutionLogs}, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_boojum_integration::{ old_vm::utils::precompile_calls_count_after_timestamp, utils::logs, vm::Vm, }, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index fe5b8abd683..46f8bc2f400 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_boojum_integration::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs index eed8fee4ac8..1e95d0bc8f3 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_4_0::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub(crate) struct SolidityLikeEvent { pub(crate) shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs index fedbfd47c8e..a9e5e17e797 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_4_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use 
crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs index 9bcf2a3783f..c92f261d9cb 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_4_0::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_4_0::DynTracer, + utils::CircuitCycleStatistic, vm_boojum_integration::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -137,7 +137,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs index 6727dfd54e8..326a5789612 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs @@ -5,23 +5,20 @@ use zk_evm_1_4_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{ - event::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, extract_long_l2_to_l1_messages, L1MessengerL2ToL1Log, - }, - writes::StateDiffRecord, - AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS, -}; +use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - L1BatchEnv, VmExecutionMode, + L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_4_0::DynTracer, + utils::events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + }, vm_boojum_integration::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, @@ -79,8 +76,7 @@ impl PubdataTracer { &self.l1_batch_env, Timestamp(0), ); - - extract_long_l2_to_l1_messages(&all_generated_events) + VmEvent::extract_long_l2_to_l1_messages(&all_generated_events) } // Packs part of L1 Messenger total pubdata that corresponds to diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs index 5f2ceb105b9..ffbb1d80a80 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs @@ -7,18 +7,14 @@ use zk_evm_1_4_0::{ 
vm_state::VmLocalState, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - l2_to_l1_log::L2ToL1Log, - L1BatchNumber, U256, -}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, U256}; use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::TracerExecutionStatus, - L1BatchEnv, Refunds, + L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_4_0::DynTracer, vm_boojum_integration::{ @@ -336,12 +332,12 @@ pub(crate) fn pubdata_published( .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * zk_evm_1_4_0::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) .sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) .sum(); diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs index 5451201c5bc..9df9009831f 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs @@ -1,7 +1,6 @@ -use zksync_types::{ - event::L1MessengerL2ToL1Log, - writes::{compress_state_diffs, StateDiffRecord}, -}; +use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; + +use crate::utils::events::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs b/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs index f26cea2f2f5..680f97e2558 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/utils/logs.rs @@ -1,10 +1,10 @@ use zk_evm_1_3_3::aux_structures::LogQuery; use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind, VmEvent}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, L1BatchEnv}, + interface::{storage::WriteStorage, L1BatchEnv, VmEvent}, vm_boojum_integration::{ old_vm::{events::merge_events, history_recorder::HistoryMode}, types::internals::ZkSyncVmState, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 1e9f73be598..0a9e1286507 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -1,19 +1,19 @@ use circuit_sequencer_api_1_4_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, 
CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_boojum_integration::{ bootloader_state::BootloaderState, old_vm::events::merge_events, diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs index ae1c70db586..ce37636d2cd 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -2,7 +2,6 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{ l2_block::BootloaderL2Block, @@ -11,7 +10,7 @@ use super::{ BootloaderStateSnapshot, }; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, versions::vm_fast::{pubdata::PubdataInput, transaction_data::TransactionData}, vm_latest::{constants::TX_DESCRIPTION_OFFSET, utils::l2_blocks::assert_next_block}, }; diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs index 36c1d65ddd3..dc0706561d5 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::versions::vm_fast::transaction_data::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, versions::vm_fast::transaction_data::TransactionData, +}; /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index 21259e366d1..f280f56a828 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::{l2_block::BootloaderL2Block, tx::BootloaderTx}; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, versions::vm_fast::pubdata::PubdataInput, vm_latest::constants::{ BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, @@ -19,7 +20,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_fast/bytecode.rs b/core/lib/multivm/src/versions/vm_fast/bytecode.rs index 3507b84840e..02122e5f29c 100644 --- a/core/lib/multivm/src/versions/vm_fast/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_fast/bytecode.rs @@ -1,12 +1,12 @@ use itertools::Itertools; use zksync_types::H256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - h256_to_u256, -}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use super::Vm; -use crate::interface::storage::ReadStorage; +use crate::{ + interface::{storage::ReadStorage, CompressedBytecodeInfo}, + utils::bytecode, +}; impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. 
@@ -38,15 +38,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !is_bytecode_known(hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs index b39d501b655..798a1e12bdd 100644 --- a/core/lib/multivm/src/versions/vm_fast/events.rs +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -1,7 +1,9 @@ use vm2::Event; -use zksync_types::{L1BatchNumber, VmEvent, H256}; +use zksync_types::{L1BatchNumber, H256}; use zksync_utils::h256_to_account_address; +use crate::interface::VmEvent; + #[derive(Clone)] struct EventAccumulator { pub(crate) shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs index 38489a6c8e9..d07732ae435 100644 --- a/core/lib/multivm/src/versions/vm_fast/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_fast/pubdata.rs @@ -1,7 +1,6 @@ -use zksync_types::{ - event::L1MessengerL2ToL1Log, - writes::{compress_state_diffs, StateDiffRecord}, -}; +use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; + +use crate::utils::events::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs index 01fc8dc07d0..56c20e785ee 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -1,8 +1,6 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface}, + utils::bytecode, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, utils::read_test_contract, @@ -22,7 +20,7 @@ fn test_bytecode_publishing() { let counter = read_test_contract(); let account = &mut vm.rich_accounts[0]; - let compressed_bytecode = compress_bytecode(&counter).unwrap(); + let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); vm.vm.push_transaction(tx); @@ -32,7 +30,7 @@ fn test_bytecode_publishing() { vm.vm.execute(VmExecutionMode::Batch); let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); + let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); assert!( long_messages.contains(&compressed_bytecode), "Bytecode not published" diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 033a7b2658f..f1411497c24 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -4,13 +4,13 @@ use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, Execute, ExecuteTransactionCommon, U256, }; use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + utils::StorageWritesDeduplicator, vm_fast::{ tests::{ tester::{TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 56d98a537bf..a9b2fcd605c 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -7,10 +7,6 @@ use vm2::{ use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; use zksync_contracts::SystemContractCode; use zksync_types::{ - event::{ - extract_l2tol1logs_from_l1_messenger, extract_long_l2_to_l1_messages, - L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE, - }, l1::is_l1_tx_type, l2_to_l1_log::UserL2ToL1Log, utils::key_for_eth_balance, @@ -34,12 +30,13 @@ use super::{ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, - TxRevertReason, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, - VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, - VmRevertReason, + storage::ReadStorage, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, + Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, + VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, VmRevertReason, }, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory}, events::merge_events, @@ -205,7 +202,7 @@ impl Vm { event.address == L1_MESSENGER_ADDRESS && !event.indexed_topics.is_empty() && event.indexed_topics[0] - == *L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE + == VmEvent::L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE }) .map(|event| { let hash = U256::from_big_endian(&event.value[..32]); @@ -219,7 +216,7 @@ impl Vm { let pubdata_input = PubdataInput { user_logs: extract_l2tol1logs_from_l1_messenger(&events), - l2_to_l1_messages: extract_long_l2_to_l1_messages(&events), + l2_to_l1_messages: VmEvent::extract_long_l2_to_l1_messages(&events), published_bytecodes, state_diffs: self .compute_state_diffs() @@ -533,9 +530,7 @@ impl VmInterface 
for Vm { self.bootloader_state.bootloader_memory() } - fn get_last_tx_compressed_bytecodes( - &self, - ) -> Vec { + fn get_last_tx_compressed_bytecodes(&self) -> Vec { self.bootloader_state.get_last_tx_compressed_bytecodes() } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index a3f59937d57..f15199a74f8 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -2,11 +2,10 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_latest::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs index 8f14976be34..2c63db7e435 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs @@ -1,7 +1,6 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_latest::types::internals::TransactionData; +use crate::{interface::CompressedBytecodeInfo, vm_latest::types::internals::TransactionData}; /// Information about tx necessary for execution in bootloader. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index db4c834fbc7..4931082d6da 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::{ethabi, U256}; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_latest::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -22,7 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs index 30a428bb834..d0a41ce69f4 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_latest::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) 
.filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs b/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs index 4417bf7a3ff..b339cdff301 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs @@ -1,13 +1,10 @@ use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, - l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, - VmEvent, -}; +use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, VmEvent, VmExecutionLogs}, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{old_vm::utils::precompile_calls_count_after_timestamp, utils::logs, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index ed61962648a..34c1e1f81da 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -1,8 +1,8 @@ use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_types::{circuit::CircuitStatistic, U256}; +use zksync_types::U256; use crate::{ - interface::{storage::WriteStorage, VmExecutionStatistics, VmMemoryMetrics}, + interface::{storage::WriteStorage, CircuitStatistic, VmExecutionStatistics, VmMemoryMetrics}, vm_latest::vm::Vm, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs index 14fcb870284..fd6f393155d 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_5_0::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub(crate) struct SolidityLikeEvent { pub(crate) shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs index a0c10addff9..ef56aafe4cb 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs @@ -1,8 +1,6 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface}, + utils::bytecode, vm_latest::{ tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, @@ -25,7 +23,7 @@ fn test_bytecode_publishing() { let counter = read_test_contract(); let account = &mut 
vm.rich_accounts[0]; - let compressed_bytecode = compress_bytecode(&counter).unwrap(); + let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); vm.vm.push_transaction(tx); @@ -35,7 +33,7 @@ fn test_bytecode_publishing() { vm.vm.execute(VmExecutionMode::Batch); let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); + let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); assert!( long_messages.contains(&compressed_bytecode), "Bytecode not published" diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 6b3be989fb3..4d42bb96cc9 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -5,13 +5,13 @@ use zksync_test_account::Account; use zksync_types::{ get_code_key, get_known_code_key, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, Execute, ExecuteTransactionCommon, K256PrivateKey, U256, }; use zksync_utils::u256_to_h256; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + utils::StorageWritesDeduplicator, vm_latest::{ tests::{ tester::{TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs index a570d3bd99b..0977a323d19 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_capacity.rs @@ -1,5 +1,6 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_types::circuit::{CircuitCycleStatistic, CircuitStatistic}; + +use crate::{interface::CircuitStatistic, utils::CircuitCycleStatistic}; // "Rich addressing" opcodes are opcodes that can write their return value/read the input onto the stack // and so take 1-2 RAM permutations more than an average opcode. 
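// Editor's note (illustrative sketch, not part of the patch): the hunks above swap the free
// extraction functions from `zksync_types::event` (e.g. `extract_long_l2_to_l1_messages`,
// `extract_published_bytecodes`) for associated items on `crate::interface::VmEvent`, so call
// sites are qualified with the type: `VmEvent::extract_long_l2_to_l1_messages(&events)`. The
// sketch below only demonstrates that call-site shape; the event layout and the "extraction"
// body are simplified assumptions, not the real filtering logic.

#[derive(Debug, Clone)]
pub struct VmEvent {
    pub value: Vec<u8>,
}

impl VmEvent {
    // Stand-in for the associated helper: the real code filters L1-messenger events and decodes
    // the long L2-to-L1 messages; here it simply copies every payload.
    pub fn extract_long_l2_to_l1_messages(events: &[VmEvent]) -> Vec<Vec<u8>> {
        events.iter().map(|event| event.value.clone()).collect()
    }
}

fn main() {
    let events = vec![VmEvent {
        value: b"long message".to_vec(),
    }];
    // Call sites now name the type instead of importing a free function.
    let messages = VmEvent::extract_long_l2_to_l1_messages(&events);
    assert_eq!(messages, vec![b"long message".to_vec()]);
}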
diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs index b3a0e2480dc..6a47f3ae2fb 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/circuits_tracer.rs @@ -5,7 +5,6 @@ use zk_evm_1_5_0::{ zk_evm_abstractions::precompiles::PrecompileAddress, zkevm_opcode_defs::{LogOpcode, Opcode, UMAOpcode}, }; -use zksync_types::circuit::CircuitCycleStatistic; use super::circuits_capacity::*; use crate::{ @@ -14,6 +13,7 @@ use crate::{ tracer::TracerExecutionStatus, }, tracers::dynamic::vm_1_5_0::DynTracer, + utils::CircuitCycleStatistic, vm_latest::{ bootloader_state::BootloaderState, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, @@ -162,7 +162,7 @@ impl VmTracer for CircuitsTracer { impl CircuitsTracer { pub(crate) fn new() -> Self { Self { - statistics: CircuitCycleStatistic::new(), + statistics: CircuitCycleStatistic::default(), last_decommitment_history_entry_checked: None, last_written_keys_history_entry_checked: None, last_read_keys_history_entry_checked: None, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index edd244a2d08..32f3984834c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -5,23 +5,20 @@ use zk_evm_1_5_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{ - event::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, extract_long_l2_to_l1_messages, L1MessengerL2ToL1Log, - }, - writes::StateDiffRecord, - AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS, -}; +use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - L1BatchEnv, VmExecutionMode, + L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_5_0::DynTracer, + utils::events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + }, vm_latest::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, @@ -107,8 +104,7 @@ impl PubdataTracer { &self.l1_batch_env, Timestamp(0), ); - - extract_long_l2_to_l1_messages(&all_generated_events) + VmEvent::extract_long_l2_to_l1_messages(&all_generated_events) } // Packs part of L1 Messenger total pubdata that corresponds to diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs index 38489a6c8e9..d07732ae435 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs @@ -1,7 +1,6 @@ -use zksync_types::{ - event::L1MessengerL2ToL1Log, - writes::{compress_state_diffs, StateDiffRecord}, -}; +use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; + +use crate::utils::events::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_latest/utils/logs.rs 
b/core/lib/multivm/src/versions/vm_latest/utils/logs.rs index 67d202657f6..dfa23685dcd 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/logs.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/logs.rs @@ -1,9 +1,9 @@ use zk_evm_1_5_0::aux_structures::{LogQuery, Timestamp}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind, VmEvent}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, StorageLogKind}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, L1BatchEnv}, + interface::{storage::WriteStorage, L1BatchEnv, VmEvent}, vm_latest::{ old_vm::{events::merge_events, history_recorder::HistoryMode}, types::internals::ZkSyncVmState, diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index a5e7d8ef8be..1c85133e117 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,20 +1,20 @@ use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ - event::extract_l2tol1logs_from_l1_messenger, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, + utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ bootloader_state::BootloaderState, old_vm::{events::merge_events, history_recorder::HistoryEnabled}, diff --git a/core/lib/multivm/src/versions/vm_m5/events.rs b/core/lib/multivm/src/versions/vm_m5/events.rs index 146c938021a..a444ad37feb 100644 --- a/core/lib/multivm/src/versions/vm_m5/events.rs +++ b/core/lib/multivm/src/versions/vm_m5/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_3_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub struct SolidityLikeEvent { pub shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs index 0c580554d99..1fd8c246093 100644 --- a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs @@ -3,14 +3,12 @@ use std::collections::HashMap; use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use itertools::Itertools; use zk_evm_1_3_1::aux_structures::{LogQuery, Timestamp}; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS, -}; +use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::{ glue::GlueInto, + interface::VmEvent, vm_m5::{ oracles::storage::storage_key_of_log, storage::Storage, 
utils::collect_storage_log_queries_after_timestamp, vm_instance::VmInstance, @@ -30,12 +28,12 @@ impl VmInstance { .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * zk_evm_1_3_1::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) .sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| { bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index a0d6ea39cea..8f232c95b38 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -6,15 +6,15 @@ use zksync_types::{ vm::VmVersion, Transaction, }; -use zksync_utils::{bytecode::CompressedBytecodeInfo, h256_to_u256, u256_to_h256}; +use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, + VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ events::merge_events, diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index b97b5e047c6..f0a94d0c3b6 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -11,13 +11,12 @@ use zk_evm_1_3_1::{ }; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::tx_execution_info::TxExecutionStatus, - L1BatchNumber, VmEvent, U256, + L1BatchNumber, U256, }; use crate::{ glue::GlueInto, - interface::VmExecutionLogs, + interface::{TxExecutionStatus, VmEvent, VmExecutionLogs}, versions::shared::VmExecutionTrace, vm_m5::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_m6/events.rs b/core/lib/multivm/src/versions/vm_m6/events.rs index 146c938021a..a444ad37feb 100644 --- a/core/lib/multivm/src/versions/vm_m6/events.rs +++ b/core/lib/multivm/src/versions/vm_m6/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_3_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub struct SolidityLikeEvent { pub shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs index ed47ace7b89..e4906c5ede2 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs @@ -10,13 +10,11 @@ use zk_evm_1_3_1::{ }, }; use 
zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::{ - vm_trace::{Call, CallType}, - U256, -}; +use zksync_types::U256; use crate::{ glue::GlueInto, + interface::{Call, CallType}, vm_m6::{errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory}, }; diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs index 53e5e4ee2f6..98f21732b68 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs @@ -4,18 +4,20 @@ use zk_evm_1_3_1::{ }, vm_state::VmLocalState, }; -use zksync_types::vm_trace::Call; use super::utils::{computational_gas_price, print_debug_if_needed}; -use crate::vm_m6::{ - history_recorder::HistoryMode, - memory::SimpleMemory, - oracles::tracer::{ - utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook}, - BootloaderTracer, CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, +use crate::{ + interface::Call, + vm_m6::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook}, + BootloaderTracer, CallTracer, ExecutionEndTracer, PendingRefundTracer, + PubdataSpentTracer, StorageInvocationTracer, + }, + vm_instance::get_vm_hook_params, }, - vm_instance::get_vm_hook_params, }; /// Allows any opcodes, but tells the VM to end the execution once the tx is over. diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs index 2ecf484b60a..176dc25bc69 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs @@ -5,20 +5,23 @@ use zk_evm_1_3_1::{ vm_state::VmLocalState, zkevm_opcode_defs::FatPointer, }; -use zksync_types::{vm_trace, U256}; +use zksync_types::U256; -use crate::vm_m6::{ - history_recorder::HistoryMode, - memory::SimpleMemory, - oracles::tracer::{ - utils::{ - gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, - read_pointer, VmHook, +use crate::{ + interface::Call, + vm_m6::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, + read_pointer, VmHook, + }, + CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, }, - CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, + vm_instance::get_vm_hook_params, }, - vm_instance::get_vm_hook_params, }; #[derive(Debug)] @@ -45,7 +48,7 @@ impl TransactionResultTracer { call_tracer, } } - pub fn call_trace(&mut self) -> Option> { + pub fn call_trace(&mut self) -> Option> { self.call_tracer .as_mut() .map(|call_tracer| call_tracer.extract_calls()) diff --git a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs index 952ad89c74e..196883e1c93 100644 --- a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs @@ -3,14 +3,12 @@ use std::collections::HashMap; use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use itertools::Itertools; use zk_evm_1_3_1::aux_structures::{LogQuery, Timestamp}; -use zksync_types::{ - 
event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS, -}; +use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::{ glue::GlueInto, + interface::VmEvent, vm_m6::{ history_recorder::HistoryMode, oracles::storage::storage_key_of_log, storage::Storage, utils::collect_storage_log_queries_after_timestamp, VmInstance, @@ -30,12 +28,12 @@ impl VmInstance { .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * zk_evm_1_3_1::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) .sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| { bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 3626378ce59..b59561319f5 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -8,20 +8,18 @@ use zksync_types::{ vm::VmVersion, Transaction, }; -use zksync_utils::{ - bytecode::{hash_bytecode, CompressedBytecodeInfo}, - h256_to_u256, u256_to_h256, -}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, + VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, + utils::bytecode, vm_m6::{events::merge_events, storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, }; @@ -216,7 +214,7 @@ impl VmInterface for Vm { None } else { bytecode_hashes.push(bytecode_hash); - CompressedBytecodeInfo::from_original(bytecode.clone()).ok() + bytecode::compress(bytecode.clone()).ok() } }); let compressed_bytecodes: Vec<_> = filtered_deps.collect(); diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index 5d6a9bf9149..bc60530b6f5 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -11,14 +11,12 @@ use zk_evm_1_3_1::{ }; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::tx_execution_info::TxExecutionStatus, - vm_trace::Call, - L1BatchNumber, VmEvent, H256, U256, + L1BatchNumber, H256, U256, }; use crate::{ glue::GlueInto, - interface::VmExecutionLogs, + interface::{Call, TxExecutionStatus, VmEvent, VmExecutionLogs}, versions::shared::{VmExecutionTrace, VmTrace}, vm_m6::{ bootloader_state::BootloaderState, diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 
4409a7a8958..7a9fbb73fe4 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -18,14 +18,12 @@ use zksync_types::{ L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::{ - address_to_u256, - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, h256_to_u256, - misc::ceil_div, + address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, }; use crate::{ - interface::L1BatchEnv, + interface::{CompressedBytecodeInfo, L1BatchEnv}, + utils::bytecode, vm_m6::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, @@ -482,7 +480,7 @@ fn get_bootloader_memory_v1( let mut total_compressed_len = 0; for i in compressed_bytecodes.iter() { - total_compressed_len += i.encode_call().len() + total_compressed_len += bytecode::encode_call(i).len() } let memory_for_current_tx = get_bootloader_memory_for_tx( @@ -527,7 +525,7 @@ fn get_bootloader_memory_v2( let mut total_compressed_len_words = 0; for i in compressed_bytecodes.iter() { - total_compressed_len_words += i.encode_call().len() / 32; + total_compressed_len_words += bytecode::encode_call(i).len() / 32; } let memory_for_current_tx = get_bootloader_memory_for_tx( @@ -624,13 +622,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( if vm.is_bytecode_exists(&hash_bytecode(bytecode)) { return None; } - - compress_bytecode(bytecode) - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: bytecode.clone(), - compressed, - }) + bytecode::compress(bytecode.clone()).ok() }) .collect() }); @@ -701,20 +693,14 @@ fn push_raw_transaction_to_bootloader_memory_v2( if vm.is_bytecode_exists(&hash_bytecode(bytecode)) { return None; } - - compress_bytecode(bytecode) - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: bytecode.clone(), - compressed, - }) + bytecode::compress(bytecode.clone()).ok() }) .collect() }); let compressed_bytecodes_encoding_len_words = compressed_bytecodes .iter() .map(|bytecode| { - let encoding_length_bytes = bytecode.encode_call().len(); + let encoding_length_bytes = bytecode::encode_call(bytecode).len(); assert!( encoding_length_bytes % 32 == 0, "ABI encoding of bytecode is not 32-byte aligned" @@ -830,7 +816,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( let memory_addition: Vec<_> = compressed_bytecodes .into_iter() - .flat_map(|x| x.encode_call()) + .flat_map(|x| bytecode::encode_call(&x)) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs index d436a2adb0a..12aab3c7364 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs @@ -1,11 +1,10 @@ use std::cmp::Ordering; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_refunds_enhancement::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs index e7f833e5bad..b4581d066d1 100644 --- 
a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_refunds_enhancement::types::internals::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, vm_refunds_enhancement::types::internals::TransactionData, +}; /// Information about tx necessary for execution in bootloader. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index f47b95d6cbf..7bd488f90a9 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::U256; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_refunds_enhancement::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -20,7 +21,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs index b3f578302c0..2289cca7a47 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, }, + utils::bytecode, vm_refunds_enhancement::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs index 91f502eafd7..a1d9221f1f1 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs @@ -1,12 +1,9 @@ use zk_evm_1_3_3::aux_structures::Timestamp; -use zksync_types::{ - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - VmEvent, -}; +use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; use crate::{ glue::GlueInto, - 
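These hunks replace the `CompressedBytecodeInfo::encode_call()` method and the `compress_bytecode(...)` + manual struct construction with shared helpers in `crate::utils::bytecode`. A condensed sketch of the call-site pattern after the change (not a verbatim excerpt; names come from the hunks above, and `bytecode::compress` is assumed to return a `Result` whose failures are simply skipped):

// Compress only bytecodes the VM does not know yet; compression failures are dropped.
let compressed_bytecodes: Vec<CompressedBytecodeInfo> = deps
    .iter()
    .filter_map(|dep| {
        if vm.is_bytecode_exists(&hash_bytecode(dep)) {
            return None; // already known on-chain, nothing to publish
        }
        bytecode::compress(dep.clone()).ok()
    })
    .collect();

// `bytecode::encode_call` produces a 32-byte-aligned ABI encoding, so the word count
// is a plain integer division.
let total_compressed_len_words: usize = compressed_bytecodes
    .iter()
    .map(|info| bytecode::encode_call(info).len() / 32)
    .sum();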
interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, VmEvent, VmExecutionLogs}, vm_refunds_enhancement::{ old_vm::{events::merge_events, utils::precompile_calls_count_after_timestamp}, vm::Vm, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs index de918e06914..52a4ed8a287 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_3_3::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub(crate) struct SolidityLikeEvent { pub(crate) shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs index cb56acd7e43..0dbf5a3cbf4 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs @@ -5,18 +5,14 @@ use zk_evm_1_3_3::{ vm_state::VmLocalState, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - l2_to_l1_log::L2ToL1Log, - L1BatchNumber, U256, -}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, U256}; use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, tracer::TracerExecutionStatus, - L1BatchEnv, Refunds, + L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_3_3::DynTracer, vm_refunds_enhancement::{ @@ -332,12 +328,12 @@ pub(crate) fn pubdata_published( .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * zk_evm_1_3_3::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) .sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) .sum(); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 59938788466..821a8144249 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,14 +1,13 @@ use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, 
BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs index 685b1821fd5..562d7451371 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs @@ -1,11 +1,10 @@ use std::cmp::Ordering; use zksync_types::{L2ChainId, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, vm_virtual_blocks::{ bootloader_state::{ l2_block::BootloaderL2Block, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs index 067d62a9fdd..e37320cf5ac 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs @@ -1,7 +1,8 @@ use zksync_types::{L2ChainId, H256, U256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_virtual_blocks::types::internals::TransactionData; +use crate::{ + interface::CompressedBytecodeInfo, vm_virtual_blocks::types::internals::TransactionData, +}; /// Information about tx necessary for execution in bootloader. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 9a682da3a5a..2ccedcc6aa9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -1,9 +1,10 @@ use zksync_types::U256; -use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; use crate::{ - interface::{BootloaderMemory, TxExecutionMode}, + interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + utils::bytecode, vm_virtual_blocks::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -20,7 +21,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( ) -> Vec { let memory_addition: Vec<_> = compressed_bytecodes .iter() - .flat_map(|x| x.encode_call()) + .flat_map(bytecode::encode_call) .collect(); bytes_to_be_words(memory_addition) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs index 7c1b15027b4..96a30d50805 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs @@ -1,15 +1,13 @@ use itertools::Itertools; use zksync_types::U256; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, - bytes_to_be_words, -}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - VmInterface, + CompressedBytecodeInfo, VmInterface, 
}, + utils::bytecode, vm_virtual_blocks::Vm, HistoryMode, }; @@ -50,15 +48,6 @@ pub(crate) fn compress_bytecodes( .dedup_by(|x, y| x.1 == y.1) .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) .sorted_by_key(|(idx, _dep)| *idx) - .filter_map(|(_idx, dep)| { - let compressed_bytecode = compress_bytecode(dep); - - compressed_bytecode - .ok() - .map(|compressed| CompressedBytecodeInfo { - original: dep.clone(), - compressed, - }) - }) + .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs index 8b60953c834..4479de77b6d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs @@ -1,12 +1,9 @@ use zk_evm_1_3_3::aux_structures::Timestamp; -use zksync_types::{ - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - VmEvent, -}; +use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; use crate::{ glue::GlueInto, - interface::{storage::WriteStorage, VmExecutionLogs}, + interface::{storage::WriteStorage, VmEvent, VmExecutionLogs}, vm_virtual_blocks::{ old_vm::{events::merge_events, utils::precompile_calls_count_after_timestamp}, vm::Vm, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs index de918e06914..52a4ed8a287 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs @@ -1,7 +1,9 @@ use zk_evm_1_3_3::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, VmEvent, EVENT_WRITER_ADDRESS, H256}; +use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use crate::interface::VmEvent; + #[derive(Clone)] pub(crate) struct SolidityLikeEvent { pub(crate) shard_id: u8, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs index b97d86889c8..a2ca08a7ef9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs @@ -8,17 +8,13 @@ use zk_evm_1_3_3::{ vm_state::VmLocalState, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - l2_to_l1_log::L2ToL1Log, - L1BatchNumber, StorageKey, U256, -}; +use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, StorageKey, U256}; use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - L1BatchEnv, Refunds, VmExecutionResultAndLogs, + L1BatchEnv, Refunds, VmEvent, VmExecutionResultAndLogs, }, tracers::dynamic::vm_1_3_3::DynTracer, vm_virtual_blocks::{ @@ -324,12 +320,12 @@ pub(crate) fn pubdata_published( .filter(|log| log.sender != SYSTEM_CONTEXT_ADDRESS) .count() as u32) * zk_evm_1_3_3::zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES; - let l2_l1_long_messages_bytes: u32 = extract_long_l2_to_l1_messages(&events) + let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events) .iter() .map(|event| event.len() as u32) 
.sum(); - let published_bytecode_bytes: u32 = extract_published_bytecodes(&events) + let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) .sum(); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 9d234ec117a..8991ee1b4b9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,14 +1,13 @@ use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_virtual_blocks::{ diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 0cc8916a104..0e4cefd3c80 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,13 +1,13 @@ use zksync_types::vm::{FastVmMode, VmVersion}; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::TracerDispatcher, versions::shadow::ShadowVm, diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index c605e6d2ccc..273b7f4e344 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -113,6 +113,8 @@ impl ProtoRepr for proto::Sender { .and_then(|x| Ok(proto::PubdataSendingMode::try_from(*x)?)) .context("pubdata_sending_mode")? 
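In both refunds tracers above, the event helpers move from free functions in `zksync_types::event` to associated functions on `VmEvent`, which is now re-exported from the VM interface; the pubdata accounting itself is unchanged. For reference, the renamed calls read as follows (constants as imported in the files above):

// Long L2->L1 messages and freshly published bytecodes, both extracted from VM events.
let l2_l1_long_messages_bytes: u32 = VmEvent::extract_long_l2_to_l1_messages(&events)
    .iter()
    .map(|message| message.len() as u32)
    .sum();
let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events)
    .iter()
    .map(|hash| bytecode_len_in_bytes(*hash) as u32 + PUBLISH_BYTECODE_OVERHEAD)
    .sum();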
.parse(), + tx_aggregation_only_prove_and_execute: self.tx_aggregation_paused.unwrap_or(false), + tx_aggregation_paused: self.tx_aggregation_only_prove_and_execute.unwrap_or(false), }) } @@ -143,6 +145,8 @@ impl ProtoRepr for proto::Sender { pubdata_sending_mode: Some( proto::PubdataSendingMode::new(&this.pubdata_sending_mode).into(), ), + tx_aggregation_only_prove_and_execute: Some(this.tx_aggregation_only_prove_and_execute), + tx_aggregation_paused: Some(this.tx_aggregation_paused), } } } diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index af6f690dfc8..87bca88db38 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -22,7 +22,7 @@ impl ProtoRepr for proto::GeneralConfig { prover_group_config: read_optional_repr(&self.prover_group), prometheus_config: read_optional_repr(&self.prometheus), proof_data_handler_config: read_optional_repr(&self.data_handler), - witness_generator: read_optional_repr(&self.witness_generator), + witness_generator_config: read_optional_repr(&self.witness_generator), api_config: read_optional_repr(&self.api), db_config: read_optional_repr(&self.db), eth: read_optional_repr(&self.eth), @@ -44,6 +44,7 @@ impl ProtoRepr for proto::GeneralConfig { &self.external_proof_integration_api, ), experimental_vm_config: read_optional_repr(&self.experimental_vm), + prover_job_monitor_config: read_optional_repr(&self.prover_job_monitor), }) } @@ -62,7 +63,7 @@ impl ProtoRepr for proto::GeneralConfig { proof_compressor: this.proof_compressor_config.as_ref().map(ProtoRepr::build), prover: this.prover_config.as_ref().map(ProtoRepr::build), prover_group: this.prover_group_config.as_ref().map(ProtoRepr::build), - witness_generator: this.witness_generator.as_ref().map(ProtoRepr::build), + witness_generator: this.witness_generator_config.as_ref().map(ProtoRepr::build), prover_gateway: this.prover_gateway.as_ref().map(ProtoRepr::build), witness_vector_generator: this.witness_vector_generator.as_ref().map(ProtoRepr::build), prometheus: this.prometheus_config.as_ref().map(ProtoRepr::build), @@ -99,6 +100,10 @@ impl ProtoRepr for proto::GeneralConfig { .as_ref() .map(ProtoRepr::build), experimental_vm: this.experimental_vm_config.as_ref().map(ProtoRepr::build), + prover_job_monitor: this + .prover_job_monitor_config + .as_ref() + .map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index ee526b2bb67..f4d0188ea20 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -31,6 +31,7 @@ mod snapshots_creator; mod external_price_api_client; mod external_proof_integration_api; +mod prover_job_monitor; mod snapshot_recovery; #[cfg(test)] mod tests; diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index 536ac216863..b102a08be04 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -46,6 +46,8 @@ message Sender { optional uint64 max_acceptable_priority_fee_in_gwei = 16; // required; gwei optional PubdataSendingMode pubdata_sending_mode = 18; // required reserved 19; reserved "proof_loading_mode"; + optional bool tx_aggregation_paused = 20; // required + optional bool tx_aggregation_only_prove_and_execute = 21; // required } message GasAdjuster { diff --git a/core/lib/protobuf_config/src/proto/config/general.proto 
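The two new `Sender` flags are wired through both `read()` and `build()`. Two things stand out in the `read()` hunk above: the fields default to `false` when absent even though the `.proto` comments mark them as required, and the two sources appear crossed (each config field is filled from the other proto field). A direct field-for-field mapping would presumably read:

// Straight-through mapping of the two aggregation flags (sketch, not the hunk above).
tx_aggregation_only_prove_and_execute: self
    .tx_aggregation_only_prove_and_execute
    .unwrap_or(false),
tx_aggregation_paused: self.tx_aggregation_paused.unwrap_or(false),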
b/core/lib/protobuf_config/src/proto/config/general.proto index 373559e7351..3595468949b 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -24,6 +24,7 @@ import "zksync/config/base_token_adjuster.proto"; import "zksync/config/external_price_api_client.proto"; import "zksync/config/external_proof_integration_api.proto"; import "zksync/core/consensus.proto"; +import "zksync/config/prover_job_monitor.proto"; message GeneralConfig { optional database.Postgres postgres = 1; @@ -58,4 +59,5 @@ message GeneralConfig { optional core.consensus.Config consensus = 42; optional external_proof_integration_api.ExternalProofIntegrationApi external_proof_integration_api = 43; optional experimental.Vm experimental_vm = 44; + optional prover_job_monitor.ProverJobMonitor prover_job_monitor = 45; } diff --git a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto new file mode 100644 index 00000000000..7b505aa3bcf --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package zksync.config.prover_job_monitor; + +message ProverJobMonitor { + optional uint32 prometheus_port = 1; // required; u32 + optional uint32 max_db_connections = 2; // required; u32 + optional uint64 graceful_shutdown_timeout_ms = 3; // optional; ms + optional uint64 gpu_prover_archiver_run_interval_ms = 4; // optional; ms + optional uint64 gpu_prover_archiver_archive_prover_after_ms = 5; // optional; ms + optional uint64 prover_jobs_archiver_run_interval_ms = 6; // optional; ms + optional uint64 prover_jobs_archiver_archive_jobs_after_ms = 7; // optional; ms + optional uint64 proof_compressor_job_requeuer_run_interval_ms = 8; // optional; ms + optional uint64 prover_job_requeuer_run_interval_ms = 9; // optional; ms + optional uint64 witness_generator_job_requeuer_run_interval_ms = 10; // optional; ms + optional uint64 proof_compressor_queue_reporter_run_interval_ms = 11; // optional; ms + optional uint64 prover_queue_reporter_run_interval_ms = 12; // optional; ms + optional uint64 witness_generator_queue_reporter_run_interval_ms = 13; // optional; ms + optional uint64 witness_job_queuer_run_interval_ms = 14; // optional; ms +} diff --git a/core/lib/protobuf_config/src/prover_job_monitor.rs b/core/lib/protobuf_config/src/prover_job_monitor.rs new file mode 100644 index 00000000000..a1c5a7c0599 --- /dev/null +++ b/core/lib/protobuf_config/src/prover_job_monitor.rs @@ -0,0 +1,131 @@ +use anyhow::Context as _; +use zksync_config::configs; +use zksync_protobuf::{repr::ProtoRepr, required}; + +use crate::proto::prover_job_monitor as proto; + +impl ProtoRepr for proto::ProverJobMonitor { + type Type = configs::prover_job_monitor::ProverJobMonitorConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + max_db_connections: *required(&self.max_db_connections) + .context("max_db_connections")?, + graceful_shutdown_timeout_ms: *required( + &self + .graceful_shutdown_timeout_ms + .or_else(|| Some(Self::Type::default_graceful_shutdown_timeout_ms())), + ) + .context("graceful_shutdown_timeout_ms")?, + gpu_prover_archiver_run_interval_ms: *required( + &self + .gpu_prover_archiver_run_interval_ms + .or_else(|| Some(Self::Type::default_gpu_prover_archiver_run_interval_ms())), + ) + 
.context("gpu_prover_archiver_run_interval_ms")?, + gpu_prover_archiver_archive_prover_after_ms: *required( + &self + .gpu_prover_archiver_archive_prover_after_ms + .or_else(|| { + Some(Self::Type::default_gpu_prover_archiver_archive_prover_after_ms()) + }), + ) + .context("gpu_prover_archiver_archive_prover_after_ms")?, + prover_jobs_archiver_run_interval_ms: *required( + &self + .prover_jobs_archiver_run_interval_ms + .or_else(|| Some(Self::Type::default_prover_jobs_archiver_run_interval_ms())), + ) + .context("prover_jobs_archiver_run_interval_ms")?, + prover_jobs_archiver_archive_jobs_after_ms: *required( + &self.prover_jobs_archiver_archive_jobs_after_ms.or_else(|| { + Some(Self::Type::default_prover_jobs_archiver_archive_jobs_after_ms()) + }), + ) + .context("prover_jobs_archiver_archive_jobs_after_ms")?, + proof_compressor_job_requeuer_run_interval_ms: *required( + &self + .proof_compressor_job_requeuer_run_interval_ms + .or_else(|| { + Some(Self::Type::default_proof_compressor_job_requeuer_run_interval_ms()) + }), + ) + .context("proof_compressor_job_requeuer_run_interval_ms")?, + prover_job_requeuer_run_interval_ms: *required( + &self + .prover_job_requeuer_run_interval_ms + .or_else(|| Some(Self::Type::default_prover_job_requeuer_run_interval_ms())), + ) + .context("prover_job_requeuer_run_interval_ms")?, + witness_generator_job_requeuer_run_interval_ms: *required( + &self + .witness_generator_job_requeuer_run_interval_ms + .or_else(|| { + Some(Self::Type::default_witness_generator_job_requeuer_run_interval_ms()) + }), + ) + .context("witness_generator_job_requeuer_run_interval_ms")?, + proof_compressor_queue_reporter_run_interval_ms: *required( + &self + .proof_compressor_queue_reporter_run_interval_ms + .or_else(|| { + Some(Self::Type::default_proof_compressor_queue_reporter_run_interval_ms()) + }), + ) + .context("proof_compressor_queue_reporter_run_interval_ms")?, + prover_queue_reporter_run_interval_ms: *required( + &self + .prover_queue_reporter_run_interval_ms + .or_else(|| Some(Self::Type::default_prover_queue_reporter_run_interval_ms())), + ) + .context("prover_queue_reporter_run_interval_ms")?, + witness_generator_queue_reporter_run_interval_ms: *required( + &self + .witness_generator_queue_reporter_run_interval_ms + .or_else(|| { + Some(Self::Type::default_witness_generator_queue_reporter_run_interval_ms()) + }), + ) + .context("witness_generator_queue_reporter_run_interval_ms")?, + witness_job_queuer_run_interval_ms: *required( + &self + .witness_job_queuer_run_interval_ms + .or_else(|| Some(Self::Type::default_witness_job_queuer_run_interval_ms())), + ) + .context("witness_job_queuer_run_interval_ms")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + max_db_connections: Some(this.max_db_connections), + graceful_shutdown_timeout_ms: Some(this.graceful_shutdown_timeout_ms), + gpu_prover_archiver_run_interval_ms: Some(this.gpu_prover_archiver_run_interval_ms), + gpu_prover_archiver_archive_prover_after_ms: Some( + this.gpu_prover_archiver_archive_prover_after_ms, + ), + prover_jobs_archiver_run_interval_ms: Some(this.prover_jobs_archiver_run_interval_ms), + prover_jobs_archiver_archive_jobs_after_ms: Some( + this.prover_jobs_archiver_archive_jobs_after_ms, + ), + proof_compressor_job_requeuer_run_interval_ms: Some( + this.proof_compressor_job_requeuer_run_interval_ms, + ), + prover_job_requeuer_run_interval_ms: Some(this.prover_job_requeuer_run_interval_ms), + 
witness_generator_job_requeuer_run_interval_ms: Some( + this.witness_generator_job_requeuer_run_interval_ms, + ), + proof_compressor_queue_reporter_run_interval_ms: Some( + this.proof_compressor_queue_reporter_run_interval_ms, + ), + prover_queue_reporter_run_interval_ms: Some(this.prover_queue_reporter_run_interval_ms), + witness_generator_queue_reporter_run_interval_ms: Some( + this.witness_generator_queue_reporter_run_interval_ms, + ), + witness_job_queuer_run_interval_ms: Some(this.witness_job_queuer_run_interval_ms), + } + } +} diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index c546fb60c09..2c9b1440af2 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -181,6 +181,7 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), } } diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index c386426d066..ad5361c4608 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -20,8 +20,7 @@ pub use self::{ }, shadow_storage::ShadowStorage, storage_factory::{ - BatchDiff, OwnedPostgresStorage, OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, - RocksdbWithMemory, + BatchDiff, OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, RocksdbWithMemory, }, }; diff --git a/core/lib/state/src/storage_factory.rs b/core/lib/state/src/storage_factory.rs index d3b978356a5..e2b5275c48d 100644 --- a/core/lib/state/src/storage_factory.rs +++ b/core/lib/state/src/storage_factory.rs @@ -10,11 +10,15 @@ use zksync_vm_interface::storage::ReadStorage; use crate::{PostgresStorage, RocksdbStorage, RocksdbStorageBuilder, StateKeeperColumnFamily}; -/// Factory that can produce [`OwnedStorage`] instances on demand. +/// Storage with a static lifetime that can be sent to Tokio tasks etc. +pub type OwnedStorage = PgOrRocksdbStorage<'static>; + +/// Factory that can produce storage instances on demand. The storage type is encapsulated as a type param +/// (mostly for testing purposes); the default is [`OwnedStorage`]. #[async_trait] -pub trait ReadStorageFactory: Debug + Send + Sync + 'static { - /// Creates an [`OwnedStorage`] entity over either a Postgres connection or RocksDB - /// instance. The specific criteria on which one are left up to the implementation. +pub trait ReadStorageFactory: Debug + Send + Sync + 'static { + /// Creates a storage instance, e.g. over a Postgres connection or a RocksDB instance. + /// The specific criteria on which one are left up to the implementation. /// /// Implementations may be cancel-aware and return `Ok(None)` iff `stop_receiver` receives /// a stop signal; this is the only case in which `Ok(None)` should be returned. @@ -22,7 +26,7 @@ pub trait ReadStorageFactory: Debug + Send + Sync + 'static { &self, stop_receiver: &watch::Receiver, l1_batch_number: L1BatchNumber, - ) -> anyhow::Result>; + ) -> anyhow::Result>; } /// [`ReadStorageFactory`] producing Postgres-backed storage instances. 
Hence, it is slower than more advanced @@ -34,8 +38,9 @@ impl ReadStorageFactory for ConnectionPool { _stop_receiver: &watch::Receiver, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { - let storage = OwnedPostgresStorage::new(self.clone(), l1_batch_number); - Ok(Some(storage.into())) + let connection = self.connection().await?; + let storage = OwnedStorage::postgres(connection, l1_batch_number).await?; + Ok(Some(storage)) } } @@ -60,31 +65,29 @@ pub struct RocksdbWithMemory { pub batch_diffs: Vec, } -/// Owned Postgres-backed VM storage for a certain L1 batch. +/// A [`ReadStorage`] implementation that uses either [`PostgresStorage`] or [`RocksdbStorage`] +/// underneath. #[derive(Debug)] -pub struct OwnedPostgresStorage { - connection_pool: ConnectionPool, - l1_batch_number: L1BatchNumber, +pub enum PgOrRocksdbStorage<'a> { + /// Implementation over a Postgres connection. + Postgres(PostgresStorage<'a>), + /// Implementation over a RocksDB cache instance. + Rocksdb(RocksdbStorage), + /// Implementation over a RocksDB cache instance with in-memory DB diffs. + RocksdbWithMemory(RocksdbWithMemory), } -impl OwnedPostgresStorage { - /// Creates a VM storage for the specified batch number. - pub fn new(connection_pool: ConnectionPool, l1_batch_number: L1BatchNumber) -> Self { - Self { - connection_pool, - l1_batch_number, - } - } - - /// Returns a [`ReadStorage`] implementation backed by Postgres +impl PgOrRocksdbStorage<'static> { + /// Creates a Postgres-based storage. Because of the `'static` lifetime requirement, `connection` must be + /// non-transactional. /// /// # Errors /// - /// Propagates Postgres errors. - pub async fn borrow(&self) -> anyhow::Result> { - let l1_batch_number = self.l1_batch_number; - let mut connection = self.connection_pool.connection().await?; - + /// Propagates Postgres I/O errors. + pub async fn postgres( + mut connection: Connection<'static, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result { let l2_block_number = if let Some((_, l2_block_number)) = connection .blocks_dal() .get_l2_block_range_of_l1_batch(l1_batch_number) @@ -113,42 +116,7 @@ impl OwnedPostgresStorage { .into(), ) } -} - -/// Owned version of [`PgOrRocksdbStorage`]. It is thus possible to send to blocking tasks for VM execution. -#[derive(Debug)] -pub enum OwnedStorage { - /// Readily initialized storage with a static lifetime. - Static(PgOrRocksdbStorage<'static>), - /// Storage that must be `borrow()`ed from. - Lending(OwnedPostgresStorage), -} -impl From for OwnedStorage { - fn from(storage: OwnedPostgresStorage) -> Self { - Self::Lending(storage) - } -} - -impl From> for OwnedStorage { - fn from(storage: PgOrRocksdbStorage<'static>) -> Self { - Self::Static(storage) - } -} - -/// A [`ReadStorage`] implementation that uses either [`PostgresStorage`] or [`RocksdbStorage`] -/// underneath. -#[derive(Debug)] -pub enum PgOrRocksdbStorage<'a> { - /// Implementation over a Postgres connection. - Postgres(PostgresStorage<'a>), - /// Implementation over a RocksDB cache instance. - Rocksdb(RocksdbStorage), - /// Implementation over a RocksDB cache instance with in-memory DB diffs. - RocksdbWithMemory(RocksdbWithMemory), -} - -impl PgOrRocksdbStorage<'static> { /// Catches up RocksDB synchronously (i.e. assumes the gap is small) and /// returns a [`ReadStorage`] implementation backed by caught-up RocksDB. 
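With `OwnedPostgresStorage` removed, `OwnedStorage` is now just an alias for `PgOrRocksdbStorage<'static>`, and factories build it directly from a non-transactional connection, as the `ConnectionPool` impl above does. A sketch of a custom factory body under the new API (the `Core` connection marker and the error handling are assumptions based on the hunks above):

async fn access_storage(
    pool: &ConnectionPool<Core>,
    l1_batch_number: L1BatchNumber,
) -> anyhow::Result<Option<OwnedStorage>> {
    // `postgres()` requires a `'static` connection, i.e. one not taken inside a transaction.
    let connection = pool.connection().await?;
    let storage = OwnedStorage::postgres(connection, l1_batch_number).await?;
    Ok(Some(storage))
}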
/// diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 1d1731bf001..decb2a0f403 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -87,6 +87,7 @@ pub(crate) async fn create_l2_block( protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), }; conn.blocks_dal() diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 0210a28f2a2..916fae6a35b 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -5,18 +5,14 @@ use strum::Display; use zksync_basic_types::{ tee_types::TeeType, web3::{AccessList, Bytes, Index}, - L1BatchNumber, H160, H2048, H256, H64, U256, U64, + Bloom, L1BatchNumber, H160, H256, H64, U256, U64, }; use zksync_contracts::BaseSystemContractsHashes; pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; -use crate::{ - protocol_version::L1VerifierConfig, - vm_trace::{Call, CallType}, - Address, L2BlockNumber, ProtocolVersionId, -}; +use crate::{protocol_version::L1VerifierConfig, Address, L2BlockNumber, ProtocolVersionId}; pub mod en; pub mod state_override; @@ -259,7 +255,7 @@ pub struct TransactionReceipt { pub root: H256, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Transaction type, Some(1) for AccessList transaction, None for Legacy #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub transaction_type: Option, @@ -311,7 +307,7 @@ pub struct Block { pub extra_data: Bytes, /// Logs bloom #[serde(rename = "logsBloom")] - pub logs_bloom: H2048, + pub logs_bloom: Bloom, /// Timestamp pub timestamp: U256, /// Timestamp of the l1 batch this L2 block was included within @@ -355,7 +351,7 @@ impl Default for Block { gas_limit: U256::default(), base_fee_per_gas: U256::default(), extra_data: Bytes::default(), - logs_bloom: H2048::default(), + logs_bloom: Bloom::default(), timestamp: U256::default(), l1_batch_timestamp: None, difficulty: U256::default(), @@ -604,13 +600,14 @@ pub struct ResultDebugCall { pub result: DebugCall, } -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)] pub enum DebugCallType { + #[default] Call, Create, } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DebugCall { pub r#type: DebugCallType, @@ -626,30 +623,6 @@ pub struct DebugCall { pub calls: Vec, } -impl From for DebugCall { - fn from(value: Call) -> Self { - let calls = value.calls.into_iter().map(DebugCall::from).collect(); - let debug_type = match value.r#type { - CallType::Call(_) => DebugCallType::Call, - CallType::Create => DebugCallType::Create, - CallType::NearCall => unreachable!("We have to filter our near calls before"), - }; - Self { - r#type: debug_type, - from: value.from, - to: value.to, - gas: U256::from(value.gas), - gas_used: U256::from(value.gas_used), - value: value.value, - output: Bytes::from(value.output.clone()), - input: Bytes::from(value.input.clone()), - error: value.error.clone(), - revert_reason: value.revert_reason, - calls, - } - } -} - // TODO (PLA-965): remove deprecated fields from the struct. It is currently in a "migration" phase // to keep compatibility between old and new versions. 
#[derive(Default, Serialize, Deserialize, Clone, Debug)] diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index bc13bed457b..9c1609bf175 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,7 +1,7 @@ use std::{fmt, ops}; use serde::{Deserialize, Serialize}; -use zksync_basic_types::{Address, H2048, H256, U256}; +use zksync_basic_types::{Address, Bloom, BloomInput, H256, U256}; use zksync_contracts::BaseSystemContractsHashes; use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use zksync_utils::concat_and_hash; @@ -56,7 +56,7 @@ pub struct L1BatchHeader { /// Preimages of the hashes that were sent as value of L2 logs by special system L2 contract. pub l2_to_l1_messages: Vec>, /// Bloom filter for the event logs in the block. - pub bloom: H2048, + pub bloom: Bloom, /// Hashes of contracts used this block pub used_contract_hashes: Vec, pub base_system_contracts_hashes: BaseSystemContractsHashes, @@ -90,6 +90,7 @@ pub struct L2BlockHeader { /// Note, that it is an `u64`, i.e. while the computational limit for the bootloader is an `u32` a much larger /// amount of gas can be spent on pubdata. pub gas_limit: u64, + pub logs_bloom: Bloom, } /// Structure that represents the data is returned by the storage oracle during batch execution. @@ -125,7 +126,7 @@ impl L1BatchHeader { priority_ops_onchain_data: vec![], l2_to_l1_logs: vec![], l2_to_l1_messages: vec![], - bloom: H2048::default(), + bloom: Bloom::default(), used_contract_hashes: vec![], base_system_contracts_hashes, system_logs: vec![], @@ -294,8 +295,19 @@ pub struct L1BatchTreeData { pub rollup_last_leaf_index: u64, } +pub fn build_bloom<'a, I: IntoIterator>>(items: I) -> Bloom { + let mut bloom = Bloom::zero(); + for item in items { + bloom.accrue(item); + } + + bloom +} + #[cfg(test)] mod tests { + use std::{iter, str::FromStr}; + use super::*; #[test] @@ -345,4 +357,76 @@ mod tests { assert_eq!(block_number, unpacked_block_number); assert_eq!(block_timestamp, unpacked_block_timestamp); } + + #[test] + fn test_build_bloom() { + let logs = [ + ( + Address::from_str("0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0").unwrap(), + vec![ + H256::from_str( + "0x3452f51d00000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + H256::from_str( + "0x000000000000000000000000d0a6e6c54dbc68db5db3a091b171a77407ff7ccf", + ) + .unwrap(), + H256::from_str( + "0x0000000000000000000000000f5e378a82a55f24e88317a8fb7cd2ed8bd3873f", + ) + .unwrap(), + H256::from_str( + "0x000000000000000000000000000000000000000000000004f0e6ade1e67bb719", + ) + .unwrap(), + ], + ), + ( + Address::from_str("0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0").unwrap(), + vec![ + H256::from_str( + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + ) + .unwrap(), + H256::from_str( + "0x000000000000000000000000d0a6e6c54dbc68db5db3a091b171a77407ff7ccf", + ) + .unwrap(), + H256::from_str( + "0x0000000000000000000000000f5e378a82a55f24e88317a8fb7cd2ed8bd3873f", + ) + .unwrap(), + ], + ), + ( + Address::from_str("0xd0a6E6C54DbC68Db5db3A091B171A77407Ff7ccf").unwrap(), + vec![H256::from_str( + "0x51223fdc0a25891366fb358b4af9fe3c381b1566e287c61a29d01c8a173fe4f4", + ) + .unwrap()], + ), + ]; + let iter = logs.iter().flat_map(|log| { + log.1 + .iter() + .map(|topic| BloomInput::Raw(topic.as_bytes())) + .chain(iter::once(BloomInput::Raw(log.0.as_bytes()))) + }); + + let bloom = build_bloom(iter); + let expected = Bloom::from_str( + 
"0000000004000000000000000100000000000000000000000000000000000000\ + 0000000000000000000040000000000000000000000000000000000000000200\ + 0000000000020000400000180000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000080000000000201000000000\ + 2000000000000000400000000000080000008000000000000000000000000000\ + 0000000000000000000000000004000000000001000000000000804000000000\ + 0000000200000000000000000000000400000000000000000000000800200000\ + 0000000000000010000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + assert_eq!(bloom, expected); + } } diff --git a/core/lib/types/src/circuit.rs b/core/lib/types/src/circuit.rs deleted file mode 100644 index 2aeb226e165..00000000000 --- a/core/lib/types/src/circuit.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::ops::Add; - -use serde::{Deserialize, Serialize}; - -/// Holds information about number of cycles used per circuit type. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub struct CircuitCycleStatistic { - pub main_vm_cycles: u32, - pub ram_permutation_cycles: u32, - pub storage_application_cycles: u32, - pub storage_sorter_cycles: u32, - pub code_decommitter_cycles: u32, - pub code_decommitter_sorter_cycles: u32, - pub log_demuxer_cycles: u32, - pub events_sorter_cycles: u32, - pub keccak256_cycles: u32, - pub ecrecover_cycles: u32, - pub sha256_cycles: u32, - pub secp256k1_verify_cycles: u32, - pub transient_storage_checker_cycles: u32, -} - -impl CircuitCycleStatistic { - pub fn new() -> Self { - Self::default() - } -} - -/// Holds information about number of circuits used per circuit type. -#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] -pub struct CircuitStatistic { - pub main_vm: f32, - pub ram_permutation: f32, - pub storage_application: f32, - pub storage_sorter: f32, - pub code_decommitter: f32, - pub code_decommitter_sorter: f32, - pub log_demuxer: f32, - pub events_sorter: f32, - pub keccak256: f32, - pub ecrecover: f32, - pub sha256: f32, - #[serde(default)] - pub secp256k1_verify: f32, - #[serde(default)] - pub transient_storage_checker: f32, -} - -impl CircuitStatistic { - /// Rounds up numbers and adds them. - pub fn total(&self) -> usize { - self.main_vm.ceil() as usize - + self.ram_permutation.ceil() as usize - + self.storage_application.ceil() as usize - + self.storage_sorter.ceil() as usize - + self.code_decommitter.ceil() as usize - + self.code_decommitter_sorter.ceil() as usize - + self.log_demuxer.ceil() as usize - + self.events_sorter.ceil() as usize - + self.keccak256.ceil() as usize - + self.ecrecover.ceil() as usize - + self.sha256.ceil() as usize - + self.secp256k1_verify.ceil() as usize - + self.transient_storage_checker.ceil() as usize - } - - /// Adds numbers. 
- pub fn total_f32(&self) -> f32 { - self.main_vm - + self.ram_permutation - + self.storage_application - + self.storage_sorter - + self.code_decommitter - + self.code_decommitter_sorter - + self.log_demuxer - + self.events_sorter - + self.keccak256 - + self.ecrecover - + self.sha256 - + self.secp256k1_verify - + self.transient_storage_checker - } -} - -impl Add for CircuitStatistic { - type Output = CircuitStatistic; - - fn add(self, other: CircuitStatistic) -> CircuitStatistic { - CircuitStatistic { - main_vm: self.main_vm + other.main_vm, - ram_permutation: self.ram_permutation + other.ram_permutation, - storage_application: self.storage_application + other.storage_application, - storage_sorter: self.storage_sorter + other.storage_sorter, - code_decommitter: self.code_decommitter + other.code_decommitter, - code_decommitter_sorter: self.code_decommitter_sorter + other.code_decommitter_sorter, - log_demuxer: self.log_demuxer + other.log_demuxer, - events_sorter: self.events_sorter + other.events_sorter, - keccak256: self.keccak256 + other.keccak256, - ecrecover: self.ecrecover + other.ecrecover, - sha256: self.sha256 + other.sha256, - secp256k1_verify: self.secp256k1_verify + other.secp256k1_verify, - transient_storage_checker: self.transient_storage_checker - + other.transient_storage_checker, - } - } -} diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs index 1b4bfdd21ce..b5c0d79c857 100644 --- a/core/lib/types/src/debug_flat_call.rs +++ b/core/lib/types/src/debug_flat_call.rs @@ -86,7 +86,6 @@ mod test { use super::*; use crate::{ api::{DebugCall, DebugCallType, ResultDebugCall}, - vm_trace::Call, Address, BOOTLOADER_ADDRESS, }; @@ -120,26 +119,24 @@ mod test { } fn new_testing_trace() -> Vec { - let first_call_trace = Call { + let first_call_trace = DebugCall { from: Address::zero(), to: Address::zero(), - gas: 100, - gas_used: 42, - ..Call::default() + gas: 100.into(), + gas_used: 42.into(), + ..DebugCall::default() }; - let second_call_trace = Call { + let second_call_trace = DebugCall { from: Address::zero(), to: Address::zero(), value: 123.into(), - gas: 58, - gas_used: 10, - input: b"input".to_vec(), - output: b"output".to_vec(), - ..Call::default() + gas: 58.into(), + gas_used: 10.into(), + input: Bytes(b"input".to_vec()), + output: Bytes(b"output".to_vec()), + ..DebugCall::default() }; - [first_call_trace, second_call_trace] - .map(|call_trace| call_trace.into()) - .into() + [first_call_trace, second_call_trace].into() } fn expected_flat_trace() -> Vec { diff --git a/core/lib/types/src/eth_sender.rs b/core/lib/types/src/eth_sender.rs index bab57165b3d..09ea915283e 100644 --- a/core/lib/types/src/eth_sender.rs +++ b/core/lib/types/src/eth_sender.rs @@ -51,6 +51,7 @@ pub struct EthTx { /// this transaction. If it is set to `None` this transaction was sent by the main operator. pub from_addr: Option
, pub blob_sidecar: Option, + pub is_gateway: bool, } impl std::fmt::Debug for EthTx { diff --git a/core/lib/types/src/event/mod.rs b/core/lib/types/src/event/mod.rs deleted file mode 100644 index 81e79609724..00000000000 --- a/core/lib/types/src/event/mod.rs +++ /dev/null @@ -1,441 +0,0 @@ -use std::fmt::Debug; - -use itertools::Itertools; -use once_cell::sync::Lazy; -use serde::{Deserialize, Serialize}; -use zksync_basic_types::ethabi::Token; -use zksync_system_constants::EVENT_WRITER_ADDRESS; -use zksync_utils::{ - address_to_u256, h256_to_account_address, h256_to_u256, u256_to_bytes_be, u256_to_h256, -}; - -use crate::{ - api::Log, - ethabi, - l2_to_l1_log::L2ToL1Log, - tokens::{TokenInfo, TokenMetadata}, - web3::{Bytes, Index}, - zk_evm_types::{LogQuery, Timestamp}, - Address, L1BatchNumber, CONTRACT_DEPLOYER_ADDRESS, H256, KNOWN_CODES_STORAGE_ADDRESS, - L1_MESSENGER_ADDRESS, U256, U64, -}; - -#[cfg(test)] -mod tests; - -#[derive(Default, Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] -pub struct VmEvent { - pub location: (L1BatchNumber, u32), - pub address: Address, - pub indexed_topics: Vec, - pub value: Vec, -} - -impl VmEvent { - pub fn index_keys(&self) -> impl Iterator + '_ { - self.indexed_topics - .iter() - .enumerate() - .map(move |(idx, &topic)| VmEventGroupKey { - address: self.address, - topic: (idx as u32, topic), - }) - } -} - -impl From<&VmEvent> for Log { - fn from(vm_event: &VmEvent) -> Self { - Log { - address: vm_event.address, - topics: vm_event.indexed_topics.clone(), - data: Bytes::from(vm_event.value.clone()), - block_hash: None, - block_number: None, - l1_batch_number: Some(U64::from(vm_event.location.0 .0)), - transaction_hash: None, - transaction_index: Some(Index::from(vm_event.location.1)), - log_index: None, - transaction_log_index: None, - log_type: None, - removed: Some(false), - block_timestamp: None, - } - } -} - -pub static DEPLOY_EVENT_SIGNATURE: Lazy = Lazy::new(|| { - ethabi::long_signature( - "ContractDeployed", - &[ - ethabi::ParamType::Address, - ethabi::ParamType::FixedBytes(32), - ethabi::ParamType::Address, - ], - ) -}); - -static L1_MESSAGE_EVENT_SIGNATURE: Lazy = Lazy::new(|| { - ethabi::long_signature( - "L1MessageSent", - &[ - ethabi::ParamType::Address, - ethabi::ParamType::FixedBytes(32), - ethabi::ParamType::Bytes, - ], - ) -}); - -/// Corresponds to the following solidity event: -/// ```solidity -/// struct L2ToL1Log { -/// uint8 l2ShardId; -/// bool isService; -/// uint16 txNumberInBlock; -/// address sender; -/// bytes32 key; -/// bytes32 value; -/// } -/// ``` -#[derive(Debug, Default, Clone, PartialEq)] -pub struct L1MessengerL2ToL1Log { - pub l2_shard_id: u8, - pub is_service: bool, - pub tx_number_in_block: u16, - pub sender: Address, - pub key: U256, - pub value: U256, -} - -impl L1MessengerL2ToL1Log { - pub fn packed_encoding(&self) -> Vec { - let mut res: Vec = vec![]; - res.push(self.l2_shard_id); - res.push(self.is_service as u8); - res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); - res.extend_from_slice(self.sender.as_bytes()); - res.extend(u256_to_bytes_be(&self.key)); - res.extend(u256_to_bytes_be(&self.value)); - res - } -} - -impl From for L2ToL1Log { - fn from(log: L1MessengerL2ToL1Log) -> Self { - L2ToL1Log { - shard_id: log.l2_shard_id, - is_service: log.is_service, - tx_number_in_block: log.tx_number_in_block, - sender: log.sender, - key: u256_to_h256(log.key), - value: u256_to_h256(log.value), - } - } -} - -pub static L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE: Lazy = Lazy::new(|| 
{ - ethabi::long_signature( - "BytecodeL1PublicationRequested", - &[ethabi::ParamType::FixedBytes(32)], - ) -}); - -#[derive(Debug, PartialEq)] -pub struct L1MessengerBytecodePublicationRequest { - pub bytecode_hash: H256, -} - -static BRIDGE_INITIALIZATION_SIGNATURE_OLD: Lazy = Lazy::new(|| { - ethabi::long_signature( - "BridgeInitialization", - &[ - ethabi::ParamType::Address, - ethabi::ParamType::String, - ethabi::ParamType::String, - ethabi::ParamType::Uint(8), - ], - ) -}); - -static BRIDGE_INITIALIZATION_SIGNATURE_NEW: Lazy = Lazy::new(|| { - ethabi::long_signature( - "BridgeInitialize", - &[ - ethabi::ParamType::Address, - ethabi::ParamType::String, - ethabi::ParamType::String, - ethabi::ParamType::Uint(8), - ], - ) -}); - -static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { - ethabi::long_signature( - "MarkedAsKnown", - &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], - ) -}); - -// moved from Runtime Context -pub fn extract_added_tokens( - l2_shared_bridge_addr: Address, - all_generated_events: &[VmEvent], -) -> Vec { - let deployed_tokens = all_generated_events - .iter() - .filter(|event| { - // Filter events from the deployer contract that match the expected signature. - event.address == CONTRACT_DEPLOYER_ADDRESS - && event.indexed_topics.len() == 4 - && event.indexed_topics[0] == *DEPLOY_EVENT_SIGNATURE - && h256_to_account_address(&event.indexed_topics[1]) == l2_shared_bridge_addr - }) - .map(|event| h256_to_account_address(&event.indexed_topics[3])); - - extract_added_token_info_from_addresses(all_generated_events, deployed_tokens) -} - -// moved from Runtime Context -fn extract_added_token_info_from_addresses( - all_generated_events: &[VmEvent], - deployed_tokens: impl Iterator, -) -> Vec { - deployed_tokens - .filter_map(|l2_token_address| { - all_generated_events - .iter() - .find(|event| { - event.address == l2_token_address - && (event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE_NEW - || event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE_OLD) - }) - .map(|event| { - let l1_token_address = h256_to_account_address(&event.indexed_topics[1]); - let mut dec_ev = ethabi::decode( - &[ - ethabi::ParamType::String, - ethabi::ParamType::String, - ethabi::ParamType::Uint(8), - ], - &event.value, - ) - .unwrap(); - - TokenInfo { - l1_address: l1_token_address, - l2_address: l2_token_address, - metadata: TokenMetadata { - name: dec_ev.remove(0).into_string().unwrap(), - symbol: dec_ev.remove(0).into_string().unwrap(), - decimals: dec_ev.remove(0).into_uint().unwrap().as_u32() as u8, - }, - } - }) - }) - .collect() -} - -// moved from `RuntimeContext` -// Extracts all the "long" L2->L1 messages that were submitted by the -// L1Messenger contract -pub fn extract_long_l2_to_l1_messages(all_generated_events: &[VmEvent]) -> Vec> { - all_generated_events - .iter() - .filter(|event| { - // Filter events from the l1 messenger contract that match the expected signature. 
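The removed `L1MessengerL2ToL1Log::packed_encoding` above concatenates fixed-width fields, so the result is always 1 (shard id) + 1 (is_service) + 2 (tx number, big-endian) + 20 (sender) + 32 (key) + 32 (value) = 88 bytes. A hedged sanity check one could keep next to the relocated code (the constant name here is illustrative, not taken from the repo):

// Width of the packed L1MessengerL2ToL1Log encoding produced by `packed_encoding`.
const PACKED_L2_TO_L1_LOG_LEN: usize = 1 + 1 + 2 + 20 + 32 + 32; // = 88 bytes

let packed = log.packed_encoding();
assert_eq!(packed.len(), PACKED_L2_TO_L1_LOG_LEN);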
- event.address == L1_MESSENGER_ADDRESS - && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == *L1_MESSAGE_EVENT_SIGNATURE - }) - .map(|event| { - let decoded_tokens = ethabi::decode(&[ethabi::ParamType::Bytes], &event.value) - .expect("Failed to decode L1MessageSent message"); - // The `Token` does not implement `Copy` trait, so I had to do it like that: - let bytes_token = decoded_tokens.into_iter().next().unwrap(); - bytes_token.into_bytes().unwrap() - }) - .collect() -} - -// Extracts all the `L2ToL1Logs` that were emitted -// by the `L1Messenger` contract -pub fn extract_l2tol1logs_from_l1_messenger( - all_generated_events: &[VmEvent], -) -> Vec { - let params = &[ethabi::ParamType::Tuple(vec![ - ethabi::ParamType::Uint(8), - ethabi::ParamType::Bool, - ethabi::ParamType::Uint(16), - ethabi::ParamType::Address, - ethabi::ParamType::FixedBytes(32), - ethabi::ParamType::FixedBytes(32), - ])]; - - let l1_messenger_l2_to_l1_log_event_signature = ethabi::long_signature("L2ToL1LogSent", params); - - all_generated_events - .iter() - .filter(|event| { - // Filter events from the l1 messenger contract that match the expected signature. - event.address == L1_MESSENGER_ADDRESS - && !event.indexed_topics.is_empty() - && event.indexed_topics[0] == l1_messenger_l2_to_l1_log_event_signature - }) - .map(|event| { - let tuple = ethabi::decode(params, &event.value) - .expect("Failed to decode L2ToL1LogSent message") - .first() - .unwrap() - .clone(); - let Token::Tuple(tokens) = tuple else { - panic!("Tuple was expected, got: {}", tuple); - }; - let [ - Token::Uint(shard_id), - Token::Bool(is_service), - Token::Uint(tx_number_in_block), - Token::Address(sender), - Token::FixedBytes(key_bytes), - Token::FixedBytes(value_bytes), - ] = tokens.as_slice() else { - panic!("Invalid tuple types"); - }; - L1MessengerL2ToL1Log { - l2_shard_id: shard_id.low_u64() as u8, - is_service: *is_service, - tx_number_in_block: tx_number_in_block.low_u64() as u16, - sender: *sender, - key: U256::from_big_endian(key_bytes), - value: U256::from_big_endian(value_bytes), - } - }) - .collect() -} - -// Extracts all the bytecode publication requests -// that were emitted by the L1Messenger contract -pub fn extract_bytecode_publication_requests_from_l1_messenger( - all_generated_events: &[VmEvent], -) -> Vec { - all_generated_events - .iter() - .filter(|event| { - // Filter events from the l1 messenger contract that match the expected signature. - event.address == L1_MESSENGER_ADDRESS - && !event.indexed_topics.is_empty() - && event.indexed_topics[0] == *L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE - }) - .map(|event| { - let mut tokens = ethabi::decode(&[ethabi::ParamType::FixedBytes(32)], &event.value) - .expect("Failed to decode BytecodeL1PublicationRequested message"); - L1MessengerBytecodePublicationRequest { - bytecode_hash: H256::from_slice(&tokens.remove(0).into_fixed_bytes().unwrap()), - } - }) - .collect() -} - -// Extract all bytecodes marked as known on the system contracts -pub fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { - all_generated_events - .iter() - .filter(|event| { - // Filter events from the deployer contract that match the expected signature. 
- event.address == KNOWN_CODES_STORAGE_ADDRESS - && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE - }) - .map(|event| event.indexed_topics[1]) - .collect() -} - -// Extract bytecodes that were marked as known on the system contracts and should be published onchain -pub fn extract_published_bytecodes(all_generated_events: &[VmEvent]) -> Vec { - all_generated_events - .iter() - .filter(|event| { - // Filter events from the deployer contract that match the expected signature. - event.address == KNOWN_CODES_STORAGE_ADDRESS - && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE - && event.indexed_topics[2] != H256::zero() - }) - .map(|event| event.indexed_topics[1]) - .collect() -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] -pub struct VmEventGroupKey { - pub address: Address, - pub topic: (u32, H256), -} - -/// Each `VmEvent` can be translated to several log queries. -/// This methods converts each event from input to log queries and returns all produced log queries. -pub fn convert_vm_events_to_log_queries(events: &[VmEvent]) -> Vec { - events - .iter() - .flat_map(|event| { - // Construct first query. This query holds an information about - // - number of event topics (on log query level `event.address` is treated as a topic, thus + 1 is added) - // - length of event value - // - `event.address` (or first topic in terms of log query terminology). - let first_key_word = - (event.indexed_topics.len() as u64 + 1) + ((event.value.len() as u64) << 32); - let key = U256([first_key_word, 0, 0, 0]); - - // `timestamp`, `aux_byte`, `read_value`, `rw_flag`, `rollback` are set as per convention. - let first_log = LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: event.location.1 as u16, - aux_byte: 0, - shard_id: 0, - address: EVENT_WRITER_ADDRESS, - key, - read_value: U256::zero(), - written_value: address_to_u256(&event.address), - rw_flag: false, - rollback: false, - is_service: true, - }; - - // The next logs hold information about remaining topics and `event.value`. - // Each log can hold at most two values each of 32 bytes. - // The following piece of code prepares these 32-byte values. - let values = event.indexed_topics.iter().map(|h| h256_to_u256(*h)).chain( - event.value.chunks(32).map(|value_chunk| { - let mut padded = value_chunk.to_vec(); - padded.resize(32, 0); - U256::from_big_endian(&padded) - }), - ); - - // And now we process these values in chunks by two. - let value_chunks = values.chunks(2); - let other_logs = value_chunks.into_iter().map(|mut chunk| { - // The first value goes to `log_query.key`. - let key = chunk.next().unwrap(); - - // If the second one is present then it goes to `log_query.written_value`. 
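In the removed `convert_vm_events_to_log_queries` above, the first log query packs two counters into the low 64-bit limb of `key`: the topic count (plus one, because the event address is treated as a topic) in the low 32 bits, and the byte length of the event value in the next 32 bits. Unpacking it again, purely for illustration:

// Packing, as in the removed helper above.
let first_key_word = (event.indexed_topics.len() as u64 + 1) + ((event.value.len() as u64) << 32);
let key = U256([first_key_word, 0, 0, 0]);

// Unpacking the same word.
let topics_incl_address = key.low_u64() & 0xffff_ffff;
let value_len_bytes = key.low_u64() >> 32;
assert_eq!(topics_incl_address, event.indexed_topics.len() as u64 + 1);
assert_eq!(value_len_bytes, event.value.len() as u64);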
- let written_value = chunk.next().unwrap_or_default(); - - LogQuery { - timestamp: Timestamp(0), - tx_number_in_block: event.location.1 as u16, - aux_byte: 0, - shard_id: 0, - address: EVENT_WRITER_ADDRESS, - key, - read_value: U256::zero(), - written_value, - rw_flag: false, - rollback: false, - is_service: false, - } - }); - - std::iter::once(first_log) - .chain(other_logs) - .collect::>() - }) - .collect() -} diff --git a/core/lib/types/src/event/tests.rs b/core/lib/types/src/event/tests.rs deleted file mode 100644 index f63e33ef600..00000000000 --- a/core/lib/types/src/event/tests.rs +++ /dev/null @@ -1,171 +0,0 @@ -use zksync_system_constants::{BOOTLOADER_ADDRESS, L2_BASE_TOKEN_ADDRESS}; - -use super::*; - -fn create_l2_to_l1_log_sent_value( - tx_number: U256, - sender: Address, - key: U256, - value: U256, -) -> Vec { - let mut key_arr = [0u8; 32]; - key.to_big_endian(&mut key_arr); - - let mut val_arr = [0u8; 32]; - value.to_big_endian(&mut val_arr); - - let tokens = vec![ - /*`l2ShardId`*/ Token::Uint(U256::from(0)), - /*`isService`*/ Token::Bool(true), - /*`txNumberInBlock`*/ Token::Uint(tx_number), - /*sender*/ Token::Address(sender), - /*key*/ Token::FixedBytes(key_arr.to_vec()), - /*value*/ Token::FixedBytes(val_arr.to_vec()), - ]; - - ethabi::encode(&tokens) -} - -fn create_byte_code_publication_req_value(hash: U256) -> Vec { - let mut hash_arr = [0u8; 32]; - hash.to_big_endian(&mut hash_arr); - - let tokens = vec![/*bytecode hash*/ Token::FixedBytes(hash_arr.to_vec())]; - - ethabi::encode(&tokens) -} - -fn create_l2_to_l1_log_vm_event( - from: Address, - tx_number: U256, - sender: Address, - key: U256, - value: U256, -) -> VmEvent { - let l1_messenger_l2_to_l1_log_event_signature = ethabi::long_signature( - "L2ToL1LogSent", - &[ethabi::ParamType::Tuple(vec![ - ethabi::ParamType::Uint(8), - ethabi::ParamType::Bool, - ethabi::ParamType::Uint(16), - ethabi::ParamType::Address, - ethabi::ParamType::FixedBytes(32), - ethabi::ParamType::FixedBytes(32), - ])], - ); - - VmEvent { - location: (L1BatchNumber(1), 0u32), - address: from, - indexed_topics: vec![l1_messenger_l2_to_l1_log_event_signature], - value: create_l2_to_l1_log_sent_value(tx_number, sender, key, value), - } -} - -fn create_bytecode_publication_vm_event(from: Address, hash: U256) -> VmEvent { - let bytecode_publication_event_signature = ethabi::long_signature( - "BytecodeL1PublicationRequested", - &[ethabi::ParamType::FixedBytes(32)], - ); - - VmEvent { - location: (L1BatchNumber(1), 0u32), - address: from, - indexed_topics: vec![bytecode_publication_event_signature], - value: create_byte_code_publication_req_value(hash), - } -} - -#[test] -fn test_extract_l2tol1logs_from_l1_messenger() { - let expected = vec![ - L1MessengerL2ToL1Log { - l2_shard_id: 0u8, - is_service: true, - tx_number_in_block: 5u16, - sender: KNOWN_CODES_STORAGE_ADDRESS, - key: U256::from(11), - value: U256::from(19), - }, - L1MessengerL2ToL1Log { - l2_shard_id: 0u8, - is_service: true, - tx_number_in_block: 7u16, - sender: L1_MESSENGER_ADDRESS, - key: U256::from(19), - value: U256::from(93), - }, - ]; - - let events = vec![ - create_l2_to_l1_log_vm_event( - L1_MESSENGER_ADDRESS, - U256::from(5), - KNOWN_CODES_STORAGE_ADDRESS, - U256::from(11), - U256::from(19), - ), - create_l2_to_l1_log_vm_event( - BOOTLOADER_ADDRESS, - U256::from(6), - L2_BASE_TOKEN_ADDRESS, - U256::from(6), - U256::from(8), - ), - create_l2_to_l1_log_vm_event( - L1_MESSENGER_ADDRESS, - U256::from(7), - L1_MESSENGER_ADDRESS, - U256::from(19), - U256::from(93), - ), - ]; - - let 
logs = extract_l2tol1logs_from_l1_messenger(&events); - - assert_eq!(expected, logs); -} - -#[test] -fn test_extract_bytecode_publication_requests_from_l1_messenger() { - let expected = vec![ - L1MessengerBytecodePublicationRequest { - bytecode_hash: u256_to_h256(U256::from(1438284388)), - }, - L1MessengerBytecodePublicationRequest { - bytecode_hash: u256_to_h256(U256::from(1231014388)), - }, - ]; - - let events = vec![ - create_bytecode_publication_vm_event(L2_BASE_TOKEN_ADDRESS, U256::from(1337)), - create_bytecode_publication_vm_event(L1_MESSENGER_ADDRESS, U256::from(1438284388)), - create_bytecode_publication_vm_event(L1_MESSENGER_ADDRESS, U256::from(1231014388)), - ]; - - let logs = extract_bytecode_publication_requests_from_l1_messenger(&events); - - assert_eq!(expected, logs); -} - -#[test] -fn test_convert_vm_events_to_log_queries() { - let cases: Vec = vec![ - serde_json::from_str(include_str!( - "./test_vectors/event_with_1_topic_and_long_value.json" - )) - .unwrap(), - serde_json::from_str(include_str!("./test_vectors/event_with_2_topics.json")).unwrap(), - serde_json::from_str(include_str!("./test_vectors/event_with_3_topics.json")).unwrap(), - serde_json::from_str(include_str!("./test_vectors/event_with_4_topics.json")).unwrap(), - serde_json::from_str(include_str!("./test_vectors/event_with_value_len_1.json")).unwrap(), - ]; - - for case in cases { - let event: VmEvent = serde_json::from_value(case["event"].clone()).unwrap(); - let expected_list: Vec = serde_json::from_value(case["list"].clone()).unwrap(); - - let actual_list = convert_vm_events_to_log_queries(&[event]); - assert_eq!(actual_list, expected_list); - } -} diff --git a/core/lib/types/src/fee.rs b/core/lib/types/src/fee.rs index 524015cdd09..9dc2cda9e62 100644 --- a/core/lib/types/src/fee.rs +++ b/core/lib/types/src/fee.rs @@ -1,57 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_utils::ceil_div; -use crate::{circuit::CircuitStatistic, U256}; - -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", tag = "result")] -pub struct TransactionExecutionMetrics { - pub initial_storage_writes: usize, - pub repeated_storage_writes: usize, - pub gas_used: usize, - pub gas_remaining: u32, - pub event_topics: u16, - pub published_bytecode_bytes: usize, - pub l2_l1_long_messages: usize, - pub l2_l1_logs: usize, - pub contracts_used: usize, - pub contracts_deployed: u16, - pub vm_events: usize, - pub storage_logs: usize, - // it's the sum of storage logs, vm events, l2->l1 logs, - // and the number of precompile calls - pub total_log_queries: usize, - pub cycles_used: u32, - pub computational_gas_used: u32, - pub total_updated_values_size: usize, - pub pubdata_published: u32, - pub circuit_statistic: CircuitStatistic, -} - -impl Default for TransactionExecutionMetrics { - fn default() -> Self { - Self { - initial_storage_writes: 0, - repeated_storage_writes: 0, - gas_used: 0, - gas_remaining: u32::MAX, - event_topics: 0, - published_bytecode_bytes: 0, - l2_l1_long_messages: 0, - l2_l1_logs: 0, - contracts_used: 0, - contracts_deployed: 0, - vm_events: 0, - storage_logs: 0, - total_log_queries: 0, - cycles_used: 0, - computational_gas_used: 0, - total_updated_values_size: 0, - pubdata_published: 0, - circuit_statistic: Default::default(), - } - } -} +use crate::U256; #[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Fee { diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index a55f6b5753d..402e16afd43 100644 --- 
a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -8,7 +8,6 @@ use std::{fmt, fmt::Debug}; use anyhow::Context as _; -pub use event::{VmEvent, VmEventGroupKey}; use fee::encoding_len; pub use l1::L1TxCommonData; pub use l2::L2TxCommonData; @@ -34,11 +33,9 @@ pub mod abi; pub mod aggregated_operations; pub mod blob; pub mod block; -pub mod circuit; pub mod commitment; pub mod contract_verification_api; pub mod debug_flat_call; -pub mod event; pub mod fee; pub mod fee_model; pub mod l1; @@ -49,11 +46,9 @@ pub mod protocol_upgrade; pub mod pubdata_da; pub mod snapshots; pub mod storage; -pub mod storage_writes_deduplicator; pub mod system_contracts; pub mod tokens; pub mod tx; -pub mod vm_trace; pub mod zk_evm_types; pub mod api; diff --git a/core/lib/types/src/tx/mod.rs b/core/lib/types/src/tx/mod.rs index 7078f4ee3fe..ed6e61184c4 100644 --- a/core/lib/types/src/tx/mod.rs +++ b/core/lib/types/src/tx/mod.rs @@ -4,50 +4,13 @@ //! it makes more sense to define the contents of each transaction chain-agnostic, and extent this data //! with metadata (such as fees and/or signatures) for L1 and L2 separately. -use std::fmt::Debug; - use zksync_basic_types::{Address, H256}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use self::tx_execution_info::TxExecutionStatus; -pub use self::{execute::Execute, tx_execution_info::ExecutionMetrics}; -use crate::{vm_trace::Call, Transaction}; +pub use self::execute::Execute; pub mod execute; -pub mod tx_execution_info; pub use zksync_crypto_primitives as primitives; -#[derive(Debug, Clone, PartialEq)] -pub struct TransactionExecutionResult { - pub transaction: Transaction, - pub hash: H256, - pub execution_info: ExecutionMetrics, - pub execution_status: TxExecutionStatus, - pub refunded_gas: u64, - pub operator_suggested_refund: u64, - pub compressed_bytecodes: Vec, - pub call_traces: Vec, - pub revert_reason: Option, -} - -impl TransactionExecutionResult { - pub fn call_trace(&self) -> Option { - if self.call_traces.is_empty() { - None - } else { - Some(Call::new_high_level( - self.transaction.gas_limit().as_u64(), - self.transaction.gas_limit().as_u64() - self.refunded_gas, - self.transaction.execute.value, - self.transaction.execute.calldata.clone(), - vec![], - self.revert_reason.clone(), - self.call_traces.clone(), - )) - } - } -} - #[derive(Debug, Clone, Copy)] pub struct IncludedTxLocation { pub tx_hash: H256, diff --git a/core/lib/types/src/tx/tx_execution_info.rs b/core/lib/types/src/tx/tx_execution_info.rs deleted file mode 100644 index 7b2b0dbd27e..00000000000 --- a/core/lib/types/src/tx/tx_execution_info.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::ops::{Add, AddAssign}; - -use crate::{ - circuit::CircuitStatistic, - commitment::SerializeCommitment, - fee::TransactionExecutionMetrics, - l2_to_l1_log::L2ToL1Log, - writes::{ - InitialStorageWrite, RepeatedStorageWrite, BYTES_PER_DERIVED_KEY, - BYTES_PER_ENUMERATION_INDEX, - }, - ProtocolVersionId, -}; - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum TxExecutionStatus { - Success, - Failure, -} - -impl TxExecutionStatus { - pub fn from_has_failed(has_failed: bool) -> Self { - if has_failed { - Self::Failure - } else { - Self::Success - } - } -} - -#[derive(Debug, Default, Clone, Copy, PartialEq)] -pub struct DeduplicatedWritesMetrics { - pub initial_storage_writes: usize, - pub repeated_storage_writes: usize, - pub total_updated_values_size: usize, -} - -impl DeduplicatedWritesMetrics { - pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { - Self { - 
initial_storage_writes: tx_metrics.initial_storage_writes, - repeated_storage_writes: tx_metrics.repeated_storage_writes, - total_updated_values_size: tx_metrics.total_updated_values_size, - } - } - - pub fn size(&self, protocol_version: ProtocolVersionId) -> usize { - if protocol_version.is_pre_boojum() { - self.initial_storage_writes * InitialStorageWrite::SERIALIZED_SIZE - + self.repeated_storage_writes * RepeatedStorageWrite::SERIALIZED_SIZE - } else { - self.total_updated_values_size - + (BYTES_PER_DERIVED_KEY as usize) * self.initial_storage_writes - + (BYTES_PER_ENUMERATION_INDEX as usize) * self.repeated_storage_writes - } - } -} - -#[derive(Debug, Clone, Copy, Default, PartialEq, serde::Serialize)] -pub struct ExecutionMetrics { - pub gas_used: usize, - pub published_bytecode_bytes: usize, - pub l2_l1_long_messages: usize, - pub l2_to_l1_logs: usize, - pub contracts_used: usize, - pub contracts_deployed: u16, - pub vm_events: usize, - pub storage_logs: usize, - pub total_log_queries: usize, - pub cycles_used: u32, - pub computational_gas_used: u32, - pub pubdata_published: u32, - pub circuit_statistic: CircuitStatistic, -} - -impl ExecutionMetrics { - pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { - Self { - published_bytecode_bytes: tx_metrics.published_bytecode_bytes, - l2_l1_long_messages: tx_metrics.l2_l1_long_messages, - l2_to_l1_logs: tx_metrics.l2_l1_logs, - contracts_deployed: tx_metrics.contracts_deployed, - contracts_used: tx_metrics.contracts_used, - gas_used: tx_metrics.gas_used, - storage_logs: tx_metrics.storage_logs, - vm_events: tx_metrics.vm_events, - total_log_queries: tx_metrics.total_log_queries, - cycles_used: tx_metrics.cycles_used, - computational_gas_used: tx_metrics.computational_gas_used, - pubdata_published: tx_metrics.pubdata_published, - circuit_statistic: tx_metrics.circuit_statistic, - } - } - - pub fn size(&self) -> usize { - self.l2_to_l1_logs * L2ToL1Log::SERIALIZED_SIZE - + self.l2_l1_long_messages - + self.published_bytecode_bytes - // TODO(PLA-648): refactor this constant - // It represents the need to store the length's of messages as well as bytecodes. - // It works due to the fact that each bytecode/L2->L1 long message is accompanied by a corresponding - // user L2->L1 log. 
- + self.l2_to_l1_logs * 4 - } -} - -impl Add for ExecutionMetrics { - type Output = ExecutionMetrics; - - fn add(self, other: ExecutionMetrics) -> ExecutionMetrics { - ExecutionMetrics { - published_bytecode_bytes: self.published_bytecode_bytes - + other.published_bytecode_bytes, - contracts_deployed: self.contracts_deployed + other.contracts_deployed, - contracts_used: self.contracts_used + other.contracts_used, - l2_l1_long_messages: self.l2_l1_long_messages + other.l2_l1_long_messages, - l2_to_l1_logs: self.l2_to_l1_logs + other.l2_to_l1_logs, - gas_used: self.gas_used + other.gas_used, - vm_events: self.vm_events + other.vm_events, - storage_logs: self.storage_logs + other.storage_logs, - total_log_queries: self.total_log_queries + other.total_log_queries, - cycles_used: self.cycles_used + other.cycles_used, - computational_gas_used: self.computational_gas_used + other.computational_gas_used, - pubdata_published: self.pubdata_published + other.pubdata_published, - circuit_statistic: self.circuit_statistic + other.circuit_statistic, - } - } -} - -impl AddAssign for ExecutionMetrics { - fn add_assign(&mut self, other: Self) { - *self = *self + other; - } -} diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index f9554c6f72b..48bdb433020 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,10 +1,6 @@ -use std::{collections::HashMap, convert::TryInto}; +// FIXME: move to basic_types? -use itertools::Itertools; -use zksync_basic_types::{ - ethabi::{encode, Token}, - H256, -}; +use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -21,117 +17,6 @@ pub enum InvalidBytecodeError { BytecodeLengthIsNotDivisibleBy32, } -#[derive(Debug, thiserror::Error)] -pub enum FailedToCompressBytecodeError { - #[error("Number of unique 8-bytes bytecode chunks exceed the limit of 2^16 - 1")] - DictionaryOverflow, - #[error("Bytecode is invalid: {0}")] - InvalidBytecode(#[from] InvalidBytecodeError), -} - -/// Implements, a simple compression algorithm for the bytecode. -pub fn compress_bytecode(code: &[u8]) -> Result, FailedToCompressBytecodeError> { - validate_bytecode(code)?; - - // Statistic is a hash map of values (number of occurrences, first occurrence position), - // this is needed to ensure that the determinism during sorting of the statistic, i.e. - // each element will have unique first occurrence position - let mut statistic: HashMap = HashMap::new(); - let mut dictionary: HashMap = HashMap::new(); - let mut encoded_data: Vec = Vec::new(); - - // Split original bytecode into 8-byte chunks. - for (position, chunk_bytes) in code.chunks(8).enumerate() { - // It is safe to unwrap here, because each chunk is exactly 8 bytes, since - // valid bytecodes are divisible by 8. - let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); - - // Count the number of occurrences of each chunk. - statistic.entry(chunk).or_insert((0, position)).0 += 1; - } - - let mut statistic_sorted_by_value: Vec<_> = statistic.into_iter().collect::>(); - statistic_sorted_by_value.sort_by_key(|x| x.1); - - // The dictionary size is limited by 2^16 - 1, - if statistic_sorted_by_value.len() > u16::MAX.into() { - return Err(FailedToCompressBytecodeError::DictionaryOverflow); - } - - // Fill the dictionary with the most popular chunks. - // The most popular chunks will be encoded with the smallest indexes, so that - // the 255 most popular chunks will be encoded with one zero byte. 
- // And the encoded data will be filled with more zeros, so - // the calldata that will be sent to L1 will be cheaper. - for (chunk, _) in statistic_sorted_by_value.iter().rev() { - dictionary.insert(*chunk, dictionary.len() as u16); - } - - for chunk_bytes in code.chunks(8) { - // It is safe to unwrap here, because each chunk is exactly 8 bytes, since - // valid bytecodes are divisible by 8. - let chunk = u64::from_be_bytes(chunk_bytes.try_into().unwrap()); - - // Add the index of the chunk to the encoded data. - encoded_data.extend(dictionary.get(&chunk).unwrap().to_be_bytes()); - } - - // Prepare the raw compressed bytecode in the following format: - // - 2 bytes: the length of the dictionary (N) - // - N bytes: packed dictionary bytes - // - remaining bytes: packed encoded data bytes - - let mut compressed: Vec = Vec::new(); - compressed.extend((dictionary.len() as u16).to_be_bytes()); - - dictionary - .into_iter() - .map(|(k, v)| (v, k)) - .sorted() - .for_each(|(_, chunk)| { - compressed.extend(chunk.to_be_bytes()); - }); - - compressed.extend(encoded_data); - - Ok(compressed) -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CompressedBytecodeInfo { - pub original: Vec, - pub compressed: Vec, -} - -impl CompressedBytecodeInfo { - pub fn from_original(bytecode: Vec) -> Result { - let compressed = compress_bytecode(&bytecode)?; - - let result = Self { - original: bytecode, - compressed, - }; - - Ok(result) - } - - pub fn encode_call(&self) -> Vec { - let bytecode_hash = hash_bytecode(&self.original).as_bytes().to_vec(); - let empty_cell = vec![0u8; 32]; - - let bytes_encoded = encode(&[ - Token::Bytes(self.original.clone()), - Token::Bytes(self.compressed.clone()), - ]); - - bytecode_hash - .into_iter() - .chain(empty_cell) - .chain(bytes_encoded) - .collect() - } -} - pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { let bytecode_len = code.len(); @@ -170,57 +55,3 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) as usize * 32 } - -#[cfg(test)] -mod test { - use super::*; - - fn decompress_bytecode(raw_compressed_bytecode: &[u8]) -> Vec { - let mut decompressed: Vec = Vec::new(); - let mut dictionary: Vec = Vec::new(); - - let dictionary_len = u16::from_be_bytes(raw_compressed_bytecode[0..2].try_into().unwrap()); - for index in 0..dictionary_len { - let chunk = u64::from_be_bytes( - raw_compressed_bytecode[2 + index as usize * 8..10 + index as usize * 8] - .try_into() - .unwrap(), - ); - dictionary.push(chunk); - } - - let encoded_data = &raw_compressed_bytecode[2 + dictionary_len as usize * 8..]; - for index_bytes in encoded_data.chunks(2) { - let index = u16::from_be_bytes(index_bytes.try_into().unwrap()); - - let chunk = dictionary[index as usize]; - decompressed.extend(chunk.to_be_bytes()); - } - - decompressed - } - - #[test] - fn bytecode_compression_test() { - let example_code = 
hex::decode("000200000000000200010000000103550000006001100270000000150010019d0000000101200190000000080000c13d0000000001000019004e00160000040f0000000101000039004e00160000040f0000001504000041000000150510009c000000000104801900000040011002100000000001310019000000150320009c0000000002048019000000600220021000000000012100190000004f0001042e000000000100001900000050000104300000008002000039000000400020043f0000000002000416000000000110004c000000240000613d000000000120004c0000004d0000c13d000000200100003900000100001004430000012000000443000001000100003900000040020000390000001d03000041004e000a0000040f000000000120004c0000004d0000c13d0000000001000031000000030110008c0000004d0000a13d0000000101000367000000000101043b0000001601100197000000170110009c0000004d0000c13d0000000101000039000000000101041a0000000202000039000000000202041a000000400300043d00000040043000390000001805200197000000000600041a0000000000540435000000180110019700000020043000390000000000140435000000a0012002700000001901100197000000600430003900000000001404350000001a012001980000001b010000410000000001006019000000b8022002700000001c02200197000000000121019f0000008002300039000000000012043500000018016001970000000000130435000000400100043d0000000002130049000000a0022000390000000003000019004e000a0000040f004e00140000040f0000004e000004320000004f0001042e000000500001043000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000000000008903573000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000ffffff0000000000008000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000000000000000000000000000000000000000000000000000007fffff00000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); - let compressed = compress_bytecode(&example_code).unwrap(); - let decompressed = decompress_bytecode(&compressed); - - assert_eq!(example_code, decompressed); - } - - #[test] - fn bytecode_compression_statistics_test() { - let example_code = - hex::decode("0000000000000000111111111111111111111111111111112222222222222222") - .unwrap(); - // The size of the dictionary should be `0x0003` - // The dictionary itself should put the most common chunk first, i.e. `0x1111111111111111` - // Then, the ordering does not matter, but the algorithm will return the one with the highest position, i.e. `0x2222222222222222` - let expected_encoding = - hex::decode("00031111111111111111222222222222222200000000000000000002000000000001") - .unwrap(); - - assert_eq!(expected_encoding, compress_bytecode(&example_code).unwrap()); - } -} diff --git a/core/lib/vm_interface/Cargo.toml b/core/lib/vm_interface/Cargo.toml index 75362d7da3f..1d4efe06634 100644 --- a/core/lib/vm_interface/Cargo.toml +++ b/core/lib/vm_interface/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true zksync_contracts.workspace = true zksync_system_constants.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true hex.workspace = true serde.workspace = true diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 1837bec4aff..b2b7d6484da 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -1,4 +1,21 @@ //! ZKsync Era VM interfaces. +//! +//! # Developer guidelines +//! +//! 
Which types should be put in this crate and which ones in `zksync_multivm` or other downstream crates? +//! +//! - This crate should contain logic not tied to a particular VM version; in contrast, most logic in `zksync_multivm` +//! is version-specific. +//! - This crate should not have heavyweight dependencies (like VM implementations). Anything heavier than `serde` is discouraged. +//! In contrast, `zksync_multivm` depends on old VM versions. +//! - If a type belongs in this crate, still be thorough about its methods. VM implementation details belong to `zksync_multivm` +//! and should be implemented as functions / extension traits there, rather than as methods here. +//! +//! Which types should be put in this crate vs `zksync_types`? +//! +//! - In this case, we want to separate types by domain. If a certain type clearly belongs to the VM domain +//! (e.g., can only be produced by VM execution), it probably belongs here. In contrast, if a type is more general / fundamental, +//! it may belong to `zksync_types`. pub use crate::{ types::{ @@ -8,9 +25,11 @@ pub use crate::{ }, inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, outputs::{ - BootloaderMemory, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L2Block, - Refunds, VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, - VmMemoryMetrics, + BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, + CurrentExecutionState, DeduplicatedWritesMetrics, ExecutionResult, FinishedL1Batch, + L2Block, Refunds, TransactionExecutionMetrics, TransactionExecutionResult, + TxExecutionStatus, VmEvent, VmExecutionLogs, VmExecutionMetrics, + VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, tracer, }, diff --git a/core/lib/vm_interface/src/types/outputs/bytecode.rs b/core/lib/vm_interface/src/types/outputs/bytecode.rs new file mode 100644 index 00000000000..100acb3d3d2 --- /dev/null +++ b/core/lib/vm_interface/src/types/outputs/bytecode.rs @@ -0,0 +1,5 @@ +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CompressedBytecodeInfo { + pub original: Vec, + pub compressed: Vec, +} diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 1037cc1d6e8..37e122c6d9d 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -1,17 +1,89 @@ -use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use zksync_system_constants::{ + BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, + PUBLISH_BYTECODE_OVERHEAD, +}; use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + ethabi, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - tx::ExecutionMetrics, - StorageLogWithPreviousValue, Transaction, VmEvent, H256, + zk_evm_types::FarCallOpcode, + Address, L1BatchNumber, StorageLogWithPreviousValue, Transaction, H256, U256, +}; + +use crate::{ + CompressedBytecodeInfo, Halt, VmExecutionMetrics, VmExecutionStatistics, VmRevertReason, }; -use crate::{Halt, VmExecutionStatistics, VmRevertReason}; +const L1_MESSAGE_EVENT_SIGNATURE: H256 = H256([ + 58, 54, 228, 114, 145, 244, 32, 31, 175, 19, 127, 171, 8, 29, 146, 41, 91, 206, 45, 83, 190, + 44, 108, 166, 139, 168, 44, 127, 170, 156, 226, 65, +]); +const PUBLISHED_BYTECODE_SIGNATURE: H256 = H256([ + 201, 71, 34, 255, 19, 234, 207, 83, 84, 124, 71, 65, 218, 
181, 34, 131, 83, 160, 89, 56, 255, + 205, 213, 212, 162, 213, 51, 174, 14, 97, 130, 135, +]); pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { usize::from(u16::from_be_bytes([bytecodehash[2], bytecodehash[3]])) * 32 } +/// Event generated by the VM. +#[derive(Default, Debug, Clone, PartialEq)] +pub struct VmEvent { + pub location: (L1BatchNumber, u32), + pub address: Address, + pub indexed_topics: Vec, + pub value: Vec, +} + +impl VmEvent { + /// Long signature of the contract deployment event (`ContractDeployed`). + pub const DEPLOY_EVENT_SIGNATURE: H256 = H256([ + 41, 10, 253, 174, 35, 26, 63, 192, 187, 174, 139, 26, 246, 54, 152, 176, 161, 215, 155, 33, + 173, 23, 223, 3, 66, 223, 185, 82, 254, 116, 248, 229, + ]); + /// Long signature of the L1 messenger bytecode publication event (`BytecodeL1PublicationRequested`). + pub const L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE: H256 = H256([ + 72, 13, 60, 159, 114, 123, 94, 92, 18, 3, 212, 198, 31, 177, 133, 211, 127, 8, 230, 178, + 220, 94, 155, 191, 152, 89, 27, 26, 122, 221, 245, 124, + ]); + + /// Extracts all the "long" L2->L1 messages that were submitted by the L1Messenger contract. + pub fn extract_long_l2_to_l1_messages(events: &[Self]) -> Vec> { + events + .iter() + .filter(|event| { + // Filter events from the l1 messenger contract that match the expected signature. + event.address == L1_MESSENGER_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == L1_MESSAGE_EVENT_SIGNATURE + }) + .map(|event| { + let decoded_tokens = ethabi::decode(&[ethabi::ParamType::Bytes], &event.value) + .expect("Failed to decode L1MessageSent message"); + // The `Token` does not implement `Copy` trait, so I had to do it like that: + let bytes_token = decoded_tokens.into_iter().next().unwrap(); + bytes_token.into_bytes().unwrap() + }) + .collect() + } + + /// Extracts bytecodes that were marked as known on the system contracts and should be published onchain. + pub fn extract_published_bytecodes(events: &[Self]) -> Vec { + events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. + event.address == KNOWN_CODES_STORAGE_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == PUBLISHED_BYTECODE_SIGNATURE + && event.indexed_topics[2] != H256::zero() + }) + .map(|event| event.indexed_topics[1]) + .collect() + } +} + /// Refunds produced for the user. 
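// Editorial sketch (not part of this diff): `bytecode_len_in_bytes` above recovers the
// bytecode length from bytes 2 and 3 of a versioned bytecode hash, which store the
// length in 32-byte words as a big-endian u16.
fn bytecode_len_sketch() {
    use zksync_types::H256;

    let mut hash = H256::zero();
    hash.0[3] = 5; // 5 words, i.e. 160 bytes
    assert_eq!(bytecode_len_in_bytes(hash), 5 * 32);
}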
#[derive(Debug, Clone, Default, PartialEq)] pub struct Refunds { @@ -65,7 +137,7 @@ impl ExecutionResult { } impl VmExecutionResultAndLogs { - pub fn get_execution_metrics(&self, tx: Option<&Transaction>) -> ExecutionMetrics { + pub fn get_execution_metrics(&self, tx: Option<&Transaction>) -> VmExecutionMetrics { let contracts_deployed = tx .map(|tx| tx.execute.factory_deps.len() as u16) .unwrap_or(0); @@ -74,19 +146,19 @@ impl VmExecutionResultAndLogs { // - message length in bytes, rounded up to a multiple of 32 // - 32 bytes of encoded offset // - 32 bytes of encoded length - let l2_l1_long_messages = extract_long_l2_to_l1_messages(&self.logs.events) + let l2_l1_long_messages = VmEvent::extract_long_l2_to_l1_messages(&self.logs.events) .iter() .map(|event| (event.len() + 31) / 32 * 32 + 64) .sum(); - let published_bytecode_bytes = extract_published_bytecodes(&self.logs.events) + let published_bytecode_bytes = VmEvent::extract_published_bytecodes(&self.logs.events) .iter() .map(|bytecodehash| { bytecode_len_in_bytes(*bytecodehash) + PUBLISH_BYTECODE_OVERHEAD as usize }) .sum(); - ExecutionMetrics { + VmExecutionMetrics { gas_used: self.statistics.gas_used as usize, published_bytecode_bytes, l2_l1_long_messages, @@ -103,3 +175,209 @@ impl VmExecutionResultAndLogs { } } } + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum TxExecutionStatus { + Success, + Failure, +} + +impl TxExecutionStatus { + pub fn from_has_failed(has_failed: bool) -> Self { + if has_failed { + Self::Failure + } else { + Self::Success + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] +pub enum CallType { + #[serde(serialize_with = "far_call_type_to_u8")] + #[serde(deserialize_with = "far_call_type_from_u8")] + Call(FarCallOpcode), + Create, + NearCall, +} + +impl Default for CallType { + fn default() -> Self { + Self::Call(FarCallOpcode::Normal) + } +} + +fn far_call_type_from_u8<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let res = u8::deserialize(deserializer)?; + match res { + 0 => Ok(FarCallOpcode::Normal), + 1 => Ok(FarCallOpcode::Delegate), + 2 => Ok(FarCallOpcode::Mimic), + _ => Err(serde::de::Error::custom("Invalid FarCallOpcode")), + } +} + +fn far_call_type_to_u8(far_call_type: &FarCallOpcode, s: S) -> Result +where + S: Serializer, +{ + s.serialize_u8(*far_call_type as u8) +} + +/// Represents a call in the VM trace. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Call { + /// Type of the call. + pub r#type: CallType, + /// Address of the caller. + pub from: Address, + /// Address of the callee. + pub to: Address, + /// Gas from the parent call. + pub parent_gas: u64, + /// Gas provided for the call. + pub gas: u64, + /// Gas used by the call. + pub gas_used: u64, + /// Value transferred. + pub value: U256, + /// Input data. + pub input: Vec, + /// Output data. + pub output: Vec, + /// Error message provided by vm or some unexpected errors. + pub error: Option, + /// Revert reason. + pub revert_reason: Option, + /// Subcalls. 
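// Editorial sketch (not part of this diff): the pubdata estimate used by
// `get_execution_metrics` above for long L2->L1 messages. Each message is charged its
// length rounded up to a multiple of 32, plus 32 bytes of encoded offset and 32 bytes
// of encoded length.
fn long_message_pubdata_sketch() {
    let message_len = 40_usize;
    let charged = (message_len + 31) / 32 * 32 + 64;
    assert_eq!(charged, 128); // 64 bytes of padded data + 64 bytes of ABI overhead
}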
+ pub calls: Vec, +} + +impl PartialEq for Call { + fn eq(&self, other: &Self) -> bool { + self.revert_reason == other.revert_reason + && self.input == other.input + && self.from == other.from + && self.to == other.to + && self.r#type == other.r#type + && self.value == other.value + && self.error == other.error + && self.output == other.output + && self.calls == other.calls + } +} + +impl Call { + pub fn new_high_level( + gas: u64, + gas_used: u64, + value: U256, + input: Vec, + output: Vec, + revert_reason: Option, + calls: Vec, + ) -> Self { + Self { + r#type: CallType::Call(FarCallOpcode::Normal), + from: Address::zero(), + to: BOOTLOADER_ADDRESS, + parent_gas: gas, + gas, + gas_used, + value, + input, + output, + error: None, + revert_reason, + calls, + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct TransactionExecutionResult { + pub transaction: Transaction, + pub hash: H256, + pub execution_info: VmExecutionMetrics, + pub execution_status: TxExecutionStatus, + pub refunded_gas: u64, + pub operator_suggested_refund: u64, + pub compressed_bytecodes: Vec, + pub call_traces: Vec, + pub revert_reason: Option, +} + +impl TransactionExecutionResult { + pub fn call_trace(&self) -> Option { + if self.call_traces.is_empty() { + None + } else { + Some(Call::new_high_level( + self.transaction.gas_limit().as_u64(), + self.transaction.gas_limit().as_u64() - self.refunded_gas, + self.transaction.execute.value, + self.transaction.execute.calldata.clone(), + vec![], + self.revert_reason.clone(), + self.call_traces.clone(), + )) + } + } +} + +#[cfg(test)] +mod tests { + use zksync_types::ethabi; + + use super::*; + + #[test] + fn deploy_event_signature_matches() { + let expected_signature = ethabi::long_signature( + "ContractDeployed", + &[ + ethabi::ParamType::Address, + ethabi::ParamType::FixedBytes(32), + ethabi::ParamType::Address, + ], + ); + assert_eq!(VmEvent::DEPLOY_EVENT_SIGNATURE, expected_signature); + } + + #[test] + fn bytecode_publication_request_event_signature_matches() { + let expected_signature = ethabi::long_signature( + "BytecodeL1PublicationRequested", + &[ethabi::ParamType::FixedBytes(32)], + ); + assert_eq!( + VmEvent::L1_MESSENGER_BYTECODE_PUBLICATION_EVENT_SIGNATURE, + expected_signature + ); + } + + #[test] + fn l1_message_event_signature_matches() { + let expected_signature = ethabi::long_signature( + "L1MessageSent", + &[ + ethabi::ParamType::Address, + ethabi::ParamType::FixedBytes(32), + ethabi::ParamType::Bytes, + ], + ); + assert_eq!(L1_MESSAGE_EVENT_SIGNATURE, expected_signature); + } + + #[test] + fn published_bytecode_event_signature_matches() { + let expected_signature = ethabi::long_signature( + "MarkedAsKnown", + &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], + ); + assert_eq!(PUBLISHED_BYTECODE_SIGNATURE, expected_signature); + } +} diff --git a/core/lib/vm_interface/src/types/outputs/execution_state.rs b/core/lib/vm_interface/src/types/outputs/execution_state.rs index 05eab795c87..6ea24397f83 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_state.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_state.rs @@ -1,8 +1,10 @@ use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - StorageLog, VmEvent, U256, + StorageLog, U256, }; +use super::VmEvent; + /// State of the VM since the start of the batch execution. 
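// Editorial sketch (not part of this diff): how `Call::new_high_level` above is used by
// `TransactionExecutionResult::call_trace` to wrap recorded sub-calls into a synthetic
// top-level call from the zero address to the bootloader. The argument values here are
// purely illustrative.
fn high_level_call_sketch() {
    use zksync_types::U256;

    let call = Call::new_high_level(
        1_000_000,    // gas limit of the transaction
        900_000,      // gas limit minus the refunded gas
        U256::zero(), // transferred value
        vec![],       // calldata
        vec![],       // output
        None,         // revert reason
        vec![],       // recorded sub-calls
    );
    assert_eq!(call.to, zksync_system_constants::BOOTLOADER_ADDRESS);
    assert!(call.calls.is_empty());
}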
#[derive(Debug, Clone, PartialEq)] pub struct CurrentExecutionState { diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs index eec19826e0b..d24e1440f83 100644 --- a/core/lib/vm_interface/src/types/outputs/mod.rs +++ b/core/lib/vm_interface/src/types/outputs/mod.rs @@ -1,11 +1,19 @@ pub use self::{ - execution_result::{ExecutionResult, Refunds, VmExecutionLogs, VmExecutionResultAndLogs}, + bytecode::CompressedBytecodeInfo, + execution_result::{ + Call, CallType, ExecutionResult, Refunds, TransactionExecutionResult, TxExecutionStatus, + VmEvent, VmExecutionLogs, VmExecutionResultAndLogs, + }, execution_state::{BootloaderMemory, CurrentExecutionState}, finished_l1batch::FinishedL1Batch, l2_block::L2Block, - statistic::{VmExecutionStatistics, VmMemoryMetrics}, + statistic::{ + CircuitStatistic, DeduplicatedWritesMetrics, TransactionExecutionMetrics, + VmExecutionMetrics, VmExecutionStatistics, VmMemoryMetrics, + }, }; +mod bytecode; mod execution_result; mod execution_state; mod finished_l1batch; diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index fb99ba7e36b..095547076d4 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -1,4 +1,94 @@ -use zksync_types::circuit::CircuitStatistic; +use std::ops; + +use serde::{Deserialize, Serialize}; +use zksync_types::{ + commitment::SerializeCommitment, + l2_to_l1_log::L2ToL1Log, + writes::{ + InitialStorageWrite, RepeatedStorageWrite, BYTES_PER_DERIVED_KEY, + BYTES_PER_ENUMERATION_INDEX, + }, + ProtocolVersionId, +}; + +/// Holds information about number of circuits used per circuit type. +#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)] +pub struct CircuitStatistic { + pub main_vm: f32, + pub ram_permutation: f32, + pub storage_application: f32, + pub storage_sorter: f32, + pub code_decommitter: f32, + pub code_decommitter_sorter: f32, + pub log_demuxer: f32, + pub events_sorter: f32, + pub keccak256: f32, + pub ecrecover: f32, + pub sha256: f32, + #[serde(default)] + pub secp256k1_verify: f32, + #[serde(default)] + pub transient_storage_checker: f32, +} + +impl CircuitStatistic { + /// Rounds up numbers and adds them. + pub fn total(&self) -> usize { + self.main_vm.ceil() as usize + + self.ram_permutation.ceil() as usize + + self.storage_application.ceil() as usize + + self.storage_sorter.ceil() as usize + + self.code_decommitter.ceil() as usize + + self.code_decommitter_sorter.ceil() as usize + + self.log_demuxer.ceil() as usize + + self.events_sorter.ceil() as usize + + self.keccak256.ceil() as usize + + self.ecrecover.ceil() as usize + + self.sha256.ceil() as usize + + self.secp256k1_verify.ceil() as usize + + self.transient_storage_checker.ceil() as usize + } + + /// Adds numbers. 
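// Editorial sketch (not part of this diff): `CircuitStatistic::total` above rounds each
// circuit type up before summing, since fractional usage of two different circuit types
// cannot share a single circuit instance.
fn circuit_statistic_total_sketch() {
    let stat = CircuitStatistic {
        main_vm: 1.2,
        ram_permutation: 0.3,
        ..CircuitStatistic::default()
    };
    assert_eq!(stat.total(), 3); // ceil(1.2) + ceil(0.3)
}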
+ pub fn total_f32(&self) -> f32 { + self.main_vm + + self.ram_permutation + + self.storage_application + + self.storage_sorter + + self.code_decommitter + + self.code_decommitter_sorter + + self.log_demuxer + + self.events_sorter + + self.keccak256 + + self.ecrecover + + self.sha256 + + self.secp256k1_verify + + self.transient_storage_checker + } +} + +impl ops::Add for CircuitStatistic { + type Output = Self; + + fn add(self, other: Self) -> Self { + Self { + main_vm: self.main_vm + other.main_vm, + ram_permutation: self.ram_permutation + other.ram_permutation, + storage_application: self.storage_application + other.storage_application, + storage_sorter: self.storage_sorter + other.storage_sorter, + code_decommitter: self.code_decommitter + other.code_decommitter, + code_decommitter_sorter: self.code_decommitter_sorter + other.code_decommitter_sorter, + log_demuxer: self.log_demuxer + other.log_demuxer, + events_sorter: self.events_sorter + other.events_sorter, + keccak256: self.keccak256 + other.keccak256, + ecrecover: self.ecrecover + other.ecrecover, + sha256: self.sha256 + other.sha256, + secp256k1_verify: self.secp256k1_verify + other.secp256k1_verify, + transient_storage_checker: self.transient_storage_checker + + other.transient_storage_checker, + } + } +} /// Statistics of the tx execution. #[derive(Debug, Default, Clone)] @@ -47,3 +137,156 @@ impl VmMemoryMetrics { .sum::() } } + +#[derive(Debug, Default, Clone, Copy, PartialEq)] +pub struct DeduplicatedWritesMetrics { + pub initial_storage_writes: usize, + pub repeated_storage_writes: usize, + pub total_updated_values_size: usize, +} + +impl DeduplicatedWritesMetrics { + pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { + Self { + initial_storage_writes: tx_metrics.initial_storage_writes, + repeated_storage_writes: tx_metrics.repeated_storage_writes, + total_updated_values_size: tx_metrics.total_updated_values_size, + } + } + + pub fn size(&self, protocol_version: ProtocolVersionId) -> usize { + if protocol_version.is_pre_boojum() { + self.initial_storage_writes * InitialStorageWrite::SERIALIZED_SIZE + + self.repeated_storage_writes * RepeatedStorageWrite::SERIALIZED_SIZE + } else { + self.total_updated_values_size + + (BYTES_PER_DERIVED_KEY as usize) * self.initial_storage_writes + + (BYTES_PER_ENUMERATION_INDEX as usize) * self.repeated_storage_writes + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct TransactionExecutionMetrics { + pub initial_storage_writes: usize, + pub repeated_storage_writes: usize, + pub gas_used: usize, + pub gas_remaining: u32, + pub event_topics: u16, + pub published_bytecode_bytes: usize, + pub l2_l1_long_messages: usize, + pub l2_l1_logs: usize, + pub contracts_used: usize, + pub contracts_deployed: u16, + pub vm_events: usize, + pub storage_logs: usize, + /// Sum of storage logs, vm events, l2->l1 logs, and the number of precompile calls. 
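// Editorial sketch (not part of this diff): `DeduplicatedWritesMetrics::size` above is
// protocol-version dependent; pre-boojum versions charge a fixed serialized size per
// write, later versions charge per derived key / enumeration index plus the total size
// of the updated values. `ProtocolVersionId::latest()` is assumed to be post-boojum.
fn deduplicated_writes_size_sketch() {
    use zksync_types::ProtocolVersionId;

    let metrics = DeduplicatedWritesMetrics {
        initial_storage_writes: 2,
        repeated_storage_writes: 3,
        total_updated_values_size: 96,
    };
    // The exact per-write byte counts come from serialization constants in `zksync_types`;
    // only the shape of the computation matters here.
    assert!(metrics.size(ProtocolVersionId::latest()) > metrics.total_updated_values_size);
}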
+ pub total_log_queries: usize, + pub cycles_used: u32, + pub computational_gas_used: u32, + pub total_updated_values_size: usize, + pub pubdata_published: u32, + pub circuit_statistic: CircuitStatistic, +} + +impl Default for TransactionExecutionMetrics { + fn default() -> Self { + Self { + initial_storage_writes: 0, + repeated_storage_writes: 0, + gas_used: 0, + gas_remaining: u32::MAX, + event_topics: 0, + published_bytecode_bytes: 0, + l2_l1_long_messages: 0, + l2_l1_logs: 0, + contracts_used: 0, + contracts_deployed: 0, + vm_events: 0, + storage_logs: 0, + total_log_queries: 0, + cycles_used: 0, + computational_gas_used: 0, + total_updated_values_size: 0, + pubdata_published: 0, + circuit_statistic: Default::default(), + } + } +} + +#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize)] +pub struct VmExecutionMetrics { + pub gas_used: usize, + pub published_bytecode_bytes: usize, + pub l2_l1_long_messages: usize, + pub l2_to_l1_logs: usize, + pub contracts_used: usize, + pub contracts_deployed: u16, + pub vm_events: usize, + pub storage_logs: usize, + pub total_log_queries: usize, + pub cycles_used: u32, + pub computational_gas_used: u32, + pub pubdata_published: u32, + pub circuit_statistic: CircuitStatistic, +} + +impl VmExecutionMetrics { + pub fn from_tx_metrics(tx_metrics: &TransactionExecutionMetrics) -> Self { + Self { + published_bytecode_bytes: tx_metrics.published_bytecode_bytes, + l2_l1_long_messages: tx_metrics.l2_l1_long_messages, + l2_to_l1_logs: tx_metrics.l2_l1_logs, + contracts_deployed: tx_metrics.contracts_deployed, + contracts_used: tx_metrics.contracts_used, + gas_used: tx_metrics.gas_used, + storage_logs: tx_metrics.storage_logs, + vm_events: tx_metrics.vm_events, + total_log_queries: tx_metrics.total_log_queries, + cycles_used: tx_metrics.cycles_used, + computational_gas_used: tx_metrics.computational_gas_used, + pubdata_published: tx_metrics.pubdata_published, + circuit_statistic: tx_metrics.circuit_statistic, + } + } + + pub fn size(&self) -> usize { + self.l2_to_l1_logs * L2ToL1Log::SERIALIZED_SIZE + + self.l2_l1_long_messages + + self.published_bytecode_bytes + // TODO(PLA-648): refactor this constant + // It represents the need to store the length's of messages as well as bytecodes. + // It works due to the fact that each bytecode/L2->L1 long message is accompanied by a corresponding + // user L2->L1 log. 
+ + self.l2_to_l1_logs * 4 + } +} + +impl ops::Add for VmExecutionMetrics { + type Output = Self; + + fn add(self, other: Self) -> Self { + Self { + published_bytecode_bytes: self.published_bytecode_bytes + + other.published_bytecode_bytes, + contracts_deployed: self.contracts_deployed + other.contracts_deployed, + contracts_used: self.contracts_used + other.contracts_used, + l2_l1_long_messages: self.l2_l1_long_messages + other.l2_l1_long_messages, + l2_to_l1_logs: self.l2_to_l1_logs + other.l2_to_l1_logs, + gas_used: self.gas_used + other.gas_used, + vm_events: self.vm_events + other.vm_events, + storage_logs: self.storage_logs + other.storage_logs, + total_log_queries: self.total_log_queries + other.total_log_queries, + cycles_used: self.cycles_used + other.cycles_used, + computational_gas_used: self.computational_gas_used + other.computational_gas_used, + pubdata_published: self.pubdata_published + other.pubdata_published, + circuit_statistic: self.circuit_statistic + other.circuit_statistic, + } + } +} + +impl ops::AddAssign for VmExecutionMetrics { + fn add_assign(&mut self, other: Self) { + *self = *self + other; + } +} diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index fd488e5100c..b8614a46c14 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -12,12 +12,11 @@ //! where `VmTracer` is a trait implemented for a specific VM version. use zksync_types::Transaction; -use zksync_utils::bytecode::CompressedBytecodeInfo; use crate::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, - VmMemoryMetrics, + storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmMemoryMetrics, }; pub trait VmInterface { diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index d25c46bda08..4d2606dcf12 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -16,7 +16,7 @@ use zksync_config::{ ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, - ProtectiveReadsWriterConfig, PruningConfig, SnapshotRecoveryConfig, + ProtectiveReadsWriterConfig, ProverJobMonitorConfig, PruningConfig, SnapshotRecoveryConfig, }, ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, @@ -79,6 +79,7 @@ pub struct TempConfigStore { pub external_price_api_client_config: Option, pub external_proof_integration_api_config: Option, pub experimental_vm_config: Option, + pub prover_job_monitor_config: Option, } impl TempConfigStore { @@ -97,7 +98,7 @@ impl TempConfigStore { prover_gateway: self.fri_prover_gateway_config.clone(), witness_vector_generator: self.fri_witness_vector_generator.clone(), prover_group_config: self.fri_prover_group_config.clone(), - witness_generator: self.fri_witness_generator_config.clone(), + witness_generator_config: self.fri_witness_generator_config.clone(), prometheus_config: 
self.prometheus_config.clone(), proof_data_handler_config: self.proof_data_handler_config.clone(), db_config: self.db_config.clone(), @@ -118,6 +119,7 @@ impl TempConfigStore { .external_proof_integration_api_config .clone(), experimental_vm_config: self.experimental_vm_config.clone(), + prover_job_monitor_config: self.prover_job_monitor_config.clone(), } } @@ -191,6 +193,7 @@ fn load_env_config() -> anyhow::Result { external_price_api_client_config: ExternalPriceApiClientConfig::from_env().ok(), external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), experimental_vm_config: ExperimentalVmConfig::from_env().ok(), + prover_job_monitor_config: ProverJobMonitorConfig::from_env().ok(), }) } diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index f633b133ab0..741bcaea18f 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -4,13 +4,15 @@ use anyhow::Context as _; use tracing::{span, Level}; use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::{ - interface::{TxExecutionMode, VmExecutionResultAndLogs, VmInterface}, + interface::{ + TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs, VmInterface, + }, tracers::StorageInvocations, MultiVMTracer, }; use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, transaction_request::CallOverrides, - ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, + l2::L2Tx, transaction_request::CallOverrides, ExecuteTransactionCommon, Nonce, + PackedEthSignature, Transaction, U256, }; use super::{ diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/node/api_server/src/execution_sandbox/testonly.rs index 673c30b9f17..59fa2e38db7 100644 --- a/core/node/api_server/src/execution_sandbox/testonly.rs +++ b/core/node/api_server/src/execution_sandbox/testonly.rs @@ -1,9 +1,9 @@ use std::fmt; -use zksync_multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; -use zksync_types::{ - fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Transaction, +use zksync_multivm::interface::{ + ExecutionResult, TransactionExecutionMetrics, VmExecutionResultAndLogs, }; +use zksync_types::{l2::L2Tx, ExecuteTransactionCommon, Transaction}; use super::{ execute::{TransactionExecutionOutput, TransactionExecutor}, diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs index f03c17a5fa4..8d61d896a36 100644 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ b/core/node/api_server/src/execution_sandbox/tracers.rs @@ -2,10 +2,11 @@ use std::sync::Arc; use once_cell::sync::OnceCell; use zksync_multivm::{ - interface::storage::WriteStorage, tracers::CallTracer, vm_latest::HistoryMode, MultiVMTracer, - MultiVmTracerPointer, + interface::{storage::WriteStorage, Call}, + tracers::CallTracer, + vm_latest::HistoryMode, + MultiVMTracer, MultiVmTracerPointer, }; -use zksync_types::vm_trace::Call; /// Custom tracers supported by our API #[derive(Debug)] diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index 27e1c2ab305..ffe87be899b 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -3,16 +3,15 @@ use std::time::Duration; use vise::{ Buckets, EncodeLabelSet, EncodeLabelValue, Family, 
Gauge, Histogram, LatencyObserver, Metrics, }; -use zksync_multivm::interface::{ - storage::StorageViewMetrics, VmExecutionResultAndLogs, VmMemoryMetrics, +use zksync_multivm::{ + interface::{ + storage::StorageViewMetrics, TransactionExecutionMetrics, VmEvent, + VmExecutionResultAndLogs, VmMemoryMetrics, + }, + utils::StorageWritesDeduplicator, }; use zksync_shared_metrics::InteractionType; -use zksync_types::{ - event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, - fee::TransactionExecutionMetrics, - storage_writes_deduplicator::StorageWritesDeduplicator, - H256, -}; +use zksync_types::H256; use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::utils::ReportFilter; @@ -275,11 +274,11 @@ pub(super) fn collect_tx_execution_metrics( .iter() .map(|event| event.indexed_topics.len() as u16) .sum(); - let l2_l1_long_messages = extract_long_l2_to_l1_messages(&result.logs.events) + let l2_l1_long_messages = VmEvent::extract_long_l2_to_l1_messages(&result.logs.events) .iter() .map(|event| event.len()) .sum(); - let published_bytecode_bytes = extract_published_bytecodes(&result.logs.events) + let published_bytecode_bytes = VmEvent::extract_published_bytecodes(&result.logs.events) .iter() .map(|bytecode_hash| bytecode_len_in_bytes(*bytecode_hash)) .sum(); diff --git a/core/node/api_server/src/tx_sender/master_pool_sink.rs b/core/node/api_server/src/tx_sender/master_pool_sink.rs index cb4e73e3bb7..736edf0b247 100644 --- a/core/node/api_server/src/tx_sender/master_pool_sink.rs +++ b/core/node/api_server/src/tx_sender/master_pool_sink.rs @@ -2,8 +2,9 @@ use std::collections::hash_map::{Entry, HashMap}; use tokio::sync::Mutex; use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Core, CoreDal}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_shared_metrics::{TxStage, APP_METRICS}; -use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx, Address, Nonce, H256}; +use zksync_types::{l2::L2Tx, Address, Nonce, H256}; use super::{tx_sink::TxSink, SubmitTxError}; use crate::web3::metrics::API_METRICS; diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 826200b5537..085f3c395dd 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,7 +10,7 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::VmExecutionResultAndLogs, + interface::{TransactionExecutionMetrics, VmExecutionResultAndLogs}, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, get_eth_call_gas_limit, get_max_batch_gas_limit, @@ -25,7 +25,7 @@ use zksync_state_keeper::{ }; use zksync_types::{ api::state_override::StateOverride, - fee::{Fee, TransactionExecutionMetrics}, + fee::Fee, fee_model::BatchFeeInput, get_code_key, get_intrinsic_constants, l2::{error::TxCheckError::TxDuplication, L2Tx}, diff --git a/core/node/api_server/src/tx_sender/proxy.rs b/core/node/api_server/src/tx_sender/proxy.rs index e179cdcb774..536a9767c1f 100644 --- a/core/node/api_server/src/tx_sender/proxy.rs +++ b/core/node/api_server/src/tx_sender/proxy.rs @@ -11,8 +11,9 @@ use zksync_dal::{ helpers::wait_for_l1_batch, transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, DalError, }; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_shared_metrics::{TxStage, APP_METRICS}; -use zksync_types::{api, 
fee::TransactionExecutionMetrics, l2::L2Tx, Address, Nonce, H256, U256}; +use zksync_types::{api, l2::L2Tx, Address, Nonce, H256, U256}; use zksync_web3_decl::{ client::{DynClient, L2}, error::{ClientRpcContext, EnrichedClientResult, Web3Error}, diff --git a/core/node/api_server/src/tx_sender/tx_sink.rs b/core/node/api_server/src/tx_sender/tx_sink.rs index 5edf21b0701..3d764816fe0 100644 --- a/core/node/api_server/src/tx_sender/tx_sink.rs +++ b/core/node/api_server/src/tx_sender/tx_sink.rs @@ -1,7 +1,7 @@ use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, Core}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_types::{ api::{Transaction, TransactionDetails, TransactionId}, - fee::TransactionExecutionMetrics, l2::L2Tx, Address, Nonce, H256, }; diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index 16bbde13509..f83eb37ad96 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use itertools::Itertools; +use zksync_multivm::interface::VmEvent; use zksync_types::{ api::{ state_override::StateOverride, ApiStorageLog, BlockDetails, BridgeAddresses, @@ -10,8 +10,7 @@ use zksync_types::{ fee::Fee, fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput}, transaction_request::CallRequest, - web3::Bytes, - Address, L1BatchNumber, L2BlockNumber, H256, U256, U64, + web3, Address, L1BatchNumber, L2BlockNumber, H256, U256, U64, }; use zksync_web3_decl::{ jsonrpsee::core::{async_trait, RpcResult}, @@ -196,7 +195,7 @@ impl ZksNamespaceServer for ZksNamespace { async fn send_raw_transaction_with_detailed_output( &self, - tx_bytes: Bytes, + tx_bytes: web3::Bytes, ) -> RpcResult { self.send_raw_transaction_with_detailed_output_impl(tx_bytes) .await @@ -209,19 +208,37 @@ impl ZksNamespaceServer for ZksNamespace { .iter() .filter(|x| x.log.is_write()) .map(ApiStorageLog::from) - .collect_vec(), + .collect(), events: result .1 .logs .events .iter() - .map(|x| { - let mut l = Log::from(x); - l.transaction_hash = Some(result.0); - l + .map(|event| { + let mut log = map_event(event); + log.transaction_hash = Some(result.0); + log }) - .collect_vec(), + .collect(), }) .map_err(|err| self.current_method().map_err(err)) } } + +fn map_event(vm_event: &VmEvent) -> Log { + Log { + address: vm_event.address, + topics: vm_event.indexed_topics.clone(), + data: web3::Bytes::from(vm_event.value.clone()), + block_hash: None, + block_number: None, + l1_batch_number: Some(U64::from(vm_event.location.0 .0)), + transaction_hash: None, + transaction_index: Some(web3::Index::from(vm_event.location.1)), + log_index: None, + transaction_log_index: None, + log_type: None, + removed: Some(false), + block_timestamp: None, + } +} diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 2f2d1d44cba..e71f4bd1e1e 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -4,17 +4,17 @@ use anyhow::Context as _; use once_cell::sync::OnceCell; use zksync_dal::{CoreDal, DalError}; use zksync_multivm::{ - interface::ExecutionResult, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + interface::{Call, CallType, ExecutionResult}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_system_constants::MAX_ENCODED_TX_SIZE; 
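// Editorial sketch (not part of this diff): the shape of the `Log` produced by the
// private `map_event` helper added to the `zks` namespace above. The event payload and
// location are copied over, while block-level metadata stays unset until it is known.
fn map_event_shape_sketch(vm_event: &VmEvent) {
    let log = map_event(vm_event);
    assert!(log.address == vm_event.address);
    assert!(log.topics == vm_event.indexed_topics);
    assert!(log.block_hash.is_none());
    assert!(log.transaction_hash.is_none());
    assert!(log.removed == Some(false));
}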
use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, + api::{BlockId, BlockNumber, DebugCall, DebugCallType, ResultDebugCall, TracerConfig}, debug_flat_call::{flatten_debug_calls, DebugCallFlat}, fee_model::BatchFeeInput, l2::L2Tx, transaction_request::CallRequest, - vm_trace::Call, - AccountTreeId, H256, + web3, AccountTreeId, H256, U256, }; use zksync_web3_decl::error::Web3Error; @@ -51,6 +51,35 @@ impl DebugNamespace { }) } + pub(crate) fn map_call(call: Call, only_top_call: bool) -> DebugCall { + let calls = if only_top_call { + vec![] + } else { + call.calls + .into_iter() + .map(|call| Self::map_call(call, false)) + .collect() + }; + let debug_type = match call.r#type { + CallType::Call(_) => DebugCallType::Call, + CallType::Create => DebugCallType::Create, + CallType::NearCall => unreachable!("We have to filter our near calls before"), + }; + DebugCall { + r#type: debug_type, + from: call.from, + to: call.to, + gas: U256::from(call.gas), + gas_used: U256::from(call.gas_used), + value: call.value, + output: web3::Bytes::from(call.output), + input: web3::Bytes::from(call.input), + error: call.error, + revert_reason: call.revert_reason, + calls, + } + } + fn sender_config(&self) -> &TxSenderConfig { &self.state.tx_sender.0.sender_config } @@ -86,10 +115,7 @@ impl DebugNamespace { let call_trace = call_traces .into_iter() .map(|call_trace| { - let mut result: DebugCall = call_trace.into(); - if only_top_call { - result.calls = vec![]; - } + let result = Self::map_call(call_trace, only_top_call); ResultDebugCall { result } }) .collect(); @@ -120,13 +146,7 @@ impl DebugNamespace { .get_call_trace(tx_hash) .await .map_err(DalError::generalize)?; - Ok(call_trace.map(|call_trace| { - let mut result: DebugCall = call_trace.into(); - if only_top_call { - result.calls = vec![]; - } - result - })) + Ok(call_trace.map(|call_trace| Self::map_call(call_trace, only_top_call))) } pub async fn debug_trace_call_impl( @@ -226,7 +246,7 @@ impl DebugNamespace { revert_reason, trace, ); - Ok(call.into()) + Ok(Self::map_call(call, false)) } async fn shared_args(&self) -> TxSharedArgs { diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index b0e74706e52..5c8b47dabeb 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -287,7 +287,7 @@ impl RpcState { #[track_caller] pub(crate) fn acquire_connection( &self, - ) -> impl Future, Web3Error>> + '_ { + ) -> impl Future, Web3Error>> + '_ { self.connection_pool .connection_tagged("api") .map_err(|err| err.generalize().into()) diff --git a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs index a074c143057..76496b42cad 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -1,6 +1,7 @@ //! Tests for the `debug` Web3 namespace. 
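Note (not part of the patch): the new `DebugNamespace::map_call` introduced above replaces the previous `DebugCall::from(call)` conversion and prunes nested frames when the tracer's only-top-call option is requested. Below is a minimal standalone sketch of that pruning behaviour, using hypothetical simplified types rather than the real `Call`/`DebugCall`; the actual helper additionally maps `CallType`, wraps gas values in `U256`, and treats `NearCall` as unreachable because near calls are filtered out earlier.

// Sketch only: simplified stand-ins for the real trace types.
#[derive(Clone)]
struct RawCall {
    from: &'static str,
    to: &'static str,
    calls: Vec<RawCall>,
}

struct ApiCall {
    from: &'static str,
    to: &'static str,
    calls: Vec<ApiCall>,
}

fn map_call(call: RawCall, only_top_call: bool) -> ApiCall {
    let calls = if only_top_call {
        vec![] // keep only the top-level frame when only-top-call is requested
    } else {
        call.calls
            .into_iter()
            .map(|sub| map_call(sub, false))
            .collect()
    };
    ApiCall { from: call.from, to: call.to, calls }
}

fn main() {
    let trace = RawCall {
        from: "0xaaaa",
        to: "0xbbbb",
        calls: vec![RawCall { from: "0xbbbb", to: "0xcccc", calls: vec![] }],
    };
    assert!(map_call(trace.clone(), true).calls.is_empty()); // nested frame dropped
    assert_eq!(map_call(trace, false).calls.len(), 1); // full tree preserved
}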
-use zksync_types::{tx::TransactionExecutionResult, vm_trace::Call, BOOTLOADER_ADDRESS}; +use zksync_multivm::interface::{Call, TransactionExecutionResult}; +use zksync_types::BOOTLOADER_ADDRESS; use zksync_web3_decl::{ client::{DynClient, L2}, namespaces::DebugNamespaceClient, @@ -68,7 +69,7 @@ impl HttpTest for TraceBlockTest { let expected_calls: Vec<_> = tx_result .call_traces .iter() - .map(|call| api::DebugCall::from(call.clone())) + .map(|call| DebugNamespace::map_call(call.clone(), false)) .collect(); assert_eq!(result.calls, expected_calls); } @@ -197,7 +198,7 @@ impl HttpTest for TraceTransactionTest { let expected_calls: Vec<_> = tx_results[0] .call_traces .iter() - .map(|call| api::DebugCall::from(call.clone())) + .map(|call| DebugNamespace::map_call(call.clone(), false)) .collect(); let result = client diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index d136971734a..409eb2004d1 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -17,7 +17,10 @@ use zksync_config::{ GenesisConfig, }; use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; -use zksync_multivm::zk_evm_latest::ethereum_types::U256; +use zksync_multivm::interface::{ + TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, + VmExecutionMetrics, +}; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, @@ -26,18 +29,14 @@ use zksync_node_test_utils::{ use zksync_types::{ api, block::L2BlockHeader, - fee::TransactionExecutionMetrics, get_nonce_key, l2::L2Tx, storage::get_code_key, tokens::{TokenInfo, TokenMetadata}, - tx::{ - tx_execution_info::TxExecutionStatus, ExecutionMetrics, IncludedTxLocation, - TransactionExecutionResult, - }, + tx::IncludedTxLocation, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, - AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, - VmEvent, H256, U64, + AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, H256, + U256, U64, }; use zksync_utils::u256_to_h256; use zksync_web3_decl::{ @@ -273,7 +272,7 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { TransactionExecutionResult { hash: transaction.hash(), transaction: transaction.into(), - execution_info: ExecutionMetrics::default(), + execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, operator_suggested_refund: 0, diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index f9629f6dab9..90e1373a5cc 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -3,16 +3,12 @@ use std::sync::atomic::{AtomicU32, Ordering}; use api::state_override::{OverrideAccount, StateOverride}; -use itertools::Itertools; use zksync_multivm::interface::{ ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; use zksync_types::{ - api::{ApiStorageLog, Log}, - get_intrinsic_constants, - transaction_request::CallRequest, - K256PrivateKey, L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, - U256, + api::ApiStorageLog, get_intrinsic_constants, transaction_request::CallRequest, K256PrivateKey, + L2ChainId, 
PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, U256, }; use zksync_utils::u256_to_h256; use zksync_web3_decl::namespaces::DebugNamespaceClient; @@ -360,24 +356,24 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { .send_raw_transaction_with_detailed_output(tx_bytes.into()) .await?; assert_eq!(send_result.transaction_hash, tx_hash); - assert_eq!( - send_result.events, - self.vm_events() - .iter() - .map(|x| { - let mut l = Log::from(x); - l.transaction_hash = Some(tx_hash); - l - }) - .collect_vec() - ); + + let expected_events = self.vm_events(); + assert_eq!(send_result.events.len(), expected_events.len()); + for (event, expected_event) in send_result.events.iter().zip(&expected_events) { + assert_eq!(event.transaction_hash, Some(tx_hash)); + assert_eq!(event.address, expected_event.address); + assert_eq!(event.topics, expected_event.indexed_topics); + assert_eq!(event.l1_batch_number, Some(1.into())); + assert_eq!(event.transaction_index, Some(1.into())); + } + assert_eq!( send_result.storage_logs, self.storage_logs() .iter() .filter(|x| x.log.is_write()) .map(ApiStorageLog::from) - .collect_vec() + .collect::>() ); Ok(()) } diff --git a/core/node/api_server/src/web3/tests/ws.rs b/core/node/api_server/src/web3/tests/ws.rs index cccebdd6ddd..39f991aba04 100644 --- a/core/node/api_server/src/web3/tests/ws.rs +++ b/core/node/api_server/src/web3/tests/ws.rs @@ -8,7 +8,7 @@ use http::StatusCode; use tokio::sync::watch; use zksync_config::configs::chain::NetworkConfig; use zksync_dal::ConnectionPool; -use zksync_types::{api, Address, L1BatchNumber, H160, H2048, H256, U64}; +use zksync_types::{api, Address, Bloom, L1BatchNumber, H160, H256, U64}; use zksync_web3_decl::{ client::{WsClient, L2}, jsonrpsee::{ @@ -318,7 +318,7 @@ impl WsTest for BasicSubscriptionsTest { Some(new_l2_block.base_fee_per_gas.into()) ); assert_eq!(received_block_header.extra_data, Bytes::default()); - assert_eq!(received_block_header.logs_bloom, H2048::default()); + assert_eq!(received_block_header.logs_bloom, Bloom::default()); assert_eq!( received_block_header.timestamp, new_l2_block.timestamp.into() diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index a2dcae1724f..b29d01af39a 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -66,6 +66,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora protocol_version: Some(ProtocolVersionId::latest()), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), }; storage .blocks_dal() diff --git a/core/node/commitment_generator/Cargo.toml b/core/node/commitment_generator/Cargo.toml index a88b494a7d8..5ec8410124f 100644 --- a/core/node/commitment_generator/Cargo.toml +++ b/core/node/commitment_generator/Cargo.toml @@ -35,6 +35,7 @@ anyhow.workspace = true tracing.workspace = true itertools.workspace = true serde_json.workspace = true +serde = { version = "1.0.189", features = ["derive"] } [dev-dependencies] zksync_web3_decl.workspace = true diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index 64e60b6dec0..6cb14cfda53 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -6,22 +6,20 @@ use tokio::{sync::watch, task::JoinHandle}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use 
zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commitments; -use zksync_multivm::zk_evm_latest::ethereum_types::U256; use zksync_types::{ blob::num_blobs_required, commitment::{ AuxCommitments, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchCommitmentArtifacts, L1BatchCommitmentMode, }, - event::convert_vm_events_to_log_queries, writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, - L1BatchNumber, ProtocolVersionId, StorageKey, H256, + L1BatchNumber, ProtocolVersionId, StorageKey, H256, U256, }; use zksync_utils::h256_to_u256; use crate::{ metrics::{CommitmentStage, METRICS}, - utils::{CommitmentComputer, RealCommitmentComputer}, + utils::{convert_vm_events_to_log_queries, CommitmentComputer, RealCommitmentComputer}, }; mod metrics; diff --git a/core/node/commitment_generator/src/tests.rs b/core/node/commitment_generator/src/tests/mod.rs similarity index 87% rename from core/node/commitment_generator/src/tests.rs rename to core/node/commitment_generator/src/tests/mod.rs index d857013a769..e4afe882b00 100644 --- a/core/node/commitment_generator/src/tests.rs +++ b/core/node/commitment_generator/src/tests/mod.rs @@ -3,7 +3,9 @@ use std::thread; use rand::{thread_rng, Rng}; +use serde::Deserialize; use zksync_dal::Connection; +use zksync_multivm::interface::VmEvent; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l1_batch, create_l2_block}; use zksync_types::{ @@ -299,3 +301,44 @@ async fn commitment_generator_with_tree_emulation() { stop_sender.send_replace(true); generator_handle.await.unwrap().unwrap(); } + +#[derive(Debug, Deserialize)] +struct SerdeVmEvent { + location: (L1BatchNumber, u32), + address: Address, + indexed_topics: Vec, + value: Vec, +} + +impl From for VmEvent { + fn from(event: SerdeVmEvent) -> VmEvent { + VmEvent { + location: event.location, + address: event.address, + indexed_topics: event.indexed_topics, + value: event.value, + } + } +} + +#[test] +fn test_convert_vm_events_to_log_queries() { + let cases: Vec = vec![ + serde_json::from_str(include_str!( + "./test_vectors/event_with_1_topic_and_long_value.json" + )) + .unwrap(), + serde_json::from_str(include_str!("./test_vectors/event_with_2_topics.json")).unwrap(), + serde_json::from_str(include_str!("./test_vectors/event_with_3_topics.json")).unwrap(), + serde_json::from_str(include_str!("./test_vectors/event_with_4_topics.json")).unwrap(), + serde_json::from_str(include_str!("./test_vectors/event_with_value_len_1.json")).unwrap(), + ]; + + for case in cases { + let event: SerdeVmEvent = serde_json::from_value(case["event"].clone()).unwrap(); + let expected_list: Vec = serde_json::from_value(case["list"].clone()).unwrap(); + + let actual_list = convert_vm_events_to_log_queries(&[event.into()]); + assert_eq!(actual_list, expected_list); + } +} diff --git a/core/lib/types/src/event/test_vectors/event_with_1_topic_and_long_value.json b/core/node/commitment_generator/src/tests/test_vectors/event_with_1_topic_and_long_value.json similarity index 100% rename from core/lib/types/src/event/test_vectors/event_with_1_topic_and_long_value.json rename to core/node/commitment_generator/src/tests/test_vectors/event_with_1_topic_and_long_value.json diff --git a/core/lib/types/src/event/test_vectors/event_with_2_topics.json b/core/node/commitment_generator/src/tests/test_vectors/event_with_2_topics.json similarity index 100% rename from core/lib/types/src/event/test_vectors/event_with_2_topics.json 
rename to core/node/commitment_generator/src/tests/test_vectors/event_with_2_topics.json diff --git a/core/lib/types/src/event/test_vectors/event_with_3_topics.json b/core/node/commitment_generator/src/tests/test_vectors/event_with_3_topics.json similarity index 100% rename from core/lib/types/src/event/test_vectors/event_with_3_topics.json rename to core/node/commitment_generator/src/tests/test_vectors/event_with_3_topics.json diff --git a/core/lib/types/src/event/test_vectors/event_with_4_topics.json b/core/node/commitment_generator/src/tests/test_vectors/event_with_4_topics.json similarity index 100% rename from core/lib/types/src/event/test_vectors/event_with_4_topics.json rename to core/node/commitment_generator/src/tests/test_vectors/event_with_4_topics.json diff --git a/core/lib/types/src/event/test_vectors/event_with_value_len_1.json b/core/node/commitment_generator/src/tests/test_vectors/event_with_value_len_1.json similarity index 100% rename from core/lib/types/src/event/test_vectors/event_with_value_len_1.json rename to core/node/commitment_generator/src/tests/test_vectors/event_with_value_len_1.json diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index 59f8753859a..86643b6b581 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -2,6 +2,7 @@ use std::fmt; +use itertools::Itertools; use zk_evm_1_3_3::{ aux_structures::Timestamp as Timestamp_1_3_3, zk_evm_abstractions::queries::LogQuery as LogQuery_1_3_3, @@ -14,9 +15,13 @@ use zk_evm_1_5_0::{ aux_structures::Timestamp as Timestamp_1_5_0, zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0, }; -use zksync_multivm::utils::get_used_bootloader_memory_bytes; -use zksync_types::{vm::VmVersion, zk_evm_types::LogQuery, ProtocolVersionId, H256, U256}; -use zksync_utils::expand_memory_contents; +use zksync_multivm::{interface::VmEvent, utils::get_used_bootloader_memory_bytes}; +use zksync_types::{ + vm::VmVersion, + zk_evm_types::{LogQuery, Timestamp}, + ProtocolVersionId, EVENT_WRITER_ADDRESS, H256, U256, +}; +use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256}; /// Encapsulates computations of commitment components. /// @@ -158,3 +163,74 @@ fn to_log_query_1_5_0(log_query: LogQuery) -> LogQuery_1_5_0 { is_service: log_query.is_service, } } + +/// Each `VmEvent` can be translated to several log queries. +/// This methods converts each event from input to log queries and returns all produced log queries. +pub(crate) fn convert_vm_events_to_log_queries(events: &[VmEvent]) -> Vec { + events + .iter() + .flat_map(|event| { + // Construct first query. This query holds an information about + // - number of event topics (on log query level `event.address` is treated as a topic, thus + 1 is added) + // - length of event value + // - `event.address` (or first topic in terms of log query terminology). + let first_key_word = + (event.indexed_topics.len() as u64 + 1) + ((event.value.len() as u64) << 32); + let key = U256([first_key_word, 0, 0, 0]); + + // `timestamp`, `aux_byte`, `read_value`, `rw_flag`, `rollback` are set as per convention. 
+ let first_log = LogQuery { + timestamp: Timestamp(0), + tx_number_in_block: event.location.1 as u16, + aux_byte: 0, + shard_id: 0, + address: EVENT_WRITER_ADDRESS, + key, + read_value: U256::zero(), + written_value: address_to_u256(&event.address), + rw_flag: false, + rollback: false, + is_service: true, + }; + + // The next logs hold information about remaining topics and `event.value`. + // Each log can hold at most two values each of 32 bytes. + // The following piece of code prepares these 32-byte values. + let values = event.indexed_topics.iter().map(|h| h256_to_u256(*h)).chain( + event.value.chunks(32).map(|value_chunk| { + let mut padded = value_chunk.to_vec(); + padded.resize(32, 0); + U256::from_big_endian(&padded) + }), + ); + + // And now we process these values in chunks by two. + let value_chunks = values.chunks(2); + let other_logs = value_chunks.into_iter().map(|mut chunk| { + // The first value goes to `log_query.key`. + let key = chunk.next().unwrap(); + + // If the second one is present then it goes to `log_query.written_value`. + let written_value = chunk.next().unwrap_or_default(); + + LogQuery { + timestamp: Timestamp(0), + tx_number_in_block: event.location.1 as u16, + aux_byte: 0, + shard_id: 0, + address: EVENT_WRITER_ADDRESS, + key, + read_value: U256::zero(), + written_value, + rw_flag: false, + rollback: false, + is_service: false, + } + }); + + std::iter::once(first_log) + .chain(other_logs) + .collect::>() + }) + .collect() +} diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index ce8a555e06d..259cac5d074 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -1,10 +1,8 @@ +use std::sync::Arc; + use anyhow::Context as _; -use async_trait::async_trait; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; -use zksync_consensus_executor::{ - self as executor, - attestation::{AttestationStatusClient, AttestationStatusRunner}, -}; +use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_dal::consensus_dal; @@ -38,9 +36,7 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, ) -> anyhow::Result<()> { - let attester = config::attester_key(&secrets) - .context("attester_key")? - .map(|key| executor::Attester { key }); + let attester = config::attester_key(&secrets).context("attester_key")?; tracing::debug!( is_attester = attester.is_some(), @@ -53,7 +49,6 @@ impl EN { // Initialize genesis. let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; - let genesis_hash = genesis.hash(); let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; conn.try_update_genesis(ctx, &genesis) @@ -74,18 +69,21 @@ impl EN { // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. 
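Note (not part of the patch): a worked example of the packing performed by `convert_vm_events_to_log_queries` in `commitment_generator/src/utils.rs` above. The event shape used here (2 indexed topics, 40-byte value) is made up for illustration and is not one of the committed JSON test vectors.

// Sketch only: reproduces the sizing arithmetic of the helper above.
fn main() {
    let topics: u64 = 2;
    let value_len: u64 = 40;

    // First query packs "topic count + 1" (the event address counts as a topic)
    // in the low 32 bits and the value length in the next 32 bits.
    let first_key_word = (topics + 1) + (value_len << 32);
    assert_eq!(first_key_word, 0x0000_0028_0000_0003);

    // The remaining queries carry the topics plus the value split into padded
    // 32-byte chunks, packed two 32-byte words per query.
    let value_words = value_len.div_ceil(32); // 40 bytes -> 2 padded words
    let follow_up_queries = (topics + value_words).div_ceil(2); // 4 words -> 2 queries
    assert_eq!(1 + follow_up_queries, 3); // 3 log queries in total for this event
}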
- s.spawn_bg::<()>(async { - let old = genesis; - loop { - if let Ok(new) = self.fetch_genesis(ctx).await { - if new != old { - return Err(anyhow::format_err!( - "genesis changed: old {old:?}, new {new:?}" - ) - .into()); + s.spawn_bg::<()>({ + let old = genesis.clone(); + async { + let old = old; + loop { + if let Ok(new) = self.fetch_genesis(ctx).await { + if new != old { + return Err(anyhow::format_err!( + "genesis changed: old {old:?}, new {new:?}" + ) + .into()); + } } + ctx.sleep(time::Duration::seconds(5)).await?; } - ctx.sleep(time::Duration::seconds(5)).await?; } }); @@ -106,17 +104,8 @@ impl EN { .wrap("BatchStore::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let (attestation_status, runner) = { - AttestationStatusRunner::init( - ctx, - Box::new(MainNodeAttestationStatus(self.client.clone())), - time::Duration::seconds(5), - genesis_hash, - ) - .await - .wrap("AttestationStatusRunner::init()")? - }; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + let attestation = Arc::new(attestation::Controller::new(attester)); + s.spawn_bg(self.run_attestation_updater(ctx, genesis.clone(), attestation.clone())); let executor = executor::Executor { config: config::executor(&cfg, &secrets)?, @@ -129,8 +118,7 @@ impl EN { replica_store: Box::new(store.clone()), payload_manager: Box::new(store.clone()), }), - attester, - attestation_status, + attestation, }; tracing::info!("running the external node executor"); executor.run(ctx).await?; @@ -174,6 +162,62 @@ impl EN { } } + /// Monitors the `AttestationStatus` on the main node, + /// and updates the attestation config accordingly. + async fn run_attestation_updater( + &self, + ctx: &ctx::Ctx, + genesis: validator::Genesis, + attestation: Arc, + ) -> ctx::Result<()> { + const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let Some(committee) = &genesis.attesters else { + return Ok(()); + }; + let committee = Arc::new(committee.clone()); + let mut next = attester::BatchNumber(0); + loop { + let status = loop { + match self.fetch_attestation_status(ctx).await { + Err(err) => tracing::warn!("{err:#}"), + Ok(status) => { + if status.genesis != genesis.hash() { + return Err(anyhow::format_err!("genesis mismatch").into()); + } + if status.next_batch_to_attest >= next { + break status; + } + } + } + ctx.sleep(POLL_INTERVAL).await?; + }; + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let hash = self + .pool + .wait_for_batch_hash(ctx, status.next_batch_to_attest) + .await?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + genesis: status.genesis, + hash, + number: status.next_batch_to_attest, + }, + committee: committee.clone(), + })) + .await + .context("start_attestation()")?; + next = status.next_batch_to_attest.next(); + } + } + /// Periodically fetches the head of the main node /// and updates `SyncState` accordingly. async fn fetch_state_loop(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { @@ -213,6 +257,22 @@ impl EN { .with_hash()) } + #[tracing::instrument(skip_all)] + async fn fetch_attestation_status( + &self, + ctx: &ctx::Ctx, + ) -> ctx::Result { + match ctx.wait(self.client.fetch_attestation_status()).await? 
{ + Ok(Some(status)) => Ok(zksync_protobuf::serde::deserialize(&status.0) + .context("deserialize(AttestationStatus")?), + Ok(None) => Err(anyhow::format_err!("empty response").into()), + Err(err) => Err(anyhow::format_err!( + "AttestationStatus call to main node HTTP RPC failed: {err:#}" + ) + .into()), + } + } + /// Fetches (with retries) the given block from the main node. async fn fetch_block(&self, ctx: &ctx::Ctx, n: L2BlockNumber) -> ctx::Result { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); @@ -269,31 +329,3 @@ impl EN { Ok(()) } } - -/// Wrapper to call [MainNodeClient::fetch_attestation_status] and adapt the return value to [AttestationStatusClient]. -struct MainNodeAttestationStatus(Box>); - -#[async_trait] -impl AttestationStatusClient for MainNodeAttestationStatus { - async fn attestation_status( - &self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - match ctx.wait(self.0.fetch_attestation_status()).await? { - Ok(Some(status)) => { - // If this fails the AttestationStatusRunner will log it an retry it later, - // but it won't stop the whole node. - let status: consensus_dal::AttestationStatus = - zksync_protobuf::serde::deserialize(&status.0) - .context("deserialize(AttestationStatus")?; - - Ok(Some((status.genesis, status.next_batch_to_attest))) - } - Ok(None) => Ok(None), - Err(err) => { - tracing::warn!("AttestationStatus call to main node HTTP RPC failed: {err}"); - Ok(None) - } - } - } -} diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index b5e76afd63e..7de86b4d8ba 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -1,13 +1,15 @@ +use std::sync::Arc; + use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; -use zksync_consensus_executor::{self as executor, attestation::AttestationStatusRunner, Attester}; -use zksync_consensus_roles::validator; +use zksync_consensus_executor::{self as executor, attestation}; +use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use crate::{ config, - storage::{ConnectionPool, Store}, + storage::{ConnectionPool, InsertCertificateError, Store}, }; /// Task running a consensus validator for the main node. @@ -23,9 +25,7 @@ pub async fn run_main_node( .context("validator_key")? .context("missing validator_key")?; - let attester = config::attester_key(&secrets) - .context("attester_key")? - .map(|key| Attester { key }); + let attester = config::attester_key(&secrets).context("attester_key")?; tracing::debug!(is_attester = attester.is_some(), "main node attester mode"); @@ -42,7 +42,9 @@ pub async fn run_main_node( } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. 
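Note (not part of the patch): the external-node `run_attestation_updater` above only acts when the fetched status points at or past the locally tracked `next` batch, and advances `next` after starting attestation, so already-handled statuses are simply skipped while polling continues. A toy sketch of that skip/advance guard, with plain integers in place of `attester::BatchNumber`:

// Sketch only: `next_action` mirrors the `status.next_batch_to_attest >= next` check.
fn next_action(next: u64, status_next_batch: u64) -> Option<u64> {
    (status_next_batch >= next).then_some(status_next_batch)
}

fn main() {
    let mut next = 0u64;
    for status in [0u64, 0, 1, 1, 3] {
        if let Some(batch) = next_action(next, status) {
            println!("start attestation for batch {batch}");
            next = batch + 1; // equivalent of `status.next_batch_to_attest.next()`
        } else {
            println!("status for batch {status} already handled, keep polling");
        }
    }
}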
- let (store, runner) = Store::new(ctx, pool, None).await.wrap("Store::new()")?; + let (store, runner) = Store::new(ctx, pool.clone(), None) + .await + .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) @@ -50,8 +52,9 @@ pub async fn run_main_node( .wrap("BlockStore::new()")?; s.spawn_bg(runner.run(ctx)); + let genesis = block_store.genesis().clone(); anyhow::ensure!( - block_store.genesis().leader_selection + genesis.leader_selection == validator::LeaderSelectionMode::Sticky(validator_key.public()), "unsupported leader selection mode - main node has to be the leader" ); @@ -61,17 +64,13 @@ pub async fn run_main_node( .wrap("BatchStore::new()")?; s.spawn_bg(runner.run(ctx)); - let (attestation_status, runner) = { - AttestationStatusRunner::init_from_store( - ctx, - batch_store.clone(), - time::Duration::seconds(1), - block_store.genesis().hash(), - ) - .await - .wrap("AttestationStatusRunner::init_from_store()")? - }; - s.spawn_bg(runner.run(ctx)); + let attestation = Arc::new(attestation::Controller::new(attester)); + s.spawn_bg(run_attestation_updater( + ctx, + &pool, + genesis, + attestation.clone(), + )); let executor = executor::Executor { config: config::executor(&cfg, &secrets)?, @@ -82,8 +81,7 @@ pub async fn run_main_node( replica_store: Box::new(store.clone()), payload_manager: Box::new(store.clone()), }), - attester, - attestation_status, + attestation, }; tracing::info!("running the main node executor"); @@ -91,3 +89,85 @@ pub async fn run_main_node( }) .await } + +/// Manages attestation state by configuring the +/// next batch to attest and storing the collected +/// certificates. +async fn run_attestation_updater( + ctx: &ctx::Ctx, + pool: &ConnectionPool, + genesis: validator::Genesis, + attestation: Arc, +) -> anyhow::Result<()> { + const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let res = async { + let Some(committee) = &genesis.attesters else { + return Ok(()); + }; + let committee = Arc::new(committee.clone()); + loop { + // After regenesis it might happen that the batch number for the first block + // is not immediately known (the first block was not produced yet), + // therefore we need to wait for it. + let status = loop { + match pool + .connection(ctx) + .await + .wrap("connection()")? + .attestation_status(ctx) + .await + .wrap("attestation_status()")? + { + Some(status) => break status, + None => ctx.sleep(POLL_INTERVAL).await?, + } + }; + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let hash = pool + .wait_for_batch_hash(ctx, status.next_batch_to_attest) + .await?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + hash, + number: status.next_batch_to_attest, + genesis: status.genesis, + }, + committee: committee.clone(), + })) + .await + .context("start_attestation()")?; + // Main node is the only node which can update the global AttestationStatus, + // therefore we can synchronously wait for the certificate. + let qc = attestation + .wait_for_cert(ctx, status.next_batch_to_attest) + .await? + .context("attestation config has changed unexpectedly")?; + tracing::info!( + "collected certificate for batch {:?}", + status.next_batch_to_attest + ); + pool.connection(ctx) + .await + .wrap("connection()")? 
+ .insert_batch_certificate(ctx, &qc) + .await + .map_err(|err| match err { + InsertCertificateError::Canceled(err) => ctx::Error::Canceled(err), + InsertCertificateError::Inner(err) => ctx::Error::Internal(err.into()), + })?; + } + } + .await; + match res { + Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), + Err(ctx::Error::Internal(err)) => Err(err), + } +} diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 0e2039ae6bc..6ff2fb1ce0a 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -27,6 +27,7 @@ impl ConnectionPool { } /// Waits for the `number` L2 block. + #[tracing::instrument(skip_all)] pub async fn wait_for_payload( &self, ctx: &ctx::Ctx, @@ -47,6 +48,29 @@ impl ConnectionPool { ctx.sleep(POLL_INTERVAL).await?; } } + + /// Waits for the `number` L1 batch hash. + #[tracing::instrument(skip_all)] + pub async fn wait_for_batch_hash( + &self, + ctx: &ctx::Ctx, + number: attester::BatchNumber, + ) -> ctx::Result { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + loop { + if let Some(hash) = self + .connection(ctx) + .await + .wrap("connection()")? + .batch_hash(ctx, number) + .await + .with_wrap(|| format!("batch_hash({number})"))? + { + return Ok(hash); + } + ctx.sleep(POLL_INTERVAL).await?; + } + } } /// Context-aware `zksync_dal::Connection` wrapper. @@ -321,29 +345,6 @@ impl<'a> Connection<'a> { .map(|nr| attester::BatchNumber(nr.0 as u64))) } - /// Wrapper for `consensus_dal().get_last_batch_certificate_number()`. - pub async fn get_last_batch_certificate_number( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().get_last_batch_certificate_number()) - .await? - .context("get_last_batch_certificate_number()")?) - } - - /// Wrapper for `consensus_dal().batch_certificate()`. - pub async fn batch_certificate( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().batch_certificate(number)) - .await? - .context("batch_certificate()")?) - } - /// Wrapper for `blocks_dal().get_l2_block_range_of_l1_batch()`. pub async fn get_l2_block_range_of_l1_batch( &mut self, diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 0e08811c237..6a96812ae40 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -57,8 +57,6 @@ pub(crate) struct Store { block_payloads: Arc>>, /// L2 block QCs received from consensus block_certificates: ctx::channel::UnboundedSender, - /// L1 batch QCs received from consensus - batch_certificates: ctx::channel::UnboundedSender, /// Range of L2 blocks for which we have a QC persisted. blocks_persisted: sync::watch::Receiver, /// Range of L1 batches we have persisted. 
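Note (not part of the patch): the new `ConnectionPool::wait_for_batch_hash` above follows the same shape as the existing `wait_for_payload`: retry a fallible lookup on a fixed interval until it yields a value. A generic sketch of that pattern using plain tokio/anyhow instead of the context-aware zksync wrappers (all names here are hypothetical):

// Sketch only: requires the `tokio` (with the time feature) and `anyhow` crates.
use std::time::Duration;

async fn wait_for<T, F, Fut>(mut lookup: F, poll_interval: Duration) -> anyhow::Result<T>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<Option<T>>>,
{
    loop {
        if let Some(value) = lookup().await? {
            return Ok(value);
        }
        tokio::time::sleep(poll_interval).await;
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Hypothetical lookup that only succeeds on the third attempt.
    let mut attempts = 0u32;
    let hash = wait_for(
        || {
            attempts += 1;
            let ready = attempts >= 3;
            async move { anyhow::Ok(ready.then(|| "0xabc".to_string())) }
        },
        Duration::from_millis(10),
    )
    .await?;
    assert_eq!(hash, "0xabc");
    Ok(())
}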
@@ -73,7 +71,6 @@ pub struct StoreRunner { blocks_persisted: PersistedBlockState, batches_persisted: sync::watch::Sender, block_certificates: ctx::channel::UnboundedReceiver, - batch_certificates: ctx::channel::UnboundedReceiver, } impl Store { @@ -98,13 +95,11 @@ impl Store { let blocks_persisted = sync::watch::channel(blocks_persisted).0; let batches_persisted = sync::watch::channel(batches_persisted).0; let (block_certs_send, block_certs_recv) = ctx::channel::unbounded(); - let (batch_certs_send, batch_certs_recv) = ctx::channel::unbounded(); Ok(( Store { pool: pool.clone(), block_certificates: block_certs_send, - batch_certificates: batch_certs_send, block_payloads: Arc::new(sync::Mutex::new(payload_queue)), blocks_persisted: blocks_persisted.subscribe(), batches_persisted: batches_persisted.subscribe(), @@ -114,7 +109,6 @@ impl Store { blocks_persisted: PersistedBlockState(blocks_persisted), batches_persisted, block_certificates: block_certs_recv, - batch_certificates: batch_certs_recv, }, )) } @@ -171,7 +165,6 @@ impl StoreRunner { blocks_persisted, batches_persisted, mut block_certificates, - mut batch_certificates, } = self; let res = scope::run!(ctx, |ctx, s| async { @@ -256,60 +249,6 @@ impl StoreRunner { } }); - #[tracing::instrument(skip_all)] - async fn insert_batch_certificates_iteration( - ctx: &ctx::Ctx, - pool: &ConnectionPool, - batch_certificates: &mut ctx::channel::UnboundedReceiver, - ) -> ctx::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - - let cert = batch_certificates - .recv(ctx) - .instrument(tracing::info_span!("wait_for_batch_certificate")) - .await?; - - loop { - use consensus_dal::InsertCertificateError as E; - // Try to insert the cert. - let res = pool - .connection(ctx) - .await? - .insert_batch_certificate(ctx, &cert) - .await; - - match res { - Ok(()) => { - break; - } - Err(InsertCertificateError::Inner(E::MissingPayload)) => { - // The L1 batch isn't available yet. - // We can wait until it's produced/received, or we could modify gossip - // so that we don't even accept votes until we have the corresponding batch. - ctx.sleep(POLL_INTERVAL) - .instrument(tracing::info_span!("wait_for_batch")) - .await?; - } - Err(InsertCertificateError::Inner(err)) => { - return Err(ctx::Error::Internal(anyhow::Error::from(err))) - } - Err(InsertCertificateError::Canceled(err)) => { - return Err(ctx::Error::Canceled(err)) - } - } - } - - Ok(()) - } - - s.spawn::<()>(async { - // Loop inserting batch certificates into storage - loop { - insert_batch_certificates_iteration(ctx, &pool, &mut batch_certificates) - .await?; - } - }); - #[tracing::instrument(skip_all)] async fn insert_block_certificates_iteration( ctx: &ctx::Ctx, @@ -523,39 +462,6 @@ impl storage::PersistentBatchStore for Store { self.batches_persisted.clone() } - /// Get the next L1 batch number which has to be signed by attesters. - async fn next_batch_to_attest( - &self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(self - .conn(ctx) - .await? - .attestation_status(ctx) - .await - .wrap("next_batch_to_attest")? - .map(|s| s.next_batch_to_attest)) - } - - /// Get the L1 batch QC from storage with the highest number. - /// - /// This might have gaps before it. Until there is a way to catch up with missing - /// certificates by fetching from the main node, returning the last inserted one - /// is the best we can do. - async fn last_batch_qc(&self, ctx: &ctx::Ctx) -> ctx::Result> { - let Some(number) = self - .conn(ctx) - .await? 
- .get_last_batch_certificate_number(ctx) - .await - .wrap("get_last_batch_certificate_number")? - else { - return Ok(None); - }; - - self.get_batch_qc(ctx, number).await - } - /// Returns the batch with the given number. async fn get_batch( &self, @@ -569,54 +475,6 @@ impl storage::PersistentBatchStore for Store { .wrap("get_batch") } - /// Returns the [attester::Batch] with the given number, which is the `message` that - /// appears in [attester::BatchQC], and represents the content that needs to be signed - /// by the attesters. - async fn get_batch_to_sign( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let mut conn = self.conn(ctx).await?; - - let Some(hash) = conn.batch_hash(ctx, number).await.wrap("batch_hash()")? else { - return Ok(None); - }; - - let Some(genesis) = conn.genesis(ctx).await.wrap("genesis()")? else { - return Ok(None); - }; - - Ok(Some(attester::Batch { - number, - hash, - genesis: genesis.hash(), - })) - } - - /// Returns the QC of the batch with the given number. - async fn get_batch_qc( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - self.conn(ctx) - .await? - .batch_certificate(ctx, number) - .await - .wrap("batch_certificate") - } - - /// Store the given QC in the storage. - /// - /// Storing a QC is allowed even if it creates a gap in the L1 batch history. - /// If we need the last batch QC that still needs to be signed then the queries need to look for gaps. - async fn store_qc(&self, _ctx: &ctx::Ctx, qc: attester::BatchQC) -> ctx::Result<()> { - // Storing asynchronously because we might get the QC before the L1 batch itself. - self.batch_certificates.send(qc); - Ok(()) - } - /// Queue the batch to be persisted in storage. /// /// The caller [BatchStore] ensures that this is only called when the batch is the next expected one. diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index c73d20982c1..5d1279afbbf 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -2,7 +2,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::{attester, validator}; use zksync_contracts::BaseSystemContracts; use zksync_dal::CoreDal as _; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; @@ -12,7 +12,41 @@ use zksync_types::{ system_contracts::get_system_smart_contracts, L1BatchNumber, L2BlockNumber, ProtocolVersionId, }; -use super::ConnectionPool; +use super::{Connection, ConnectionPool}; + +impl Connection<'_> { + /// Wrapper for `consensus_dal().batch_of_block()`. + pub async fn batch_of_block( + &mut self, + ctx: &ctx::Ctx, + block: validator::BlockNumber, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().batch_of_block(block)) + .await??) + } + + /// Wrapper for `consensus_dal().last_batch_certificate_number()`. + pub async fn last_batch_certificate_number( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().last_batch_certificate_number()) + .await??) + } + + /// Wrapper for `consensus_dal().batch_certificate()`. + pub async fn batch_certificate( + &mut self, + ctx: &ctx::Ctx, + number: attester::BatchNumber, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().batch_certificate(number)) + .await??) 
+ } +} pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> GenesisParams { let mut cfg = mock_genesis_config(); @@ -161,6 +195,57 @@ impl ConnectionPool { Ok(blocks) } + pub async fn wait_for_batch_certificates_and_verify( + &self, + ctx: &ctx::Ctx, + want_last: attester::BatchNumber, + ) -> ctx::Result<()> { + // Wait for the last batch to be attested. + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); + while self + .connection(ctx) + .await + .wrap("connection()")? + .last_batch_certificate_number(ctx) + .await + .wrap("last_batch_certificate_number()")? + .map_or(true, |got| got < want_last) + { + ctx.sleep(POLL_INTERVAL).await?; + } + let mut conn = self.connection(ctx).await.wrap("connection()")?; + let genesis = conn + .genesis(ctx) + .await + .wrap("genesis()")? + .context("genesis is missing")?; + let first = conn + .batch_of_block(ctx, genesis.first_block) + .await + .wrap("batch_of_block()")? + .context("batch of first_block is missing")?; + let committee = genesis.attesters.as_ref().unwrap(); + for i in first.0..want_last.0 { + let i = attester::BatchNumber(i); + let hash = conn + .batch_hash(ctx, i) + .await + .wrap("batch_hash()")? + .context("hash missing")?; + let cert = conn + .batch_certificate(ctx, i) + .await + .wrap("batch_certificate")? + .context("cert missing")?; + if cert.message.hash != hash { + return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); + } + cert.verify(genesis.hash(), committee) + .context("cert[{i:?}].verify()")?; + } + Ok(()) + } + pub async fn prune_batches( &self, ctx: &ctx::Ctx, diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 2c6fdc79a52..0537aaabc56 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -14,7 +14,7 @@ use zksync_config::{ }; use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_network as network; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::{attester, validator, validator::testonly::Setup}; use zksync_dal::{CoreDal, DalError}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_metadata_calculator::{ @@ -32,7 +32,10 @@ use zksync_node_test_utils::{create_l1_batch_metadata, l1_batch_metadata_to_comm use zksync_state_keeper::{ io::{IoCursor, L1BatchParams, L2BlockParams}, seal_criteria::NoopSealer, - testonly::{fund, l1_transaction, l2_transaction, MockBatchExecutor}, + testonly::{ + fund, l1_transaction, l2_transaction, test_batch_executor::MockReadStorageFactory, + MockBatchExecutor, + }, AsyncRocksdbCache, MainBatchExecutor, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, ZkSyncStateKeeper, }; @@ -69,55 +72,105 @@ pub(super) struct StateKeeper { tree_reader: LazyAsyncTreeReader, } -pub(super) fn config(cfg: &network::Config) -> (config::ConsensusConfig, config::ConsensusSecrets) { - ( - config::ConsensusConfig { - server_addr: *cfg.server_addr, - public_addr: config::Host(cfg.public_addr.0.clone()), - max_payload_size: usize::MAX, - max_batch_size: usize::MAX, - gossip_dynamic_inbound_limit: cfg.gossip.dynamic_inbound_limit, - gossip_static_inbound: cfg - .gossip - .static_inbound - .iter() - .map(|k| config::NodePublicKey(k.encode())) - .collect(), - gossip_static_outbound: cfg - .gossip - .static_outbound - .iter() - .map(|(k, v)| (config::NodePublicKey(k.encode()), config::Host(v.0.clone()))) - .collect(), - genesis_spec: cfg.validator_key.as_ref().map(|key| config::GenesisSpec { - chain_id: 
L2ChainId::default(), - protocol_version: config::ProtocolVersion(validator::ProtocolVersion::CURRENT.0), - validators: vec![config::WeightedValidator { - key: config::ValidatorPublicKey(key.public().encode()), - weight: 1, - }], - // We only have access to the main node attester key in the `cfg`, which is fine - // for validators because at the moment there is only one leader. It doesn't - // allow us to form a full attester committee. However in the current tests - // the `new_configs` used to produce the array of `network::Config` doesn't - // assign an attester key, so it doesn't matter. - attesters: Vec::new(), - leader: config::ValidatorPublicKey(key.public().encode()), - }), - rpc: None, - }, - config::ConsensusSecrets { - node_key: Some(config::NodeSecretKey(cfg.gossip.key.encode().into())), - validator_key: cfg - .validator_key - .as_ref() - .map(|k| config::ValidatorSecretKey(k.encode().into())), - attester_key: cfg - .attester_key - .as_ref() - .map(|k| config::AttesterSecretKey(k.encode().into())), - }, - ) +#[derive(Clone)] +pub(super) struct ConfigSet { + net: network::Config, + pub(super) config: config::ConsensusConfig, + pub(super) secrets: config::ConsensusSecrets, +} + +impl ConfigSet { + pub(super) fn new_fullnode(&self, rng: &mut impl Rng) -> ConfigSet { + let net = network::testonly::new_fullnode(rng, &self.net); + ConfigSet { + config: make_config(&net, None), + secrets: make_secrets(&net, None), + net, + } + } +} + +pub(super) fn new_configs( + rng: &mut impl Rng, + setup: &Setup, + gossip_peers: usize, +) -> Vec { + let genesis_spec = config::GenesisSpec { + chain_id: setup.genesis.chain_id.0.try_into().unwrap(), + protocol_version: config::ProtocolVersion(setup.genesis.protocol_version.0), + validators: setup + .validator_keys + .iter() + .map(|k| config::WeightedValidator { + key: config::ValidatorPublicKey(k.public().encode()), + weight: 1, + }) + .collect(), + attesters: setup + .attester_keys + .iter() + .map(|k| config::WeightedAttester { + key: config::AttesterPublicKey(k.public().encode()), + weight: 1, + }) + .collect(), + leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), + }; + network::testonly::new_configs(rng, setup, gossip_peers) + .into_iter() + .enumerate() + .map(|(i, net)| ConfigSet { + config: make_config(&net, Some(genesis_spec.clone())), + secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), + net, + }) + .collect() +} + +fn make_secrets( + cfg: &network::Config, + attester_key: Option, +) -> config::ConsensusSecrets { + config::ConsensusSecrets { + node_key: Some(config::NodeSecretKey(cfg.gossip.key.encode().into())), + validator_key: cfg + .validator_key + .as_ref() + .map(|k| config::ValidatorSecretKey(k.encode().into())), + attester_key: attester_key.map(|k| config::AttesterSecretKey(k.encode().into())), + } +} + +fn make_config( + cfg: &network::Config, + genesis_spec: Option, +) -> config::ConsensusConfig { + config::ConsensusConfig { + server_addr: *cfg.server_addr, + public_addr: config::Host(cfg.public_addr.0.clone()), + max_payload_size: usize::MAX, + max_batch_size: usize::MAX, + gossip_dynamic_inbound_limit: cfg.gossip.dynamic_inbound_limit, + gossip_static_inbound: cfg + .gossip + .static_inbound + .iter() + .map(|k| config::NodePublicKey(k.encode())) + .collect(), + gossip_static_outbound: cfg + .gossip + .static_outbound + .iter() + .map(|(k, v)| (config::NodePublicKey(k.encode()), config::Host(v.0.clone()))) + .collect(), + // This is only relevant for the main node, which populates the 
genesis on the first run. + // Note that the spec doesn't match 100% the genesis provided. + // That's because not all genesis setups are currently supported in zksync-era. + // TODO: this might be misleading, so it would be better to write some more custom + // genesis generator for zksync-era tests. + genesis_spec, + rpc: None, + } } /// Fake StateKeeper task to be executed in the background. @@ -390,15 +443,14 @@ impl StateKeeper { self, ctx: &ctx::Ctx, client: Box>, - cfg: &network::Config, + cfgs: ConfigSet, ) -> anyhow::Result<()> { - let (cfg, secrets) = config(cfg); en::EN { pool: self.pool, client, sync_state: self.sync_state.clone(), } - .run(ctx, self.actions_sender, cfg, secrets) + .run(ctx, self.actions_sender, cfgs.config, cfgs.secrets) .await } } @@ -631,7 +683,7 @@ impl StateKeeperRunner { .with_handler(Box::new(tree_writes_persistence)) .with_handler(Box::new(self.sync_state.clone())), Arc::new(NoopSealer), - Arc::new(self.pool.0.clone()), + Arc::new(MockReadStorageFactory), ) .run() .await diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs new file mode 100644 index 00000000000..b245d0524aa --- /dev/null +++ b/core/node/consensus/src/tests/attestation.rs @@ -0,0 +1,166 @@ +use anyhow::Context as _; +use test_casing::{test_casing, Product}; +use tracing::Instrument as _; +use zksync_concurrency::{ctx, error::Wrap, scope}; +use zksync_consensus_roles::{ + attester, + validator::testonly::{Setup, SetupSpec}, +}; +use zksync_dal::consensus_dal::AttestationStatus; +use zksync_node_sync::MainNodeClient; +use zksync_types::{L1BatchNumber, ProtocolVersionId}; + +use super::{FROM_SNAPSHOT, VERSIONS}; +use crate::{mn::run_main_node, storage::ConnectionPool, testonly}; + +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_attestation_status_api(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test(false, version).await; + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); + + // Setup nontrivial genesis. + while sk.last_sealed_batch() < L1BatchNumber(3) { + sk.push_random_blocks(rng, 10).await; + } + let mut setup = SetupSpec::new(rng, 3); + setup.first_block = sk.last_block(); + let first_batch = sk.last_batch(); + let setup = Setup::from(setup); + let mut conn = pool.connection(ctx).await.wrap("connection()")?; + conn.try_update_genesis(ctx, &setup.genesis) + .await + .wrap("try_update_genesis()")?; + // Make sure that the first_batch is actually sealed. + sk.seal_batch().await; + pool.wait_for_batch(ctx, first_batch).await?; + + // Connect to API endpoint. + let api = sk.connect(ctx).await?; + let fetch_status = || async { + let s = api + .fetch_attestation_status() + .await? + .context("no attestation_status")?; + let s: AttestationStatus = + zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; + anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); + Ok(s) + }; + + // If the main node has no L1 batch certificates, + // then the first one to sign should be the batch with the `genesis.first_block`. + let status = fetch_status().await?; + assert_eq!( + status.next_batch_to_attest, + attester::BatchNumber(first_batch.0.into()) + ); + + // Insert a (fake) cert, then check again. 
+ { + let mut conn = pool.connection(ctx).await?; + let number = status.next_batch_to_attest; + let hash = conn.batch_hash(ctx, number).await?.unwrap(); + let genesis = conn.genesis(ctx).await?.unwrap().hash(); + let cert = attester::BatchQC { + signatures: attester::MultiSig::default(), + message: attester::Batch { + number, + hash, + genesis, + }, + }; + conn.insert_batch_certificate(ctx, &cert) + .await + .context("insert_batch_certificate()")?; + } + let want = status.next_batch_to_attest.next(); + let got = fetch_status().await?; + assert_eq!(want, got.next_batch_to_attest); + + Ok(()) + }) + .await + .unwrap(); +} + +// Test running a couple of attesters (which are also validators). +// Main node is expected to collect all certificates. +// External nodes are expected to just vote for the batch. +// +// TODO: it would be nice to use `StateKeeperRunner::run_real()` in this test, +// however as of now it doesn't work with ENs and it doesn't work with +// `ConnectionPool::from_snapshot`. +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId) { + const NODES: usize = 4; + + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 4); + let cfgs = testonly::new_configs(rng, &setup, NODES); + + scope::run!(ctx, |ctx, s| async { + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(async { + runner + .run(ctx) + .instrument(tracing::info_span!("validator")) + .await + .context("validator") + }); + // API server needs at least 1 L1 batch to start. 
+ validator.seal_batch().await; + validator_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + + tracing::info!("Run validator."); + s.spawn_bg(run_main_node( + ctx, + cfgs[0].config.clone(), + cfgs[0].secrets.clone(), + validator_pool.clone(), + )); + + tracing::info!("Run nodes."); + let mut node_pools = vec![]; + for (i, cfg) in cfgs[1..].iter().enumerate() { + let i = ctx::NoCopy(i); + let pool = ConnectionPool::test(from_snapshot, version).await; + let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + node_pools.push(pool.clone()); + s.spawn_bg(async { + let i = i; + runner + .run(ctx) + .instrument(tracing::info_span!("node", i = *i)) + .await + .with_context(|| format!("node{}", *i)) + }); + s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); + } + + tracing::info!("Create some batches"); + validator.push_random_blocks(rng, 20).await; + validator.seal_batch().await; + tracing::info!("Wait for the batches to be attested"); + let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); + validator_pool + .wait_for_batch_certificates_and_verify(ctx, want_last) + .await?; + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs new file mode 100644 index 00000000000..41d73fdb87c --- /dev/null +++ b/core/node/consensus/src/tests/batch.rs @@ -0,0 +1,120 @@ +use test_casing::{test_casing, Product}; +use zksync_concurrency::{ctx, scope}; +use zksync_consensus_roles::validator; +use zksync_types::{L1BatchNumber, ProtocolVersionId}; + +use super::{FROM_SNAPSHOT, VERSIONS}; +use crate::{storage::ConnectionPool, testonly}; + +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let pool = ConnectionPool::test(from_snapshot, version).await; + + // Fill storage with unsigned L2 blocks and L1 batches in a way that the + // last L1 batch is guaranteed to have some L2 blocks executed in it. + scope::run!(ctx, |ctx, s| async { + // Start state keeper. + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + + for _ in 0..3 { + for _ in 0..2 { + sk.push_random_block(rng).await; + } + sk.seal_batch().await; + } + sk.push_random_block(rng).await; + + pool.wait_for_payload(ctx, sk.last_block()).await?; + + Ok(()) + }) + .await + .unwrap(); + + // Now we can try to retrieve the batch. + scope::run!(ctx, |ctx, _s| async { + let mut conn = pool.connection(ctx).await?; + let batches = conn.batches_range(ctx).await?; + let last = batches.last.expect("last is set"); + let (min, max) = conn + .get_l2_block_range_of_l1_batch(ctx, last) + .await? + .unwrap(); + + let last_batch = conn + .get_batch(ctx, last) + .await? 
+ .expect("last batch can be retrieved"); + + assert_eq!( + last_batch.payloads.len(), + (max.0 - min.0) as usize, + "all block payloads present" + ); + + let first_payload = last_batch + .payloads + .first() + .expect("last batch has payloads"); + + let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); + let want_payload = want_payload.encode(); + + assert_eq!( + first_payload, &want_payload, + "first payload is the right number" + ); + + anyhow::Ok(()) + }) + .await + .unwrap(); +} + +/// Tests that generated L1 batch witnesses can be verified successfully. +/// TODO: add tests for verification failures. +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_batch_witness(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::from_genesis(version).await; + let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run_real(ctx)); + + tracing::info!("analyzing storage"); + { + let mut conn = pool.connection(ctx).await.unwrap(); + let mut n = validator::BlockNumber(0); + while let Some(p) = conn.payload(ctx, n).await? { + tracing::info!("block[{n}] = {p:?}"); + n = n + 1; + } + } + + // Seal a bunch of batches. + node.push_random_blocks(rng, 10).await; + node.seal_batch().await; + pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; + // We can verify only 2nd batch onward, because + // batch witness verifies parent of the last block of the + // previous batch (and 0th batch contains only 1 block). + for n in 2..=node.last_sealed_batch().0 { + let n = L1BatchNumber(n); + let batch_with_witness = node.load_batch_with_witness(ctx, n).await?; + let commit = node.load_batch_commit(ctx, n).await?; + batch_with_witness.verify(&commit)?; + } + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/tests.rs b/core/node/consensus/src/tests/mod.rs similarity index 69% rename from core/node/consensus/src/tests.rs rename to core/node/consensus/src/tests/mod.rs index 8e1594393ea..0b611d55f06 100644 --- a/core/node/consensus/src/tests.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -2,17 +2,12 @@ use anyhow::Context as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; -use zksync_config::configs::consensus::{ValidatorPublicKey, WeightedValidator}; -use zksync_consensus_crypto::TextFmt as _; -use zksync_consensus_network::testonly::{new_configs, new_fullnode}; use zksync_consensus_roles::{ - attester, validator, + validator, validator::testonly::{Setup, SetupSpec}, }; use zksync_consensus_storage::BlockStore; -use zksync_dal::consensus_dal::AttestationStatus; -use zksync_node_sync::MainNodeClient; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use crate::{ mn::run_main_node, @@ -20,6 +15,9 @@ use crate::{ testonly, }; +mod attestation; +mod batch; + const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; @@ -86,76 +84,6 @@ async fn test_validator_block_store(version: ProtocolVersionId) { } } -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = 
&ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let pool = ConnectionPool::test(from_snapshot, version).await; - - // Fill storage with unsigned L2 blocks and L1 batches in a way that the - // last L1 batch is guaranteed to have some L2 blocks executed in it. - scope::run!(ctx, |ctx, s| async { - // Start state keeper. - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - - for _ in 0..3 { - for _ in 0..2 { - sk.push_random_block(rng).await; - } - sk.seal_batch().await; - } - sk.push_random_block(rng).await; - - pool.wait_for_payload(ctx, sk.last_block()).await?; - - Ok(()) - }) - .await - .unwrap(); - - // Now we can try to retrieve the batch. - scope::run!(ctx, |ctx, _s| async { - let mut conn = pool.connection(ctx).await?; - let batches = conn.batches_range(ctx).await?; - let last = batches.last.expect("last is set"); - let (min, max) = conn - .get_l2_block_range_of_l1_batch(ctx, last) - .await? - .unwrap(); - - let last_batch = conn - .get_batch(ctx, last) - .await? - .expect("last batch can be retrieved"); - - assert_eq!( - last_batch.payloads.len(), - (max.0 - min.0) as usize, - "all block payloads present" - ); - - let first_payload = last_batch - .payloads - .first() - .expect("last batch has payloads"); - - let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); - let want_payload = want_payload.encode(); - - assert_eq!( - first_payload, &want_payload, - "first payload is the right number" - ); - - anyhow::Ok(()) - }) - .await - .unwrap(); -} - // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. @@ -166,7 +94,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfgs = new_configs(rng, &setup, 0); + let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); scope::run!(ctx, |ctx, s| async { tracing::info!("Start state keeper."); @@ -187,8 +115,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { scope::run!(ctx, |ctx, s| async { tracing::info!("Start consensus actor"); // In the first iteration it will initialize genesis. 
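// The hunk below is the recurring refactor in this file: `testonly::new_configs` now yields
// entries that carry `config` and `secrets` directly, so call sites hand
// `cfg.config.clone()` / `cfg.secrets.clone()` to `run_main_node` instead of destructuring
// the old `testonly::config(&cfg)` tuple. A minimal sketch of the new call shape, using only
// names that appear in this diff:
//
//     let cfg = testonly::new_configs(rng, &setup, 0)[0].clone();
//     s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone()));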
- let (cfg,secrets) = testonly::config(&cfgs[0]); - s.spawn_bg(run_main_node(ctx, cfg, secrets, pool.clone())); + s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone())); tracing::info!("Generate couple more blocks and wait for consensus to catch up."); sk.push_random_blocks(rng, 3).await; @@ -230,7 +157,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = new_configs(rng, &setup, 0).pop().unwrap(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); scope::run!(ctx, |ctx, s| async { tracing::info!("spawn validator"); @@ -238,8 +165,12 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); - let (cfg, secrets) = testonly::config(&validator_cfg); - s.spawn_bg(run_main_node(ctx, cfg, secrets, validator_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); tracing::info!("produce some batches"); validator.push_random_blocks(rng, 5).await; @@ -255,8 +186,8 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node1"))); let conn = validator.connect(ctx).await?; s.spawn_bg(async { - let cfg = new_fullnode(&mut ctx.rng(), &validator_cfg); - node.run_consensus(ctx, conn, &cfg).await + let cfg = validator_cfg.new_fullnode(&mut ctx.rng()); + node.run_consensus(ctx, conn, cfg).await }); tracing::info!("produce more batches"); @@ -273,8 +204,8 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node2"))); let conn = validator.connect(ctx).await?; s.spawn_bg(async { - let cfg = new_fullnode(&mut ctx.rng(), &validator_cfg); - node.run_consensus(ctx, conn, &cfg).await + let cfg = validator_cfg.new_fullnode(&mut ctx.rng()); + node.run_consensus(ctx, conn, cfg).await }); tracing::info!("produce more blocks and compare storages"); @@ -311,16 +242,13 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfgs = new_configs(rng, &setup, 0); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); // topology: // validator <-> node <-> node <-> ... let mut node_cfgs = vec![]; for _ in 0..NODES { - node_cfgs.push(new_fullnode( - rng, - node_cfgs.last().unwrap_or(&validator_cfgs[0]), - )); + node_cfgs.push(node_cfgs.last().unwrap_or(&validator_cfg).new_fullnode(rng)); } // Run validator and fetchers in parallel. 
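// The loop above builds the chain topology described in the comment: every new config is
// derived from the previous one via `new_fullnode`, so node k peers with node k-1 and the
// first node peers with the validator. A minimal sketch, assuming NODES = 2:
//
//     let n0 = validator_cfg.new_fullnode(rng); // validator <-> n0
//     let n1 = n0.new_fullnode(rng);            // n0 <-> n1
//     let node_cfgs = vec![n0, n1];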
@@ -344,8 +272,12 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .await?; tracing::info!("Run validator."); - let (cfg, secrets) = testonly::config(&validator_cfgs[0]); - s.spawn_bg(run_main_node(ctx, cfg, secrets, validator_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); tracing::info!("Run nodes."); let mut node_pools = vec![]; @@ -362,7 +294,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .await .with_context(|| format!("node{}", *i)) }); - s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg)); + s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); } tracing::info!("Make validator produce blocks and wait for fetchers to get them."); @@ -395,7 +327,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = new_configs(rng, &setup, 1); + let cfgs = testonly::new_configs(rng, &setup, 1); // Run all nodes in parallel. scope::run!(ctx, |ctx, s| async { @@ -423,16 +355,12 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { main_node.connect(ctx).await?; tracing::info!("Run main node with all nodes being validators."); - let (mut cfg, secrets) = testonly::config(&cfgs[0]); - cfg.genesis_spec.as_mut().unwrap().validators = setup - .validator_keys - .iter() - .map(|k| WeightedValidator { - key: ValidatorPublicKey(k.public().encode()), - weight: 1, - }) - .collect(); - s.spawn_bg(run_main_node(ctx, cfg, secrets, main_node_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + cfgs[0].config.clone(), + cfgs[0].secrets.clone(), + main_node_pool.clone(), + )); tracing::info!("Run external nodes."); let mut ext_node_pools = vec![]; @@ -449,7 +377,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { .await .with_context(|| format!("en{}", *i)) }); - s.spawn_bg(ext_node.run_consensus(ctx, main_node.connect(ctx).await?, cfg)); + s.spawn_bg(ext_node.run_consensus(ctx, main_node.connect(ctx).await?, cfg.clone())); } tracing::info!("Make the main node produce blocks and wait for consensus to finalize them"); @@ -479,8 +407,8 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = new_configs(rng, &setup, 0)[0].clone(); - let node_cfg = new_fullnode(rng, &validator_cfg); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn validator."); @@ -488,8 +416,12 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - let (cfg, secrets) = testonly::config(&validator_cfg); - s.spawn_bg(run_main_node(ctx, cfg, secrets, validator_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); // API server needs at least 1 L1 batch to start. 
validator.seal_batch().await; let client = validator.connect(ctx).await?; @@ -500,7 +432,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV scope::run!(ctx, |ctx, s| async { let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - s.spawn_bg(node.run_consensus(ctx, client.clone(), &node_cfg)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); validator.push_random_blocks(rng, 3).await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -528,7 +460,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV scope::run!(ctx, |ctx, s| async { let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - s.spawn_bg(node.run_consensus(ctx, client.clone(), &node_cfg)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); validator.push_random_blocks(rng, 3).await; let want = validator_pool .wait_for_block_certificates_and_verify(ctx, validator.last_block()) @@ -554,8 +486,8 @@ async fn test_with_pruning(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = new_configs(rng, &setup, 0)[0].clone(); - let node_cfg = new_fullnode(rng, &validator_cfg); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; @@ -569,16 +501,20 @@ async fn test_with_pruning(version: ProtocolVersionId) { .context("validator") }); tracing::info!("Run validator."); - let (cfg, secrets) = testonly::config(&validator_cfg); s.spawn_bg({ let validator_pool = validator_pool.clone(); async { - run_main_node(ctx, cfg, secrets, validator_pool) - .await - .context("run_main_node()") + run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool, + ) + .await + .context("run_main_node()") } }); - // TODO: ensure at least L1 batch in `testonly::StateKeeper::new()` to make it fool proof. + // TODO: ensure at least 1 L1 batch in `testonly::StateKeeper::new()` to make it fool proof. validator.seal_batch().await; tracing::info!("Run node."); @@ -593,7 +529,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { }); let conn = validator.connect(ctx).await?; s.spawn_bg(async { - node.run_consensus(ctx, conn, &node_cfg) + node.run_consensus(ctx, conn, node_cfg) .await .context("run_consensus()") }); @@ -678,123 +614,3 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI .await .unwrap(); } - -#[test_casing(2, VERSIONS)] -#[tokio::test] -async fn test_attestation_status_api(version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - - scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::test(false, version).await; - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); - - // Setup nontrivial genesis. 
- while sk.last_sealed_batch() < L1BatchNumber(3) { - sk.push_random_blocks(rng, 10).await; - } - let mut setup = SetupSpec::new(rng, 3); - setup.first_block = sk.last_block(); - let first_batch = sk.last_batch(); - let setup = Setup::from(setup); - let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; - // Make sure that the first_batch is actually sealed. - sk.seal_batch().await; - pool.wait_for_batch(ctx, first_batch).await?; - - // Connect to API endpoint. - let api = sk.connect(ctx).await?; - let fetch_status = || async { - let s = api - .fetch_attestation_status() - .await? - .context("no attestation_status")?; - let s: AttestationStatus = - zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; - anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); - Ok(s) - }; - - // If the main node has no L1 batch certificates, - // then the first one to sign should be the batch with the `genesis.first_block`. - let status = fetch_status().await?; - assert_eq!( - status.next_batch_to_attest, - attester::BatchNumber(first_batch.0.into()) - ); - - // Insert a (fake) cert, then check again. - { - let mut conn = pool.connection(ctx).await?; - let number = status.next_batch_to_attest; - let hash = conn.batch_hash(ctx, number).await?.unwrap(); - let genesis = conn.genesis(ctx).await?.unwrap().hash(); - let cert = attester::BatchQC { - signatures: attester::MultiSig::default(), - message: attester::Batch { - number, - hash, - genesis, - }, - }; - conn.insert_batch_certificate(ctx, &cert) - .await - .context("insert_batch_certificate()")?; - } - let want = status.next_batch_to_attest.next(); - let got = fetch_status().await?; - assert_eq!(want, got.next_batch_to_attest); - - Ok(()) - }) - .await - .unwrap(); -} - -/// Tests that generated L1 batch witnesses can be verified successfully. -/// TODO: add tests for verification failures. -#[test_casing(2, VERSIONS)] -#[tokio::test] -async fn test_batch_witness(version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - - scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::from_genesis(version).await; - let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx)); - - tracing::info!("analyzing storage"); - { - let mut conn = pool.connection(ctx).await.unwrap(); - let mut n = validator::BlockNumber(0); - while let Some(p) = conn.payload(ctx, n).await? { - tracing::info!("block[{n}] = {p:?}"); - n = n + 1; - } - } - - // Seal a bunch of batches. - node.push_random_blocks(rng, 10).await; - node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; - // We can verify only 2nd batch onward, because - // batch witness verifies parent of the last block of the - // previous batch (and 0th batch contains only 1 block). 
- for n in 2..=node.last_sealed_batch().0 { - let n = L1BatchNumber(n); - let batch_with_witness = node.load_batch_with_witness(ctx, n).await?; - let commit = node.load_batch_commit(ctx, n).await?; - batch_with_witness.verify(&commit)?; - } - Ok(()) - }) - .await - .unwrap(); -} diff --git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs index d4dbe454603..a5458e996e1 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -121,6 +121,7 @@ async fn insert_l2_blocks( protocol_version: Some(Default::default()), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Default::default(), }; conn.blocks_dal() diff --git a/core/node/eth_sender/src/abstract_l1_interface.rs b/core/node/eth_sender/src/abstract_l1_interface.rs index 1f1956c9dd8..312f483fd29 100644 --- a/core/node/eth_sender/src/abstract_l1_interface.rs +++ b/core/node/eth_sender/src/abstract_l1_interface.rs @@ -9,7 +9,6 @@ use zksync_eth_client::{ #[cfg(test)] use zksync_types::web3; use zksync_types::{ - aggregated_operations::AggregatedActionType, eth_sender::{EthTx, EthTxBlobSidecar}, web3::{BlockId, BlockNumber}, Address, L1BlockNumber, Nonce, EIP_1559_TX_TYPE, EIP_4844_TX_TYPE, H256, U256, @@ -37,14 +36,25 @@ pub(crate) struct L1BlockNumbers { pub(crate) enum OperatorType { NonBlob, Blob, + Gateway, } #[async_trait] pub(super) trait AbstractL1Interface: 'static + Sync + Send + fmt::Debug { - async fn failure_reason(&self, tx_hash: H256) -> Option; + fn supported_operator_types(&self) -> Vec; + + async fn failure_reason( + &self, + tx_hash: H256, + operator_type: OperatorType, + ) -> Option; #[cfg(test)] - async fn get_tx(&self, tx_hash: H256) -> EnrichedClientResult>; + async fn get_tx( + &self, + tx_hash: H256, + operator_type: OperatorType, + ) -> EnrichedClientResult>; async fn get_tx_status( &self, @@ -73,50 +83,77 @@ pub(super) trait AbstractL1Interface: 'static + Sync + Send + fmt::Debug { priority_fee_per_gas: u64, blob_gas_price: Option, max_aggregated_tx_gas: U256, + operator_type: OperatorType, ) -> SignedCallResult; - async fn get_l1_block_numbers(&self) -> Result; - - fn ethereum_gateway(&self) -> &dyn BoundEthInterface; - - fn ethereum_gateway_blobs(&self) -> Option<&dyn BoundEthInterface>; + async fn get_l1_block_numbers( + &self, + operator_type: OperatorType, + ) -> Result; } #[derive(Debug)] pub(super) struct RealL1Interface { - pub ethereum_gateway: Box, + pub ethereum_gateway: Option>, pub ethereum_gateway_blobs: Option>, + pub l2_gateway: Option>, pub wait_confirmations: Option, } impl RealL1Interface { - pub(crate) fn query_client(&self) -> &dyn EthInterface { - self.ethereum_gateway().as_ref() + fn query_client(&self, operator_type: OperatorType) -> &dyn EthInterface { + match operator_type { + OperatorType::NonBlob => self.ethereum_gateway.as_deref().unwrap().as_ref(), + OperatorType::Blob => self.ethereum_gateway_blobs.as_deref().unwrap().as_ref(), + OperatorType::Gateway => self.l2_gateway.as_deref().unwrap().as_ref(), + } } - pub(crate) fn query_client_for_operator( - &self, - operator_type: OperatorType, - ) -> &dyn EthInterface { - if operator_type == OperatorType::Blob { - self.ethereum_gateway_blobs().unwrap().as_ref() - } else { - self.ethereum_gateway().as_ref() + fn bound_query_client(&self, operator_type: OperatorType) -> &dyn BoundEthInterface { + match operator_type { + OperatorType::NonBlob => self.ethereum_gateway.as_deref().unwrap(), + OperatorType::Blob => self.ethereum_gateway_blobs.as_deref().unwrap(), + OperatorType::Gateway => 
self.l2_gateway.as_deref().unwrap(), } } } #[async_trait] impl AbstractL1Interface for RealL1Interface { - async fn failure_reason(&self, tx_hash: H256) -> Option { - self.query_client().failure_reason(tx_hash).await.expect( - "Tx is already failed, it's safe to fail here and apply the status on the next run", - ) + fn supported_operator_types(&self) -> Vec { + let mut result = vec![]; + if self.l2_gateway.is_some() { + result.push(OperatorType::Gateway); + } + if self.ethereum_gateway_blobs.is_some() { + result.push(OperatorType::Blob) + } + if self.ethereum_gateway.is_some() { + result.push(OperatorType::NonBlob); + } + result + } + + async fn failure_reason( + &self, + tx_hash: H256, + operator_type: OperatorType, + ) -> Option { + self.query_client(operator_type) + .failure_reason(tx_hash) + .await + .expect( + "Tx is already failed, it's safe to fail here and apply the status on the next run", + ) } #[cfg(test)] - async fn get_tx(&self, tx_hash: H256) -> EnrichedClientResult> { - self.query_client().get_tx(tx_hash).await + async fn get_tx( + &self, + tx_hash: H256, + operator_type: OperatorType, + ) -> EnrichedClientResult> { + self.query_client(operator_type).get_tx(tx_hash).await } async fn get_tx_status( @@ -124,7 +161,7 @@ impl AbstractL1Interface for RealL1Interface { tx_hash: H256, operator_type: OperatorType, ) -> Result, EthSenderError> { - self.query_client_for_operator(operator_type) + self.query_client(operator_type) .get_tx_status(tx_hash) .await .map_err(Into::into) @@ -135,13 +172,12 @@ impl AbstractL1Interface for RealL1Interface { tx_bytes: RawTransactionBytes, operator_type: OperatorType, ) -> EnrichedClientResult { - self.query_client_for_operator(operator_type) - .send_raw_tx(tx_bytes) - .await + self.query_client(operator_type).send_raw_tx(tx_bytes).await } fn get_blobs_operator_account(&self) -> Option
{ - self.ethereum_gateway_blobs() + self.ethereum_gateway_blobs + .as_deref() .as_ref() .map(|s| s.sender_account()) } @@ -151,27 +187,20 @@ impl AbstractL1Interface for RealL1Interface { block_numbers: L1BlockNumbers, operator_type: OperatorType, ) -> Result, EthSenderError> { - let gateway = match operator_type { - OperatorType::NonBlob => Some(self.ethereum_gateway()), - OperatorType::Blob => self.ethereum_gateway_blobs(), - }; - match gateway { - None => Ok(None), - Some(gateway) => { - let finalized = gateway - .nonce_at(block_numbers.finalized.0.into()) - .await? - .as_u32() - .into(); - - let latest = gateway - .nonce_at(block_numbers.latest.0.into()) - .await? - .as_u32() - .into(); - Ok(Some(OperatorNonce { finalized, latest })) - } - } + let finalized = self + .bound_query_client(operator_type) + .nonce_at(block_numbers.finalized.0.into()) + .await? + .as_u32() + .into(); + + let latest = self + .bound_query_client(operator_type) + .nonce_at(block_numbers.latest.0.into()) + .await? + .as_u32() + .into(); + Ok(Some(OperatorNonce { finalized, latest })) } async fn sign_tx( @@ -181,22 +210,9 @@ impl AbstractL1Interface for RealL1Interface { priority_fee_per_gas: u64, blob_gas_price: Option, max_aggregated_tx_gas: U256, + operator_type: OperatorType, ) -> SignedCallResult { - // Chose the signing gateway. Use a custom one in case - // the operator is in 4844 mode and the operation at hand is Commit. - // then the optional gateway is used to send this transaction from a - // custom sender account. - let signing_gateway = if let Some(blobs_gateway) = self.ethereum_gateway_blobs() { - if tx.tx_type == AggregatedActionType::Commit { - blobs_gateway - } else { - self.ethereum_gateway() - } - } else { - self.ethereum_gateway() - }; - - signing_gateway + self.bound_query_client(operator_type) .sign_prepared_tx_for_addr( tx.raw_tx.clone(), tx.contract_address, @@ -206,34 +222,40 @@ impl AbstractL1Interface for RealL1Interface { opt.max_fee_per_gas = Some(U256::from(base_fee_per_gas + priority_fee_per_gas)); opt.max_priority_fee_per_gas = Some(U256::from(priority_fee_per_gas)); opt.nonce = Some(tx.nonce.0.into()); - opt.transaction_type = if tx.blob_sidecar.is_some() { + opt.transaction_type = Some(EIP_1559_TX_TYPE.into()); + if tx.blob_sidecar.is_some() { + opt.transaction_type = Some(EIP_4844_TX_TYPE.into()); opt.max_fee_per_blob_gas = blob_gas_price; - Some(EIP_4844_TX_TYPE.into()) - } else { - Some(EIP_1559_TX_TYPE.into()) - }; - opt.blob_versioned_hashes = tx.blob_sidecar.as_ref().map(|s| match s { - EthTxBlobSidecar::EthTxBlobSidecarV1(s) => s - .blobs - .iter() - .map(|blob| H256::from_slice(&blob.versioned_hash)) - .collect(), - }); + opt.blob_versioned_hashes = tx.blob_sidecar.as_ref().map(|s| match s { + EthTxBlobSidecar::EthTxBlobSidecarV1(s) => s + .blobs + .iter() + .map(|blob| H256::from_slice(&blob.versioned_hash)) + .collect(), + }); + } }), ) .await .expect("Failed to sign transaction") } - async fn get_l1_block_numbers(&self) -> Result { + async fn get_l1_block_numbers( + &self, + operator_type: OperatorType, + ) -> Result { let (finalized, safe) = if let Some(confirmations) = self.wait_confirmations { - let latest_block_number = self.query_client().block_number().await?.as_u64(); + let latest_block_number: u64 = self + .query_client(operator_type) + .block_number() + .await? 
+ .as_u64(); let finalized = (latest_block_number.saturating_sub(confirmations) as u32).into(); (finalized, finalized) } else { let finalized = self - .query_client() + .query_client(operator_type) .block(BlockId::Number(BlockNumber::Finalized)) .await? .expect("Finalized block must be present on L1") @@ -243,7 +265,7 @@ impl AbstractL1Interface for RealL1Interface { .into(); let safe = self - .query_client() + .query_client(operator_type) .block(BlockId::Number(BlockNumber::Safe)) .await? .expect("Safe block must be present on L1") @@ -254,7 +276,12 @@ impl AbstractL1Interface for RealL1Interface { (finalized, safe) }; - let latest = self.query_client().block_number().await?.as_u32().into(); + let latest = self + .query_client(operator_type) + .block_number() + .await? + .as_u32() + .into(); Ok(L1BlockNumbers { finalized, @@ -262,12 +289,4 @@ impl AbstractL1Interface for RealL1Interface { safe, }) } - - fn ethereum_gateway(&self) -> &dyn BoundEthInterface { - self.ethereum_gateway.as_ref() - } - - fn ethereum_gateway_blobs(&self) -> Option<&dyn BoundEthInterface> { - self.ethereum_gateway_blobs.as_deref() - } } diff --git a/core/node/eth_sender/src/aggregated_operations.rs b/core/node/eth_sender/src/aggregated_operations.rs index 657624e3a7c..2dfaf594265 100644 --- a/core/node/eth_sender/src/aggregated_operations.rs +++ b/core/node/eth_sender/src/aggregated_operations.rs @@ -53,4 +53,9 @@ impl AggregatedOperation { Self::Execute(op) => op.l1_batches[0].header.protocol_version.unwrap(), } } + + pub fn is_prove_or_execute(&self) -> bool { + self.get_action_type() == AggregatedActionType::PublishProofOnchain + || self.get_action_type() == AggregatedActionType::Execute + } } diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 89d10bc2b1e..271a33d49c3 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -5,30 +5,32 @@ use std::{ }; use zksync_eth_client::{ClientError, EnrichedClientError}; -use zksync_node_fee_model::l1_gas_price::L1TxParamsProvider; +use zksync_node_fee_model::l1_gas_price::TxParamsProvider; use zksync_types::eth_sender::TxHistory; -use crate::EthSenderError; +use crate::{abstract_l1_interface::OperatorType, EthSenderError}; #[derive(Debug)] pub(crate) struct EthFees { pub(crate) base_fee_per_gas: u64, pub(crate) priority_fee_per_gas: u64, pub(crate) blob_base_fee_per_gas: Option, + #[allow(dead_code)] + pub(crate) pubdata_price: Option, } pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { fn calculate_fees( &self, previous_sent_tx: &Option, - has_blob_sidecar: bool, time_in_mempool: u32, + operator_type: OperatorType, ) -> Result; } #[derive(Debug)] pub(crate) struct GasAdjusterFeesOracle { - pub gas_adjuster: Arc, + pub gas_adjuster: Arc, pub max_acceptable_priority_fee_in_gwei: u64, } @@ -53,12 +55,14 @@ impl GasAdjusterFeesOracle { previous_sent_tx.blob_base_fee_per_gas.map(|v| v * 2), blob_base_fee_per_gas, ), + pubdata_price: None, }); } Ok(EthFees { base_fee_per_gas, priority_fee_per_gas, blob_base_fee_per_gas, + pubdata_price: None, }) } @@ -105,6 +109,7 @@ impl GasAdjusterFeesOracle { base_fee_per_gas, blob_base_fee_per_gas: None, priority_fee_per_gas, + pubdata_price: None, }) } @@ -143,9 +148,10 @@ impl EthFeesOracle for GasAdjusterFeesOracle { fn calculate_fees( &self, previous_sent_tx: &Option, - has_blob_sidecar: bool, time_in_mempool: u32, + operator_type: OperatorType, ) -> Result { + let has_blob_sidecar = operator_type == 
OperatorType::Blob; if has_blob_sidecar { self.calculate_fees_with_blob_sidecar(previous_sent_tx) } else { diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 9ec79dfc300..856b79eb5c9 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -355,8 +355,27 @@ impl EthTxAggregator { ) .await { + if self.config.tx_aggregation_paused { + tracing::info!( + "Skipping sending operation of type {} for batches {}-{} \ + as tx_aggregation_paused=true", + agg_op.get_action_type(), + agg_op.l1_batch_range().start(), + agg_op.l1_batch_range().end() + ); + return Ok(()); + } + if self.config.tx_aggregation_only_prove_and_execute && !agg_op.is_prove_or_execute() { + tracing::info!( + "Skipping sending commit operation for batches {}-{} \ + as tx_aggregation_only_prove_and_execute=true", + agg_op.l1_batch_range().start(), + agg_op.l1_batch_range().end() + ); + return Ok(()); + } let tx = self - .save_eth_tx(storage, &agg_op, contracts_are_pre_shared_bridge) + .save_eth_tx(storage, &agg_op, contracts_are_pre_shared_bridge, false) .await?; Self::report_eth_tx_saving(storage, &agg_op, &tx).await; } @@ -521,6 +540,7 @@ impl EthTxAggregator { storage: &mut Connection<'_, Core>, aggregated_op: &AggregatedOperation, contracts_are_pre_shared_bridge: bool, + is_gateway: bool, ) -> Result { let mut transaction = storage.start_transaction().await.unwrap(); let op_type = aggregated_op.get_action_type(); @@ -553,6 +573,7 @@ impl EthTxAggregator { eth_tx_predicted_gas, sender_addr, encoded_aggregated_op.sidecar, + is_gateway, ) .await .unwrap(); diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 79a9b1dfdb5..a97aed88a0a 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -6,7 +6,7 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ encode_blob_tx_with_sidecar, BoundEthInterface, ExecutedTxStatus, RawTransactionBytes, }; -use zksync_node_fee_model::l1_gas_price::L1TxParamsProvider; +use zksync_node_fee_model::l1_gas_price::TxParamsProvider; use zksync_shared_metrics::BlockL1Stage; use zksync_types::{eth_sender::EthTx, Address, L1BlockNumber, H256, U256}; use zksync_utils::time::seconds_since_epoch; @@ -37,11 +37,12 @@ impl EthTxManager { pub fn new( pool: ConnectionPool, config: SenderConfig, - gas_adjuster: Arc, - ethereum_gateway: Box, + gas_adjuster: Arc, + ethereum_gateway: Option>, ethereum_gateway_blobs: Option>, + l2_gateway: Option>, ) -> Self { - let ethereum_gateway = ethereum_gateway.for_component("eth_tx_manager"); + let ethereum_gateway = ethereum_gateway.map(|eth| eth.for_component("eth_tx_manager")); let ethereum_gateway_blobs = ethereum_gateway_blobs.map(|eth| eth.for_component("eth_tx_manager")); let fees_oracle = GasAdjusterFeesOracle { @@ -52,6 +53,7 @@ impl EthTxManager { l1_interface: Box::new(RealL1Interface { ethereum_gateway, ethereum_gateway_blobs, + l2_gateway, wait_confirmations: config.wait_confirmations, }), config, @@ -77,18 +79,12 @@ impl EthTxManager { .await .unwrap() { - let operator_type = if op.blob_sidecar.is_some() { - OperatorType::Blob - } else { - OperatorType::NonBlob - }; - // `status` is a Result here and we don't unwrap it with `?` // because if we do and get an `Err`, we won't finish the for loop, // which means we might miss the transaction that actually succeeded. 
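// `self.operator_type(op)` below replaces the old "does the tx have a blob sidecar?" check.
// As defined later in this diff, the mapping is: `is_gateway` => Gateway,
// `from_addr.is_none()` => NonBlob, otherwise Blob. Condensed here for reference:
//
//     fn operator_type(&self, tx: &EthTx) -> OperatorType {
//         if tx.is_gateway { OperatorType::Gateway }
//         else if tx.from_addr.is_none() { OperatorType::NonBlob }
//         else { OperatorType::Blob }
//     }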
match self .l1_interface - .get_tx_status(history_item.tx_hash, operator_type) + .get_tx_status(history_item.tx_hash, self.operator_type(op)) .await { Ok(Some(s)) => return Ok(Some(s)), @@ -118,23 +114,19 @@ impl EthTxManager { .get_last_sent_eth_tx(tx.id) .await .unwrap(); - let has_blob_sidecar = tx.blob_sidecar.is_some(); let EthFees { base_fee_per_gas, priority_fee_per_gas, blob_base_fee_per_gas, + pubdata_price: _, } = self.fees_oracle.calculate_fees( &previous_sent_tx, - has_blob_sidecar, time_in_mempool, + self.operator_type(tx), )?; - let operator_type = if tx.blob_sidecar.is_some() { - OperatorType::Blob - } else { - OperatorType::NonBlob - }; + let operator_type = self.operator_type(tx); if let Some(previous_sent_tx) = previous_sent_tx { METRICS.transaction_resent.inc(); @@ -177,7 +169,7 @@ impl EthTxManager { .observe(priority_fee_per_gas); } - let blob_gas_price = if has_blob_sidecar { + let blob_gas_price = if tx.blob_sidecar.is_some() { Some( blob_base_fee_per_gas .expect("always ready to query blob gas price for blob transactions; qed") @@ -195,6 +187,7 @@ impl EthTxManager { priority_fee_per_gas, blob_gas_price, self.config.max_aggregated_tx_gas.into(), + operator_type, ) .await; @@ -286,7 +279,10 @@ impl EthTxManager { if let Some(operator_nonce) = operator_nonce { let inflight_txs = storage .eth_sender_dal() - .get_inflight_txs(self.operator_address(operator_type)) + .get_inflight_txs( + self.operator_address(operator_type), + operator_type == OperatorType::Gateway, + ) .await .unwrap(); METRICS.number_of_inflight_txs[&operator_type].set(inflight_txs.len()); @@ -427,6 +423,16 @@ impl EthTxManager { } } + fn operator_type(&self, tx: &EthTx) -> OperatorType { + if tx.is_gateway { + OperatorType::Gateway + } else if tx.from_addr.is_none() { + OperatorType::NonBlob + } else { + OperatorType::Blob + } + } + pub async fn fail_tx( &self, storage: &mut Connection<'_, Core>, @@ -440,7 +446,7 @@ impl EthTxManager { .unwrap(); let failure_reason = self .l1_interface - .failure_reason(tx_status.receipt.transaction_hash) + .failure_reason(tx_status.receipt.transaction_hash, self.operator_type(tx)) .await; tracing::error!( @@ -513,10 +519,13 @@ impl EthTxManager { tracing::info!("Stop signal received, eth_tx_manager is shutting down"); break; } - let l1_block_numbers = self.l1_interface.get_l1_block_numbers().await?; + let l1_block_numbers = self + .l1_interface + .get_l1_block_numbers(OperatorType::Blob) + .await?; METRICS.track_block_numbers(&l1_block_numbers); - self.loop_iteration(&mut storage, l1_block_numbers).await; + self.loop_iteration(&mut storage).await; tokio::time::sleep(self.config.tx_poll_period()).await; } Ok(()) @@ -530,7 +539,10 @@ impl EthTxManager { ) { let number_inflight_txs = storage .eth_sender_dal() - .get_inflight_txs(self.operator_address(operator_type)) + .get_inflight_txs( + self.operator_address(operator_type), + operator_type == OperatorType::Gateway, + ) .await .unwrap() .len(); @@ -546,6 +558,7 @@ impl EthTxManager { .get_new_eth_txs( number_of_available_slots_for_eth_txs, &self.operator_address(operator_type), + operator_type == OperatorType::Gateway, ) .await .unwrap(); @@ -594,17 +607,46 @@ impl EthTxManager { Ok(()) } - #[tracing::instrument(skip_all, name = "EthTxManager::loop_iteration")] - pub async fn loop_iteration( + pub async fn assert_there_are_no_pre_gateway_txs_with_gateway_enabled( &mut self, storage: &mut Connection<'_, Core>, - l1_block_numbers: L1BlockNumbers, ) { - tracing::debug!("Loop iteration at block {}", 
l1_block_numbers.latest); - // We can treat those two operators independently as they have different nonces and + if !self + .l1_interface + .supported_operator_types() + .contains(&OperatorType::Gateway) + { + return; + } + + let inflight_count = storage + .eth_sender_dal() + .get_non_gateway_inflight_txs_count_for_gateway_migration() + .await + .unwrap(); + if inflight_count != 0 { + panic!("eth-sender was switched to gateway, but there are still {inflight_count} pre-gateway transactions in-flight!") + } + } + + #[tracing::instrument(skip_all, name = "EthTxManager::loop_iteration")] + pub async fn loop_iteration(&mut self, storage: &mut Connection<'_, Core>) { + self.assert_there_are_no_pre_gateway_txs_with_gateway_enabled(storage) + .await; + + // We can treat blob and non-blob operators independently as they have different nonces and // aggregator makes sure that corresponding Commit transaction is confirmed before creating // a PublishProof transaction - for operator_type in [OperatorType::NonBlob, OperatorType::Blob] { + for operator_type in self.l1_interface.supported_operator_types() { + let l1_block_numbers = self + .l1_interface + .get_l1_block_numbers(operator_type) + .await + .unwrap(); + tracing::info!( + "Loop iteration at block {} for {operator_type:?} operator", + l1_block_numbers.latest + ); self.send_new_eth_txs(storage, l1_block_numbers.latest, operator_type) .await; let result = self diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 5bd5181ed8c..508a38e6173 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -111,6 +111,7 @@ pub(crate) struct EthSenderTester { pub conn: ConnectionPool, pub gateway: Box, pub gateway_blobs: Box, + pub l2_gateway: Box, pub manager: MockEthTxManager, pub aggregator: EthTxAggregator, pub gas_adjuster: Arc, @@ -120,6 +121,7 @@ pub(crate) struct EthSenderTester { next_l1_batch_number_to_prove: L1BatchNumber, next_l1_batch_number_to_execute: L1BatchNumber, tx_sent_in_last_iteration_count: usize, + pub is_l2: bool, } impl EthSenderTester { @@ -176,6 +178,26 @@ impl EthSenderTester { gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); let gateway = Box::new(gateway); + let l2_gateway: MockSettlementLayer = MockSettlementLayer::builder() + .with_fee_history( + std::iter::repeat_with(|| BaseFees { + base_fee_per_gas: 0, + base_fee_per_blob_gas: 0.into(), + l2_pubdata_price: 0.into(), + }) + .take(Self::WAIT_CONFIRMATIONS as usize) + .chain(history.clone()) + .collect(), + ) + .with_non_ordering_confirmation(non_ordering_confirmations) + .with_call_handler(move |call, _| { + assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); + crate::tests::mock_multicall_response() + }) + .build(); + l2_gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); + let l2_gateway = Box::new(l2_gateway); + let gateway_blobs = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { @@ -249,8 +271,9 @@ impl EthSenderTester { connection_pool.clone(), eth_sender.clone(), gas_adjuster.clone(), - gateway.clone(), + Some(gateway.clone()), Some(gateway_blobs.clone()), + None, ); let connection_pool_clone = connection_pool.clone(); @@ -264,6 +287,7 @@ impl EthSenderTester { Self { gateway, gateway_blobs, + l2_gateway, manager, aggregator, gas_adjuster, @@ -274,9 +298,23 @@ impl EthSenderTester { next_l1_batch_number_to_execute: L1BatchNumber(1), next_l1_batch_number_to_prove: L1BatchNumber(1), tx_sent_in_last_iteration_count: 0, + is_l2: false, } } + 
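// The helper below pairs with `assert_there_are_no_pre_gateway_txs_with_gateway_enabled`:
// switching the tester to the gateway while pre-gateway transactions are still in flight
// makes the next manager iteration panic. A minimal usage sketch, mirroring the tests added
// later in this diff (drain the in-flight L1 txs first, then switch):
//
//     first_l1_batch.execute_commit_tx(&mut tester).await;
//     tester.run_eth_sender_tx_manager_iteration().await;
//     tester.assert_inflight_txs_count_equals(0).await;
//     tester.switch_to_using_gateway();
//     tester.run_eth_sender_tx_manager_iteration().await;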
pub fn switch_to_using_gateway(&mut self) { + self.manager = EthTxManager::new( + self.conn.clone(), + EthConfig::for_tests().sender.unwrap(), + self.gas_adjuster.clone(), + None, + None, + Some(self.l2_gateway.clone()), + ); + self.is_l2 = true; + tracing::info!("Switched eth-sender tester to use Gateway!"); + } + pub async fn storage(&self) -> Connection<'_, Core> { self.conn.connection().await.unwrap() } @@ -285,7 +323,7 @@ impl EthSenderTester { let latest = self .manager .l1_interface() - .get_l1_block_numbers() + .get_l1_block_numbers(OperatorType::NonBlob) .await .unwrap() .latest; @@ -341,13 +379,18 @@ impl EthSenderTester { .get_last_sent_eth_tx_hash(l1_batch_number, operation_type) .await .unwrap(); - let (gateway, other) = if tx.blob_base_fee_per_gas.is_some() { - (self.gateway_blobs.as_ref(), self.gateway.as_ref()) + if !self.is_l2 { + let (gateway, other) = if tx.blob_base_fee_per_gas.is_some() { + (self.gateway_blobs.as_ref(), self.gateway.as_ref()) + } else { + (self.gateway.as_ref(), self.gateway_blobs.as_ref()) + }; + gateway.execute_tx(tx.tx_hash, success, confirmations); + other.advance_block_number(confirmations); } else { - (self.gateway.as_ref(), self.gateway_blobs.as_ref()) - }; - gateway.execute_tx(tx.tx_hash, success, confirmations); - other.advance_block_number(confirmations); + self.l2_gateway + .execute_tx(tx.tx_hash, success, confirmations); + } } pub async fn seal_l1_batch(&mut self) -> L1BatchHeader { @@ -407,15 +450,17 @@ impl EthSenderTester { pub async fn run_eth_sender_tx_manager_iteration_after_n_blocks(&mut self, n: u64) { self.gateway.advance_block_number(n); self.gateway_blobs.advance_block_number(n); - let tx_sent_before = self.gateway.sent_tx_count() + self.gateway_blobs.sent_tx_count(); + self.l2_gateway.advance_block_number(n); + let tx_sent_before = self.gateway.sent_tx_count() + + self.gateway_blobs.sent_tx_count() + + self.l2_gateway.sent_tx_count(); self.manager - .loop_iteration( - &mut self.conn.connection().await.unwrap(), - self.get_block_numbers().await, - ) + .loop_iteration(&mut self.conn.connection().await.unwrap()) .await; - self.tx_sent_in_last_iteration_count = - (self.gateway.sent_tx_count() + self.gateway_blobs.sent_tx_count()) - tx_sent_before; + self.tx_sent_in_last_iteration_count = (self.gateway.sent_tx_count() + + self.gateway_blobs.sent_tx_count() + + self.l2_gateway.sent_tx_count()) + - tx_sent_before; } pub async fn run_eth_sender_tx_manager_iteration(&mut self) { @@ -467,6 +512,7 @@ impl EthSenderTester { &mut self.conn.connection().await.unwrap(), &aggregated_operation, false, + self.is_l2, ) .await .unwrap() @@ -491,14 +537,18 @@ impl EthSenderTester { } pub async fn confirm_tx(&mut self, hash: H256, is_blob: bool) { - let (gateway, other) = if is_blob { - (self.gateway_blobs.as_ref(), self.gateway.as_ref()) + if !self.is_l2 { + let (gateway, other) = if is_blob { + (self.gateway_blobs.as_ref(), self.gateway.as_ref()) + } else { + (self.gateway.as_ref(), self.gateway_blobs.as_ref()) + }; + gateway.execute_tx(hash, true, EthSenderTester::WAIT_CONFIRMATIONS); + other.advance_block_number(EthSenderTester::WAIT_CONFIRMATIONS); } else { - (self.gateway.as_ref(), self.gateway_blobs.as_ref()) - }; - gateway.execute_tx(hash, true, EthSenderTester::WAIT_CONFIRMATIONS); - other.advance_block_number(EthSenderTester::WAIT_CONFIRMATIONS); - + self.l2_gateway + .execute_tx(hash, true, EthSenderTester::WAIT_CONFIRMATIONS); + } self.run_eth_sender_tx_manager_iteration().await; } @@ -543,13 +593,13 @@ impl EthSenderTester { } pub async 
fn assert_inflight_txs_count_equals(&mut self, value: usize) { - //sanity check - assert!(self.manager.operator_address(OperatorType::Blob).is_some()); - assert_eq!( + let inflight_count = if !self.is_l2 { + //sanity check + assert!(self.manager.operator_address(OperatorType::Blob).is_some()); self.storage() .await .eth_sender_dal() - .get_inflight_txs(self.manager.operator_address(OperatorType::NonBlob)) + .get_inflight_txs(self.manager.operator_address(OperatorType::NonBlob), false) .await .unwrap() .len() @@ -557,11 +607,22 @@ impl EthSenderTester { .storage() .await .eth_sender_dal() - .get_inflight_txs(self.manager.operator_address(OperatorType::Blob)) + .get_inflight_txs(self.manager.operator_address(OperatorType::Blob), false) .await .unwrap() - .len(), - value, + .len() + } else { + self.storage() + .await + .eth_sender_dal() + .get_inflight_txs(None, true) + .await + .unwrap() + .len() + }; + + assert_eq!( + inflight_count, value, "Unexpected number of in-flight transactions" ); } diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 83c37dd5d0a..e03532458f1 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -160,6 +160,7 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re &mut tester.conn.connection().await.unwrap(), &get_dummy_operation(0), false, + false, ) .await?; @@ -175,7 +176,10 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re .storage() .await .eth_sender_dal() - .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) + .get_inflight_txs( + tester.manager.operator_address(OperatorType::NonBlob), + false + ) .await .unwrap() .len(), @@ -185,7 +189,7 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re let sent_tx = tester .manager .l1_interface() - .get_tx(hash) + .get_tx(hash, OperatorType::NonBlob) .await .unwrap() .expect("no transaction"); @@ -228,7 +232,10 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re .storage() .await .eth_sender_dal() - .get_inflight_txs(tester.manager.operator_address(OperatorType::NonBlob)) + .get_inflight_txs( + tester.manager.operator_address(OperatorType::NonBlob), + false + ) .await .unwrap() .len(), @@ -238,7 +245,7 @@ async fn resend_each_block(commitment_mode: L1BatchCommitmentMode) -> anyhow::Re let resent_tx = tester .manager .l1_interface() - .get_tx(resent_hash) + .get_tx(resent_hash, OperatorType::NonBlob) .await .unwrap() .expect("no transaction"); @@ -425,6 +432,67 @@ async fn transactions_are_not_resent_on_the_same_block() { tester.assert_just_sent_tx_count_equals(0).await; } +#[should_panic( + expected = "eth-sender was switched to gateway, but there are still 1 pre-gateway transactions in-flight!" 
+)] +#[test_log::test(tokio::test)] +async fn switching_to_gateway_while_some_transactions_were_in_flight_should_cause_panic() { + let mut tester = EthSenderTester::new( + ConnectionPool::::test_pool().await, + vec![100; 100], + true, + true, + L1BatchCommitmentMode::Rollup, + ) + .await; + + let _genesis_l1_batch = TestL1Batch::sealed(&mut tester).await; + let first_l1_batch = TestL1Batch::sealed(&mut tester).await; + + first_l1_batch.save_commit_tx(&mut tester).await; + tester.run_eth_sender_tx_manager_iteration().await; + + // sanity check + tester.assert_inflight_txs_count_equals(1).await; + + tester.switch_to_using_gateway(); + tester.run_eth_sender_tx_manager_iteration().await; +} + +#[test_log::test(tokio::test)] +async fn switching_to_gateway_works_for_most_basic_scenario() { + let mut tester = EthSenderTester::new( + ConnectionPool::::test_pool().await, + vec![100; 100], + true, + true, + L1BatchCommitmentMode::Rollup, + ) + .await; + + let _genesis_l1_batch = TestL1Batch::sealed(&mut tester).await; + let first_l1_batch = TestL1Batch::sealed(&mut tester).await; + + first_l1_batch.save_commit_tx(&mut tester).await; + tester.run_eth_sender_tx_manager_iteration().await; + + first_l1_batch.execute_commit_tx(&mut tester).await; + tester.run_eth_sender_tx_manager_iteration().await; + // sanity check + tester.assert_inflight_txs_count_equals(0).await; + + tester.switch_to_using_gateway(); + tester.run_eth_sender_tx_manager_iteration().await; + + first_l1_batch.save_prove_tx(&mut tester).await; + tester.run_eth_sender_tx_manager_iteration().await; + tester.assert_inflight_txs_count_equals(1).await; + + first_l1_batch.execute_prove_tx(&mut tester).await; + tester.run_eth_sender_tx_manager_iteration().await; + tester.assert_inflight_txs_count_equals(0).await; +} + #[test_casing(2, COMMITMENT_MODES)] #[test_log::test(tokio::test)] async fn correct_order_for_confirmations( diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 244220da026..4ed9cf1330e 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -12,7 +12,7 @@ use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U use zksync_web3_decl::client::{DynClient, L1, L2}; use self::metrics::METRICS; -use super::L1TxParamsProvider; +use super::TxParamsProvider; mod metrics; #[cfg(test)] @@ -310,7 +310,7 @@ impl GasAdjuster { } } -impl L1TxParamsProvider for GasAdjuster { +impl TxParamsProvider for GasAdjuster { // This is the method where we decide how much we are ready to pay for the // base_fee based on the number of L1 blocks the transaction has been in the mempool. // This is done in order to avoid base_fee spikes (e.g. during NFT drops) and @@ -331,21 +331,6 @@ impl L1TxParamsProvider for GasAdjuster { new_fee as u64 } - fn get_blob_base_fee(&self) -> u64 { - let a = self.config.pricing_formula_parameter_a; - let b = self.config.pricing_formula_parameter_b; - - // Use the single evaluation at zero of the following: - // Currently we use an exponential formula. 
- // The alternative is a linear one:
- // `let scale_factor = a + b * time_in_mempool as f64;`
- let scale_factor = a * b.powf(0.0);
- let median = self.blob_base_fee_statistics.median();
- METRICS.median_blob_base_fee_per_gas.set(median.as_u64());
- let new_fee = median.as_u64() as f64 * scale_factor;
- new_fee as u64
- }
-
 fn get_next_block_minimal_base_fee(&self) -> u64 {
 let last_block_base_fee = self.base_fee_statistics.last_added_value();
@@ -379,6 +364,14 @@ impl L1TxParamsProvider for GasAdjuster {
 fn get_blob_tx_priority_fee(&self) -> u64 {
 self.get_priority_fee() * 2
 }
+
+ fn get_gateway_tx_base_fee(&self) -> u64 {
+ todo!()
+ }
+
+ fn get_gateway_tx_pubdata_price(&self) -> u64 {
+ todo!()
+ }
 }
 /// Helper structure responsible for collecting the data about recent transactions,
diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs
index 29db21bc173..2a5d63089ca 100644
--- a/core/node/fee_model/src/l1_gas_price/mod.rs
+++ b/core/node/fee_model/src/l1_gas_price/mod.rs
@@ -14,13 +14,10 @@ mod main_node_fetcher;
 /// mining time into account.
 ///
 /// This trait, as a bound, should only be used in components that actually sign and send transactions.
-pub trait L1TxParamsProvider: fmt::Debug + 'static + Send + Sync {
+pub trait TxParamsProvider: fmt::Debug + 'static + Send + Sync {
 /// Returns the recommended `max_fee_per_gas` value (EIP1559).
 fn get_base_fee(&self, time_in_mempool: u32) -> u64;
- /// Returns the recommended `max_blob_fee_per_gas` value (EIP4844).
- fn get_blob_base_fee(&self) -> u64;
-
 /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559).
 fn get_priority_fee(&self) -> u64;
@@ -35,4 +32,10 @@ pub trait L1TxParamsProvider: fmt::Debug + 'static + Send + Sync {
 /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559) for blob transaction.
 fn get_blob_tx_priority_fee(&self) -> u64;
+
+ /// Returns the recommended `max_fee_per_gas` value for gateway transactions.
+ fn get_gateway_tx_base_fee(&self) -> u64;
+
+ /// Returns the recommended pubdata price for gateway transactions. 
+ fn get_gateway_tx_pubdata_price(&self) -> u64; } diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index dcb9ba2c012..bbad6b9a222 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -20,8 +20,8 @@ use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, system_contracts::get_system_smart_contracts, web3::{BlockNumber, FilterBuilder}, - AccountTreeId, Address, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, ProtocolVersion, - ProtocolVersionId, StorageKey, H256, + AccountTreeId, Address, Bloom, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, + ProtocolVersion, ProtocolVersionId, StorageKey, H256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; @@ -359,6 +359,7 @@ pub async fn create_genesis_l1_batch( protocol_version: Some(protocol_version.minor), virtual_blocks: 0, gas_limit: 0, + logs_bloom: Bloom::zero(), }; let mut transaction = storage.start_transaction().await?; diff --git a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs index 5db53710733..b0f5ff23fe3 100644 --- a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs +++ b/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use zksync_dal::ConnectionPool; use zksync_prover_dal::{Prover, ProverDal}; @@ -38,7 +40,7 @@ impl PeriodicJob for FriGpuProverArchiver { .await .unwrap() .fri_gpu_prover_queue_dal() - .archive_old_provers(self.archive_prover_after_secs) + .archive_old_provers(Duration::from_secs(self.archive_prover_after_secs)) .await; tracing::info!("Archived {:?} fri gpu prover records", archived_provers); HOUSE_KEEPER_METRICS diff --git a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs index 02268c60e5f..684c955231c 100644 --- a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs +++ b/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use zksync_dal::ConnectionPool; use zksync_prover_dal::{Prover, ProverDal}; @@ -38,7 +40,7 @@ impl PeriodicJob for FriProverJobsArchiver { .await .unwrap() .fri_prover_jobs_dal() - .archive_old_jobs(self.archiving_interval_secs) + .archive_old_jobs(Duration::from_secs(self.archiving_interval_secs)) .await; tracing::info!("Archived {:?} fri prover jobs", archived_jobs); HOUSE_KEEPER_METRICS diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs index f429367c44a..12dfae86ab4 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs @@ -2,9 +2,9 @@ use async_trait::async_trait; use zksync_config::configs::fri_prover_group::FriProverGroupConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_prover_dal::{Prover, ProverDal}; +use zksync_types::{basic_fri_types::CircuitIdRoundTuple, prover_dal::JobCountStatistics}; use crate::{periodic_job::PeriodicJob, prover::metrics::FRI_PROVER_METRICS}; - /// `FriProverQueueReporter` is a task that periodically reports prover jobs status. /// Note: these values will be used for auto-scaling provers and Witness Vector Generators. 
#[derive(Debug)] @@ -39,45 +39,42 @@ impl PeriodicJob for FriProverQueueReporter { let mut conn = self.prover_connection_pool.connection().await.unwrap(); let stats = conn.fri_prover_jobs_dal().get_prover_jobs_stats().await; - for (job_identifiers, stats) in &stats { - // BEWARE, HERE BE DRAGONS. - // In database, the `circuit_id` stored is the circuit for which the aggregation is done, - // not the circuit which is running. - // There is a single node level aggregation circuit, which is circuit 2. - // This can aggregate multiple leaf nodes (which may belong to different circuits). - // This reporting is a hacky forced way to use `circuit_id` 2 which will solve auto scalers. - // A proper fix will be later provided to solve this at database level. - let circuit_id = if job_identifiers.aggregation_round == 2 { - 2 - } else { - job_identifiers.circuit_id - }; - - let group_id = self - .config - .get_group_id_for_circuit_id_and_aggregation_round( + for (protocol_semantic_version, circuit_prover_stats) in stats { + for (tuple, stat) in circuit_prover_stats { + let CircuitIdRoundTuple { + circuit_id, + aggregation_round, + } = tuple; + let JobCountStatistics { + queued, + in_progress, + } = stat; + let group_id = self + .config + .get_group_id_for_circuit_id_and_aggregation_round( + circuit_id, + aggregation_round, + ) + .unwrap_or(u8::MAX); + + FRI_PROVER_METRICS.report_prover_jobs( + "queued", circuit_id, - job_identifiers.aggregation_round, - ) - .unwrap_or(u8::MAX); - - FRI_PROVER_METRICS.report_prover_jobs( - "queued", - circuit_id, - job_identifiers.aggregation_round, - group_id, - job_identifiers.get_semantic_protocol_version(), - stats.queued as u64, - ); - - FRI_PROVER_METRICS.report_prover_jobs( - "in_progress", - circuit_id, - job_identifiers.aggregation_round, - group_id, - job_identifiers.get_semantic_protocol_version(), - stats.in_progress as u64, - ); + aggregation_round, + group_id, + protocol_semantic_version, + queued as u64, + ); + + FRI_PROVER_METRICS.report_prover_jobs( + "in_progress", + circuit_id, + aggregation_round, + group_id, + protocol_semantic_version, + in_progress as u64, + ); + } } let lag_by_circuit_type = conn diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs index 817d1e29025..b3d990e2754 100644 --- a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs +++ b/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs @@ -48,7 +48,7 @@ impl FriWitnessGeneratorJobRetryManager { .await .unwrap() .fri_witness_generator_dal() - .requeue_stuck_jobs(self.processing_timeouts.basic(), self.max_attempts) + .requeue_stuck_basic_jobs(self.processing_timeouts.basic(), self.max_attempts) .await; self.emit_telemetry("witness_inputs_fri", &stuck_jobs); } @@ -60,10 +60,7 @@ impl FriWitnessGeneratorJobRetryManager { .await .unwrap() .fri_witness_generator_dal() - .requeue_stuck_leaf_aggregations_jobs( - self.processing_timeouts.leaf(), - self.max_attempts, - ) + .requeue_stuck_leaf_jobs(self.processing_timeouts.leaf(), self.max_attempts) .await; self.emit_telemetry("leaf_aggregations_jobs_fri", &stuck_jobs); } @@ -75,10 +72,7 @@ impl FriWitnessGeneratorJobRetryManager { .await .unwrap() .fri_witness_generator_dal() - .requeue_stuck_node_aggregations_jobs( - self.processing_timeouts.node(), - self.max_attempts, - ) + 
.requeue_stuck_node_jobs(self.processing_timeouts.node(), self.max_attempts) .await; self.emit_telemetry("node_aggregations_jobs_fri", &stuck_jobs); } diff --git a/core/node/logs_bloom_backfill/Cargo.toml b/core/node/logs_bloom_backfill/Cargo.toml new file mode 100644 index 00000000000..706fdb22fce --- /dev/null +++ b/core/node/logs_bloom_backfill/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "zksync_logs_bloom_backfill" +description = "ZKsync logs bloom backfill" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_dal.workspace = true +zksync_types.workspace = true + +tokio = { workspace = true, features = ["time"] } +anyhow.workspace = true +tracing.workspace = true + +[dev-dependencies] +zksync_vm_interface.workspace = true diff --git a/core/node/logs_bloom_backfill/src/lib.rs b/core/node/logs_bloom_backfill/src/lib.rs new file mode 100644 index 00000000000..4337c0b8dc9 --- /dev/null +++ b/core/node/logs_bloom_backfill/src/lib.rs @@ -0,0 +1,242 @@ +use std::time::Duration; + +use anyhow::Context; +use tokio::sync::watch; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_types::{block::build_bloom, BloomInput, L2BlockNumber}; + +#[derive(Debug)] +pub struct LogsBloomBackfill { + connection_pool: ConnectionPool, +} + +#[derive(Debug, PartialEq)] +enum BloomWaitOutcome { + Ok, + Canceled, +} + +impl LogsBloomBackfill { + pub fn new(connection_pool: ConnectionPool) -> Self { + Self { connection_pool } + } + + async fn wait_for_l2_block_with_bloom( + connection: &mut Connection<'_, Core>, + stop_receiver: &mut watch::Receiver, + ) -> anyhow::Result { + const INTERVAL: Duration = Duration::from_secs(1); + tracing::debug!("waiting for at least one L2 block in DB with bloom"); + + loop { + if *stop_receiver.borrow() { + return Ok(BloomWaitOutcome::Canceled); + } + + if connection.blocks_dal().has_last_l2_block_bloom().await? { + return Ok(BloomWaitOutcome::Ok); + } + + // We don't check the result: if a stop signal is received, we'll return at the start + // of the next iteration. + tokio::time::timeout(INTERVAL, stop_receiver.changed()) + .await + .ok(); + } + } + + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let mut connection = self + .connection_pool + .connection_tagged("logs_bloom_backfill") + .await?; + + if Self::wait_for_l2_block_with_bloom(&mut connection, &mut stop_receiver).await? + == BloomWaitOutcome::Canceled + { + return Ok(()); // Stop signal received + } + + let genesis_block_has_bloom = connection + .blocks_dal() + .has_l2_block_bloom(L2BlockNumber(0)) + .await?; + if genesis_block_has_bloom { + return Ok(()); // Migration has already been completed. + } + + let max_block_without_bloom = connection + .blocks_dal() + .get_max_l2_block_without_bloom() + .await?; + let Some(max_block_without_bloom) = max_block_without_bloom else { + tracing::info!("all blooms are already there, exiting migration"); + return Ok(()); + }; + let first_l2_block = connection + .blocks_dal() + .get_earliest_l2_block_number() + .await? 
+ .context( + "logs_bloom_backfill: missing l2 block in DB after waiting for at least one", + )?; + + tracing::info!("starting blooms backfill from block {max_block_without_bloom}"); + let mut right_bound = max_block_without_bloom.0; + loop { + const WINDOW: u32 = 1000; + + if *stop_receiver.borrow_and_update() { + tracing::info!("received a stop signal; logs bloom backfill is shut down"); + return Ok(()); + } + + let left_bound = right_bound.saturating_sub(WINDOW - 1).max(first_l2_block.0); + tracing::info!( + "started calculating blooms for block range {left_bound}..={right_bound}" + ); + + let mut bloom_items = connection + .events_dal() + .get_bloom_items_for_l2_blocks( + L2BlockNumber(left_bound)..=L2BlockNumber(right_bound), + ) + .await?; + + let blooms: Vec<_> = (left_bound..=right_bound) + .map(|block| { + let items = bloom_items + .remove(&L2BlockNumber(block)) + .unwrap_or_default(); + let iter = items.iter().map(|v| BloomInput::Raw(v.as_slice())); + build_bloom(iter) + }) + .collect(); + connection + .blocks_dal() + .range_update_logs_bloom(L2BlockNumber(left_bound), &blooms) + .await?; + tracing::info!("filled blooms for block range {left_bound}..={right_bound}"); + + if left_bound == first_l2_block.0 { + break; + } else { + right_bound = left_bound - 1; + } + } + + tracing::info!("logs bloom backfill is finished"); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use zksync_types::{ + block::L2BlockHeader, tx::IncludedTxLocation, Address, L1BatchNumber, H256, + }; + use zksync_vm_interface::VmEvent; + + use super::*; + + async fn create_l2_block( + conn: &mut Connection<'_, Core>, + l2_block_number: L2BlockNumber, + block_events: &[VmEvent], + ) { + let l2_block_header = L2BlockHeader { + number: l2_block_number, + timestamp: 0, + hash: H256::from_low_u64_be(u64::from(l2_block_number.0)), + l1_tx_count: 0, + l2_tx_count: 0, + fee_account_address: Address::repeat_byte(1), + base_fee_per_gas: 0, + gas_per_pubdata_limit: 0, + batch_fee_input: Default::default(), + base_system_contracts_hashes: Default::default(), + protocol_version: Some(Default::default()), + virtual_blocks: 0, + gas_limit: 0, + logs_bloom: Default::default(), + }; + + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let events_vec: Vec<_> = block_events.iter().collect(); + conn.events_dal() + .save_events( + l2_block_number, + &[( + IncludedTxLocation { + tx_hash: Default::default(), + tx_index_in_l2_block: 0, + tx_initiator_address: Default::default(), + }, + events_vec, + )], + ) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_logs_bloom_backfill() { + let connection_pool = ConnectionPool::<Core>::test_pool().await; + let mut connection = connection_pool.connection().await.unwrap(); + connection + .protocol_versions_dal() + .save_protocol_version_with_tx(&Default::default()) + .await + .unwrap(); + + let blocks_count = 5u32; + for block_number in 0..blocks_count { + let event = VmEvent { + location: (L1BatchNumber(0), 0), + address: Address::from_low_u64_be(block_number as u64 + 1), + indexed_topics: Vec::new(), + value: Vec::new(), + }; + create_l2_block(&mut connection, L2BlockNumber(block_number), &[event]).await; + + if block_number + 1 < blocks_count { + // Drop bloom if block is not last.
+ connection + .blocks_dal() + .drop_l2_block_bloom(L2BlockNumber(block_number)) + .await + .unwrap(); + } + } + let max_block_without_bloom = connection + .blocks_dal() + .get_max_l2_block_without_bloom() + .await + .unwrap(); + assert_eq!( + max_block_without_bloom, + Some(L2BlockNumber(blocks_count) - 2) + ); + + let migration = LogsBloomBackfill::new(connection_pool.clone()); + let (_sender, receiver) = watch::channel(false); + migration.run(receiver).await.unwrap(); + + for block_number in 0..(blocks_count - 1) { + let header = connection + .blocks_dal() + .get_l2_block_header(L2BlockNumber(block_number)) + .await + .unwrap() + .unwrap(); + let address = Address::from_low_u64_be(block_number as u64 + 1); + let contains_address = header + .logs_bloom + .contains_input(BloomInput::Raw(address.as_bytes())); + assert!(contains_address); + } + } +} diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 142d6cfa11a..3a81a578c03 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -54,6 +54,7 @@ zksync_base_token_adjuster.workspace = true zksync_node_storage_init.workspace = true zksync_external_price_api.workspace = true zksync_external_proof_integration_api.workspace = true +zksync_logs_bloom_backfill.workspace = true pin-project-lite.workspace = true tracing.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs index b5f8ee42313..d6989d8db72 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs @@ -6,7 +6,10 @@ use zksync_eth_sender::EthTxManager; use crate::{ implementations::resources::{ circuit_breakers::CircuitBreakersResource, - eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, + eth_interface::{ + BoundEthInterfaceForBlobsResource, BoundEthInterfaceForL2Resource, + BoundEthInterfaceResource, + }, gas_adjuster::GasAdjusterResource, pools::{MasterPool, PoolResource, ReplicaPool}, }, @@ -27,7 +30,7 @@ use crate::{ /// - `PoolResource` /// - `BoundEthInterfaceResource` /// - `BoundEthInterfaceForBlobsResource` (optional) -/// - `L1TxParamsResource` +/// - `TxParamsResource` /// - `CircuitBreakersResource` (adds a circuit breaker) /// /// ## Adds tasks @@ -45,6 +48,7 @@ pub struct Input { pub replica_pool: PoolResource, pub eth_client: BoundEthInterfaceResource, pub eth_client_blobs: Option, + pub l2_client: Option, pub gas_adjuster: GasAdjusterResource, #[context(default)] pub circuit_breakers: CircuitBreakersResource, @@ -79,6 +83,7 @@ impl WiringLayer for EthTxManagerLayer { let eth_client = input.eth_client.0; let eth_client_blobs = input.eth_client_blobs.map(|c| c.0); + let l2_client = input.l2_client.map(|c| c.0); let config = self.eth_sender_config.sender.context("sender")?; @@ -88,8 +93,9 @@ impl WiringLayer for EthTxManagerLayer { master_pool, config, gas_adjuster, - eth_client, + Some(eth_client), eth_client_blobs, + l2_client, ); // Insert circuit breaker. 
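// --- Illustrative sketch (not part of the changeset) ---
// The `zksync_logs_bloom_backfill` crate above and the L2 block sealing changes later in
// this diff derive a block's `logsBloom` with `build_bloom`/`BloomInput::Raw` and query it
// with `contains_input`. Those helpers are modeled on the standard Ethereum-style 2048-bit
// logs bloom: for each input, three 11-bit indices are taken from its keccak-256 hash and
// the corresponding bits are set. Below is a minimal, self-contained sketch of that scheme;
// hashing is elided, so the functions take a precomputed 32-byte hash.

/// Sets the three bloom bits derived from `input_hash` (assumed to be `keccak256(input)`).
fn accrue(bloom: &mut [u8; 256], input_hash: &[u8; 32]) {
    for i in [0usize, 2, 4] {
        // Low 11 bits of the big-endian u16 formed by hash bytes (i, i + 1).
        let bit = (((input_hash[i] as usize) << 8) | input_hash[i + 1] as usize) & 0x7ff;
        bloom[256 - 1 - bit / 8] |= 1u8 << (bit % 8);
    }
}

/// Returns `true` if all three bits for `input_hash` are set, i.e. the input *may* be present.
fn contains(bloom: &[u8; 256], input_hash: &[u8; 32]) -> bool {
    [0usize, 2, 4].into_iter().all(|i| {
        let bit = (((input_hash[i] as usize) << 8) | input_hash[i + 1] as usize) & 0x7ff;
        (bloom[256 - 1 - bit / 8] & (1u8 << (bit % 8))) != 0
    })
}

fn main() {
    let mut bloom = [0u8; 256];
    let hash_of_address = [0xab_u8; 32]; // stand-in for keccak256(event.address)
    accrue(&mut bloom, &hash_of_address);
    assert!(contains(&bloom, &hash_of_address));
    // A hash that maps to different bit positions is reported as absent.
    assert!(!contains(&bloom, &[0x01_u8; 32]));
}
// --- end sketch ---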
diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 13f593644dc..53eeb1c5280 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -56,7 +56,7 @@ impl WiringLayer for EthWatchLayer { } async fn wire(self, input: Self::Input) -> Result { - let main_pool = input.master_pool.get().await.unwrap(); + let main_pool = input.master_pool.get().await?; let client = input.eth_client.0; let eth_client = EthHttpQueryClient::new( diff --git a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs index 7877bc6abbe..6f8805bc5fa 100644 --- a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs +++ b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs @@ -59,7 +59,7 @@ impl WiringLayer for ExternalProofIntegrationApiLayer { } async fn wire(self, input: Self::Input) -> Result { - let main_pool = input.master_pool.get().await.unwrap(); + let main_pool = input.master_pool.get().await?; let blob_store = input.object_store.0; let task = ProverApiTask { diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 9a4ccb8264f..35c4bc3fc20 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -9,7 +9,7 @@ use crate::{ base_token_ratio_provider::BaseTokenRatioProviderResource, fee_input::{ApiFeeInputResource, SequencerFeeInputResource}, gas_adjuster::GasAdjusterResource, - l1_tx_params::L1TxParamsResource, + l1_tx_params::TxParamsResource, pools::{PoolResource, ReplicaPool}, }, wiring_layer::{WiringError, WiringLayer}, @@ -38,7 +38,7 @@ pub struct Input { pub struct Output { pub sequencer_fee_input: SequencerFeeInputResource, pub api_fee_input: ApiFeeInputResource, - pub l1_tx_params: L1TxParamsResource, + pub l1_tx_params: TxParamsResource, } impl L1GasLayer { diff --git a/core/node/node_framework/src/implementations/layers/logs_bloom_backfill.rs b/core/node/node_framework/src/implementations/layers/logs_bloom_backfill.rs new file mode 100644 index 00000000000..4e37549a775 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/logs_bloom_backfill.rs @@ -0,0 +1,61 @@ +use zksync_logs_bloom_backfill::LogsBloomBackfill; + +use crate::{ + implementations::resources::pools::{MasterPool, PoolResource}, + service::StopReceiver, + task::{Task, TaskId, TaskKind}, + wiring_layer::{WiringError, WiringLayer}, + FromContext, IntoContext, +}; + +/// Wiring layer for the logs bloom backfill task. +/// +/// Responsible for initializing and running the [`LogsBloomBackfill`] task, which backfills `logsBloom` for old blocks.
+#[derive(Debug)] +pub struct LogsBloomBackfillLayer; + +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub master_pool: PoolResource, +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + #[context(task)] + pub logs_bloom_backfill: LogsBloomBackfill, +} + +#[async_trait::async_trait] +impl WiringLayer for LogsBloomBackfillLayer { + type Input = Input; + type Output = Output; + + fn layer_name(&self) -> &'static str { + "logs_bloom_backfill_layer" + } + + async fn wire(self, input: Self::Input) -> Result { + let pool = input.master_pool.get_singleton().await?; + let logs_bloom_backfill = LogsBloomBackfill::new(pool); + Ok(Output { + logs_bloom_backfill, + }) + } +} + +#[async_trait::async_trait] +impl Task for LogsBloomBackfill { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + + fn id(&self) -> TaskId { + "logs_bloom_backfill".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 6256f2d6104..6f3500a82cb 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -15,6 +15,7 @@ pub mod healtcheck_server; pub mod house_keeper; pub mod l1_batch_commitment_mode_validation; pub mod l1_gas; +pub mod logs_bloom_backfill; pub mod main_node_client; pub mod main_node_fee_params_fetcher; pub mod metadata_calculator; diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index bcb3cedc6e7..b53ff73c1a0 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -59,7 +59,7 @@ impl WiringLayer for ProofDataHandlerLayer { } async fn wire(self, input: Self::Input) -> Result { - let main_pool = input.master_pool.get().await.unwrap(); + let main_pool = input.master_pool.get().await?; let blob_store = input.object_store.0; let task = ProofDataHandlerTask { diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index b0dfe0f1600..a77344f3706 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -102,7 +102,7 @@ impl WiringLayer for StateKeeperLayer { let state_keeper = StateKeeperTask { io, - batch_executor_base, + batch_executor: batch_executor_base, output_handler, sealer, storage_factory: Arc::new(storage_factory), @@ -125,7 +125,7 @@ impl WiringLayer for StateKeeperLayer { #[derive(Debug)] pub struct StateKeeperTask { io: Box, - batch_executor_base: Box, + batch_executor: Box, output_handler: OutputHandler, sealer: Arc, storage_factory: Arc, @@ -141,7 +141,7 @@ impl Task for StateKeeperTask { let state_keeper = ZkSyncStateKeeper::new( stop_receiver.0, self.io, - self.batch_executor_base, + self.batch_executor, self.output_handler, self.sealer, self.storage_factory, diff --git a/core/node/node_framework/src/implementations/resources/eth_interface.rs b/core/node/node_framework/src/implementations/resources/eth_interface.rs index 5879610b75e..24b7df327f6 100644 --- 
a/core/node/node_framework/src/implementations/resources/eth_interface.rs +++ b/core/node/node_framework/src/implementations/resources/eth_interface.rs @@ -46,3 +46,12 @@ impl Resource for BoundEthInterfaceForBlobsResource { "common/bound_eth_interface_for_blobs".into() } } + +#[derive(Debug, Clone)] +pub struct BoundEthInterfaceForL2Resource(pub Box); + +impl Resource for BoundEthInterfaceForL2Resource { + fn name() -> String { + "common/bound_eth_interface_for_l2".into() + } +} diff --git a/core/node/node_framework/src/implementations/resources/l1_tx_params.rs b/core/node/node_framework/src/implementations/resources/l1_tx_params.rs index 676828c3988..5cb8af5ed44 100644 --- a/core/node/node_framework/src/implementations/resources/l1_tx_params.rs +++ b/core/node/node_framework/src/implementations/resources/l1_tx_params.rs @@ -1,20 +1,20 @@ use std::sync::Arc; -use zksync_node_fee_model::l1_gas_price::L1TxParamsProvider; +use zksync_node_fee_model::l1_gas_price::TxParamsProvider; use crate::resource::Resource; -/// A resource that provides [`L1TxParamsProvider`] implementation to the service. +/// A resource that provides [`TxParamsProvider`] implementation to the service. #[derive(Debug, Clone)] -pub struct L1TxParamsResource(pub Arc); +pub struct TxParamsResource(pub Arc); -impl Resource for L1TxParamsResource { +impl Resource for TxParamsResource { fn name() -> String { - "common/l1_tx_params".into() + "common/tx_params".into() } } -impl From> for L1TxParamsResource { +impl From> for TxParamsResource { fn from(provider: Arc) -> Self { Self(provider) } diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index e091472ad51..edd8306e72e 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -13,7 +13,7 @@ use zksync_node_test_utils::{ use zksync_state_keeper::{ io::{L1BatchParams, L2BlockParams}, seal_criteria::NoopSealer, - testonly::test_batch_executor::TestBatchExecutorBuilder, + testonly::test_batch_executor::{MockReadStorageFactory, TestBatchExecutorBuilder}, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, ZkSyncStateKeeper, }; use zksync_types::{ @@ -132,7 +132,7 @@ impl StateKeeperHandles { Box::new(batch_executor_base), output_handler, Arc::new(NoopSealer), - Arc::new(pool), + Arc::new(MockReadStorageFactory), ); Self { diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml index 890543bcd91..16eb657bc9b 100644 --- a/core/node/state_keeper/Cargo.toml +++ b/core/node/state_keeper/Cargo.toml @@ -29,11 +29,11 @@ zksync_test_account.workspace = true zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_vm_utils.workspace = true +zksync_system_constants.workspace = true zksync_base_token_adjuster.workspace = true anyhow.workspace = true async-trait.workspace = true -tempfile.workspace = true # used in `testonly` module tokio = { workspace = true, features = ["time"] } thiserror.workspace = true tracing.workspace = true @@ -44,6 +44,7 @@ hex.workspace = true [dev-dependencies] assert_matches.workspace = true +tempfile.workspace = true test-casing.workspace = true futures.workspace = true diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/node/state_keeper/src/batch_executor/main_executor.rs index cc05da9235b..db4daeb7744 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/node/state_keeper/src/batch_executor/main_executor.rs @@ -2,21 +2,19 @@ use std::sync::Arc; use anyhow::Context as _; use 
once_cell::sync::OnceCell; -use tokio::{runtime::Handle, sync::mpsc}; +use tokio::sync::mpsc; use zksync_multivm::{ interface::{ storage::{ReadStorage, StorageView}, - ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, + Call, CompressedBytecodeInfo, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, }, tracers::CallTracer, vm_latest::HistoryEnabled, MultiVMTracer, VmInstance, }; use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS}; -use zksync_state::OwnedStorage; -use zksync_types::{vm::FastVmMode, vm_trace::Call, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; +use zksync_types::{vm::FastVmMode, Transaction}; use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}; use crate::{ @@ -58,10 +56,10 @@ impl MainBatchExecutor { } } -impl BatchExecutor for MainBatchExecutor { +impl BatchExecutor for MainBatchExecutor { fn init_batch( &mut self, - storage: OwnedStorage, + storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, ) -> BatchExecutorHandle { @@ -75,20 +73,19 @@ impl BatchExecutor for MainBatchExecutor { commands: commands_receiver, }; - let handle = tokio::task::spawn_blocking(move || { - let storage = match storage { - OwnedStorage::Static(storage) => storage, - OwnedStorage::Lending(ref storage) => Handle::current() - .block_on(storage.borrow()) - .context("failed accessing state keeper storage")?, - }; - executor.run(storage, l1_batch_params, system_env); - anyhow::Ok(()) - }); + let handle = + tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env)); BatchExecutorHandle::from_raw(handle, commands_sender) } } +#[derive(Debug)] +struct TransactionOutput { + tx_result: VmExecutionResultAndLogs, + compressed_bytecodes: Vec, + calls: Vec, +} + /// Implementation of the "primary" (non-test) batch executor. /// Upon launch, it initializes the VM object with provided block context and properties, and keeps invoking the commands /// sent to it one by one until the batch is finished. 
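// --- Illustrative sketch (not part of the changeset) ---
// `CommandReceiver`/`BatchExecutorHandle` follow a command-loop pattern: the handle sends
// commands (each carrying its own response channel) to a dedicated blocking worker that
// owns the VM, and the worker processes them one by one until the batch is finished. A
// condensed std-only sketch of that pattern; the names below are made up for illustration,
// and tokio's blocking task and channels are replaced with plain threads and `std::sync::mpsc`.
use std::sync::mpsc;
use std::thread;

enum Command {
    ExecuteTx(String, mpsc::Sender<String>),
    FinishBatch(mpsc::Sender<usize>),
}

struct Worker {
    executed: usize,
}

impl Worker {
    // Runs on a dedicated thread, mirroring `CommandReceiver::run`: it owns the state and
    // handles commands sequentially until `FinishBatch` arrives (or the handle goes away).
    fn run(mut self, commands: mpsc::Receiver<Command>) {
        while let Ok(cmd) = commands.recv() {
            match cmd {
                Command::ExecuteTx(tx, resp) => {
                    self.executed += 1;
                    // If the handle dropped its receiver, stop early (like the `break`s above).
                    if resp.send(format!("executed {tx}")).is_err() {
                        break;
                    }
                }
                Command::FinishBatch(resp) => {
                    let _ = resp.send(self.executed);
                    return;
                }
            }
        }
    }
}

fn main() {
    let (cmd_tx, cmd_rx) = mpsc::channel();
    let worker = thread::spawn(move || Worker { executed: 0 }.run(cmd_rx));

    // The "handle" side: send a command with a per-command response channel, then await the reply.
    let (resp_tx, resp_rx) = mpsc::channel();
    cmd_tx.send(Command::ExecuteTx("tx1".into(), resp_tx)).unwrap();
    println!("{}", resp_rx.recv().unwrap());

    let (resp_tx, resp_rx) = mpsc::channel();
    cmd_tx.send(Command::FinishBatch(resp_tx)).unwrap();
    println!("batch finished with {} txs", resp_rx.recv().unwrap());

    worker.join().unwrap();
}
// --- end sketch ---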
@@ -106,13 +103,13 @@ struct CommandReceiver { impl CommandReceiver { pub(super) fn run( mut self, - secondary_storage: S, + storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, - ) { + ) -> anyhow::Result<()> { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); - let storage_view = StorageView::new(secondary_storage).to_rc_ptr(); + let storage_view = StorageView::new(storage).to_rc_ptr(); let mut vm = VmInstance::maybe_fast( l1_batch_params, system_env, @@ -123,7 +120,9 @@ impl CommandReceiver { while let Some(cmd) = self.commands.blocking_recv() { match cmd { Command::ExecuteTx(tx, resp) => { - let result = self.execute_tx(&tx, &mut vm); + let result = self + .execute_tx(&tx, &mut vm) + .with_context(|| format!("fatal error executing transaction {tx:?}"))?; if resp.send(result).is_err() { break; } @@ -141,7 +140,7 @@ impl CommandReceiver { } } Command::FinishBatch(resp) => { - let vm_block_result = self.finish_batch(&mut vm); + let vm_block_result = self.finish_batch(&mut vm)?; if resp.send(vm_block_result).is_err() { break; } @@ -153,28 +152,28 @@ impl CommandReceiver { .observe(metrics.time_spent_on_get_value); EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::SetValue] .observe(metrics.time_spent_on_set_value); - return; + return Ok(()); } Command::FinishBatchWithCache(resp) => { - let vm_block_result = self.finish_batch(&mut vm); + let vm_block_result = self.finish_batch(&mut vm)?; let cache = (*storage_view).borrow().cache(); if resp.send((vm_block_result, cache)).is_err() { break; } - - return; + return Ok(()); } } } // State keeper can exit because of stop signal, so it's OK to exit mid-batch. tracing::info!("State keeper exited with an unfinished L1 batch"); + Ok(()) } fn execute_tx( &self, tx: &Transaction, vm: &mut VmInstance, - ) -> TxExecutionResult { + ) -> anyhow::Result { // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot // was already removed), or that we build on top of it (in which case, it can be removed now). vm.pop_snapshot_no_rollback(); @@ -183,33 +182,38 @@ impl CommandReceiver { // Execute the transaction. let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::Execution].start(); - let (tx_result, compressed_bytecodes, call_tracer_result) = - if self.optional_bytecode_compression { - self.execute_tx_in_vm_with_optional_compression(tx, vm) - } else { - self.execute_tx_in_vm(tx, vm) - }; + let output = if self.optional_bytecode_compression { + self.execute_tx_in_vm_with_optional_compression(tx, vm)? + } else { + self.execute_tx_in_vm(tx, vm)? 
+ }; latency.observe(); APP_METRICS.processed_txs[&TxStage::StateKeeper].inc(); APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into()); + let TransactionOutput { + tx_result, + compressed_bytecodes, + calls, + } = output; + if let ExecutionResult::Halt { reason } = tx_result.result { - return match reason { + return Ok(match reason { Halt::BootloaderOutOfGas => TxExecutionResult::BootloaderOutOfGasForTx, _ => TxExecutionResult::RejectedByVm { reason }, - }; + }); } let tx_metrics = ExecutionMetricsForCriteria::new(Some(tx), &tx_result); let gas_remaining = vm.gas_remaining(); - TxExecutionResult::Success { + Ok(TxExecutionResult::Success { tx_result: Box::new(tx_result), tx_metrics: Box::new(tx_metrics), compressed_bytecodes, - call_tracer_result, + call_tracer_result: calls, gas_remaining, - } + }) } fn rollback_last_tx(&self, vm: &mut VmInstance) { @@ -229,19 +233,18 @@ impl CommandReceiver { fn finish_batch( &self, vm: &mut VmInstance, - ) -> FinishedL1Batch { + ) -> anyhow::Result { // The vm execution was paused right after the last transaction was executed. // There is some post-processing work that the VM needs to do before the block is fully processed. let result = vm.finish_batch(); - if result.block_tip_execution_result.result.is_failed() { - panic!( - "VM must not fail when finalizing block: {:#?}", - result.block_tip_execution_result.result - ); - } + anyhow::ensure!( + !result.block_tip_execution_result.result.is_failed(), + "VM must not fail when finalizing block: {:#?}", + result.block_tip_execution_result.result + ); BATCH_TIP_METRICS.observe(&result.block_tip_execution_result); - result + Ok(result) } /// Attempts to execute transaction with or without bytecode compression. @@ -250,11 +253,7 @@ impl CommandReceiver { &self, tx: &Transaction, vm: &mut VmInstance, - ) -> ( - VmExecutionResultAndLogs, - Vec, - Vec, - ) { + ) -> anyhow::Result { // Note, that the space where we can put the calldata for compressing transactions // is limited and the transactions do not pay for taking it. // In order to not let the accounts spam the space of compressed bytecodes with bytecodes @@ -271,16 +270,20 @@ impl CommandReceiver { vec![] }; - if let (Ok(()), result) = + if let (Ok(()), tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true) { let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); - let trace = Arc::try_unwrap(call_tracer_result) - .unwrap() + let calls = Arc::try_unwrap(call_tracer_result) + .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? 
.take() .unwrap_or_default(); - return (result, compressed_bytecodes, trace); + return Ok(TransactionOutput { + tx_result, + compressed_bytecodes, + calls, + }); } // Roll back to the snapshot just before the transaction execution taken in `Self::execute_tx()` @@ -295,20 +298,22 @@ impl CommandReceiver { vec![] }; - let result = + let (compression_result, tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), false); - result - .0 - .expect("Compression can't fail if we don't apply it"); + compression_result.context("compression failed when it wasn't applied")?; let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); // TODO implement tracer manager which will be responsible - // for collecting result from all tracers and save it to the database - let trace = Arc::try_unwrap(call_tracer_result) - .unwrap() + // for collecting result from all tracers and save it to the database + let calls = Arc::try_unwrap(call_tracer_result) + .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? .take() .unwrap_or_default(); - (result.1, compressed_bytecodes, trace) + Ok(TransactionOutput { + tx_result, + compressed_bytecodes, + calls, + }) } /// Attempts to execute transaction with mandatory bytecode compression. @@ -317,11 +322,7 @@ impl CommandReceiver { &self, tx: &Transaction, vm: &mut VmInstance, - ) -> ( - VmExecutionResultAndLogs, - Vec, - Vec, - ) { + ) -> anyhow::Result { let call_tracer_result = Arc::new(OnceCell::default()); let tracer = if self.save_call_traces { vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] @@ -329,22 +330,29 @@ impl CommandReceiver { vec![] }; - let (published_bytecodes, mut result) = + let (published_bytecodes, mut tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true); if published_bytecodes.is_ok() { let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); - - let trace = Arc::try_unwrap(call_tracer_result) - .unwrap() + let calls = Arc::try_unwrap(call_tracer_result) + .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? .take() .unwrap_or_default(); - (result, compressed_bytecodes, trace) + Ok(TransactionOutput { + tx_result, + compressed_bytecodes, + calls, + }) } else { // Transaction failed to publish bytecodes, we reject it so initiator doesn't pay fee. 
- result.result = ExecutionResult::Halt { + tx_result.result = ExecutionResult::Halt { reason: Halt::FailedToPublishCompressedBytecodes, }; - (result, Default::default(), Default::default()) + Ok(TransactionOutput { + tx_result, + compressed_bytecodes: vec![], + calls: vec![], + }) } } } diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/node/state_keeper/src/batch_executor/mod.rs index b6f57694afa..235a8f581c8 100644 --- a/core/node/state_keeper/src/batch_executor/mod.rs +++ b/core/node/state_keeper/src/batch_executor/mod.rs @@ -6,12 +6,11 @@ use tokio::{ task::JoinHandle, }; use zksync_multivm::interface::{ - storage::StorageViewCache, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionResultAndLogs, + storage::StorageViewCache, Call, CompressedBytecodeInfo, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmExecutionResultAndLogs, }; use zksync_state::OwnedStorage; -use zksync_types::{vm_trace::Call, Transaction}; -use zksync_utils::bytecode::CompressedBytecodeInfo; +use zksync_types::Transaction; use crate::{ metrics::{ExecutorCommand, EXECUTOR_METRICS}, @@ -55,10 +54,12 @@ impl TxExecutionResult { /// An abstraction that allows us to create different kinds of batch executors. /// The only requirement is to return a [`BatchExecutorHandle`], which does its work /// by communicating with the externally initialized thread. -pub trait BatchExecutor: 'static + Send + Sync + fmt::Debug { +/// +/// This type is generic over the storage type accepted to create the VM instance, mostly for testing purposes. +pub trait BatchExecutor: 'static + Send + Sync + fmt::Debug { fn init_batch( &mut self, - storage: OwnedStorage, + storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, ) -> BatchExecutorHandle; diff --git a/core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs b/core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs index 838b9240767..e0096cd0417 100644 --- a/core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs +++ b/core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs @@ -2,7 +2,7 @@ use anyhow::Context; use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{ConnectionPool, Core}; -use zksync_state::{OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, RocksdbStorage}; +use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorage}; use zksync_types::L1BatchNumber; #[derive(Debug, Clone)] @@ -33,7 +33,7 @@ impl ReadStorageFactory for RocksdbStorageFactory { else { return Ok(None); }; - Ok(Some(PgOrRocksdbStorage::Rocksdb(rocksdb_storage).into())) + Ok(Some(OwnedStorage::Rocksdb(rocksdb_storage))) } } diff --git a/core/node/state_keeper/src/batch_executor/tests/tester.rs b/core/node/state_keeper/src/batch_executor/tests/tester.rs index 6730d427c67..e70c8b06fe0 100644 --- a/core/node/state_keeper/src/batch_executor/tests/tester.rs +++ b/core/node/state_keeper/src/batch_executor/tests/tester.rs @@ -10,6 +10,7 @@ use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractEx use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv}, + utils::StorageWritesDeduplicator, vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_node_genesis::{create_genesis_l1_batch, GenesisParams}; @@ -21,7 +22,6 @@ use zksync_types::{ ethabi::Token, protocol_version::ProtocolSemanticVersion, snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, - 
storage_writes_deduplicator::StorageWritesDeduplicator, system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance, vm::FastVmMode, diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index f3b3f6e0fb4..4d2907e8291 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -9,14 +9,15 @@ use futures::FutureExt; use zksync_config::GenesisConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l2_block, create_l2_transaction, execute_l2_transaction, prepare_recovery_snapshot, }; use zksync_types::{ - block::L2BlockHasher, fee::TransactionExecutionMetrics, - protocol_version::ProtocolSemanticVersion, L2ChainId, ProtocolVersion, ProtocolVersionId, + block::L2BlockHasher, protocol_version::ProtocolSemanticVersion, L2ChainId, ProtocolVersion, + ProtocolVersionId, }; use zksync_vm_utils::storage::L1BatchParamsProvider; diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index de9ac22e177..4dfb7400ffc 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -352,11 +352,11 @@ mod tests { use assert_matches::assert_matches; use futures::FutureExt; use zksync_dal::CoreDal; - use zksync_multivm::zk_evm_latest::ethereum_types::{H256, U256}; + use zksync_multivm::interface::VmExecutionMetrics; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_types::{ - api::TransactionStatus, block::BlockGasCount, tx::ExecutionMetrics, - writes::StateDiffRecord, L1BatchNumber, L2BlockNumber, StorageLogKind, + api::TransactionStatus, block::BlockGasCount, writes::StateDiffRecord, L1BatchNumber, + L2BlockNumber, StorageLogKind, H256, U256, }; use zksync_utils::h256_to_u256; @@ -464,7 +464,7 @@ mod tests { tx_result, vec![], BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); output_handler.handle_l2_block(&updates).await.unwrap(); diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 03495c0d98b..7ef466805e3 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -1,7 +1,15 @@ use anyhow::Context; use async_trait::async_trait; +use once_cell::sync::Lazy; use zksync_dal::{Connection, Core, CoreDal}; -use zksync_types::{event::extract_added_tokens, L2BlockNumber}; +use zksync_multivm::interface::VmEvent; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_types::{ + ethabi, + tokens::{TokenInfo, TokenMetadata}, + Address, L2BlockNumber, H256, +}; +use zksync_utils::h256_to_account_address; use crate::{ io::seal_logic::SealStrategy, @@ -9,6 +17,87 @@ use crate::{ updates::L2BlockSealCommand, }; +fn extract_added_tokens( + l2_shared_bridge_addr: Address, + all_generated_events: &[VmEvent], +) -> Vec { + let deployed_tokens = all_generated_events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. 
+ event.address == CONTRACT_DEPLOYER_ADDRESS + && event.indexed_topics.len() == 4 + && event.indexed_topics[0] == VmEvent::DEPLOY_EVENT_SIGNATURE + && h256_to_account_address(&event.indexed_topics[1]) == l2_shared_bridge_addr + }) + .map(|event| h256_to_account_address(&event.indexed_topics[3])); + + extract_added_token_info_from_addresses(all_generated_events, deployed_tokens) +} + +fn extract_added_token_info_from_addresses( + all_generated_events: &[VmEvent], + deployed_tokens: impl Iterator, +) -> Vec { + static BRIDGE_INITIALIZATION_SIGNATURE_OLD: Lazy = Lazy::new(|| { + ethabi::long_signature( + "BridgeInitialization", + &[ + ethabi::ParamType::Address, + ethabi::ParamType::String, + ethabi::ParamType::String, + ethabi::ParamType::Uint(8), + ], + ) + }); + + static BRIDGE_INITIALIZATION_SIGNATURE_NEW: Lazy = Lazy::new(|| { + ethabi::long_signature( + "BridgeInitialize", + &[ + ethabi::ParamType::Address, + ethabi::ParamType::String, + ethabi::ParamType::String, + ethabi::ParamType::Uint(8), + ], + ) + }); + + deployed_tokens + .filter_map(|l2_token_address| { + all_generated_events + .iter() + .find(|event| { + event.address == l2_token_address + && (event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE_NEW + || event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE_OLD) + }) + .map(|event| { + let l1_token_address = h256_to_account_address(&event.indexed_topics[1]); + let mut dec_ev = ethabi::decode( + &[ + ethabi::ParamType::String, + ethabi::ParamType::String, + ethabi::ParamType::Uint(8), + ], + &event.value, + ) + .unwrap(); + + TokenInfo { + l1_address: l1_token_address, + l2_address: l2_token_address, + metadata: TokenMetadata { + name: dec_ev.remove(0).into_string().unwrap(), + symbol: dec_ev.remove(0).into_string().unwrap(), + decimals: dec_ev.remove(0).into_uint().unwrap().as_u32() as u8, + }, + } + }) + }) + .collect() +} + /// Helper struct that encapsulates parallel l2 block sealing logic. 
#[derive(Debug)] pub struct L2BlockSealProcess; @@ -367,6 +456,7 @@ impl L2BlockSealSubtask for InsertL2ToL1LogsSubtask { mod tests { use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::{ + interface::{TransactionExecutionResult, TxExecutionStatus}, utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}, zk_evm_latest::ethereum_types::H256, VmVersion, @@ -375,9 +465,8 @@ mod tests { use zksync_types::{ block::L2BlockHeader, l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult}, AccountTreeId, Address, L1BatchNumber, ProtocolVersionId, StorageKey, StorageLog, - StorageLogKind, StorageLogWithPreviousValue, VmEvent, + StorageLogKind, StorageLogWithPreviousValue, }; use zksync_utils::h256_to_u256; @@ -526,6 +615,7 @@ mod tests { gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(VmVersion::latest()), virtual_blocks: l2_block_seal_command.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(VmVersion::latest()), + logs_bloom: Default::default(), }; connection .protocol_versions_dal() diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 92630015f2a..0dae7fae908 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -9,21 +9,22 @@ use std::{ use anyhow::Context as _; use itertools::Itertools; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_multivm::utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}; +use zksync_multivm::{ + interface::{DeduplicatedWritesMetrics, TransactionExecutionResult, VmEvent}, + utils::{ + get_max_batch_gas_limit, get_max_gas_per_pubdata_byte, ModifiedSlot, + StorageWritesDeduplicator, + }, +}; use zksync_shared_metrics::{BlockStage, L2BlockStage, APP_METRICS}; use zksync_types::{ - block::{L1BatchHeader, L2BlockHeader}, - event::extract_long_l2_to_l1_messages, + block::{build_bloom, L1BatchHeader, L2BlockHeader}, helpers::unix_timestamp_ms, l2_to_l1_log::UserL2ToL1Log, - storage_writes_deduplicator::{ModifiedSlot, StorageWritesDeduplicator}, - tx::{ - tx_execution_info::DeduplicatedWritesMetrics, IncludedTxLocation, - TransactionExecutionResult, - }, + tx::IncludedTxLocation, utils::display_timestamp, - Address, ExecuteTransactionCommon, ProtocolVersionId, StorageKey, StorageLog, Transaction, - VmEvent, H256, + Address, BloomInput, ExecuteTransactionCommon, ProtocolVersionId, StorageKey, StorageLog, + Transaction, H256, }; use zksync_utils::u256_to_h256; @@ -110,7 +111,7 @@ impl UpdatesManager { let progress = L1_BATCH_METRICS.start(L1BatchSealStage::InsertL1BatchHeader); let l2_to_l1_messages = - extract_long_l2_to_l1_messages(&finished_batch.final_execution_state.events); + VmEvent::extract_long_l2_to_l1_messages(&finished_batch.final_execution_state.events); let l1_batch = L1BatchHeader { number: self.l1_batch.number, timestamp: self.batch_timestamp(), @@ -358,6 +359,17 @@ impl L2BlockSealCommand { // Run sub-tasks in parallel. L2BlockSealProcess::run_subtasks(self, strategy).await?; + let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::CalculateLogsBloom, is_fictive); + let iter = self.l2_block.events.iter().flat_map(|event| { + event + .indexed_topics + .iter() + .map(|topic| BloomInput::Raw(topic.as_bytes())) + .chain([BloomInput::Raw(event.address.as_bytes())]) + }); + let logs_bloom = build_bloom(iter); + progress.observe(Some(self.l2_block.events.len())); + // Seal block header at the last step. 
let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertL2BlockHeader, is_fictive); let definite_vm_version = self @@ -379,6 +391,7 @@ impl L2BlockSealCommand { gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(definite_vm_version), virtual_blocks: self.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(definite_vm_version), + logs_bloom, }; let mut connection = strategy.connection().await?; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 7c70607c763..7ea01e6af1e 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -4,16 +4,17 @@ use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; -use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; +use zksync_multivm::{ + interface::{TransactionExecutionMetrics, VmEvent, VmExecutionMetrics}, + utils::derive_base_fee_and_gas_per_pubdata, +}; use zksync_node_test_utils::prepare_recovery_snapshot; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, commitment::L1BatchCommitmentMode, - fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, - tx::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, - ProtocolVersionId, StorageKey, VmEvent, H256, U256, + ProtocolVersionId, StorageKey, H256, U256, }; use zksync_utils::time::seconds_since_epoch; @@ -246,7 +247,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { tx, execution_result, BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); @@ -264,7 +265,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { tx, execution_result, BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); @@ -353,7 +354,7 @@ async fn processing_events_when_sealing_l2_block() { tx, execution_result, BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); @@ -457,7 +458,7 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom create_execution_result([]), vec![], BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index dc5e5f345d5..2dc45a5eaaa 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -10,7 +10,10 @@ use zksync_config::{ use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; -use zksync_multivm::vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT; +use zksync_multivm::{ + interface::{TransactionExecutionMetrics, TransactionExecutionResult}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; use zksync_node_fee_model::{ l1_gas_price::{GasAdjuster, GasAdjusterClient}, MainNodeFeeInputProvider, @@ -22,12 +25,10 @@ use zksync_node_test_utils::{ use zksync_types::{ block::L2BlockHeader, commitment::L1BatchCommitmentMode, - fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, system_contracts::get_system_smart_contracts, - 
tx::TransactionExecutionResult, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, H256, }; diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index 934ed9493f8..a610194ab9c 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -1,18 +1,22 @@ use std::{ convert::Infallible, + fmt, sync::Arc, time::{Duration, Instant}, }; use anyhow::Context as _; +use async_trait::async_trait; use tokio::sync::watch; use tracing::{info_span, Instrument}; -use zksync_multivm::interface::{Halt, L1BatchEnv, SystemEnv}; +use zksync_multivm::{ + interface::{Halt, L1BatchEnv, SystemEnv}, + utils::StorageWritesDeduplicator, +}; use zksync_state::ReadStorageFactory; use zksync_types::{ block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, - protocol_version::ProtocolVersionId, storage_writes_deduplicator::StorageWritesDeduplicator, - utils::display_timestamp, L1BatchNumber, Transaction, + protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, }; use super::{ @@ -48,6 +52,45 @@ impl Error { } } +/// Functionality [`BatchExecutor`] + [`ReadStorageFactory`] with an erased storage type. This allows to keep +/// [`ZkSyncStateKeeper`] not parameterized by the storage type, simplifying its dependency injection and usage in tests. +#[async_trait] +trait ErasedBatchExecutor: fmt::Debug + Send { + async fn init_batch( + &mut self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + stop_receiver: &watch::Receiver, + ) -> Result; +} + +/// The only [`ErasedBatchExecutor`] implementation. +#[derive(Debug)] +struct ErasedBatchExecutorImpl { + batch_executor: Box>, + storage_factory: Arc>, +} + +#[async_trait] +impl ErasedBatchExecutor for ErasedBatchExecutorImpl { + async fn init_batch( + &mut self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + stop_receiver: &watch::Receiver, + ) -> Result { + let storage = self + .storage_factory + .access_storage(stop_receiver, l1_batch_env.number - 1) + .await + .context("failed creating VM storage")? + .ok_or(Error::Canceled)?; + Ok(self + .batch_executor + .init_batch(storage, l1_batch_env, system_env)) + } +} + /// State keeper represents a logic layer of L1 batch / L2 block processing flow. /// It's responsible for taking all the data from the `StateKeeperIO`, feeding it into `BatchExecutor` objects /// and calling `SealManager` to decide whether an L2 block or L1 batch should be sealed. 
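// --- Illustrative sketch (not part of the changeset) ---
// `ErasedBatchExecutor`/`ErasedBatchExecutorImpl` above use the classic type-erasure
// pattern: the storage-generic executor and storage factory are paired up behind an
// object-safe trait, so `ZkSyncStateKeeper` can hold a plain boxed trait object instead of
// being generic over the storage type. A condensed std-only sketch of the same idea; all
// names below are illustrative, and the async/`Result` plumbing of the real trait is omitted.

// Storage-generic traits, analogous to `BatchExecutor<S>` and `ReadStorageFactory<S>`.
trait Executor<S> {
    fn init_batch(&mut self, storage: S) -> String;
}
trait StorageFactory<S> {
    fn access_storage(&self) -> S;
}

// Object-safe erased trait: no storage type parameter appears in its methods.
trait ErasedExecutor {
    fn init_batch(&mut self) -> String;
}

// The only implementation bundles an executor and a factory for one concrete `S`.
struct ErasedExecutorImpl<S> {
    executor: Box<dyn Executor<S>>,
    factory: Box<dyn StorageFactory<S>>,
}

impl<S> ErasedExecutor for ErasedExecutorImpl<S> {
    fn init_batch(&mut self) -> String {
        let storage = self.factory.access_storage();
        self.executor.init_batch(storage)
    }
}

// Trivial instantiation with `()` as the storage type, like the mock executor and
// `MockReadStorageFactory` used by the state keeper tests in this diff.
struct MockExecutor;
impl Executor<()> for MockExecutor {
    fn init_batch(&mut self, _storage: ()) -> String {
        "initialized mock batch".into()
    }
}
struct MockFactory;
impl StorageFactory<()> for MockFactory {
    fn access_storage(&self) {}
}

fn main() {
    let inner = ErasedExecutorImpl::<()> {
        executor: Box::new(MockExecutor),
        factory: Box::new(MockFactory),
    };
    // The consumer only ever sees `dyn ErasedExecutor`; the storage type is erased here.
    let mut erased: Box<dyn ErasedExecutor> = Box::new(inner);
    println!("{}", erased.init_batch());
}
// --- end sketch ---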
@@ -62,27 +105,28 @@ pub struct ZkSyncStateKeeper { stop_receiver: watch::Receiver, io: Box, output_handler: OutputHandler, - batch_executor_base: Box, + batch_executor: Box, sealer: Arc, - storage_factory: Arc, } impl ZkSyncStateKeeper { - pub fn new( + pub fn new( stop_receiver: watch::Receiver, sequencer: Box, - batch_executor_base: Box, + batch_executor: Box>, output_handler: OutputHandler, sealer: Arc, - storage_factory: Arc, + storage_factory: Arc>, ) -> Self { Self { stop_receiver, io: sequencer, - batch_executor_base, + batch_executor: Box::new(ErasedBatchExecutorImpl { + batch_executor, + storage_factory, + }), output_handler, sealer, - storage_factory, } } @@ -146,7 +190,12 @@ impl ZkSyncStateKeeper { .await?; let mut batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .batch_executor + .init_batch( + l1_batch_env.clone(), + system_env.clone(), + &self.stop_receiver, + ) .await?; self.restore_state(&mut batch_executor, &mut updates_manager, pending_l2_blocks) .await?; @@ -195,7 +244,12 @@ impl ZkSyncStateKeeper { (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .batch_executor + .init_batch( + l1_batch_env.clone(), + system_env.clone(), + &self.stop_receiver, + ) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -208,24 +262,6 @@ impl ZkSyncStateKeeper { Err(Error::Canceled) } - async fn create_batch_executor( - &mut self, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - ) -> Result { - let Some(storage) = self - .storage_factory - .access_storage(&self.stop_receiver, l1_batch_env.number - 1) - .await - .context("failed creating VM storage")? - else { - return Err(Error::Canceled); - }; - Ok(self - .batch_executor_base - .init_batch(storage, l1_batch_env, system_env)) - } - /// This function is meant to be called only once during the state-keeper initialization. /// It will check if we should load a protocol upgrade or a `setChainId` transaction, /// perform some checks and return it. 
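// --- Illustrative sketch (not part of the changeset) ---
// The state keeper and the logs bloom backfill task both take a `tokio::sync::watch::Receiver<bool>`
// as a stop signal; the backfill's loops check `borrow_and_update()` and sleep by racing a
// timeout against `changed()`. A minimal sketch of that pattern, assuming the `tokio` crate
// (e.g. with `features = ["full"]`):
use std::time::Duration;

use tokio::sync::watch;

async fn worker(mut stop_receiver: watch::Receiver<bool>) {
    const POLL_INTERVAL: Duration = Duration::from_millis(50);
    loop {
        // Check the latest value of the stop flag and exit if it was raised.
        if *stop_receiver.borrow_and_update() {
            println!("stop signal received; shutting down");
            return;
        }
        // ... do one unit of work here ...

        // Sleep, but wake up early if the stop flag changes; the result is ignored because
        // the flag is re-checked at the top of the loop (the same trick the backfill task uses).
        tokio::time::timeout(POLL_INTERVAL, stop_receiver.changed())
            .await
            .ok();
    }
}

#[tokio::main]
async fn main() {
    let (stop_sender, stop_receiver) = watch::channel(false);
    let handle = tokio::spawn(worker(stop_receiver));

    tokio::time::sleep(Duration::from_millis(120)).await;
    stop_sender.send(true).unwrap(); // raise the stop flag
    handle.await.unwrap();
}
// --- end sketch ---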
diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index 5003d75b669..dbe1e4cb977 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -158,13 +158,11 @@ async fn get_transaction_nonces( #[cfg(test)] mod tests { + use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::create_l2_transaction; - use zksync_types::{ - fee::TransactionExecutionMetrics, L2BlockNumber, PriorityOpId, ProtocolVersionId, - StorageLog, H256, - }; + use zksync_types::{L2BlockNumber, PriorityOpId, ProtocolVersionId, StorageLog, H256}; use zksync_utils::u256_to_h256; use super::*; diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs index 0f9650881b2..1bf314d1b91 100644 --- a/core/node/state_keeper/src/metrics.rs +++ b/core/node/state_keeper/src/metrics.rs @@ -10,9 +10,11 @@ use vise::{ Metrics, }; use zksync_mempool::MempoolStore; -use zksync_multivm::interface::{VmExecutionResultAndLogs, VmRevertReason}; +use zksync_multivm::interface::{ + DeduplicatedWritesMetrics, VmExecutionResultAndLogs, VmRevertReason, +}; use zksync_shared_metrics::InteractionType; -use zksync_types::{tx::tx_execution_info::DeduplicatedWritesMetrics, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use super::seal_criteria::SealResolution; @@ -346,6 +348,7 @@ pub(super) enum L2BlockSealStage { ExtractL2ToL1Logs, InsertL2ToL1Logs, ReportTxMetrics, + CalculateLogsBloom, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] diff --git a/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs b/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs index 264618f5d13..1f3e8d104ce 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/geometry_seal_criteria.rs @@ -69,7 +69,7 @@ impl SealCriterion for CircuitsCriterion { } #[cfg(test)] mod tests { - use zksync_types::{circuit::CircuitStatistic, tx::ExecutionMetrics}; + use zksync_multivm::interface::{CircuitStatistic, VmExecutionMetrics}; use super::*; @@ -85,7 +85,7 @@ mod tests { } fn test_no_seal_block_resolution( - block_execution_metrics: ExecutionMetrics, + block_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -105,7 +105,7 @@ mod tests { } fn test_include_and_seal_block_resolution( - block_execution_metrics: ExecutionMetrics, + block_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -125,7 +125,7 @@ mod tests { } fn test_exclude_and_seal_block_resolution( - block_execution_metrics: ExecutionMetrics, + block_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -145,7 +145,7 @@ mod tests { } fn test_unexecutable_tx_resolution( - tx_execution_metrics: ExecutionMetrics, + tx_execution_metrics: VmExecutionMetrics, criterion: &dyn SealCriterion, protocol_version: ProtocolVersionId, ) { @@ -169,12 +169,12 @@ mod tests { fn circuits_seal_criterion() { let config = get_config(); let protocol_version = ProtocolVersionId::latest(); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { 
main_vm: (MAX_CIRCUITS_PER_BATCH / 4) as f32, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_no_seal_block_resolution( block_execution_metrics, @@ -182,7 +182,7 @@ mod tests { protocol_version, ); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { main_vm: (MAX_CIRCUITS_PER_BATCH - 1 @@ -191,7 +191,7 @@ mod tests { )) as f32, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_include_and_seal_block_resolution( @@ -200,12 +200,12 @@ mod tests { protocol_version, ); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { main_vm: MAX_CIRCUITS_PER_BATCH as f32, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_exclude_and_seal_block_resolution( @@ -214,14 +214,14 @@ mod tests { protocol_version, ); - let tx_execution_metrics = ExecutionMetrics { + let tx_execution_metrics = VmExecutionMetrics { circuit_statistic: CircuitStatistic { main_vm: MAX_CIRCUITS_PER_BATCH as f32 * config.reject_tx_at_geometry_percentage as f32 + 1.0, ..CircuitStatistic::default() }, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; test_unexecutable_tx_resolution(tx_execution_metrics, &CircuitsCriterion, protocol_version); diff --git a/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs b/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs index f575a905891..09fcf2f0fc1 100644 --- a/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs +++ b/core/node/state_keeper/src/seal_criteria/criteria/pubdata_bytes.rs @@ -66,7 +66,7 @@ impl SealCriterion for PubDataBytesCriterion { #[cfg(test)] mod tests { - use zksync_types::tx::ExecutionMetrics; + use zksync_multivm::interface::VmExecutionMetrics; use super::*; @@ -84,7 +84,7 @@ mod tests { max_pubdata_per_batch: 100000, }; - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { l2_l1_long_messages: (config.max_pubdata_per_batch as f64 * config.close_block_at_eth_params_percentage - 1.0 @@ -92,7 +92,7 @@ mod tests { ProtocolVersionId::latest().into(), ) as f64) .round() as usize, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; let empty_block_resolution = criterion.should_seal( @@ -108,12 +108,12 @@ mod tests { ); assert_eq!(empty_block_resolution, SealResolution::NoSeal); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { l2_l1_long_messages: (config.max_pubdata_per_batch as f64 * config.close_block_at_eth_params_percentage + 1f64) .round() as usize, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; let full_block_resolution = criterion.should_seal( @@ -129,9 +129,9 @@ mod tests { ); assert_eq!(full_block_resolution, SealResolution::IncludeAndSeal); - let block_execution_metrics = ExecutionMetrics { + let block_execution_metrics = VmExecutionMetrics { l2_l1_long_messages: config.max_pubdata_per_batch as usize + 1, - ..ExecutionMetrics::default() + ..VmExecutionMetrics::default() }; let full_block_resolution = criterion.should_seal( &config, diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index 01be129dde6..e3fe849e802 100644 --- 
a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -13,13 +13,12 @@ use std::fmt; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_multivm::{interface::Halt, vm_latest::TransactionVmExt}; +use zksync_multivm::{ + interface::{DeduplicatedWritesMetrics, Halt, TransactionExecutionMetrics, VmExecutionMetrics}, + vm_latest::TransactionVmExt, +}; use zksync_types::{ - block::BlockGasCount, - fee::TransactionExecutionMetrics, - tx::tx_execution_info::{DeduplicatedWritesMetrics, ExecutionMetrics}, - utils::display_timestamp, - ProtocolVersionId, Transaction, + block::BlockGasCount, utils::display_timestamp, ProtocolVersionId, Transaction, }; use zksync_utils::time::millis_since; @@ -159,7 +158,7 @@ impl SealResolution { /// to the entire L2 block / L1 batch. #[derive(Debug, Default)] pub struct SealData { - pub(super) execution_metrics: ExecutionMetrics, + pub(super) execution_metrics: VmExecutionMetrics, pub(super) gas_count: BlockGasCount, pub(super) cumulative_size: usize, pub(super) writes_metrics: DeduplicatedWritesMetrics, @@ -174,7 +173,7 @@ impl SealData { tx_metrics: &TransactionExecutionMetrics, protocol_version: ProtocolVersionId, ) -> Self { - let execution_metrics = ExecutionMetrics::from_tx_metrics(tx_metrics); + let execution_metrics = VmExecutionMetrics::from_tx_metrics(tx_metrics); let writes_metrics = DeduplicatedWritesMetrics::from_tx_metrics(tx_metrics); let gas_count = gas_count_from_tx_and_metrics(transaction, &execution_metrics) + gas_count_from_writes(&writes_metrics, protocol_version); @@ -289,7 +288,7 @@ mod tests { create_execution_result([]), vec![], BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); } diff --git a/core/node/state_keeper/src/state_keeper_storage.rs b/core/node/state_keeper/src/state_keeper_storage.rs index fbda064b5d7..1b35f8ef73d 100644 --- a/core/node/state_keeper/src/state_keeper_storage.rs +++ b/core/node/state_keeper/src/state_keeper_storage.rs @@ -5,8 +5,7 @@ use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{ConnectionPool, Core}; use zksync_state::{ - AsyncCatchupTask, OwnedPostgresStorage, OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, - RocksdbCell, RocksdbStorageOptions, + AsyncCatchupTask, OwnedStorage, ReadStorageFactory, RocksdbCell, RocksdbStorageOptions, }; use zksync_types::L1BatchNumber; @@ -58,24 +57,20 @@ impl ReadStorageFactory for AsyncRocksdbCache { self.rocksdb_cell.get() }; - if let Some(rocksdb) = rocksdb { - let mut connection = self - .pool - .connection_tagged("state_keeper") - .await - .context("Failed getting a Postgres connection")?; - let storage = PgOrRocksdbStorage::rocksdb( - &mut connection, - rocksdb, - stop_receiver, - l1_batch_number, - ) + let mut connection = self + .pool + .connection_tagged("state_keeper") .await - .context("Failed accessing RocksDB storage")?; - Ok(storage.map(Into::into)) + .context("Failed getting a Postgres connection")?; + if let Some(rocksdb) = rocksdb { + let storage = + OwnedStorage::rocksdb(&mut connection, rocksdb, stop_receiver, l1_batch_number) + .await + .context("Failed accessing RocksDB storage")?; + Ok(storage) } else { Ok(Some( - OwnedPostgresStorage::new(self.pool.clone(), l1_batch_number).into(), + OwnedStorage::postgres(connection, l1_batch_number).await?, )) } } diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 02b0043b97c..d17261a3a0f 100644 --- 
a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -9,7 +9,6 @@ use zksync_multivm::interface::{ storage::StorageViewCache, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, Refunds, SystemEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, }; -use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, @@ -78,10 +77,10 @@ pub(crate) fn storage_view_cache() -> StorageViewCache { #[derive(Debug)] pub struct MockBatchExecutor; -impl BatchExecutor for MockBatchExecutor { +impl BatchExecutor<()> for MockBatchExecutor { fn init_batch( &mut self, - _storage: OwnedStorage, + _storage: (), _l1batch_params: L1BatchEnv, _system_env: SystemEnv, ) -> BatchExecutorHandle { diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index aefc8d50bc7..d8ee36990a1 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -20,7 +20,7 @@ use zksync_multivm::{ vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_node_test_utils::create_l2_transaction; -use zksync_state::{OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, RocksdbStorage}; +use zksync_state::ReadStorageFactory; use zksync_types::{ fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -207,7 +207,7 @@ impl TestScenario { Box::new(batch_executor_base), output_handler, Arc::new(sealer), - Arc::::default(), + Arc::new(MockReadStorageFactory), ); let sk_thread = tokio::spawn(state_keeper.run()); @@ -410,10 +410,10 @@ impl TestBatchExecutorBuilder { } } -impl BatchExecutor for TestBatchExecutorBuilder { +impl BatchExecutor<()> for TestBatchExecutorBuilder { fn init_batch( &mut self, - _storage: OwnedStorage, + _storage: (), _l1_batch_params: L1BatchEnv, _system_env: SystemEnv, ) -> BatchExecutorHandle { @@ -806,28 +806,15 @@ impl StateKeeperIO for TestIO { /// Storage factory that produces empty VM storage for any batch. Should only be used with a mock batch executor /// that doesn't read from the storage. Prefer using `ConnectionPool` as a factory if it's available. 
#[derive(Debug)] -pub struct MockReadStorageFactory(tempfile::TempDir); - -impl Default for MockReadStorageFactory { - fn default() -> Self { - Self( - tempfile::TempDir::new() - .expect("failed creating temporary directory for `MockReadStorageFactory`"), - ) - } -} +pub struct MockReadStorageFactory; #[async_trait] -impl ReadStorageFactory for MockReadStorageFactory { +impl ReadStorageFactory<()> for MockReadStorageFactory { async fn access_storage( &self, _stop_receiver: &watch::Receiver<bool>, _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result<Option<OwnedStorage>> { - let storage = RocksdbStorage::builder(self.0.path()) - .await - .expect("Cannot create mock RocksDB storage") - .build_unchecked(); - Ok(Some(PgOrRocksdbStorage::Rocksdb(storage).into())) + ) -> anyhow::Result<Option<()>> { + Ok(Some(())) } } diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index a5239f44483..e9a0a57c697 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -11,7 +11,7 @@ use zksync_config::configs::chain::StateKeeperConfig; use zksync_multivm::{ interface::{ ExecutionResult, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxExecutionMode, - VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, + VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -20,7 +20,6 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L2BlockExecutionData, L2BlockHasher}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, - tx::tx_execution_info::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, Transaction, H256, U256, ZKPORTER_IS_AVAILABLE, }; @@ -210,7 +209,7 @@ async fn sealed_by_gas() { }; let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { l1_gas: l1_gas_per_tx, - execution_metrics: ExecutionMetrics::default(), + execution_metrics: VmExecutionMetrics::default(), }); TestScenario::new() @@ -261,7 +260,7 @@ async fn sealed_by_gas_then_by_num_tx() { prove: 0, execute: 0, }, - execution_metrics: ExecutionMetrics::default(), + execution_metrics: VmExecutionMetrics::default(), }); // 1st tx is sealed by gas sealer; 2nd, 3rd, & 4th are sealed by slots sealer.
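For context on the `MockReadStorageFactory` / `ReadStorageFactory<()>` rework above: the factory (and the batch executor it feeds) is now generic over the storage handle it produces, so tests whose executor never reads state can plug in the unit type instead of building a throwaway RocksDB instance. A minimal, deliberately simplified sketch of the pattern (synchronous, placeholder signature, not the actual `zksync_state` trait):

```rust
use anyhow::Result;

// Simplified stand-in for the storage-generic factory trait; the real trait is
// async and also takes a stop receiver, both omitted here for brevity.
trait ReadStorageFactory<S> {
    fn access_storage(&self, l1_batch_number: u32) -> Result<Option<S>>;
}

// Mock factory for tests whose batch executor ignores storage entirely.
#[derive(Debug)]
struct MockReadStorageFactory;

impl ReadStorageFactory<()> for MockReadStorageFactory {
    fn access_storage(&self, _l1_batch_number: u32) -> Result<Option<()>> {
        // No backing store is created; the unit value satisfies the generic interface.
        Ok(Some(()))
    }
}

fn main() -> Result<()> {
    let factory = MockReadStorageFactory;
    assert_eq!(factory.access_storage(1)?, Some(()));
    Ok(())
}
```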
@@ -438,7 +437,7 @@ async fn load_upgrade_tx() { Box::new(batch_executor_base), output_handler, Arc::new(sealer), - Arc::::default(), + Arc::new(MockReadStorageFactory), ); // Since the version hasn't changed, and we are not using shared bridge, we should not load any diff --git a/core/node/state_keeper/src/types.rs b/core/node/state_keeper/src/types.rs index 2606e7d5c7b..e112871a647 100644 --- a/core/node/state_keeper/src/types.rs +++ b/core/node/state_keeper/src/types.rs @@ -5,10 +5,8 @@ use std::{ use zksync_dal::{Connection, Core, CoreDal}; use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore}; -use zksync_multivm::interface::VmExecutionResultAndLogs; -use zksync_types::{ - block::BlockGasCount, tx::ExecutionMetrics, Address, Nonce, PriorityOpId, Transaction, -}; +use zksync_multivm::interface::{VmExecutionMetrics, VmExecutionResultAndLogs}; +use zksync_types::{block::BlockGasCount, Address, Nonce, PriorityOpId, Transaction}; use super::{ metrics::StateKeeperGauges, @@ -83,7 +81,7 @@ impl MempoolGuard { #[derive(Debug, Clone, Copy, PartialEq)] pub struct ExecutionMetricsForCriteria { pub l1_gas: BlockGasCount, - pub execution_metrics: ExecutionMetrics, + pub execution_metrics: VmExecutionMetrics, } impl ExecutionMetricsForCriteria { diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index 7bc2095ff9b..aa2e22cac48 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -1,8 +1,6 @@ -use zksync_multivm::interface::FinishedL1Batch; +use zksync_multivm::interface::{FinishedL1Batch, TransactionExecutionResult, VmExecutionMetrics}; use zksync_types::{ - block::BlockGasCount, - priority_op_onchain_data::PriorityOpOnchainData, - tx::{tx_execution_info::ExecutionMetrics, TransactionExecutionResult}, + block::BlockGasCount, priority_op_onchain_data::PriorityOpOnchainData, ExecuteTransactionCommon, L1BatchNumber, }; @@ -13,7 +11,7 @@ pub struct L1BatchUpdates { pub number: L1BatchNumber, pub executed_transactions: Vec, pub priority_ops_onchain_data: Vec, - pub block_execution_metrics: ExecutionMetrics, + pub block_execution_metrics: VmExecutionMetrics, // how much L1 gas will it take to submit this block? 
pub l1_gas_count: BlockGasCount, pub txs_encoding_size: usize, @@ -76,7 +74,7 @@ mod tests { tx, create_execution_result([]), BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index 8b3060babad..d8673088dc3 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -1,21 +1,45 @@ use std::collections::HashMap; +use once_cell::sync::Lazy; use zksync_multivm::{ - interface::{ExecutionResult, L2BlockEnv, VmExecutionResultAndLogs}, + interface::{ + Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, + TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, + }, vm_latest::TransactionVmExt, }; +use zksync_system_constants::KNOWN_CODES_STORAGE_ADDRESS; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, - event::extract_bytecodes_marked_as_known, + ethabi, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, - vm_trace::Call, - L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, VmEvent, H256, + L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, H256, }; -use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; +use zksync_utils::bytecode::hash_bytecode; use crate::metrics::KEEPER_METRICS; +/// Extracts all bytecodes marked as known on the system contracts. +fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { + static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { + ethabi::long_signature( + "MarkedAsKnown", + &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], + ) + }); + + all_generated_events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. + event.address == KNOWN_CODES_STORAGE_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE + }) + .map(|event| event.indexed_topics[1]) + .collect() +} + #[derive(Debug, Clone, PartialEq)] pub struct L2BlockUpdates { pub executed_transactions: Vec, @@ -26,7 +50,7 @@ pub struct L2BlockUpdates { pub new_factory_deps: HashMap>, /// How much L1 gas will it take to submit this block? 
pub l1_gas_count: BlockGasCount, - pub block_execution_metrics: ExecutionMetrics, + pub block_execution_metrics: VmExecutionMetrics, pub txs_encoding_size: usize, pub payload_encoding_size: usize, pub timestamp: u64, @@ -52,7 +76,7 @@ impl L2BlockUpdates { system_l2_to_l1_logs: vec![], new_factory_deps: HashMap::new(), l1_gas_count: BlockGasCount::default(), - block_execution_metrics: ExecutionMetrics::default(), + block_execution_metrics: VmExecutionMetrics::default(), txs_encoding_size: 0, payload_encoding_size: 0, timestamp, @@ -67,7 +91,7 @@ impl L2BlockUpdates { &mut self, result: VmExecutionResultAndLogs, l1_gas_count: BlockGasCount, - execution_metrics: ExecutionMetrics, + execution_metrics: VmExecutionMetrics, ) { self.events.extend(result.logs.events); self.storage_logs.extend(result.logs.storage_logs); @@ -85,7 +109,7 @@ impl L2BlockUpdates { tx: Transaction, tx_execution_result: VmExecutionResultAndLogs, tx_l1_gas_this_tx: BlockGasCount, - execution_metrics: ExecutionMetrics, + execution_metrics: VmExecutionMetrics, compressed_bytecodes: Vec, call_traces: Vec, ) { @@ -204,7 +228,7 @@ mod tests { tx, create_execution_result([]), BlockGasCount::default(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], vec![], ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index b1310800d8a..2fad56a9929 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,17 +1,15 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_multivm::{ interface::{ - storage::StorageViewCache, FinishedL1Batch, L1BatchEnv, SystemEnv, VmExecutionResultAndLogs, + storage::StorageViewCache, Call, CompressedBytecodeInfo, FinishedL1Batch, L1BatchEnv, + SystemEnv, VmExecutionMetrics, VmExecutionResultAndLogs, }, - utils::get_batch_base_fee, + utils::{get_batch_base_fee, StorageWritesDeduplicator}, }; use zksync_types::{ - block::BlockGasCount, fee_model::BatchFeeInput, - storage_writes_deduplicator::StorageWritesDeduplicator, - tx::tx_execution_info::ExecutionMetrics, vm_trace::Call, Address, L1BatchNumber, L2BlockNumber, + block::BlockGasCount, fee_model::BatchFeeInput, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, }; -use zksync_utils::bytecode::CompressedBytecodeInfo; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; use super::{ @@ -112,7 +110,7 @@ impl UpdatesManager { tx_execution_result: VmExecutionResultAndLogs, compressed_bytecodes: Vec, tx_l1_gas_this_tx: BlockGasCount, - execution_metrics: ExecutionMetrics, + execution_metrics: VmExecutionMetrics, call_traces: Vec, ) { let latency = UPDATES_MANAGER_METRICS @@ -188,7 +186,7 @@ impl UpdatesManager { self.l1_batch.l1_gas_count + self.l2_block.l1_gas_count } - pub(crate) fn pending_execution_metrics(&self) -> ExecutionMetrics { + pub(crate) fn pending_execution_metrics(&self) -> VmExecutionMetrics { self.l1_batch.block_execution_metrics + self.l2_block.block_execution_metrics } @@ -236,7 +234,7 @@ mod tests { create_execution_result([]), vec![], new_block_gas_count(), - ExecutionMetrics::default(), + VmExecutionMetrics::default(), vec![], ); diff --git a/core/node/state_keeper/src/utils.rs b/core/node/state_keeper/src/utils.rs index c99bbf51945..4240ad30625 100644 --- a/core/node/state_keeper/src/utils.rs +++ b/core/node/state_keeper/src/utils.rs @@ -1,9 +1,9 @@ +use zksync_multivm::interface::{DeduplicatedWritesMetrics, VmExecutionMetrics}; use zksync_types::{ - 
aggregated_operations::AggregatedActionType, - block::BlockGasCount, - tx::{tx_execution_info::DeduplicatedWritesMetrics, ExecutionMetrics}, - ExecuteTransactionCommon, ProtocolVersionId, Transaction, + aggregated_operations::AggregatedActionType, block::BlockGasCount, ExecuteTransactionCommon, + ProtocolVersionId, Transaction, }; + // TODO(QIT-32): Remove constants(except `L1_OPERATION_EXECUTE_COST`) and logic that use them const L1_BATCH_COMMIT_BASE_COST: u32 = 31_000; const L1_BATCH_PROVE_BASE_COST: u32 = 7_000; @@ -36,7 +36,7 @@ fn base_tx_cost(tx: &Transaction, op: AggregatedActionType) -> u32 { } } -fn additional_pubdata_commit_cost(execution_metrics: &ExecutionMetrics) -> u32 { +fn additional_pubdata_commit_cost(execution_metrics: &VmExecutionMetrics) -> u32 { (execution_metrics.size() as u32) * GAS_PER_BYTE } @@ -57,7 +57,7 @@ pub(super) fn new_block_gas_count() -> BlockGasCount { pub(super) fn gas_count_from_tx_and_metrics( tx: &Transaction, - execution_metrics: &ExecutionMetrics, + execution_metrics: &VmExecutionMetrics, ) -> BlockGasCount { let commit = base_tx_cost(tx, AggregatedActionType::Commit) + additional_pubdata_commit_cost(execution_metrics); @@ -68,7 +68,7 @@ pub(super) fn gas_count_from_tx_and_metrics( } } -pub(super) fn gas_count_from_metrics(execution_metrics: &ExecutionMetrics) -> BlockGasCount { +pub(super) fn gas_count_from_metrics(execution_metrics: &VmExecutionMetrics) -> BlockGasCount { BlockGasCount { commit: additional_pubdata_commit_cost(execution_metrics), prove: 0, diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 614d64805b9..acb65bf1634 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -5,7 +5,10 @@ use std::collections::HashMap; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, Core, CoreDal}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; -use zksync_multivm::utils::get_max_gas_per_pubdata_byte; +use zksync_multivm::{ + interface::{TransactionExecutionResult, TxExecutionStatus, VmExecutionMetrics}, + utils::get_max_gas_per_pubdata_byte, +}; use zksync_node_genesis::GenesisParams; use zksync_system_constants::{get_intrinsic_constants, ZKPORTER_IS_AVAILABLE}; use zksync_types::{ @@ -21,7 +24,6 @@ use zksync_types::{ protocol_version::ProtocolSemanticVersion, snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, transaction_request::PaymasterParams, - tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersion, ProtocolVersionId, StorageLog, H256, U256, }; @@ -42,6 +44,7 @@ pub fn create_l2_block(number: u32) -> L2BlockHeader { protocol_version: Some(ProtocolVersionId::latest()), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), } } @@ -157,7 +160,7 @@ pub fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { TransactionExecutionResult { hash: transaction.hash(), transaction: transaction.into(), - execution_info: ExecutionMetrics::default(), + execution_info: VmExecutionMetrics::default(), execution_status: TxExecutionStatus::Success, refunded_gas: 0, operator_suggested_refund: 0, @@ -207,6 +210,7 @@ impl Snapshot { protocol_version: Some(genesis_params.minor_protocol_version()), virtual_blocks: 1, gas_limit: 0, + logs_bloom: Default::default(), }; Snapshot { l1_batch, diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index b7518903cae..e351b09ad2b 
100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -11,8 +11,8 @@ use tokio::sync::{watch, RwLock}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_state::{ - AsyncCatchupTask, BatchDiff, OwnedPostgresStorage, OwnedStorage, PgOrRocksdbStorage, - RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, + AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, + RocksdbWithMemory, }; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; use zksync_vm_utils::storage::L1BatchParamsProvider; @@ -140,12 +140,12 @@ impl StorageLoader for VmRunnerStorage { ) .await?; - return Ok(batch_data.map(|data| { - ( - data, - OwnedPostgresStorage::new(self.pool.clone(), l1_batch_number - 1).into(), - ) - })); + return Ok(if let Some(data) = batch_data { + let storage = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; + Some((data, storage)) + } else { + None + }); }; match state.storage.get(&l1_batch_number) { @@ -166,11 +166,11 @@ impl StorageLoader for VmRunnerStorage { .filter(|(&num, _)| num < l1_batch_number) .map(|(_, data)| data.diff.clone()) .collect::>(); - let storage = PgOrRocksdbStorage::RocksdbWithMemory(RocksdbWithMemory { + let storage = OwnedStorage::RocksdbWithMemory(RocksdbWithMemory { rocksdb: rocksdb.clone(), batch_diffs, }); - Ok(Some((data, storage.into()))) + Ok(Some((data, storage))) } } } diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 4cb2d26f6bd..dd14e4dd1b0 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -5,16 +5,17 @@ use rand::{prelude::SliceRandom, Rng}; use tokio::sync::RwLock; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_node_test_utils::{ create_l1_batch_metadata, create_l2_block, execute_l2_transaction, l1_batch_metadata_to_commitment_artifacts, }; -use zksync_state::{OwnedPostgresStorage, OwnedStorage}; +use zksync_state::OwnedStorage; use zksync_state_keeper::{StateKeeperOutputHandler, UpdatesManager}; use zksync_test_account::Account; use zksync_types::{ block::{BlockGasCount, L1BatchHeader, L2BlockHasher}, - fee::{Fee, TransactionExecutionMetrics}, + fee::Fee, get_intrinsic_constants, l2::L2Tx, utils::storage_key_for_standard_token_balance, @@ -57,8 +58,8 @@ impl StorageLoader for PostgresLoader { return Ok(None); }; - let storage = OwnedPostgresStorage::new(self.0.clone(), l1_batch_number - 1); - Ok(Some((data, storage.into()))) + let storage = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; + Ok(Some((data, storage))) } } diff --git a/core/node/vm_runner/src/tests/storage.rs b/core/node/vm_runner/src/tests/storage.rs index 1dfb5a60135..f6f7a2ba9e6 100644 --- a/core/node/vm_runner/src/tests/storage.rs +++ b/core/node/vm_runner/src/tests/storage.rs @@ -301,12 +301,8 @@ async fn access_vm_runner_storage() -> anyhow::Result<()> { .unwrap(); let mut pg_storage = PostgresStorage::new(rt_handle.clone(), conn, last_l2_block_number, true); - let (_, vm_storage) = rt_handle + let (_, mut vm_storage) = rt_handle .block_on(vm_runner_storage.load_batch_eventually(L1BatchNumber(i + 1)))?; - let mut vm_storage = match vm_storage { - OwnedStorage::Lending(ref storage) => rt_handle.block_on(storage.borrow()).unwrap(), - 
OwnedStorage::Static(storage) => storage, - }; // Check that both storages have identical key-value pairs written in them for storage_log in &storage_logs { diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index 6b48387f90d..8f6ff12224b 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -97,7 +97,6 @@ async function loadTestEnvironmentFromFile(chain: string): Promise { if (!token) { token = tokens[0]; } - const weth = tokens.find((token: { symbol: string }) => token.symbol == 'WETH')!; const baseToken = tokens.find((token: { address: string }) => zksync.utils.isAddressEq(token.address, baseTokenAddress) )!; @@ -225,12 +210,6 @@ export async function loadTestEnvironmentFromEnv(): Promise { ethers.getDefaultProvider(l1NodeUrl) ).l2TokenAddress(token.address); - const l2WethAddress = await new zksync.Wallet( - mainWalletPK, - l2Provider, - ethers.getDefaultProvider(l1NodeUrl) - ).l2TokenAddress(weth.address); - const baseTokenAddressL2 = L2_BASE_TOKEN_ADDRESS; const l2ChainId = BigInt(process.env.CHAIN_ETH_ZKSYNC_NETWORK_ID!); // If the `CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE` is not set, the default value is `Rollup`. @@ -280,13 +259,6 @@ export async function loadTestEnvironmentFromEnv(): Promise { l1Address: token.address, l2Address: l2TokenAddress }, - wethToken: { - name: weth.name, - symbol: weth.symbol, - decimals: weth.decimals, - l1Address: weth.address, - l2Address: l2WethAddress - }, baseToken: { name: baseToken?.name || token.name, symbol: baseToken?.symbol || token.symbol, diff --git a/core/tests/ts-integration/src/types.ts b/core/tests/ts-integration/src/types.ts index 058dcd4929d..415a8519a1b 100644 --- a/core/tests/ts-integration/src/types.ts +++ b/core/tests/ts-integration/src/types.ts @@ -85,10 +85,6 @@ export interface TestEnvironment { * Description of the "main" ERC20 token used in the tests. */ erc20Token: Token; - /** - * Description of the WETH token used in the tests. - */ - wethToken: Token; /** * Description of the "base" ERC20 token used in the tests. */ diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index 569321d548c..c6d0ae40a43 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -44,8 +44,14 @@ describe('web3 API compatibility tests', () => { const blockHash = (await alice.provider.getBlock(blockNumber)).hash!; const blockWithTxsByNumber = await alice.provider.getBlock(blockNumber, true); expect(blockWithTxsByNumber.gasLimit).toBeGreaterThan(0n); - let sumTxGasUsed = 0n; + // `ethers.Block` doesn't include `logsBloom` for some reason. 
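The assertions added to `web3.test.ts` below check the invariant that a block's `logsBloom` is the bytewise OR of the `logsBloom` fields of all its transaction receipts (a 256-byte filter, hence the 514-character hex string including the `0x` prefix). A small Rust sketch of that aggregation, mirroring the loop in the test:

```rust
/// A block's logs bloom is the bytewise OR of the blooms of all its receipts.
/// (Sketch of the invariant checked by the test; 256 bytes = 512 hex chars + "0x".)
fn aggregate_blooms(receipt_blooms: &[[u8; 256]]) -> [u8; 256] {
    let mut block_bloom = [0u8; 256];
    for bloom in receipt_blooms {
        for (acc, byte) in block_bloom.iter_mut().zip(bloom.iter()) {
            *acc |= *byte;
        }
    }
    block_bloom
}
```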
+ const blockByNumberFull = await alice.provider.send('eth_getBlockByNumber', [blockNumberHex, false]); + expect(blockByNumberFull.logsBloom).toEqual(expect.stringMatching(HEX_VALUE_REGEX)); + expect(blockByNumberFull.logsBloom.length).toEqual(514); + expect(blockByNumberFull.logsBloom != ethers.zeroPadValue('0x00', 256)).toBeTruthy(); + + let sumTxGasUsed = 0n; for (const tx of blockWithTxsByNumber.prefetchedTransactions) { const receipt = await alice.provider.getTransactionReceipt(tx.hash); sumTxGasUsed = sumTxGasUsed + receipt!.gasUsed; @@ -53,12 +59,21 @@ describe('web3 API compatibility tests', () => { expect(blockWithTxsByNumber.gasUsed).toBeGreaterThanOrEqual(sumTxGasUsed); let expectedReceipts = []; + let expectedBloom = blockByNumberFull.logsBloom.toLowerCase(); + let blockBloomFromReceipts = new Uint8Array(256); for (const tx of blockWithTxsByNumber.prefetchedTransactions) { const receipt = await alice.provider.send('eth_getTransactionReceipt', [tx.hash]); expectedReceipts.push(receipt); + + let receiptBloom = ethers.getBytes(receipt.logsBloom); + for (let i = 0; i < blockBloomFromReceipts.length; i++) { + blockBloomFromReceipts[i] = blockBloomFromReceipts[i] | receiptBloom[i]; + } } + expect(ethers.hexlify(blockBloomFromReceipts)).toEqual(expectedBloom); + let receipts = await alice.provider.send('eth_getBlockReceipts', [blockNumberHex]); expect(receipts).toEqual(expectedReceipts); diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index efbc08a957a..27218d79aaf 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -6,8 +6,11 @@ license.workspace = true publish = false [dependencies] -zksync_vm_benchmark_harness.workspace = true +zksync_types.workspace = true zksync_vlog.workspace = true +zksync_vm_benchmark_harness.workspace = true + +rand.workspace = true vise.workspace = true tokio.workspace = true diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs index 5becccfab80..9e12fc25f54 100644 --- a/core/tests/vm-benchmark/benches/criterion.rs +++ b/core/tests/vm-benchmark/benches/criterion.rs @@ -1,7 +1,24 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use std::time::Duration; + +use criterion::{ + black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, + Criterion, +}; +use zksync_types::Transaction; +use zksync_vm_benchmark_harness::{ + cut_to_allowed_bytecode_size, get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, + get_load_test_tx, get_realistic_load_test_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, + Legacy, LoadTestParams, +}; + +const SAMPLE_SIZE: usize = 20; + +fn benches_in_folder(c: &mut Criterion) { + let mut group = c.benchmark_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); -fn benches_in_folder(c: &mut Criterion) { for path in std::fs::read_dir("deployment_benchmarks").unwrap() { let path = path.unwrap().path(); @@ -9,12 +26,73 @@ fn benches_in_folder(c: &mut Criterion) { let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); let tx = get_deploy_tx(code); - - c.bench_function(path.file_name().unwrap().to_str().unwrap(), |b| { - b.iter(|| BenchmarkingVm::new().run_transaction(black_box(&tx))) + let file_name = path.file_name().unwrap().to_str().unwrap(); + let full_suffix = if FULL { "/full" } 
else { "" }; + let bench_name = format!("{file_name}{full_suffix}"); + group.bench_function(bench_name, |bencher| { + if FULL { + // Include VM initialization / drop into the measured time + bencher.iter(|| BenchmarkingVm::::default().run_transaction(black_box(&tx))); + } else { + bencher.iter_batched( + BenchmarkingVm::::default, + |mut vm| { + let result = vm.run_transaction(black_box(&tx)); + (vm, result) + }, + BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one + ); + } }); } } -criterion_group!(benches, benches_in_folder); +fn bench_load_test(c: &mut Criterion) { + let mut group = c.benchmark_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + // Nonce 0 is used for the deployment transaction + let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); + bench_load_test_transaction::(&mut group, "load_test", &tx); + + let tx = get_realistic_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_realistic", &tx); + + let tx = get_heavy_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_heavy", &tx); +} + +fn bench_load_test_transaction( + group: &mut BenchmarkGroup<'_, WallTime>, + name: &str, + tx: &Transaction, +) { + group.bench_function(name, |bencher| { + bencher.iter_batched( + || { + let mut vm = BenchmarkingVm::::default(); + vm.run_transaction(&get_load_test_deploy_tx()); + vm + }, + |mut vm| { + let result = vm.run_transaction(black_box(tx)); + assert!(!result.result.is_failed(), "{:?}", result.result); + (vm, result) + }, + BatchSize::LargeInput, + ); + }); +} + +criterion_group!( + benches, + benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + bench_load_test::, + bench_load_test:: +); criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/fill_bootloader.rs index fac422c8237..13fa1df0b2f 100644 --- a/core/tests/vm-benchmark/benches/fill_bootloader.rs +++ b/core/tests/vm-benchmark/benches/fill_bootloader.rs @@ -1,23 +1,195 @@ -use std::time::Instant; +//! Benchmarks executing entire batches of transactions with varying size (from 1 to 5,000). +//! +//! - `fill_bootloader_full/*` benches emulate the entire transaction lifecycle including taking a snapshot +//! before a transaction and rolling back to it on halt. They also include VM initialization and drop. +//! In contrast, `fill_bootloader/*` benches only cover transaction execution. +//! - `deploy_simple_contract` benches deploy a simple contract in each transaction. All transactions succeed. +//! - `transfer` benches perform the base token transfer in each transaction. All transactions succeed. +//! - `transfer_with_invalid_nonce` benches are similar to `transfer`, but each transaction with a probability +//! `TX_FAILURE_PROBABILITY` has a previously used nonce and thus halts during validation. +//! - `load_test(|_realistic|_heavy)` execute the load test contract (a mixture of storage reads, writes, emitting events, +//! recursive calls, hashing and deploying new contracts). These 3 categories differ in how many operations of each kind +//! are performed in each transaction. Beware that the first executed transaction is load test contract deployment, +//! which skews results for small-size batches. 
-use criterion::black_box; +use std::{iter, time::Duration}; + +use criterion::{ + black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, + BenchmarkId, Criterion, Throughput, +}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use zksync_types::Transaction; use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, BenchmarkingVm, + cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, + get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, + BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, }; -fn main() { - let test_contract = - std::fs::read("deployment_benchmarks/event_spam").expect("failed to read file"); +/// Gas limit for deployment transactions. +const DEPLOY_GAS_LIMIT: u32 = 30_000_000; +/// Tested numbers of transactions in a batch. +const TXS_IN_BATCH: &[usize] = &[1, 10, 50, 100, 200, 500, 1_000, 2_000, 5_000]; + +/// RNG seed used e.g. to randomize failing transactions. +const RNG_SEED: u64 = 123; +/// Probability for a transaction to fail in the `transfer_with_invalid_nonce` benchmarks. +const TX_FAILURE_PROBABILITY: f64 = 0.2; + +fn bench_vm( + vm: &mut BenchmarkingVm, + txs: &[Transaction], + expected_failures: &[bool], +) { + for (i, tx) in txs.iter().enumerate() { + let result = if FULL { + vm.run_transaction_full(black_box(tx)) + } else { + vm.run_transaction(black_box(tx)) + }; + let result = &result.result; + let expecting_failure = expected_failures.get(i).copied().unwrap_or(false); + assert_eq!( + result.is_failed(), + expecting_failure, + "{result:?} on tx #{i}" + ); + black_box(result); + } +} + +fn run_vm_expecting_failures( + group: &mut BenchmarkGroup<'_, WallTime>, + name: &str, + txs: &[Transaction], + expected_failures: &[bool], +) { + for txs_in_batch in TXS_IN_BATCH { + if *txs_in_batch > txs.len() { + break; + } + + group.throughput(Throughput::Elements(*txs_in_batch as u64)); + group.bench_with_input( + BenchmarkId::new(name, txs_in_batch), + txs_in_batch, + |bencher, &txs_in_batch| { + if FULL { + // Include VM initialization / drop into the measured time + bencher.iter(|| { + let mut vm = BenchmarkingVm::::default(); + bench_vm::<_, true>(&mut vm, &txs[..txs_in_batch], expected_failures); + }); + } else { + bencher.iter_batched( + BenchmarkingVm::::default, + |mut vm| { + bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); + vm + }, + BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one + ); + } + }, + ); + } +} +fn run_vm( + group: &mut BenchmarkGroup<'_, WallTime>, + name: &str, + txs: &[Transaction], +) { + run_vm_expecting_failures::(group, name, txs, &[]); +} + +fn bench_fill_bootloader(c: &mut Criterion) { + let is_test_mode = !std::env::args().any(|arg| arg == "--bench"); + let txs_in_batch = if is_test_mode { + &TXS_IN_BATCH[..3] // Reduce the number of transactions in a batch so that tests don't take long + } else { + TXS_IN_BATCH + }; + + let mut group = c.benchmark_group(if FULL { + format!("fill_bootloader_full{}", VM::LABEL.as_suffix()) + } else { + format!("fill_bootloader{}", VM::LABEL.as_suffix()) + }); + group + .sample_size(10) + .measurement_time(Duration::from_secs(10)); + + // Deploying simple contract + let test_contract = + std::fs::read("deployment_benchmarks/deploy_simple_contract").expect("failed to read file"); let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = 
get_deploy_tx_with_gas_limit(code, 1000); + let max_txs = *txs_in_batch.last().unwrap() as u32; + let txs: Vec<_> = (0..max_txs) + .map(|nonce| get_deploy_tx_with_gas_limit(code, DEPLOY_GAS_LIMIT, nonce)) + .collect(); + run_vm::(&mut group, "deploy_simple_contract", &txs); + drop(txs); + + // Load test with various parameters + let txs = + (1..=max_txs).map(|nonce| get_load_test_tx(nonce, 10_000_000, LoadTestParams::default())); + let txs: Vec<_> = iter::once(get_load_test_deploy_tx()).chain(txs).collect(); + run_vm::(&mut group, "load_test", &txs); + drop(txs); - let start = Instant::now(); + let txs = (1..=max_txs).map(get_realistic_load_test_tx); + let txs: Vec<_> = iter::once(get_load_test_deploy_tx()).chain(txs).collect(); + run_vm::(&mut group, "load_test_realistic", &txs); + drop(txs); - let mut vm = BenchmarkingVm::new(); - for _ in 0..1000 { - vm.run_transaction(black_box(&tx)); + let txs = (1..=max_txs).map(get_heavy_load_test_tx); + let txs: Vec<_> = iter::once(get_load_test_deploy_tx()).chain(txs).collect(); + run_vm::(&mut group, "load_test_heavy", &txs); + drop(txs); + + // Base token transfers + let txs: Vec<_> = (0..max_txs).map(get_transfer_tx).collect(); + run_vm::(&mut group, "transfer", &txs); + + // Halted transactions produced by the following benchmarks *must* be rolled back, + // otherwise the bootloader will process following transactions incorrectly. + if !FULL { + return; } - println!("{:?}", start.elapsed()); + let mut rng = StdRng::seed_from_u64(RNG_SEED); + + let mut txs_with_failures = Vec::with_capacity(txs.len()); + let mut expected_failures = Vec::with_capacity(txs.len()); + txs_with_failures.push(txs[0].clone()); + expected_failures.push(false); + let mut successful_txs = &txs[1..]; + for _ in 1..txs.len() { + let (tx, should_fail) = if rng.gen_bool(TX_FAILURE_PROBABILITY) { + // Since we add the transaction with nonce 0 unconditionally as the first tx to execute, + // all transactions generated here should halt during validation. + (get_transfer_tx(0), true) + } else { + let (tx, remaining_txs) = successful_txs.split_first().unwrap(); + successful_txs = remaining_txs; + (tx.clone(), false) + }; + txs_with_failures.push(tx); + expected_failures.push(should_fail); + } + run_vm_expecting_failures::( + &mut group, + "transfer_with_invalid_nonce", + &txs_with_failures, + &expected_failures, + ); } + +criterion_group!( + benches, + bench_fill_bootloader::, + bench_fill_bootloader::, + bench_fill_bootloader:: +); +criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index f0ba43f2685..2837a2345a5 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -1,33 +1,40 @@ use iai::black_box; -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; +use zksync_vm_benchmark_harness::{ + cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, + Legacy, +}; -fn run_bytecode(path: &str) { +fn run_bytecode(path: &str) { let test_contract = std::fs::read(path).expect("failed to read file"); let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); let tx = get_deploy_tx(code); - black_box(BenchmarkingVm::new().run_transaction(&tx)); + black_box(BenchmarkingVm::::default().run_transaction(&tx)); } macro_rules! 
make_functions_and_main { - ($($file:ident,)+) => { + ($($file:ident => $legacy_name:ident,)+) => { $( - fn $file() { - run_bytecode(concat!("deployment_benchmarks/", stringify!($file))) - } + fn $file() { + run_bytecode::<Fast>(concat!("deployment_benchmarks/", stringify!($file))); + } + + fn $legacy_name() { + run_bytecode::<Legacy>(concat!("deployment_benchmarks/", stringify!($file))); + } )+ - iai::main!($($file,)+); + iai::main!($($file, $legacy_name,)+); }; } make_functions_and_main!( - access_memory, - call_far, - decode_shl_sub, - deploy_simple_contract, - finish_eventful_frames, - write_and_decode, - event_spam, - slot_hash_collision, + access_memory => access_memory_legacy, + call_far => call_far_legacy, + decode_shl_sub => decode_shl_sub_legacy, + deploy_simple_contract => deploy_simple_contract_legacy, + finish_eventful_frames => finish_eventful_frames_legacy, + write_and_decode => write_and_decode_legacy, + event_spam => event_spam_legacy, + slot_hash_collision => slot_hash_collision_legacy, ); diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml index acd5f37cbc7..a24d3fa1294 100644 --- a/core/tests/vm-benchmark/harness/Cargo.toml +++ b/core/tests/vm-benchmark/harness/Cargo.toml @@ -14,3 +14,6 @@ zksync_system_constants.workspace = true zksync_contracts.workspace = true zk_evm.workspace = true once_cell.workspace = true + +[dev-dependencies] +assert_matches.workspace = true diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs b/core/tests/vm-benchmark/harness/src/instruction_counter.rs index 28e6d151965..48b1e3527ad 100644 --- a/core/tests/vm-benchmark/harness/src/instruction_counter.rs +++ b/core/tests/vm-benchmark/harness/src/instruction_counter.rs @@ -13,7 +13,7 @@ pub struct InstructionCounter { /// A tracer that counts the number of instructions executed by the VM.
impl InstructionCounter { - #[allow(dead_code)] // FIXME + #[allow(dead_code)] // FIXME: re-enable instruction counting once new tracers are merged pub fn new(output: Rc>) -> Self { Self { count: 0, output } } diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index f206728d40b..6460d25a8e8 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -1,15 +1,17 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; -use zksync_contracts::{deployer_contract, BaseSystemContracts}; +pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; +use zksync_contracts::{deployer_contract, BaseSystemContracts, TestContract}; use zksync_multivm::{ interface::{ - storage::InMemoryStorage, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, + storage::{InMemoryStorage, StorageView}, + ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::get_max_gas_per_pubdata_byte, - vm_fast::Vm, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + vm_fast, vm_latest, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled}, }; use zksync_types::{ block::L2BlockHasher, @@ -18,7 +20,7 @@ use zksync_types::{ fee_model::BatchFeeInput, helpers::unix_timestamp_ms, l2::L2Tx, - utils::storage_key_for_eth_balance, + utils::{deployed_address_create, storage_key_for_eth_balance}, Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; @@ -40,18 +42,24 @@ pub fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { Some(&bytes[..32 * words]) } +const LOAD_TEST_MAX_READS: usize = 100; + +static LOAD_TEST_CONTRACT_ADDRESS: Lazy
= + Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); + static STORAGE: Lazy = Lazy::new(|| { let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); - // Give `PRIVATE_KEY` some money + let balance = U256::from(10u32).pow(U256::from(32)); //10^32 wei let key = storage_key_for_eth_balance(&PRIVATE_KEY.address()); - storage.set_value(key, zksync_utils::u256_to_h256(U256([0, 0, 1, 0]))); - + storage.set_value(key, zksync_utils::u256_to_h256(balance)); storage }); static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); +static LOAD_TEST_CONTRACT: Lazy = Lazy::new(zksync_contracts::get_loadnext_contract); + static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { deployer_contract() .function("create") @@ -62,15 +70,92 @@ static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { static PRIVATE_KEY: Lazy = Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); -pub struct BenchmarkingVm(Vm<&'static InMemoryStorage>); +/// VM label used to name `criterion` benchmarks. +#[derive(Debug, Clone, Copy)] +pub enum VmLabel { + Fast, + Legacy, +} -impl BenchmarkingVm { - #[allow(clippy::new_without_default)] - pub fn new() -> Self { - let timestamp = unix_timestamp_ms(); +impl VmLabel { + /// Non-empty name for `criterion` benchmark naming. + pub const fn as_str(self) -> &'static str { + match self { + Self::Fast => "fast", + Self::Legacy => "legacy", + } + } + + /// Optional prefix for `criterion` benchmark naming (including a starting `/`). + pub const fn as_suffix(self) -> &'static str { + match self { + Self::Fast => "", + Self::Legacy => "/legacy", + } + } +} + +/// Factory for VMs used in benchmarking. +pub trait BenchmarkingVmFactory { + /// VM label used to name `criterion` benchmarks. + const LABEL: VmLabel; + + /// Type of the VM instance created by this factory. + type Instance: VmInterfaceHistoryEnabled; + + /// Creates a VM instance. + fn create( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: &'static InMemoryStorage, + ) -> Self::Instance; +} - Self(Vm::new( - zksync_multivm::interface::L1BatchEnv { +/// Factory for the new / fast VM. +#[derive(Debug)] +pub struct Fast(()); + +impl BenchmarkingVmFactory for Fast { + const LABEL: VmLabel = VmLabel::Fast; + + type Instance = vm_fast::Vm<&'static InMemoryStorage>; + + fn create( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: &'static InMemoryStorage, + ) -> Self::Instance { + vm_fast::Vm::new(batch_env, system_env, storage) + } +} + +/// Factory for the legacy VM (latest version). 
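With the `Fast` factory above and the `Legacy` one defined just below, the `BenchmarkingVmFactory` trait lets a single bench body be instantiated for either backend through a type parameter. A short usage sketch, assuming the harness re-exports shown in this diff (the helper name `run_once` is illustrative):

```rust
use zksync_multivm::interface::VmExecutionResultAndLogs;
use zksync_types::Transaction;
use zksync_vm_benchmark_harness::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy};

/// Runs one transaction on the VM backend selected at compile time via `VM`.
fn run_once<VM: BenchmarkingVmFactory>(tx: &Transaction) -> VmExecutionResultAndLogs {
    let mut vm = BenchmarkingVm::<VM>::default();
    vm.run_transaction(tx)
}

// Call sites pick the backend explicitly:
// let fast_result = run_once::<Fast>(&tx);
// let legacy_result = run_once::<Legacy>(&tx);
```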
+#[derive(Debug)] +pub struct Legacy; + +impl BenchmarkingVmFactory for Legacy { + const LABEL: VmLabel = VmLabel::Legacy; + + type Instance = vm_latest::Vm, HistoryEnabled>; + + fn create( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: &'static InMemoryStorage, + ) -> Self::Instance { + let storage = StorageView::new(storage).to_rc_ptr(); + vm_latest::Vm::new(batch_env, system_env, storage) + } +} + +#[derive(Debug)] +pub struct BenchmarkingVm(VM::Instance); + +impl Default for BenchmarkingVm { + fn default() -> Self { + let timestamp = unix_timestamp_ms(); + Self(VM::create( + L1BatchEnv { previous_batch_hash: None, number: L1BatchNumber(1), timestamp, @@ -87,7 +172,7 @@ impl BenchmarkingVm { max_virtual_blocks_to_create: 100, }, }, - zksync_multivm::interface::SystemEnv { + SystemEnv { zk_porter_available: false, version: ProtocolVersionId::latest(), base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), @@ -96,33 +181,63 @@ impl BenchmarkingVm { default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), }, - &*STORAGE, + &STORAGE, )) } +} +impl BenchmarkingVm { pub fn run_transaction(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { self.0.push_transaction(tx.clone()); self.0.execute(VmExecutionMode::OneTx) } + pub fn run_transaction_full(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { + self.0.make_snapshot(); + let (compression_result, tx_result) = self.0.inspect_transaction_with_bytecode_compression( + Default::default(), + tx.clone(), + true, + ); + compression_result.expect("compressing bytecodes failed"); + + if matches!(tx_result.result, ExecutionResult::Halt { .. }) { + self.0.rollback_to_the_latest_snapshot(); + } else { + self.0.pop_snapshot_no_rollback(); + } + tx_result + } + pub fn instruction_count(&mut self, tx: &Transaction) -> usize { self.0.push_transaction(tx.clone()); - let count = Rc::new(RefCell::new(0)); + self.0.inspect(Default::default(), VmExecutionMode::OneTx); // FIXME: re-enable instruction counting once new tracers are merged + count.take() + } +} - self.0.inspect((), VmExecutionMode::OneTx); +impl BenchmarkingVm { + pub fn new() -> Self { + Self::default() + } +} - count.take() +impl BenchmarkingVm { + pub fn legacy() -> Self { + Self::default() } } pub fn get_deploy_tx(code: &[u8]) -> Transaction { - get_deploy_tx_with_gas_limit(code, 30_000_000) + get_deploy_tx_with_gas_limit(code, 30_000_000, 0) } -pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction { +pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { + let mut salt = vec![0_u8; 32]; + salt[28..32].copy_from_slice(&nonce.to_be_bytes()); let params = [ - Token::FixedBytes(vec![0u8; 32]), + Token::FixedBytes(salt), Token::FixedBytes(hash_bytecode(code).0.to_vec()), Token::Bytes([].to_vec()), ]; @@ -135,15 +250,8 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction let mut signed = L2Tx::new_signed( CONTRACT_DEPLOYER_ADDRESS, calldata, - Nonce(0), - Fee { - gas_limit: U256::from(gas_limit), - max_fee_per_gas: U256::from(250_000_000), - max_priority_fee_per_gas: U256::from(0), - gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( - ProtocolVersionId::latest().into(), - )), - }, + Nonce(nonce), + tx_fee(gas_limit), U256::zero(), L2ChainId::from(270), &PRIVATE_KEY, @@ -153,13 +261,144 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32) -> Transaction .expect("should create a signed execute 
transaction"); signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} +pub fn get_transfer_tx(nonce: u32) -> Transaction { + let mut signed = L2Tx::new_signed( + PRIVATE_KEY.address(), + vec![], // calldata + Nonce(nonce), + tx_fee(1_000_000), + 1_000_000_000.into(), // value + L2ChainId::from(270), + &PRIVATE_KEY, + vec![], // factory deps + Default::default(), // paymaster params + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); signed.into() } +pub fn get_load_test_deploy_tx() -> Transaction { + let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; + let params = [ + Token::FixedBytes(vec![0_u8; 32]), + Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), + Token::Bytes(encode(&calldata)), + ]; + let create_calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(¶ms)) + .collect(); + + let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); + factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + create_calldata, + Nonce(0), + tx_fee(100_000_000), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + factory_deps, + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> Transaction { + assert!( + params.reads <= LOAD_TEST_MAX_READS, + "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" + ); + + let execute_function = LOAD_TEST_CONTRACT + .contract + .function("execute") + .expect("no `execute` function in load test contract"); + let calldata = execute_function + .encode_input(&vec![ + Token::Uint(U256::from(params.reads)), + Token::Uint(U256::from(params.writes)), + Token::Uint(U256::from(params.hashes)), + Token::Uint(U256::from(params.events)), + Token::Uint(U256::from(params.recursive_calls)), + Token::Uint(U256::from(params.deploys)), + ]) + .expect("cannot encode `execute` inputs"); + + let mut signed = L2Tx::new_signed( + *LOAD_TEST_CONTRACT_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + LOAD_TEST_CONTRACT.factory_deps.clone(), + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 30, + writes: 2, + events: 5, + hashes: 10, + recursive_calls: 0, + deploys: 0, + }, + ) +} + +pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 100, + writes: 5, + events: 20, + hashes: 100, + recursive_calls: 20, + deploys: 5, + }, + ) +} + #[cfg(test)] mod tests { + use assert_matches::assert_matches; use zksync_contracts::read_bytecode; + use zksync_multivm::interface::ExecutionResult; use crate::*; @@ -171,9 +410,44 @@ mod tests { let mut vm = BenchmarkingVm::new(); 
let res = vm.run_transaction(&get_deploy_tx(&test_contract)); - assert!(matches!( - res.result, - zksync_multivm::interface::ExecutionResult::Success { .. } - )); + assert_matches!(res.result, ExecutionResult::Success { .. }); + } + + #[test] + fn can_transfer() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_transfer_tx(0)); + assert_matches!(res.result, ExecutionResult::Success { .. }); + } + + #[test] + fn can_load_test() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_load_test_deploy_tx()); + assert_matches!(res.result, ExecutionResult::Success { .. }); + + let params = LoadTestParams::default(); + let res = vm.run_transaction(&get_load_test_tx(1, 10_000_000, params)); + assert_matches!(res.result, ExecutionResult::Success { .. }); + } + + #[test] + fn can_load_test_with_realistic_txs() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_load_test_deploy_tx()); + assert_matches!(res.result, ExecutionResult::Success { .. }); + + let res = vm.run_transaction(&get_realistic_load_test_tx(1)); + assert_matches!(res.result, ExecutionResult::Success { .. }); + } + + #[test] + fn can_load_test_with_heavy_txs() { + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&get_load_test_deploy_tx()); + assert_matches!(res.result, ExecutionResult::Success { .. }); + + let res = vm.run_transaction(&get_heavy_load_test_tx(1)); + assert_matches!(res.result, ExecutionResult::Success { .. }); } } diff --git a/deny.toml b/deny.toml index 1e4a30ad623..3ed6dcb7441 100644 --- a/deny.toml +++ b/deny.toml @@ -6,7 +6,9 @@ vulnerability = "deny" unmaintained = "warn" yanked = "warn" notice = "warn" -ignore = [] +ignore = [ + "RUSTSEC-2024-0363", # allows sqlx@0.8.0 until fix is released, more here -- https://github.com/launchbadge/sqlx/issues/3440 +] [licenses] unlicensed = "deny" diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index 38ae8788940..08d01390d77 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index eedacee81d6..92a7b0b0088 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index 74afb598539..bbd61715842 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata 
--http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose.yml b/docker-compose.yml index 116cc347818..68feb0769c2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docs/guides/external-node/building-from-scratch/Dockerfile b/docs/guides/external-node/building-from-scratch/Dockerfile index e0aa07cfa7c..da098df91d5 100644 --- a/docs/guides/external-node/building-from-scratch/Dockerfile +++ b/docs/guides/external-node/building-from-scratch/Dockerfile @@ -21,6 +21,8 @@ RUN cp target/release/zksync_external_node /usr/bin # build contracts RUN git submodule update --init --recursive RUN zk run yarn +RUN zk compiler all || true +RUN rm /root/.cache/hardhat-nodejs/compilers-v2/linux-amd64/solc-*.does.not.work || true RUN zk compiler all RUN zk contract build RUN zk f yarn run l2-contracts build diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index a3e823b260a..369ce50be0b 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -50,7 +50,7 @@ services: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 external-node: - image: "matterlabs/external-node:2.0-v24.6.0" + image: "matterlabs/external-node:2.0-v24.16.0" depends_on: postgres: condition: service_healthy diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index e7ebaafb3c4..1417c6cc360 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -50,7 +50,7 @@ services: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 external-node: - image: "matterlabs/external-node:2.0-v24.6.0" + image: "matterlabs/external-node:2.0-v24.16.0" depends_on: postgres: condition: service_healthy diff --git a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml index 6d61ef3963e..be37aaf2932 100644 --- a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml @@ -8,3 +8,7 @@ gossip_static_outbound: addr: 'external-node-consensus-mainnet.zksync.dev:3054' - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' + - key: 'node:public:ed25519:45d23515008b5121484eb774507df63ff4ce9f4b65e6a03b7c9ec4e0474d3044' + addr: 'consensus-mainnet-1.zksync-nodes.com:3054' + - key: 'node:public:ed25519:c278bb0831e8d0dcd3aaf0b7af7c3dca048d50b28c578ceecce61a412986b883' + addr: 
'consensus-mainnet-2.zksync-nodes.com:3054' diff --git a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml index 25461b5dfc4..8d2551c0708 100644 --- a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml @@ -8,3 +8,7 @@ gossip_static_outbound: addr: 'external-node-consensus-sepolia.zksync.dev:3054' - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' + - key: 'node:public:ed25519:f48616db5965ada49dcbd51b1de11068a27c9886c900d3522607f16dff2e66fc' + addr: 'consensus-sepolia-1.zksync-nodes.com:3054' + - key: 'node:public:ed25519:3789d49293792755a9c1c2a7ed9b0e210e92994606dcf76388b5635d7ed676cb' + addr: 'consensus-sepolia-2.zksync-nodes.com:3054' diff --git a/etc/env/base/prover_job_monitor.toml b/etc/env/base/prover_job_monitor.toml new file mode 100644 index 00000000000..40cdf76b8b1 --- /dev/null +++ b/etc/env/base/prover_job_monitor.toml @@ -0,0 +1,15 @@ +[prover_job_monitor] +prometheus_port = 3317 +max_db_connections = 9 +graceful_shutdown_timeout_ms = 5000 +gpu_prover_archiver_run_interval_ms = 86400000 +gpu_prover_archiver_archive_prover_after_ms = 172800000 +prover_jobs_archiver_run_interval_ms = 1800000 +prover_jobs_archiver_archive_jobs_after_ms = 172800000 +proof_compressor_job_requeuer_run_interval_ms = 10000 +prover_job_requeuer_run_interval_ms = 10000 +witness_generator_job_requeuer_run_interval_ms = 10000 +proof_compressor_queue_reporter_run_interval_ms = 10000 +prover_queue_reporter_run_interval_ms = 10000 +witness_generator_queue_reporter_run_interval_ms = 10000 +witness_job_queuer_run_interval_ms = 10000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 670bfc1cc77..90a509638c6 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -272,6 +272,23 @@ prover_group: aggregation_round: 1 - circuit_id: 18 aggregation_round: 1 +prover_job_monitor: + prometheus_port: 3317 + max_db_connections: 9 + graceful_shutdown_timeout_ms: 5000 + gpu_prover_archiver_run_interval_ms: 86400000 + gpu_prover_archiver_archive_prover_after_ms: 172800000 + prover_jobs_archiver_run_interval_ms: 1800000 + prover_jobs_archiver_archive_jobs_after_ms: 172800000 + proof_compressor_job_requeuer_run_interval_ms: 10000 + prover_job_requeuer_run_interval_ms: 10000 + witness_generator_job_requeuer_run_interval_ms: 10000 + proof_compressor_queue_reporter_run_interval_ms: 10000 + prover_queue_reporter_run_interval_ms: 10000 + witness_generator_queue_reporter_run_interval_ms: 10000 + witness_job_queuer_run_interval_ms: 10000 + + base_token_adjuster: price_polling_interval_ms: 30000 price_cache_update_interval_ms: 2000 diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix index ab2b12c48db..303c91b137c 100644 --- a/etc/nix/container-tee_prover.nix +++ b/etc/nix/container-tee_prover.nix @@ -41,7 +41,7 @@ nixsgxLib.mkSGXContainer { sgx = { edmm_enable = false; - enclave_size = "32G"; + enclave_size = "8G"; max_threads = 128; }; }; diff --git a/infrastructure/zk/src/test/test.ts b/infrastructure/zk/src/test/test.ts index 2e320205191..9059283af44 100644 --- a/infrastructure/zk/src/test/test.ts +++ b/infrastructure/zk/src/test/test.ts @@ -7,9 +7,25 @@ import * as db from '../database'; export { integration }; -export async function 
prover() { +export async function prover(options: string[]) { + await db.resetTest({ core: false, prover: true }); process.chdir(process.env.ZKSYNC_HOME! + '/prover'); - await utils.spawn('cargo test --release --workspace --locked'); + + let result = await utils.exec('cargo install --list'); + let test_runner = 'cargo nextest run'; + + if (!result.stdout.includes('cargo-nextest')) { + console.warn( + chalk.bold.red( + `cargo-nextest is missing, please run "cargo install cargo-nextest". Falling back to "cargo test".` + ) + ); + test_runner = 'cargo test'; + } + + let cmd = `${test_runner} --release --locked --${options.join(' ')}`; + console.log(`running prover unit tests with '${cmd}'`); + await utils.spawn(cmd); } export async function rust(options: string[]) { @@ -38,7 +54,13 @@ export async function l1Contracts() { export const command = new Command('test').description('run test suites').addCommand(integration.command); -command.command('prover').description('run unit-tests for the prover').action(prover); +command + .command('prover [command...]') + .allowUnknownOption() + .description('run unit-tests for the prover') + .action(async (args: string[]) => { + await prover(args); + }); command.command('l1-contracts').description('run unit-tests for the layer 1 smart contracts').action(l1Contracts); command .command('rust [command...]') diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md index 778edf4a9bc..4df2039589e 100644 --- a/prover/CHANGELOG.md +++ b/prover/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## [16.4.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.3.0...prover-v16.4.0) (2024-08-16) + + +### Features + +* Bump harness & gpu deps ([#2634](https://github.com/matter-labs/zksync-era/issues/2634)) ([2a7d566](https://github.com/matter-labs/zksync-era/commit/2a7d566ffeb63dc0a038d6b38cbda6bef7c7b105)) +* Poll the main node API for attestation status - relaxed (BFT-496) ([#2583](https://github.com/matter-labs/zksync-era/issues/2583)) ([b45aa91](https://github.com/matter-labs/zksync-era/commit/b45aa9168dd66d07ca61c8bb4c01f73dda822040)) +* **vlog:** Report observability config, flush, and shutdown ([#2622](https://github.com/matter-labs/zksync-era/issues/2622)) ([e23e661](https://github.com/matter-labs/zksync-era/commit/e23e6611731835ef3abd34f3f9867f9dc533eb21)) +* **vm:** Extract VM interface to separate crate ([#2638](https://github.com/matter-labs/zksync-era/issues/2638)) ([cb9ac4e](https://github.com/matter-labs/zksync-era/commit/cb9ac4e59fd16e6c125586bc02ef90e3b97ff80b)) +* **vm:** Fast VM integration ([#1949](https://github.com/matter-labs/zksync-era/issues/1949)) ([b752a54](https://github.com/matter-labs/zksync-era/commit/b752a54bebe6eb3bf0bea044996f5116cc5dc4e2)) + + +### Bug Fixes + +* Bump prover dependencies & rust toolchain ([#2600](https://github.com/matter-labs/zksync-era/issues/2600)) ([849c6a5](https://github.com/matter-labs/zksync-era/commit/849c6a5dcd095e8fead0630a2a403f282c26a2aa)) +* **prover:** Fix NWG ([#2590](https://github.com/matter-labs/zksync-era/issues/2590)) ([9b58ae9](https://github.com/matter-labs/zksync-era/commit/9b58ae97875455d58d42fe203cfb1f51cb270f62)) +* **prover:** Updated README.md ([#2604](https://github.com/matter-labs/zksync-era/issues/2604)) ([be9f357](https://github.com/matter-labs/zksync-era/commit/be9f357099ed281892c1ff4618514fc7c25f9b59)) + ## [16.3.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.2.0...prover-v16.3.0) (2024-08-07) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 
a7249ca9ffc..8268b121847 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -193,6 +193,21 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "assert_cmd" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" +dependencies = [ + "anstyle", + "bstr", + "doc-comment", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -718,9 +733,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.2.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "407123a79308091866f0199d510ee2fb930727204dd77d6805b3437d6cb859eb" +checksum = "c861b4baec895cb8e53b10825407f0844b0eafda2ac79e7f02de95439f0f1e74" dependencies = [ "boojum", "cmake", @@ -754,6 +769,17 @@ dependencies = [ "syn_derive", ] +[[package]] +name = "bstr" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" +dependencies = [ + "memchr", + "regex-automata 0.4.6", + "serde", +] + [[package]] name = "bumpalo" version = "3.16.0" @@ -872,11 +898,11 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "382960e9ff16705f95157bac88d2b0b556181229019eb57db6c990e3a0fff35f" +checksum = "fffaa17c1585fbf010b9340bb1fd7f4c4eedec2c15cb74a72162fd2d16435d55" dependencies = [ - "circuit_encodings 0.150.2", + "circuit_encodings 0.150.4", "crossbeam 0.8.4", "derivative", "seq-macro", @@ -922,14 +948,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba840a74f8d0b8b1334e93e4c87514a27c9be83d42d9f78d0c577572bb5f435" +checksum = "2593c02ad6b4b31ba63506c3f807f666133dd36bf47422f99b1d2947cf3c8dc1" dependencies = [ "derivative", "serde", - "zk_evm 0.150.0", - "zkevm_circuits 0.150.3", + "zk_evm 0.150.4", + "zkevm_circuits 0.150.4", ] [[package]] @@ -989,12 +1015,12 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79f3177b2bcd4ef5da9d2ca6916f6de31fb1973dfece27907a8dc7c69233494d" +checksum = "42d1a86b9c2207f3bb2dff5f00d1af1cb95004b6d07e9bacb6519fe08f12c04b" dependencies = [ "bellman_ce 0.7.0", - "circuit_encodings 0.150.2", + "circuit_encodings 0.150.4", "derivative", "rayon", "serde", @@ -1598,6 +1624,12 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.9.0" @@ -1619,6 +1651,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "dotenvy" version = 
"0.15.7" @@ -1824,9 +1862,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.2.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6592e1277ac1ab0f3925151784a3809f4f973b1a63a0244b6d44e3872b413199" +checksum = "4ac97d833b861e32bc0a71d0542bf5c92094f9818c52d65c695227bfa95ffbe3" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1835,9 +1873,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.2.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21767c452b418a7fb2bb9ffb07c744e4616da8d14176db4dcab76649c3206ece" +checksum = "ee6aed60cf09cb6d0b954d74351acb9beb13daab0bacad279691f6b97504b7e6" dependencies = [ "serde_json", ] @@ -4136,6 +4174,33 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "predicates" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +dependencies = [ + "anstyle", + "difflib", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" + +[[package]] +name = "predicates-tree" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty_assertions" version = "1.4.0" @@ -4421,6 +4486,7 @@ name = "prover_cli" version = "0.1.0" dependencies = [ "anyhow", + "assert_cmd", "bincode", "chrono", "circuit_definitions", @@ -4429,7 +4495,6 @@ dependencies = [ "dialoguer", "hex", "serde_json", - "sqlx", "strum", "tokio", "tracing", @@ -5507,9 +5572,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.3" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee96349e7395922586c312936b259cb80b3d0a27f227dc3adee480a79d52a4e6" +checksum = "c5e5d862287bb883a4cb0bc4f8ea938ba3fdaa5e495f1a59bc3515231017a0e2" dependencies = [ "bincode", "blake2 0.10.6", @@ -6085,6 +6150,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + [[package]] name = "test-log" version = "0.2.16" @@ -6781,8 +6852,8 @@ source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3 dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.0", - "zkevm_opcode_defs 0.150.0", + "zk_evm_abstractions 0.150.4", + "zkevm_opcode_defs 0.150.4", ] [[package]] @@ -7286,9 +7357,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5bf91304aa14827758afa3def8cf622f9a7f9fb65fe5d5099018dbacf0c5984" +checksum = "e2dbb0ed38d61fbd04bd7575755924d1303e129c04c909abba7f5bfcc6260bcf" dependencies = [ "anyhow", "lazy_static", @@ -7296,7 +7367,7 @@ dependencies = [ "serde", "serde_json", 
"static_assertions", - "zk_evm_abstractions 0.150.0", + "zk_evm_abstractions 0.150.4", ] [[package]] @@ -7327,22 +7398,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc313cea4ac9ef6b855264b1425cbe9de30dd8f009559dabcb6b2896122da5db" +checksum = "31460aacfe65b39ac484a2a2e0bbb02baf141f65264bf48e1e4f59ab375fe933" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.0", + "zkevm_opcode_defs 0.150.4", ] [[package]] name = "zkevm-assembly" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d55e7082c5a313e46e1017d12ea5acfba9f961af3c260ff580490ce02d52067c" +checksum = "7b69d09d125b94767847c4cdc4ae399654b9e2a2f9304bd8935a7033bef4b07c" dependencies = [ "env_logger 0.9.3", "hex", @@ -7355,7 +7426,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.0", + "zkevm_opcode_defs 0.150.4", ] [[package]] @@ -7404,9 +7475,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.3" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d64bda28dec766324d2e5095a46fb141540d86a232106760dfb20ab4ae6e5c" +checksum = "abdfaa95dfe0878fda219dd17a6cc8c28711e2067785910c0e06d3ffdca78629" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7419,7 +7490,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.0", + "zkevm_opcode_defs 0.150.4", ] [[package]] @@ -7466,9 +7537,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3328c012d444bdbfadb754a72c01a56879eb66584efc71eac457e89e7843608" +checksum = "bb7c5c7b4481a646f8696b08cee64a8dec097509a6378d18242f81022f327f1e" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7483,13 +7554,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be67d84d0ac41145a4daed8333feac0936ade29feda6448f46d80ae80285911d" +checksum = "9416dc5fcf7bc403d4c24d37f0e9a492a81926ff0e89a7792dc8a29de69aec1b" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.2", + "circuit_sequencer_api 0.150.4", "codegen", "crossbeam 0.8.4", "derivative", @@ -7510,9 +7581,9 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3143200cfbf1dd8e2e14c2bf2a2b89da8fa5628c7192a4739f13269b9707656e" +checksum = "82fe099f4f4a2cc8ca8ca591d7619ac00b8054f63b712fa6ceee2b84c6e04c62" dependencies = [ "bindgen 0.59.2", "crossbeam 0.8.4", @@ -7524,9 +7595,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aeacd406321241ecbcedf9f3025af23511a83e666ecdec2c971935225ea5b98" +checksum = "f73d27e0e4589c7445f5a22e511cb5186e2d205172ca4b26acd7a334b3af9492" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -7541,9 +7612,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.0" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bdf646f359c7275451c218dcf3cd99c06afb0d21da9cc518a1aa5222ee44ee8c" +checksum = "1cf4c09adf0a84af0d7ded1fd85a2487fef4cbf1cfc1925412717d0eef03dd5a" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7570,9 +7641,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a463106f37cfa589896e6a165b5bb0533013377990e19f10e8c4894346a62e8b" +checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" dependencies = [ "anyhow", "once_cell", @@ -7606,9 +7677,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f0883af373e9198fd27c0148e7e47b37f912cb4b444bec3f7eed0af0b0dfc69" +checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" dependencies = [ "anyhow", "blst", @@ -7630,9 +7701,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e426aa7c68a12dde702c3ec4ef49de24d9054ef908384232b7887e043ca3f2fe" +checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" dependencies = [ "anyhow", "bit-vec", @@ -7652,9 +7723,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8388c33fd5bc3725e58c26db2d3016538c6221c6448b3e92cf5df07f6074a028" +checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" dependencies = [ "anyhow", "async-trait", @@ -7672,9 +7743,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "612920e56dcb99f227bc23e1254f4dabc7cb4c5cd1a9ec400ceba0ec6fa77c1e" +checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" dependencies = [ "anyhow", "rand 0.8.5", @@ -7757,6 +7828,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm_interface", ] [[package]] @@ -7830,9 +7902,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.2" +version = "0.150.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b76d0e08b3e0970565f7a9a611278547f4f1dbd6184a250c8c5e743aed61c525" +checksum = "9949f48ea1a9f9a0e73242d4d1e87e681095181827486b3fcc2cf93e5aa03280" dependencies = [ "boojum", "derivative", @@ -7842,7 +7914,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.3", + "zkevm_circuits 0.150.4", ] [[package]] @@ -7882,7 +7954,7 @@ dependencies = [ "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.2", + "circuit_sequencer_api 0.150.4", "hex", "itertools 0.10.5", "once_cell", @@ -7895,7 +7967,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.0", + "zk_evm 0.150.4", "zksync_contracts", "zksync_system_constants", "zksync_types", @@ -7954,7 +8026,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.2", + "circuit_sequencer_api 0.150.4", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -7983,9 +8055,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" 
-version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d82fd63f27681b9c01f0e01e3060e71b72809db8e21d9130663ee92bd1e391" +checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" dependencies = [ "anyhow", "bit-vec", @@ -8004,9 +8076,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3c158ab4d211053886371d4a00514bdf8ebdf826d40ee03b98fee2e0d1605e" +checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" dependencies = [ "anyhow", "heck 0.5.0", @@ -8140,7 +8212,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.2", + "circuit_sequencer_api 0.150.4", "serde", "serde_with", "strum", @@ -8149,6 +8221,25 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_prover_job_monitor" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "clap 4.5.4", + "ctrlc", + "tokio", + "tracing", + "vise", + "zksync_config", + "zksync_core_leftovers", + "zksync_prover_dal", + "zksync_types", + "zksync_utils", + "zksync_vlog", +] + [[package]] name = "zksync_queued_job_processor" version = "0.1.0" @@ -8273,7 +8364,6 @@ dependencies = [ "zksync_contracts", "zksync_system_constants", "zksync_types", - "zksync_utils", ] [[package]] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 8be6f355223..9a1a50a2ddb 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -56,13 +56,13 @@ tracing-subscriber = { version = "0.3" } vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.2" -circuit_sequencer_api = "=0.150.2" -zkevm_test_harness = "=0.150.2" +circuit_definitions = "=0.150.4" +circuit_sequencer_api = "=0.150.4" +zkevm_test_harness = "=0.150.4" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.0" } -shivini = "=0.150.3" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.4" } +shivini = "=0.150.4" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } @@ -81,6 +81,7 @@ zksync_utils = { path = "../core/lib/utils" } zksync_eth_client = { path = "../core/lib/eth_client" } zksync_contracts = { path = "../core/lib/contracts" } zksync_core_leftovers = { path = "../core/lib/zksync_core_leftovers" } +zksync_periodic_job = { path = "../core/lib/periodic_job" } # Prover workspace dependencies zksync_prover_dal = { path = "crates/lib/prover_dal" } diff --git a/prover/crates/bin/prover_cli/Cargo.toml b/prover/crates/bin/prover_cli/Cargo.toml index f91cd47e094..e4ccb280574 100644 --- a/prover/crates/bin/prover_cli/Cargo.toml +++ b/prover/crates/bin/prover_cli/Cargo.toml @@ -32,12 +32,14 @@ zksync_dal.workspace = true zksync_utils.workspace = true strum.workspace = true colored.workspace = true -sqlx.workspace = true circuit_definitions.workspace = true serde_json.workspace = true zkevm_test_harness = { workspace = true, optional = true, features = ["verbose_circuits"] } chrono.workspace = true +[dev-dependencies] +assert_cmd = "2" + [features] # enable verbose circuits, if you want to use debug_circuit command (as it is quite heavy dependency). 
verbose_circuits = ["zkevm_test_harness"] diff --git a/prover/crates/bin/prover_cli/src/cli.rs b/prover/crates/bin/prover_cli/src/cli.rs index 7174830f44d..0c7022cae29 100644 --- a/prover/crates/bin/prover_cli/src/cli.rs +++ b/prover/crates/bin/prover_cli/src/cli.rs @@ -1,19 +1,37 @@ use clap::{command, Args, Parser, Subcommand}; use zksync_types::url::SensitiveUrl; -use crate::commands::{self, config, debug_proof, delete, get_file_info, requeue, restart, stats}; +use crate::commands::{ + config, debug_proof, delete, get_file_info, requeue, restart, stats, status::StatusCommand, +}; pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION"); #[derive(Parser)] #[command(name = "prover-cli", version = VERSION_STRING, about, long_about = None)] -struct ProverCLI { +pub struct ProverCLI { #[command(subcommand)] command: ProverCommand, #[clap(flatten)] config: ProverCLIConfig, } +impl ProverCLI { + pub async fn start(self) -> anyhow::Result<()> { + match self.command { + ProverCommand::FileInfo(args) => get_file_info::run(args).await?, + ProverCommand::Config(cfg) => config::run(cfg).await?, + ProverCommand::Delete(args) => delete::run(args, self.config).await?, + ProverCommand::Status(cmd) => cmd.run(self.config).await?, + ProverCommand::Requeue(args) => requeue::run(args, self.config).await?, + ProverCommand::Restart(args) => restart::run(args).await?, + ProverCommand::DebugProof(args) => debug_proof::run(args).await?, + ProverCommand::Stats(args) => stats::run(args, self.config).await?, + }; + Ok(()) + } +} + // Note: this is set via the `config` command. Values are taken from the file pointed // by the env var `PLI__CONFIG` or from `$ZKSYNC_HOME/etc/pliconfig` if unset. #[derive(Args)] @@ -26,31 +44,15 @@ pub struct ProverCLIConfig { } #[derive(Subcommand)] -enum ProverCommand { +pub enum ProverCommand { DebugProof(debug_proof::Args), FileInfo(get_file_info::Args), Config(ProverCLIConfig), Delete(delete::Args), #[command(subcommand)] - Status(commands::StatusCommand), + Status(StatusCommand), Requeue(requeue::Args), Restart(restart::Args), #[command(about = "Displays L1 Batch proving stats for a given period")] Stats(stats::Options), } - -pub async fn start() -> anyhow::Result<()> { - let ProverCLI { command, config } = ProverCLI::parse(); - match command { - ProverCommand::FileInfo(args) => get_file_info::run(args).await?, - ProverCommand::Config(cfg) => config::run(cfg).await?, - ProverCommand::Delete(args) => delete::run(args, config).await?, - ProverCommand::Status(cmd) => cmd.run(config).await?, - ProverCommand::Requeue(args) => requeue::run(args, config).await?, - ProverCommand::Restart(args) => restart::run(args).await?, - ProverCommand::DebugProof(args) => debug_proof::run(args).await?, - ProverCommand::Stats(args) => stats::run(args, config).await?, - }; - - Ok(()) -} diff --git a/prover/crates/bin/prover_cli/src/commands/debug_proof.rs b/prover/crates/bin/prover_cli/src/commands/debug_proof.rs index 7875554ae92..26856ed6ca8 100644 --- a/prover/crates/bin/prover_cli/src/commands/debug_proof.rs +++ b/prover/crates/bin/prover_cli/src/commands/debug_proof.rs @@ -1,13 +1,13 @@ use clap::Args as ClapArgs; #[derive(ClapArgs)] -pub(crate) struct Args { +pub struct Args { /// File with the basic proof. 
#[clap(short, long)] file: String, } -pub(crate) async fn run(_args: Args) -> anyhow::Result<()> { +pub async fn run(_args: Args) -> anyhow::Result<()> { #[cfg(not(feature = "verbose_circuits"))] anyhow::bail!("Please compile with verbose_circuits feature"); #[cfg(feature = "verbose_circuits")] diff --git a/prover/crates/bin/prover_cli/src/commands/delete.rs b/prover/crates/bin/prover_cli/src/commands/delete.rs index 436bb10e10c..da45a909af3 100644 --- a/prover/crates/bin/prover_cli/src/commands/delete.rs +++ b/prover/crates/bin/prover_cli/src/commands/delete.rs @@ -7,7 +7,7 @@ use zksync_types::L1BatchNumber; use crate::cli::ProverCLIConfig; #[derive(ClapArgs)] -pub(crate) struct Args { +pub struct Args { /// Delete data from all batches #[clap( short, @@ -22,7 +22,7 @@ pub(crate) struct Args { batch: L1BatchNumber, } -pub(crate) async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { +pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { let confirmation = Input::<String>::with_theme(&ColorfulTheme::default()) .with_prompt("Are you sure you want to delete the data?") .default("no".to_owned()) diff --git a/prover/crates/bin/prover_cli/src/commands/get_file_info.rs b/prover/crates/bin/prover_cli/src/commands/get_file_info.rs index 63d7f25f615..271cf38c37a 100644 --- a/prover/crates/bin/prover_cli/src/commands/get_file_info.rs +++ b/prover/crates/bin/prover_cli/src/commands/get_file_info.rs @@ -18,7 +18,7 @@ use zksync_prover_fri_types::{ use zksync_prover_interface::outputs::L1BatchProofForL1; #[derive(ClapArgs)] -pub(crate) struct Args { +pub struct Args { #[clap(short, long)] file_path: String, } @@ -73,7 +73,7 @@ fn pretty_print_scheduler_witness( fn pretty_print_circuit_wrapper(circuit: &CircuitWrapper) { println!(" == Circuit =="); match circuit { - CircuitWrapper::Base(circuit) => { + CircuitWrapper::Base(circuit) | CircuitWrapper::BasePartial((circuit, _)) => { println!( "Type: basic. Id: {:?} ({})", circuit.numeric_circuit_type(), diff --git a/prover/crates/bin/prover_cli/src/commands/mod.rs b/prover/crates/bin/prover_cli/src/commands/mod.rs index 4bc8b2eb392..d9dde52284b 100644 --- a/prover/crates/bin/prover_cli/src/commands/mod.rs +++ b/prover/crates/bin/prover_cli/src/commands/mod.rs @@ -1,4 +1,3 @@ -pub(crate) use status::StatusCommand; pub(crate) mod config; pub(crate) mod debug_proof; pub(crate) mod delete; @@ -6,4 +5,4 @@ pub(crate) mod get_file_info; pub(crate) mod requeue; pub(crate) mod restart; pub(crate) mod stats; -pub(crate) mod status; +pub mod status; diff --git a/prover/crates/bin/prover_cli/src/commands/restart.rs b/prover/crates/bin/prover_cli/src/commands/restart.rs index 75beafd7100..24bd76e6335 100644 --- a/prover/crates/bin/prover_cli/src/commands/restart.rs +++ b/prover/crates/bin/prover_cli/src/commands/restart.rs @@ -8,7 +8,7 @@ use zksync_prover_dal::{ use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; #[derive(ClapArgs)] -pub(crate) struct Args { +pub struct Args { /// Batch number to restart #[clap( short, @@ -22,7 +22,7 @@ pub(crate) struct Args { prover_job: Option, } -pub(crate) async fn run(args: Args) -> anyhow::Result<()> { +pub async fn run(args: Args) -> anyhow::Result<()> { let config = DatabaseSecrets::from_env()?; let prover_connection_pool = ConnectionPool::<Prover>::singleton(config.prover_url()?)
.build() diff --git a/prover/crates/bin/prover_cli/src/commands/stats.rs b/prover/crates/bin/prover_cli/src/commands/stats.rs index 307775fa27d..538238f2211 100644 --- a/prover/crates/bin/prover_cli/src/commands/stats.rs +++ b/prover/crates/bin/prover_cli/src/commands/stats.rs @@ -14,7 +14,7 @@ enum StatsPeriod { } #[derive(Args)] -pub(crate) struct Options { +pub struct Options { #[clap( short = 'p', long = "period", @@ -24,7 +24,7 @@ pub(crate) struct Options { period: StatsPeriod, } -pub(crate) async fn run(opts: Options, config: ProverCLIConfig) -> anyhow::Result<()> { +pub async fn run(opts: Options, config: ProverCLIConfig) -> anyhow::Result<()> { let prover_connection_pool = ConnectionPool::::singleton(config.db_url) .build() .await diff --git a/prover/crates/bin/prover_cli/src/commands/status/batch.rs b/prover/crates/bin/prover_cli/src/commands/status/batch.rs index 84a8e7184a6..797695b0227 100644 --- a/prover/crates/bin/prover_cli/src/commands/status/batch.rs +++ b/prover/crates/bin/prover_cli/src/commands/status/batch.rs @@ -4,6 +4,8 @@ use anyhow::Context as _; use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; use clap::Args as ClapArgs; use colored::*; +use zksync_config::configs::FriProverConfig; +use zksync_env_config::FromEnv; use zksync_prover_dal::{Connection, ConnectionPool, Prover, ProverDal}; use zksync_types::{ basic_fri_types::AggregationRound, @@ -16,8 +18,11 @@ use zksync_types::{ L1BatchNumber, }; -use super::utils::{BatchData, StageInfo, Status}; -use crate::cli::ProverCLIConfig; +use super::utils::{get_prover_job_status, BatchData, StageInfo, Status}; +use crate::{ + cli::ProverCLIConfig, + commands::status::utils::{get_prover_jobs_status_from_vec, get_witness_generator_job_status}, +}; #[derive(ClapArgs)] pub struct Args { @@ -36,7 +41,7 @@ pub(crate) async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<( format!("Batch {} Status", batch_data.batch_number).bold() ); - if let Status::Custom(msg) = batch_data.compressor.witness_generator_jobs_status() { + if let Status::Custom(msg) = batch_data.compressor.witness_generator_jobs_status(10) { if msg.contains("Sent to server") { println!("> Proof sent to server ✅"); continue; @@ -45,7 +50,7 @@ pub(crate) async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<( let basic_witness_generator_status = batch_data .basic_witness_generator - .witness_generator_jobs_status(); + .witness_generator_jobs_status(10); if matches!(basic_witness_generator_status, Status::JobsNotFound) { println!("> No batch found. 
🚫"); continue; @@ -205,25 +210,21 @@ fn display_batch_status(batch_data: BatchData) { } fn display_status_for_stage(stage_info: StageInfo) { + let max_attempts = FriProverConfig::from_env() + .expect("Fail to read prover config.") + .max_attempts; display_aggregation_round(&stage_info); - match stage_info.witness_generator_jobs_status() { + let status = stage_info.witness_generator_jobs_status(max_attempts); + match status { Status::Custom(msg) => { println!("{}: {} \n", stage_info.to_string().bold(), msg); } Status::Queued | Status::WaitingForProofs | Status::Stuck | Status::JobsNotFound => { - println!( - "{}: {}", - stage_info.to_string().bold(), - stage_info.witness_generator_jobs_status() - ) + println!("{}: {}", stage_info.to_string().bold(), status) } Status::InProgress | Status::Successful => { - println!( - "{}: {}", - stage_info.to_string().bold(), - stage_info.witness_generator_jobs_status() - ); - if let Some(job_status) = stage_info.prover_jobs_status() { + println!("{}: {}", stage_info.to_string().bold(), status); + if let Some(job_status) = stage_info.prover_jobs_status(max_attempts) { println!("> {}: {}", "Prover Jobs".to_owned().bold(), job_status); } } @@ -240,53 +241,51 @@ fn display_batch_info(batch_data: BatchData) { } fn display_info_for_stage(stage_info: StageInfo) { + let max_attempts = FriProverConfig::from_env() + .expect("Fail to read prover config.") + .max_attempts; display_aggregation_round(&stage_info); - match stage_info.witness_generator_jobs_status() { + let status = stage_info.witness_generator_jobs_status(max_attempts); + match status { Status::Custom(msg) => { println!("{}: {}", stage_info.to_string().bold(), msg); } - Status::Queued | Status::WaitingForProofs | Status::Stuck | Status::JobsNotFound => { - println!( - " > {}: {}", - stage_info.to_string().bold(), - stage_info.witness_generator_jobs_status() - ) + Status::Queued | Status::WaitingForProofs | Status::JobsNotFound => { + println!(" > {}: {}", stage_info.to_string().bold(), status) } - Status::InProgress => { - println!( - "v {}: {}", - stage_info.to_string().bold(), - stage_info.witness_generator_jobs_status() - ); + Status::InProgress | Status::Stuck => { + println!("v {}: {}", stage_info.to_string().bold(), status); match stage_info { StageInfo::BasicWitnessGenerator { prover_jobs_info, .. } => { - display_prover_jobs_info(prover_jobs_info); + display_prover_jobs_info(prover_jobs_info, max_attempts); } StageInfo::LeafWitnessGenerator { witness_generator_jobs_info, prover_jobs_info, } => { - display_leaf_witness_generator_jobs_info(witness_generator_jobs_info); - display_prover_jobs_info(prover_jobs_info); + display_leaf_witness_generator_jobs_info( + witness_generator_jobs_info, + max_attempts, + ); + display_prover_jobs_info(prover_jobs_info, max_attempts); } StageInfo::NodeWitnessGenerator { witness_generator_jobs_info, prover_jobs_info, } => { - display_node_witness_generator_jobs_info(witness_generator_jobs_info); - display_prover_jobs_info(prover_jobs_info); + display_node_witness_generator_jobs_info( + witness_generator_jobs_info, + max_attempts, + ); + display_prover_jobs_info(prover_jobs_info, max_attempts); } _ => (), } } Status::Successful => { - println!( - "> {}: {}", - stage_info.to_string().bold(), - stage_info.witness_generator_jobs_status() - ); + println!("> {}: {}", stage_info.to_string().bold(), status); match stage_info { StageInfo::BasicWitnessGenerator { prover_jobs_info, .. 
@@ -296,7 +295,7 @@ fn display_info_for_stage(stage_info: StageInfo) { } | StageInfo::NodeWitnessGenerator { prover_jobs_info, .. - } => display_prover_jobs_info(prover_jobs_info), + } => display_prover_jobs_info(prover_jobs_info, max_attempts), _ => (), } } @@ -304,11 +303,12 @@ fn display_info_for_stage(stage_info: StageInfo) { } fn display_leaf_witness_generator_jobs_info( - mut leaf_witness_generators_jobs_info: Vec, + mut jobs_info: Vec, + max_attempts: u32, ) { - leaf_witness_generators_jobs_info.sort_by_key(|job| job.circuit_id); + jobs_info.sort_by_key(|job| job.circuit_id); - leaf_witness_generators_jobs_info.iter().for_each(|job| { + jobs_info.iter().for_each(|job| { println!( " > {}: {}", format!( @@ -316,17 +316,18 @@ fn display_leaf_witness_generator_jobs_info( BaseLayerCircuitType::from_numeric_value(job.circuit_id as u8) ) .bold(), - Status::from(job.status.clone()) + get_witness_generator_job_status(job, max_attempts) ) }); } fn display_node_witness_generator_jobs_info( - mut node_witness_generators_jobs_info: Vec, + mut jobs_info: Vec, + max_attempts: u32, ) { - node_witness_generators_jobs_info.sort_by_key(|job| job.circuit_id); + jobs_info.sort_by_key(|job| job.circuit_id); - node_witness_generators_jobs_info.iter().for_each(|job| { + jobs_info.iter().for_each(|job| { println!( " > {}: {}", format!( @@ -334,17 +335,18 @@ fn display_node_witness_generator_jobs_info( BaseLayerCircuitType::from_numeric_value(job.circuit_id as u8) ) .bold(), - Status::from(job.status.clone()) + get_witness_generator_job_status(job, max_attempts) ) }); } -fn display_prover_jobs_info(prover_jobs_info: Vec) { - let prover_jobs_status = Status::from(prover_jobs_info.clone()); +fn display_prover_jobs_info(prover_jobs_info: Vec, max_attempts: u32) { + let prover_jobs_status = get_prover_jobs_status_from_vec(&prover_jobs_info, max_attempts); - if matches!(prover_jobs_status, Status::Successful) - || matches!(prover_jobs_status, Status::JobsNotFound) - { + if matches!( + prover_jobs_status, + Status::Successful | Status::JobsNotFound + ) { println!( "> {}: {prover_jobs_status}", "Prover Jobs".to_owned().bold() @@ -366,7 +368,7 @@ fn display_prover_jobs_info(prover_jobs_info: Vec) { }); for (circuit_id, prover_jobs_info) in jobs_by_circuit_id { - let status = Status::from(prover_jobs_info.clone()); + let status = get_prover_jobs_status_from_vec(&prover_jobs_info, max_attempts); println!( " > {}: {}", format!( @@ -376,8 +378,10 @@ fn display_prover_jobs_info(prover_jobs_info: Vec) { .bold(), status ); - if matches!(status, Status::InProgress) { - display_job_status_count(prover_jobs_info); + match status { + Status::InProgress => display_job_status_count(prover_jobs_info), + Status::Stuck => display_stuck_jobs(prover_jobs_info, max_attempts), + _ => (), } } } @@ -400,6 +404,20 @@ fn display_job_status_count(jobs: Vec) { println!(" - Failed: {}", jobs_counts.failed); } +fn display_stuck_jobs(jobs: Vec, max_attempts: u32) { + jobs.iter().for_each(|job| { + if matches!( + get_prover_job_status(job.clone(), max_attempts), + Status::Stuck + ) { + println!( + " - Prover Job: {} stuck after {} attempts", + job.id, job.attempts + ); + } + }) +} + fn display_aggregation_round(stage_info: &StageInfo) { if let Some(aggregation_round) = stage_info.aggregation_round() { println!( diff --git a/prover/crates/bin/prover_cli/src/commands/status/mod.rs b/prover/crates/bin/prover_cli/src/commands/status/mod.rs index b6df8680151..574d7f7be23 100644 --- a/prover/crates/bin/prover_cli/src/commands/status/mod.rs +++ 
b/prover/crates/bin/prover_cli/src/commands/status/mod.rs @@ -4,7 +4,7 @@ use crate::cli::ProverCLIConfig; pub(crate) mod batch; pub(crate) mod l1; -mod utils; +pub mod utils; #[derive(Subcommand)] pub enum StatusCommand { diff --git a/prover/crates/bin/prover_cli/src/commands/status/utils.rs b/prover/crates/bin/prover_cli/src/commands/status/utils.rs index 31726e74920..eee5c08b96f 100644 --- a/prover/crates/bin/prover_cli/src/commands/status/utils.rs +++ b/prover/crates/bin/prover_cli/src/commands/status/utils.rs @@ -6,7 +6,8 @@ use zksync_types::{ prover_dal::{ BasicWitnessGeneratorJobInfo, LeafWitnessGeneratorJobInfo, NodeWitnessGeneratorJobInfo, ProofCompressionJobInfo, ProofCompressionJobStatus, ProverJobFriInfo, ProverJobStatus, - RecursionTipWitnessGeneratorJobInfo, SchedulerWitnessGeneratorJobInfo, WitnessJobStatus, + RecursionTipWitnessGeneratorJobInfo, SchedulerWitnessGeneratorJobInfo, Stallable, + WitnessJobStatus, }, L1BatchNumber, }; @@ -55,6 +56,20 @@ pub enum Status { JobsNotFound, } +impl From<ProverJobStatus> for Status { + fn from(status: ProverJobStatus) -> Self { + match status { + ProverJobStatus::Queued => Status::Queued, + ProverJobStatus::InProgress(_) => Status::InProgress, + ProverJobStatus::Successful(_) => Status::Successful, + ProverJobStatus::Failed(_) => Status::Custom("Failed".to_owned()), + ProverJobStatus::Skipped => Status::Custom("Skipped ⏩".to_owned()), + ProverJobStatus::Ignored => Status::Custom("Ignored".to_owned()), + ProverJobStatus::InGPUProof => Status::Custom("In GPU Proof".to_owned()), + } + } +} + impl From<WitnessJobStatus> for Status { fn from(status: WitnessJobStatus) -> Self { match status { @@ -151,31 +166,6 @@ impl From for Status { } } -impl From<Vec<ProverJobFriInfo>> for Status { - fn from(jobs_vector: Vec<ProverJobFriInfo>) -> Self { - if jobs_vector.is_empty() { - Status::JobsNotFound - } else if jobs_vector - .iter() - .all(|job| matches!(job.status, ProverJobStatus::InGPUProof)) - { - Status::Custom("In GPU Proof ⚡️".to_owned()) - } else if jobs_vector - .iter() - .all(|job| matches!(job.status, ProverJobStatus::Queued)) - { - Status::Queued - } else if jobs_vector - .iter() - .all(|job| matches!(job.status, ProverJobStatus::Successful(_))) - { - Status::Successful - } else { - Status::InProgress - } - } -} - #[allow(clippy::large_enum_variant)] #[derive(EnumString, Clone, Display)] pub enum StageInfo { @@ -214,7 +204,7 @@ impl StageInfo { } } - pub fn prover_jobs_status(&self) -> Option<Status> { + pub fn prover_jobs_status(&self, max_attempts: u32) -> Option<Status> { match self.clone() { StageInfo::BasicWitnessGenerator { prover_jobs_info, .. @@ -224,38 +214,144 @@ impl StageInfo { } | StageInfo::NodeWitnessGenerator { prover_jobs_info, .. - } => Some(Status::from(prover_jobs_info)), + } => Some(get_prover_jobs_status_from_vec( + &prover_jobs_info, + max_attempts, + )), StageInfo::RecursionTipWitnessGenerator(_) | StageInfo::SchedulerWitnessGenerator(_) | StageInfo::Compressor(_) => None, } } - pub fn witness_generator_jobs_status(&self) -> Status { + pub fn witness_generator_jobs_status(&self, max_attempts: u32) -> Status { match self.clone() { StageInfo::BasicWitnessGenerator { witness_generator_job_info, .. } => witness_generator_job_info - .map(|witness_generator_job_info| Status::from(witness_generator_job_info.status)) + .map(|witness_generator_job_info| { + get_witness_generator_job_status(&witness_generator_job_info, max_attempts) + }) .unwrap_or_default(), StageInfo::LeafWitnessGenerator { witness_generator_jobs_info, ..
- } => Status::from(witness_generator_jobs_info), + } => { + get_witness_generator_job_status_from_vec(witness_generator_jobs_info, max_attempts) + } StageInfo::NodeWitnessGenerator { witness_generator_jobs_info, .. - } => Status::from(witness_generator_jobs_info), - StageInfo::RecursionTipWitnessGenerator(status) => status - .map(|job| Status::from(job.status)) - .unwrap_or_default(), - StageInfo::SchedulerWitnessGenerator(status) => status - .map(|job| Status::from(job.status)) - .unwrap_or_default(), + } => { + get_witness_generator_job_status_from_vec(witness_generator_jobs_info, max_attempts) + } + StageInfo::RecursionTipWitnessGenerator(witness_generator_job_info) => { + witness_generator_job_info + .map(|witness_generator_job_info| { + get_witness_generator_job_status(&witness_generator_job_info, max_attempts) + }) + .unwrap_or_default() + } + StageInfo::SchedulerWitnessGenerator(witness_generator_job_info) => { + witness_generator_job_info + .map(|witness_generator_job_info| { + get_witness_generator_job_status(&witness_generator_job_info, max_attempts) + }) + .unwrap_or_default() + } StageInfo::Compressor(status) => status .map(|job| Status::from(job.status)) .unwrap_or_default(), } } } + +pub fn get_witness_generator_job_status(data: &impl Stallable, max_attempts: u32) -> Status { + let status = data.get_status(); + if matches!( + status, + WitnessJobStatus::Failed(_) | WitnessJobStatus::InProgress, + ) && data.get_attempts() >= max_attempts + { + return Status::Stuck; + } + Status::from(status) +} + +pub fn get_witness_generator_job_status_from_vec( + prover_jobs: Vec, + max_attempts: u32, +) -> Status { + if prover_jobs.is_empty() { + Status::JobsNotFound + } else if prover_jobs + .iter() + .all(|job| matches!(job.get_status(), WitnessJobStatus::WaitingForProofs)) + { + Status::WaitingForProofs + } else if prover_jobs.iter().any(|job| { + matches!( + job.get_status(), + WitnessJobStatus::Failed(_) | WitnessJobStatus::InProgress, + ) && job.get_attempts() >= max_attempts + }) { + Status::Stuck + } else if prover_jobs.iter().all(|job| { + matches!(job.get_status(), WitnessJobStatus::Queued) + || matches!(job.get_status(), WitnessJobStatus::WaitingForProofs) + }) { + Status::Queued + } else if prover_jobs + .iter() + .all(|job| matches!(job.get_status(), WitnessJobStatus::Successful(_))) + { + Status::Successful + } else { + Status::InProgress + } +} + +pub fn get_prover_job_status(prover_jobs: ProverJobFriInfo, max_attempts: u32) -> Status { + if matches!( + prover_jobs.status, + ProverJobStatus::Failed(_) | ProverJobStatus::InProgress(_), + ) && prover_jobs.attempts as u32 >= max_attempts + { + return Status::Stuck; + } + Status::from(prover_jobs.status) +} + +pub fn get_prover_jobs_status_from_vec( + prover_jobs: &[ProverJobFriInfo], + max_attempts: u32, +) -> Status { + if prover_jobs.is_empty() { + Status::JobsNotFound + } else if prover_jobs.iter().any(|job| { + matches!( + job.status, + ProverJobStatus::Failed(_) | ProverJobStatus::InProgress(_), + ) && job.attempts as u32 >= max_attempts + }) { + Status::Stuck + } else if prover_jobs + .iter() + .all(|job| matches!(job.status, ProverJobStatus::InGPUProof)) + { + Status::Custom("In GPU Proof ⚡️".to_owned()) + } else if prover_jobs + .iter() + .all(|job| matches!(job.status, ProverJobStatus::Queued)) + { + Status::Queued + } else if prover_jobs + .iter() + .all(|job| matches!(job.status, ProverJobStatus::Successful(_))) + { + Status::Successful + } else { + Status::InProgress + } +} diff --git 
a/prover/crates/bin/prover_cli/src/main.rs b/prover/crates/bin/prover_cli/src/main.rs index b393fad6a31..c334b2b2e1f 100644 --- a/prover/crates/bin/prover_cli/src/main.rs +++ b/prover/crates/bin/prover_cli/src/main.rs @@ -1,4 +1,5 @@ -use prover_cli::{cli, config}; +use clap::Parser; +use prover_cli::{cli::ProverCLI, config}; #[tokio::main] async fn main() { @@ -14,7 +15,9 @@ async fn main() { }) .unwrap(); - match cli::start().await { + let prover = ProverCLI::parse(); + + match prover.start().await { Ok(_) => {} Err(err) => { tracing::error!("{err:?}"); diff --git a/prover/crates/bin/prover_cli/tests/batch.rs b/prover/crates/bin/prover_cli/tests/batch.rs new file mode 100644 index 00000000000..bfd944ec29b --- /dev/null +++ b/prover/crates/bin/prover_cli/tests/batch.rs @@ -0,0 +1,1494 @@ +use assert_cmd::Command; +use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; +use prover_cli::commands::status::utils::Status; +use zksync_prover_dal::{ + fri_witness_generator_dal::FriWitnessJobStatus, Connection, ConnectionPool, Prover, ProverDal, +}; +use zksync_types::{ + basic_fri_types::AggregationRound, + protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, + prover_dal::{ + ProofCompressionJobStatus, ProverJobStatus, ProverJobStatusFailed, + ProverJobStatusInProgress, ProverJobStatusSuccessful, WitnessJobStatus, + WitnessJobStatusSuccessful, + }, + L1BatchNumber, +}; + +const NON_EXISTING_BATCH_STATUS_STDOUT: &str = "== Batch 10000 Status == +> No batch found. 🚫 +"; + +const MULTIPLE_NON_EXISTING_BATCHES_STATUS_STDOUT: &str = "== Batch 10000 Status == +> No batch found. 🚫 +== Batch 10001 Status == +> No batch found. 🚫 +"; + +const COMPLETE_BATCH_STATUS_STDOUT: &str = "== Batch 0 Status == +> Proof sent to server ✅ +"; + +#[test] +#[doc = "prover_cli status"] +fn pli_status_empty_fails() { + Command::cargo_bin("prover_cli") + .unwrap() + .arg("status") + .assert() + .failure(); +} + +#[test] +#[doc = "prover_cli status --help"] +fn pli_status_help_succeeds() { + Command::cargo_bin("prover_cli") + .unwrap() + .arg("status") + .arg("help") + .assert() + .success(); +} + +#[test] +#[doc = "prover_cli status batch"] +fn pli_status_batch_empty_fails() { + Command::cargo_bin("prover_cli") + .unwrap() + .arg("status") + .arg("batch") + .assert() + .failure(); +} + +#[test] +#[doc = "prover_cli status batch --help"] +fn pli_status_batch_help_succeeds() { + Command::cargo_bin("prover_cli") + .unwrap() + .arg("status") + .arg("batch") + .arg("--help") + .assert() + .success(); +} + +#[tokio::test] +#[doc = "prover_cli status batch -n 10000"] +async fn pli_status_of_non_existing_batch_succeeds() { + let connection_pool = ConnectionPool::::prover_test_pool().await; + let mut connection = connection_pool.connection().await.unwrap(); + + connection + .fri_protocol_versions_dal() + .save_prover_protocol_version( + ProtocolSemanticVersion::default(), + L1VerifierConfig::default(), + ) + .await; + + Command::cargo_bin("prover_cli") + .unwrap() + .arg(connection_pool.database_url().expose_str()) + .arg("status") + .arg("batch") + .args(["-n", "10000"]) + .assert() + .success() + .stdout(NON_EXISTING_BATCH_STATUS_STDOUT); +} + +#[tokio::test] +#[doc = "prover_cli status batch -n 10000 10001"] +async fn pli_status_of_multiple_non_existing_batch_succeeds() { + let connection_pool = ConnectionPool::::prover_test_pool().await; + let mut connection = connection_pool.connection().await.unwrap(); + + connection + .fri_protocol_versions_dal() + .save_prover_protocol_version( + 
ProtocolSemanticVersion::default(), + L1VerifierConfig::default(), + ) + .await; + + Command::cargo_bin("prover_cli") + .unwrap() + .arg(connection_pool.database_url().expose_str()) + .arg("status") + .arg("batch") + .args(["-n", "10000", "10001"]) + .assert() + .success() + .stdout(MULTIPLE_NON_EXISTING_BATCHES_STATUS_STDOUT); +} + +fn status_batch_0_expects(db_url: &str, expected_output: String) { + Command::cargo_bin("prover_cli") + .unwrap() + .arg(db_url) + .arg("status") + .arg("batch") + .args(["-n", "0"]) + .assert() + .success() + .stdout(expected_output); +} + +fn status_verbose_batch_0_expects(db_url: &str, expected_output: String) { + Command::cargo_bin("prover_cli") + .unwrap() + .arg(db_url) + .arg("status") + .arg("batch") + .args(["-n", "0", "--verbose"]) + .assert() + .success() + .stdout(expected_output); +} + +async fn insert_prover_job( + status: ProverJobStatus, + circuit_id: BaseLayerCircuitType, + aggregation_round: AggregationRound, + batch_number: L1BatchNumber, + sequence_number: usize, + connection: &mut Connection<'_, Prover>, +) { + connection + .fri_prover_jobs_dal() + .insert_prover_job( + batch_number, + circuit_id as u8, + 0, + sequence_number, + aggregation_round, + "", + false, + ProtocolSemanticVersion::default(), + ) + .await; + connection + .cli_test_dal() + .update_prover_job( + status, + circuit_id as u8, + aggregation_round as i64, + batch_number, + sequence_number, + ) + .await; +} + +async fn update_attempts_prover_job( + status: ProverJobStatus, + attempts: u8, + circuit_id: BaseLayerCircuitType, + aggregation_round: AggregationRound, + batch_number: L1BatchNumber, + sequence_number: usize, + connection: &mut Connection<'_, Prover>, +) { + connection + .cli_test_dal() + .update_attempts_prover_job( + status, + attempts, + circuit_id as u8, + aggregation_round as i64, + batch_number, + sequence_number, + ) + .await; +} + +async fn update_attempts_lwg( + status: ProverJobStatus, + attempts: u8, + circuit_id: BaseLayerCircuitType, + batch_number: L1BatchNumber, + connection: &mut Connection<'_, Prover>, +) { + connection + .cli_test_dal() + .update_attempts_lwg(status, attempts, circuit_id as u8, batch_number) + .await; +} + +async fn insert_bwg_job( + status: FriWitnessJobStatus, + batch_number: L1BatchNumber, + connection: &mut Connection<'_, Prover>, +) { + connection + .fri_witness_generator_dal() + .save_witness_inputs(batch_number, "", ProtocolSemanticVersion::default()) + .await; + connection + .fri_witness_generator_dal() + .mark_witness_job(status, batch_number) + .await; +} + +async fn insert_lwg_job( + status: WitnessJobStatus, + batch_number: L1BatchNumber, + circuit_id: BaseLayerCircuitType, + connection: &mut Connection<'_, Prover>, +) { + connection + .cli_test_dal() + .insert_lwg_job(status, batch_number, circuit_id as u8) + .await; +} + +async fn insert_nwg_job( + status: WitnessJobStatus, + batch_number: L1BatchNumber, + circuit_id: BaseLayerCircuitType, + connection: &mut Connection<'_, Prover>, +) { + connection + .cli_test_dal() + .insert_nwg_job(status, batch_number, circuit_id as u8) + .await; +} + +async fn insert_rt_job( + status: WitnessJobStatus, + batch_number: L1BatchNumber, + connection: &mut Connection<'_, Prover>, +) { + connection + .cli_test_dal() + .insert_rt_job(status, batch_number) + .await; +} + +async fn insert_scheduler_job( + status: WitnessJobStatus, + batch_number: L1BatchNumber, + connection: &mut Connection<'_, Prover>, +) { + connection + .cli_test_dal() + .insert_scheduler_job(status, batch_number) + 
.await; +} + +async fn insert_compressor_job( + status: ProofCompressionJobStatus, + batch_number: L1BatchNumber, + connection: &mut Connection<'_, Prover>, +) { + connection + .cli_test_dal() + .insert_compressor_job(status, batch_number) + .await; +} + +#[derive(Default)] +struct Scenario { + bwg_status: Option, + agg_0_prover_jobs_status: Option>, + lwg_status: Option>, + agg_1_prover_jobs_status: Option>, + nwg_status: Option>, + agg_2_prover_jobs_status: Option>, + rt_status: Option, + scheduler_status: Option, + compressor_status: Option, + batch_number: L1BatchNumber, +} + +impl Scenario { + fn new(batch_number: L1BatchNumber) -> Scenario { + Scenario { + batch_number, + ..Default::default() + } + } + fn add_bwg(mut self, status: FriWitnessJobStatus) -> Self { + self.bwg_status = Some(status); + self + } + + fn add_agg_0_prover_job( + mut self, + job_status: ProverJobStatus, + circuit_type: BaseLayerCircuitType, + sequence_number: usize, + ) -> Self { + if let Some(ref mut vec) = self.agg_0_prover_jobs_status { + vec.push((job_status, circuit_type, sequence_number)); + } else { + self.agg_0_prover_jobs_status = Some(vec![(job_status, circuit_type, sequence_number)]); + } + self + } + + fn add_lwg(mut self, job_status: WitnessJobStatus, circuit_type: BaseLayerCircuitType) -> Self { + if let Some(ref mut vec) = self.lwg_status { + vec.push((job_status, circuit_type)); + } else { + self.lwg_status = Some(vec![(job_status, circuit_type)]); + } + self + } + + fn add_agg_1_prover_job( + mut self, + job_status: ProverJobStatus, + circuit_type: BaseLayerCircuitType, + sequence_number: usize, + ) -> Self { + if let Some(ref mut vec) = self.agg_1_prover_jobs_status { + vec.push((job_status, circuit_type, sequence_number)); + } else { + self.agg_1_prover_jobs_status = Some(vec![(job_status, circuit_type, sequence_number)]); + } + self + } + + fn add_nwg(mut self, job_status: WitnessJobStatus, circuit_type: BaseLayerCircuitType) -> Self { + if let Some(ref mut vec) = self.nwg_status { + vec.push((job_status, circuit_type)); + } else { + self.nwg_status = Some(vec![(job_status, circuit_type)]); + } + self + } + + fn add_agg_2_prover_job( + mut self, + job_status: ProverJobStatus, + circuit_type: BaseLayerCircuitType, + sequence_number: usize, + ) -> Self { + if let Some(ref mut vec) = self.agg_2_prover_jobs_status { + vec.push((job_status, circuit_type, sequence_number)); + } else { + self.agg_2_prover_jobs_status = Some(vec![(job_status, circuit_type, sequence_number)]); + } + self + } + + fn add_rt(mut self, status: WitnessJobStatus) -> Self { + self.rt_status = Some(status); + self + } + + fn add_scheduler(mut self, status: WitnessJobStatus) -> Self { + self.scheduler_status = Some(status); + self + } + + fn add_compressor(mut self, status: ProofCompressionJobStatus) -> Self { + self.compressor_status = Some(status); + self + } +} + +#[allow(clippy::too_many_arguments)] +async fn load_scenario(scenario: Scenario, connection: &mut Connection<'_, Prover>) { + if let Some(status) = scenario.bwg_status { + insert_bwg_job(status, scenario.batch_number, connection).await; + } + if let Some(jobs) = scenario.agg_0_prover_jobs_status { + for (status, circuit_id, sequence_number) in jobs.into_iter() { + insert_prover_job( + status, + circuit_id, + AggregationRound::BasicCircuits, + scenario.batch_number, + sequence_number, + connection, + ) + .await; + } + } + if let Some(jobs) = scenario.lwg_status { + for (status, circuit_id) in jobs.into_iter() { + insert_lwg_job(status, scenario.batch_number, 
circuit_id, connection).await;
+ }
+ }
+ if let Some(jobs) = scenario.agg_1_prover_jobs_status {
+ for (status, circuit_id, sequence_number) in jobs.into_iter() {
+ insert_prover_job(
+ status,
+ circuit_id,
+ AggregationRound::LeafAggregation,
+ scenario.batch_number,
+ sequence_number,
+ connection,
+ )
+ .await;
+ }
+ }
+ if let Some(jobs) = scenario.nwg_status {
+ for (status, circuit_id) in jobs.into_iter() {
+ insert_nwg_job(status, scenario.batch_number, circuit_id, connection).await;
+ }
+ }
+ if let Some(jobs) = scenario.agg_2_prover_jobs_status {
+ for (status, circuit_id, sequence_number) in jobs.into_iter() {
+ insert_prover_job(
+ status,
+ circuit_id,
+ AggregationRound::NodeAggregation,
+ scenario.batch_number,
+ sequence_number,
+ connection,
+ )
+ .await;
+ }
+ }
+ if let Some(status) = scenario.rt_status {
+ insert_rt_job(status, scenario.batch_number, connection).await;
+ }
+ if let Some(status) = scenario.scheduler_status {
+ insert_scheduler_job(status, scenario.batch_number, connection).await;
+ }
+ if let Some(status) = scenario.compressor_status {
+ insert_compressor_job(status, scenario.batch_number, connection).await;
+ }
+}
+
+#[allow(clippy::too_many_arguments)]
+fn scenario_expected_stdout(
+ bwg_status: Status,
+ agg_0_prover_jobs_status: Option<Status>,
+ lwg_status: Status,
+ agg_1_prover_jobs_status: Option<Status>,
+ nwg_status: Status,
+ agg_2_prover_jobs_status: Option<Status>,
+ rt_status: Status,
+ scheduler_status: Status,
+ compressor_status: Status,
+ batch_number: L1BatchNumber,
+) -> String {
+ let agg_0_prover_jobs_status = match agg_0_prover_jobs_status {
+ Some(status) => format!("\n> Prover Jobs: {}", status),
+ None => String::new(),
+ };
+ let agg_1_prover_jobs_status = match agg_1_prover_jobs_status {
+ Some(status) => format!("\n> Prover Jobs: {}", status),
+ None => String::new(),
+ };
+ let agg_2_prover_jobs_status = match agg_2_prover_jobs_status {
+ Some(status) => format!("\n> Prover Jobs: {}", status),
+ None => String::new(),
+ };
+
+ format!(
+ "== Batch {} Status ==
+
+-- Aggregation Round 0 --
+Basic Witness Generator: {}{}
+
+-- Aggregation Round 1 --
+Leaf Witness Generator: {}{}
+
+-- Aggregation Round 2 --
+Node Witness Generator: {}{}
+
+-- Aggregation Round 3 --
+Recursion Tip: {}
+
+-- Aggregation Round 4 --
+Scheduler: {}
+
+-- Proof Compression --
+Compressor: {}
+",
+ batch_number.0,
+ bwg_status,
+ agg_0_prover_jobs_status,
+ lwg_status,
+ agg_1_prover_jobs_status,
+ nwg_status,
+ agg_2_prover_jobs_status,
+ rt_status,
+ scheduler_status,
+ compressor_status
+ )
+}
+
+#[tokio::test]
+async fn pli_status_complete() {
+ let connection_pool = ConnectionPool::<Prover>::prover_test_pool().await;
+ let mut connection = connection_pool.connection().await.unwrap();
+
+ connection
+ .fri_protocol_versions_dal()
+ .save_prover_protocol_version(
+ ProtocolSemanticVersion::default(),
+ L1VerifierConfig::default(),
+ )
+ .await;
+
+ let batch_0 = L1BatchNumber(0);
+
+ // A BWG is created for batch 0.
+ let scenario = Scenario::new(batch_0).add_bwg(FriWitnessJobStatus::Queued);
+
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Queued,
+ None,
+ Status::JobsNotFound,
+ None,
+ Status::JobsNotFound,
+ None,
+ Status::JobsNotFound,
+ Status::JobsNotFound,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // The BWG starts and agg_round 0 prover jobs are created. All WGs are set to waiting for proofs.
+ let scenario = Scenario::new(batch_0)
+ .add_bwg(FriWitnessJobStatus::InProgress)
+ .add_agg_0_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 1)
+ .add_agg_0_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 2)
+ .add_agg_0_prover_job(
+ ProverJobStatus::Queued,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ )
+ .add_lwg(WitnessJobStatus::WaitingForProofs, BaseLayerCircuitType::VM)
+ .add_lwg(
+ WitnessJobStatus::WaitingForProofs,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ )
+ .add_nwg(WitnessJobStatus::WaitingForProofs, BaseLayerCircuitType::VM)
+ .add_nwg(
+ WitnessJobStatus::WaitingForProofs,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ )
+ .add_rt(WitnessJobStatus::WaitingForProofs)
+ .add_scheduler(WitnessJobStatus::WaitingForProofs);
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::InProgress,
+ Some(Status::Queued),
+ Status::WaitingForProofs,
+ None,
+ Status::WaitingForProofs,
+ None,
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // The BWG is done; agg_round 0 prover jobs are in progress.
+ let scenario = Scenario::new(batch_0)
+ .add_bwg(FriWitnessJobStatus::Successful)
+ .add_agg_0_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::VM,
+ 1,
+ )
+ .add_agg_0_prover_job(
+ ProverJobStatus::InProgress(ProverJobStatusInProgress::default()),
+ BaseLayerCircuitType::VM,
+ 2,
+ )
+ .add_agg_0_prover_job(
+ ProverJobStatus::Queued,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ );
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::InProgress),
+ Status::WaitingForProofs,
+ None,
+ Status::WaitingForProofs,
+ None,
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // Agg_round 0: prover jobs done for the VM circuit; LWG set to queued.
+ let scenario = Scenario::new(batch_0)
+ .add_agg_0_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::VM,
+ 2,
+ )
+ .add_agg_0_prover_job(
+ ProverJobStatus::InProgress(ProverJobStatusInProgress::default()),
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ )
+ .add_lwg(WitnessJobStatus::Queued, BaseLayerCircuitType::VM);
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::InProgress),
+ Status::Queued,
+ None,
+ Status::WaitingForProofs,
+ None,
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // Agg_round 0: all prover jobs successful, LWG in progress. Agg_round 1: prover jobs in queue.
+ let scenario = Scenario::new(batch_0)
+ .add_agg_0_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ )
+ .add_lwg(
+ WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()),
+ BaseLayerCircuitType::VM,
+ )
+ .add_lwg(
+ WitnessJobStatus::InProgress,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ )
+ .add_agg_1_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 1)
+ .add_agg_1_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 2);
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::Successful),
+ Status::InProgress,
+ Some(Status::Queued),
+ Status::WaitingForProofs,
+ None,
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // LWG succeeds. Agg_round 1: done for the VM circuit.
+ let scenario = Scenario::new(batch_0)
+ .add_lwg(
+ WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()),
+ BaseLayerCircuitType::DecommitmentsFilter,
+ )
+ .add_agg_1_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::VM,
+ 1,
+ )
+ .add_agg_1_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::VM,
+ 2,
+ )
+ .add_agg_1_prover_job(
+ ProverJobStatus::InProgress(ProverJobStatusInProgress::default()),
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ );
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::InProgress),
+ Status::WaitingForProofs,
+ None,
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // Agg_round 1: all prover jobs successful. NWG queued.
+ let scenario = Scenario::new(batch_0)
+ .add_agg_1_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ )
+ .add_nwg(WitnessJobStatus::Queued, BaseLayerCircuitType::VM)
+ .add_nwg(
+ WitnessJobStatus::Queued,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ );
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Queued,
+ None,
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // NWG successful for the VM circuit, agg_round 2 prover jobs created.
+ let scenario = Scenario::new(batch_0)
+ .add_nwg(
+ WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()),
+ BaseLayerCircuitType::VM,
+ )
+ .add_nwg(
+ WitnessJobStatus::InProgress,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ )
+ .add_agg_2_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 1);
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::Successful),
+ Status::InProgress,
+ Some(Status::Queued),
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // NWG successful, agg_round 2 prover jobs updated.
+ let scenario = Scenario::new(batch_0)
+ .add_nwg(
+ WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()),
+ BaseLayerCircuitType::DecommitmentsFilter,
+ )
+ .add_agg_2_prover_job(
+ ProverJobStatus::InProgress(ProverJobStatusInProgress::default()),
+ BaseLayerCircuitType::VM,
+ 1,
+ )
+ .add_agg_2_prover_job(
+ ProverJobStatus::Queued,
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ );
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::InProgress),
+ Status::WaitingForProofs,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // Agg_round 2 prover jobs successful. RT in progress.
+ let scenario = Scenario::new(batch_0)
+ .add_agg_2_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::VM,
+ 1,
+ )
+ .add_agg_2_prover_job(
+ ProverJobStatus::Successful(ProverJobStatusSuccessful::default()),
+ BaseLayerCircuitType::DecommitmentsFilter,
+ 1,
+ )
+ .add_rt(WitnessJobStatus::InProgress);
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::Successful),
+ Status::InProgress,
+ Status::WaitingForProofs,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // RT successful, Scheduler in progress.
+ let scenario = Scenario::new(batch_0)
+ .add_rt(WitnessJobStatus::Successful(
+ WitnessJobStatusSuccessful::default(),
+ ))
+ .add_scheduler(WitnessJobStatus::InProgress);
+ load_scenario(scenario, &mut connection).await;
+
+ status_batch_0_expects(
+ connection_pool.database_url().expose_str(),
+ scenario_expected_stdout(
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Some(Status::Successful),
+ Status::Successful,
+ Status::InProgress,
+ Status::JobsNotFound,
+ batch_0,
+ ),
+ );
+
+ // Scheduler successful, Compressor in progress.
+ let scenario = Scenario::new(batch_0) + .add_scheduler(WitnessJobStatus::Successful( + WitnessJobStatusSuccessful::default(), + )) + .add_compressor(ProofCompressionJobStatus::InProgress); + load_scenario(scenario, &mut connection).await; + + status_batch_0_expects( + connection_pool.database_url().expose_str(), + scenario_expected_stdout( + Status::Successful, + Some(Status::Successful), + Status::Successful, + Some(Status::Successful), + Status::Successful, + Some(Status::Successful), + Status::Successful, + Status::Successful, + Status::InProgress, + batch_0, + ), + ); + + // Compressor Done. + let scenario = Scenario::new(batch_0).add_compressor(ProofCompressionJobStatus::SentToServer); + load_scenario(scenario, &mut connection).await; + + status_batch_0_expects( + connection_pool.database_url().expose_str(), + COMPLETE_BATCH_STATUS_STDOUT.into(), + ); +} + +#[tokio::test] +async fn pli_status_complete_verbose() { + let connection_pool = ConnectionPool::::prover_test_pool().await; + let mut connection = connection_pool.connection().await.unwrap(); + + connection + .fri_protocol_versions_dal() + .save_prover_protocol_version( + ProtocolSemanticVersion::default(), + L1VerifierConfig::default(), + ) + .await; + + let batch_0 = L1BatchNumber(0); + + let scenario = Scenario::new(batch_0) + .add_bwg(FriWitnessJobStatus::Successful) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 1, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 2, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 3, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 1, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 2, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 3, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + 1, + ) + .add_agg_0_prover_job( + ProverJobStatus::InProgress(ProverJobStatusInProgress::default()), + BaseLayerCircuitType::Decommiter, + 2, + ) + .add_agg_0_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::Decommiter, 3) + .add_agg_0_prover_job( + ProverJobStatus::Queued, + BaseLayerCircuitType::LogDemultiplexer, + 1, + ) + .add_agg_0_prover_job( + ProverJobStatus::Queued, + BaseLayerCircuitType::LogDemultiplexer, + 2, + ) + .add_agg_0_prover_job( + ProverJobStatus::Queued, + BaseLayerCircuitType::LogDemultiplexer, + 3, + ) + .add_lwg(WitnessJobStatus::WaitingForProofs, BaseLayerCircuitType::VM) + .add_lwg( + WitnessJobStatus::WaitingForProofs, + BaseLayerCircuitType::DecommitmentsFilter, + ) + .add_lwg( + WitnessJobStatus::WaitingForProofs, + BaseLayerCircuitType::Decommiter, + ) + .add_lwg( + WitnessJobStatus::WaitingForProofs, + BaseLayerCircuitType::LogDemultiplexer, + ) + .add_nwg(WitnessJobStatus::WaitingForProofs, BaseLayerCircuitType::VM) + .add_nwg( + WitnessJobStatus::WaitingForProofs, + BaseLayerCircuitType::DecommitmentsFilter, + ) + .add_nwg( + WitnessJobStatus::WaitingForProofs, + BaseLayerCircuitType::Decommiter, + ) + .add_nwg( + WitnessJobStatus::WaitingForProofs, + 
BaseLayerCircuitType::LogDemultiplexer, + ) + .add_rt(WitnessJobStatus::WaitingForProofs) + .add_scheduler(WitnessJobStatus::WaitingForProofs); + load_scenario(scenario, &mut connection).await; + + status_verbose_batch_0_expects( + connection_pool.database_url().expose_str(), + "== Batch 0 Status == + +-- Aggregation Round 0 -- +> Basic Witness Generator: Successful ✅ +v Prover Jobs: In Progress ⌛️ + > VM: Successful ✅ + > DecommitmentsFilter: Successful ✅ + > Decommiter: In Progress ⌛️ + - Total jobs: 3 + - Successful: 1 + - In Progress: 1 + - Queued: 1 + - Failed: 0 + > LogDemultiplexer: Queued 📥 + +-- Aggregation Round 1 -- + > Leaf Witness Generator: Waiting for Proof ⏱️ + +-- Aggregation Round 2 -- + > Node Witness Generator: Waiting for Proof ⏱️ + +-- Aggregation Round 3 -- + > Recursion Tip: Waiting for Proof ⏱️ + +-- Aggregation Round 4 -- + > Scheduler: Waiting for Proof ⏱️ + +-- Proof Compression -- + > Compressor: Jobs not found 🚫 +" + .into(), + ); + + let scenario = Scenario::new(batch_0) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + 2, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + 3, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::LogDemultiplexer, + 1, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::LogDemultiplexer, + 2, + ) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::LogDemultiplexer, + 3, + ) + .add_lwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + ) + .add_lwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + ) + .add_lwg( + WitnessJobStatus::InProgress, + BaseLayerCircuitType::Decommiter, + ) + .add_lwg( + WitnessJobStatus::Queued, + BaseLayerCircuitType::LogDemultiplexer, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 1, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 2, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 3, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 4, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 1, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 2, + ) + .add_agg_1_prover_job( + ProverJobStatus::InProgress(ProverJobStatusInProgress::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 3, + ) + .add_agg_1_prover_job( + ProverJobStatus::InProgress(ProverJobStatusInProgress::default()), + BaseLayerCircuitType::Decommiter, + 1, + ) + .add_agg_1_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::Decommiter, 2) + .add_agg_1_prover_job( + ProverJobStatus::InProgress(ProverJobStatusInProgress::default()), + BaseLayerCircuitType::Decommiter, + 3, + ) + .add_nwg(WitnessJobStatus::Queued, BaseLayerCircuitType::VM); + 
load_scenario(scenario, &mut connection).await; + + status_verbose_batch_0_expects( + connection_pool.database_url().expose_str(), + "== Batch 0 Status == + +-- Aggregation Round 0 -- +> Basic Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 1 -- +v Leaf Witness Generator: In Progress ⌛️ + > VM: Successful ✅ + > DecommitmentsFilter: Successful ✅ + > Decommiter: In Progress ⌛️ + > LogDemultiplexer: Queued 📥 +v Prover Jobs: In Progress ⌛️ + > VM: Successful ✅ + > DecommitmentsFilter: In Progress ⌛️ + - Total jobs: 3 + - Successful: 2 + - In Progress: 1 + - Queued: 0 + - Failed: 0 + > Decommiter: In Progress ⌛️ + - Total jobs: 3 + - Successful: 0 + - In Progress: 2 + - Queued: 1 + - Failed: 0 + +-- Aggregation Round 2 -- + > Node Witness Generator: Queued 📥 + +-- Aggregation Round 3 -- + > Recursion Tip: Waiting for Proof ⏱️ + +-- Aggregation Round 4 -- + > Scheduler: Waiting for Proof ⏱️ + +-- Proof Compression -- + > Compressor: Jobs not found 🚫 +" + .into(), + ); + + let scenario = Scenario::new(batch_0) + .add_lwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + ) + .add_lwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::LogDemultiplexer, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 3, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + 1, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + 2, + ) + .add_agg_1_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + 3, + ) + .add_nwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + ) + .add_nwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + ) + .add_nwg( + WitnessJobStatus::InProgress, + BaseLayerCircuitType::Decommiter, + ) + .add_nwg( + WitnessJobStatus::Queued, + BaseLayerCircuitType::LogDemultiplexer, + ) + .add_agg_2_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 1, + ) + .add_agg_2_prover_job( + ProverJobStatus::InProgress(ProverJobStatusInProgress::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 1, + ); + load_scenario(scenario, &mut connection).await; + + status_verbose_batch_0_expects( + connection_pool.database_url().expose_str(), + "== Batch 0 Status == + +-- Aggregation Round 0 -- +> Basic Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 1 -- +> Leaf Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 2 -- +v Node Witness Generator: In Progress ⌛️ + > VM: Successful ✅ + > DecommitmentsFilter: Successful ✅ + > Decommiter: In Progress ⌛️ + > LogDemultiplexer: Queued 📥 +v Prover Jobs: In Progress ⌛️ + > VM: Successful ✅ + > DecommitmentsFilter: In Progress ⌛️ + - Total jobs: 1 + - Successful: 0 + - In Progress: 1 + - Queued: 0 + - Failed: 0 + +-- Aggregation Round 3 -- + > Recursion Tip: Waiting for Proof ⏱️ + +-- Aggregation Round 4 -- + > Scheduler: Waiting for Proof ⏱️ + +-- Proof Compression -- + > Compressor: Jobs not found 🚫 +" + .into(), + ); + + let scenario = Scenario::new(batch_0) + 
.add_nwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::Decommiter, + ) + .add_nwg( + WitnessJobStatus::Successful(WitnessJobStatusSuccessful::default()), + BaseLayerCircuitType::LogDemultiplexer, + ) + .add_agg_2_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::DecommitmentsFilter, + 1, + ) + .add_rt(WitnessJobStatus::InProgress); + load_scenario(scenario, &mut connection).await; + + status_verbose_batch_0_expects( + connection_pool.database_url().expose_str(), + "== Batch 0 Status == + +-- Aggregation Round 0 -- +> Basic Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 1 -- +> Leaf Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 2 -- +> Node Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 3 -- +v Recursion Tip: In Progress ⌛️ + +-- Aggregation Round 4 -- + > Scheduler: Waiting for Proof ⏱️ + +-- Proof Compression -- + > Compressor: Jobs not found 🚫 +" + .into(), + ); + + let scenario = Scenario::new(batch_0) + .add_rt(WitnessJobStatus::Successful( + WitnessJobStatusSuccessful::default(), + )) + .add_scheduler(WitnessJobStatus::InProgress); + load_scenario(scenario, &mut connection).await; + + status_verbose_batch_0_expects( + connection_pool.database_url().expose_str(), + "== Batch 0 Status == + +-- Aggregation Round 0 -- +> Basic Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 1 -- +> Leaf Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 2 -- +> Node Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 3 -- +> Recursion Tip: Successful ✅ + +-- Aggregation Round 4 -- +v Scheduler: In Progress ⌛️ + +-- Proof Compression -- + > Compressor: Jobs not found 🚫 +" + .into(), + ); + + let scenario = Scenario::new(batch_0) + .add_scheduler(WitnessJobStatus::Successful( + WitnessJobStatusSuccessful::default(), + )) + .add_compressor(ProofCompressionJobStatus::SentToServer); + load_scenario(scenario, &mut connection).await; + + status_batch_0_expects( + connection_pool.database_url().expose_str(), + COMPLETE_BATCH_STATUS_STDOUT.into(), + ); +} + +#[tokio::test] +async fn pli_status_stuck_job() { + let connection_pool = ConnectionPool::::prover_test_pool().await; + let mut connection = connection_pool.connection().await.unwrap(); + + connection + .fri_protocol_versions_dal() + .save_prover_protocol_version( + ProtocolSemanticVersion::default(), + L1VerifierConfig::default(), + ) + .await; + + let batch_0 = L1BatchNumber(0); + + let scenario = Scenario::new(batch_0) + .add_bwg(FriWitnessJobStatus::Successful) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 1, + ) + .add_agg_0_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 2) + .add_lwg(WitnessJobStatus::WaitingForProofs, BaseLayerCircuitType::VM) + .add_nwg(WitnessJobStatus::WaitingForProofs, BaseLayerCircuitType::VM) + .add_rt(WitnessJobStatus::WaitingForProofs) + .add_scheduler(WitnessJobStatus::WaitingForProofs); + load_scenario(scenario, &mut connection).await; + + update_attempts_prover_job( + ProverJobStatus::Failed(ProverJobStatusFailed::default()), + 10, + BaseLayerCircuitType::VM, + AggregationRound::BasicCircuits, + batch_0, + 2, + &mut connection, + ) + .await; + + status_verbose_batch_0_expects( + 
connection_pool.database_url().expose_str(), + "== Batch 0 Status == + +-- Aggregation Round 0 -- +> Basic Witness Generator: Successful ✅ +v Prover Jobs: Stuck ⛔️ + > VM: Stuck ⛔️ + - Prover Job: 2 stuck after 10 attempts + +-- Aggregation Round 1 -- + > Leaf Witness Generator: Waiting for Proof ⏱️ + +-- Aggregation Round 2 -- + > Node Witness Generator: Waiting for Proof ⏱️ + +-- Aggregation Round 3 -- + > Recursion Tip: Waiting for Proof ⏱️ + +-- Aggregation Round 4 -- + > Scheduler: Waiting for Proof ⏱️ + +-- Proof Compression -- + > Compressor: Jobs not found 🚫 +" + .into(), + ); + + let scenario = Scenario::new(batch_0) + .add_agg_0_prover_job( + ProverJobStatus::Successful(ProverJobStatusSuccessful::default()), + BaseLayerCircuitType::VM, + 2, + ) + .add_lwg(WitnessJobStatus::InProgress, BaseLayerCircuitType::VM) + .add_agg_1_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 1) + .add_agg_1_prover_job(ProverJobStatus::Queued, BaseLayerCircuitType::VM, 2); + load_scenario(scenario, &mut connection).await; + + update_attempts_lwg( + ProverJobStatus::Failed(ProverJobStatusFailed::default()), + 10, + BaseLayerCircuitType::VM, + batch_0, + &mut connection, + ) + .await; + + status_verbose_batch_0_expects( + connection_pool.database_url().expose_str(), + "== Batch 0 Status == + +-- Aggregation Round 0 -- +> Basic Witness Generator: Successful ✅ +> Prover Jobs: Successful ✅ + +-- Aggregation Round 1 -- +v Leaf Witness Generator: Stuck ⛔️ + > VM: Stuck ⛔️ +v Prover Jobs: Queued 📥 + > VM: Queued 📥 + +-- Aggregation Round 2 -- + > Node Witness Generator: Waiting for Proof ⏱️ + +-- Aggregation Round 3 -- + > Recursion Tip: Waiting for Proof ⏱️ + +-- Aggregation Round 4 -- + > Scheduler: Waiting for Proof ⏱️ + +-- Proof Compression -- + > Compressor: Jobs not found 🚫 +" + .into(), + ); +} diff --git a/prover/crates/bin/prover_cli/tests/cli.rs b/prover/crates/bin/prover_cli/tests/cli.rs new file mode 100644 index 00000000000..4a68491f09b --- /dev/null +++ b/prover/crates/bin/prover_cli/tests/cli.rs @@ -0,0 +1,42 @@ +use assert_cmd::Command; +use zksync_dal::ConnectionPool; +use zksync_prover_dal::{Prover, ProverDal}; +use zksync_types::protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}; + +#[test] +#[doc = "prover_cli"] +fn pli_empty_fails() { + Command::cargo_bin("prover_cli").unwrap().assert().failure(); +} + +#[test] +#[doc = "prover_cli"] +fn pli_help_succeeds() { + Command::cargo_bin("prover_cli") + .unwrap() + .arg("help") + .assert() + .success(); +} + +#[tokio::test] +#[doc = "prover_cli config"] +async fn pli_config_succeeds() { + let connection_pool = ConnectionPool::::prover_test_pool().await; + let mut connection = connection_pool.connection().await.unwrap(); + + connection + .fri_protocol_versions_dal() + .save_prover_protocol_version( + ProtocolSemanticVersion::default(), + L1VerifierConfig::default(), + ) + .await; + + Command::cargo_bin("prover_cli") + .unwrap() + .arg("config") + .arg(connection_pool.database_url().expose_str()) + .assert() + .success(); +} diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index 04146473f64..4407dbcd852 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -154,6 +154,7 @@ pub mod gpu_prover { recursion_layer_proof_config(), circuit.numeric_circuit_type(), ), + CircuitWrapper::BasePartial(_) => panic!("Invalid CircuitWrapper received"), }; let 
started_at = Instant::now(); @@ -196,6 +197,7 @@ pub mod gpu_prover { CircuitWrapper::Recursive(_) => FriProofWrapper::Recursive( ZkSyncRecursionLayerProof::from_inner(circuit_id, proof), ), + CircuitWrapper::BasePartial(_) => panic!("Received partial base circuit"), }; ProverArtifacts::new(prover_job.block_number, proof_wrapper) } diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index f06f1bbab93..09c9d38348f 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -109,6 +109,7 @@ impl Prover { CircuitWrapper::Recursive(recursive_circuit) => { Self::prove_recursive_layer(job.job_id, recursive_circuit, config, setup_data) } + CircuitWrapper::BasePartial(_) => panic!("Received partial base circuit"), }; ProverArtifacts::new(job.block_number, proof) } diff --git a/prover/crates/bin/prover_fri/src/utils.rs b/prover/crates/bin/prover_fri/src/utils.rs index 15a2a6c18bb..2941c15439a 100644 --- a/prover/crates/bin/prover_fri/src/utils.rs +++ b/prover/crates/bin/prover_fri/src/utils.rs @@ -128,6 +128,7 @@ pub fn verify_proof( verify_recursion_layer_proof::(recursive_circuit, proof, vk), recursive_circuit.numeric_circuit_type(), ), + CircuitWrapper::BasePartial(_) => panic!("Invalid CircuitWrapper received"), }; METRICS.proof_verification_time[&circuit_id.to_string()].observe(started_at.elapsed()); diff --git a/prover/crates/bin/prover_job_monitor/Cargo.toml b/prover/crates/bin/prover_job_monitor/Cargo.toml new file mode 100644 index 00000000000..160d3a603e3 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "zksync_prover_job_monitor" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_core_leftovers.workspace = true +zksync_vlog.workspace = true +zksync_prover_dal.workspace = true +zksync_utils.workspace = true +zksync_types.workspace = true +zksync_config = { workspace = true, features = ["observability_ext"] } + +vise.workspace = true + +tokio = { workspace = true, features = ["time", "macros"] } +anyhow.workspace = true +clap = { workspace = true, features = ["derive"] } +ctrlc = { workspace = true, features = ["termination"] } +tracing.workspace = true +async-trait.workspace = true diff --git a/prover/crates/bin/prover_job_monitor/src/archiver/gpu_prover_archiver.rs b/prover/crates/bin/prover_job_monitor/src/archiver/gpu_prover_archiver.rs new file mode 100644 index 00000000000..cebec06218d --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/archiver/gpu_prover_archiver.rs @@ -0,0 +1,39 @@ +use std::time::Duration; + +use zksync_prover_dal::{Connection, Prover, ProverDal}; + +use crate::{metrics::PROVER_JOB_MONITOR_METRICS, task_wiring::Task}; + +/// `GpuProverArchiver` is a task that archives old fri GPU provers. +/// The task will archive the `dead` prover records that have not been updated for a certain amount of time. +/// Note: This component speeds up provers, in their absence, queries would slow down due to state growth. 
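+/// Archival is delegated to `fri_gpu_prover_queue_dal().archive_old_provers(archive_prover_after)`;
+/// the number of archived provers is reported through the `archived_gpu_provers` counter.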
+#[derive(Debug)] +pub struct GpuProverArchiver { + /// duration after which a prover can be archived + archive_prover_after: Duration, +} + +impl GpuProverArchiver { + pub fn new(archive_prover_after: Duration) -> Self { + Self { + archive_prover_after, + } + } +} + +#[async_trait::async_trait] +impl Task for GpuProverArchiver { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + let archived_provers = connection + .fri_gpu_prover_queue_dal() + .archive_old_provers(self.archive_prover_after) + .await; + if archived_provers > 0 { + tracing::info!("Archived {:?} gpu provers", archived_provers); + } + PROVER_JOB_MONITOR_METRICS + .archived_gpu_provers + .inc_by(archived_provers as u64); + Ok(()) + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/archiver/mod.rs b/prover/crates/bin/prover_job_monitor/src/archiver/mod.rs new file mode 100644 index 00000000000..7e33e216596 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/archiver/mod.rs @@ -0,0 +1,5 @@ +pub use gpu_prover_archiver::GpuProverArchiver; +pub use prover_jobs_archiver::ProverJobsArchiver; + +mod gpu_prover_archiver; +mod prover_jobs_archiver; diff --git a/prover/crates/bin/prover_job_monitor/src/archiver/prover_jobs_archiver.rs b/prover/crates/bin/prover_job_monitor/src/archiver/prover_jobs_archiver.rs new file mode 100644 index 00000000000..41e6d6cf4e4 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/archiver/prover_jobs_archiver.rs @@ -0,0 +1,37 @@ +use std::time::Duration; + +use zksync_prover_dal::{Connection, Prover, ProverDal}; + +use crate::{metrics::PROVER_JOB_MONITOR_METRICS, task_wiring::Task}; + +/// `ProverJobsArchiver` is a task that archives old finalized prover job. +/// The task will archive the `successful` prover jobs that have been done for a certain amount of time. +/// Note: This component speeds up provers, in their absence, queries would slow down due to state growth. 
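+/// Archival is delegated to `fri_prover_jobs_dal().archive_old_jobs(archive_jobs_after)`;
+/// the number of archived jobs is reported through the `archived_prover_jobs` counter.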
+#[derive(Debug)] +pub struct ProverJobsArchiver { + /// duration after which a prover job can be archived + archive_jobs_after: Duration, +} + +impl ProverJobsArchiver { + pub fn new(archive_jobs_after: Duration) -> Self { + Self { archive_jobs_after } + } +} + +#[async_trait::async_trait] +impl Task for ProverJobsArchiver { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + let archived_jobs = connection + .fri_prover_jobs_dal() + .archive_old_jobs(self.archive_jobs_after) + .await; + if archived_jobs > 0 { + tracing::info!("Archived {:?} prover jobs", archived_jobs); + } + PROVER_JOB_MONITOR_METRICS + .archived_prover_jobs + .inc_by(archived_jobs as u64); + Ok(()) + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/job_requeuer/mod.rs b/prover/crates/bin/prover_job_monitor/src/job_requeuer/mod.rs new file mode 100644 index 00000000000..5130849b7fe --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/job_requeuer/mod.rs @@ -0,0 +1,7 @@ +pub use proof_compressor_job_requeuer::ProofCompressorJobRequeuer; +pub use prover_job_requeuer::ProverJobRequeuer; +pub use witness_generator_job_requeuer::WitnessGeneratorJobRequeuer; + +mod proof_compressor_job_requeuer; +mod prover_job_requeuer; +mod witness_generator_job_requeuer; diff --git a/prover/crates/bin/prover_job_monitor/src/job_requeuer/proof_compressor_job_requeuer.rs b/prover/crates/bin/prover_job_monitor/src/job_requeuer/proof_compressor_job_requeuer.rs new file mode 100644 index 00000000000..baeba3ce369 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/job_requeuer/proof_compressor_job_requeuer.rs @@ -0,0 +1,42 @@ +use std::time::Duration; + +use async_trait::async_trait; +use zksync_prover_dal::{Connection, Prover, ProverDal}; + +use crate::{metrics::PROVER_JOB_MONITOR_METRICS, task_wiring::Task}; + +/// `ProofCompressorJobRequeuer` is a task that requeues compressor jobs that have not made progress in a given unit of time. +#[derive(Debug)] +pub struct ProofCompressorJobRequeuer { + /// max attempts before giving up on the job + max_attempts: u32, + /// the amount of time that must have passed before a job is considered to have not made progress + processing_timeout: Duration, +} + +impl ProofCompressorJobRequeuer { + pub fn new(max_attempts: u32, processing_timeout: Duration) -> Self { + Self { + max_attempts, + processing_timeout, + } + } +} + +#[async_trait] +impl Task for ProofCompressorJobRequeuer { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + let stuck_jobs = connection + .fri_proof_compressor_dal() + .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) + .await; + let job_len = stuck_jobs.len(); + for stuck_job in stuck_jobs { + tracing::info!("requeued proof compressor job {:?}", stuck_job); + } + PROVER_JOB_MONITOR_METRICS + .requeued_proof_compressor_jobs + .inc_by(job_len as u64); + Ok(()) + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/job_requeuer/prover_job_requeuer.rs b/prover/crates/bin/prover_job_monitor/src/job_requeuer/prover_job_requeuer.rs new file mode 100644 index 00000000000..7f5e97203d6 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/job_requeuer/prover_job_requeuer.rs @@ -0,0 +1,42 @@ +use std::time::Duration; + +use async_trait::async_trait; +use zksync_prover_dal::{Connection, Prover, ProverDal}; + +use crate::{metrics::PROVER_JOB_MONITOR_METRICS, task_wiring::Task}; + +/// `ProverJobRequeuer` is a task that requeues prover jobs that have not made progress in a given unit of time. 
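+/// Requeueing is delegated to `fri_prover_jobs_dal().requeue_stuck_jobs(processing_timeout, max_attempts)`;
+/// each requeued job is logged and counted in the `requeued_circuit_prover_jobs` metric.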
+#[derive(Debug)]
+pub struct ProverJobRequeuer {
+ /// max attempts before giving up on the job
+ max_attempts: u32,
+ /// the amount of time that must have passed before a job is considered to have not made progress
+ processing_timeout: Duration,
+}
+
+impl ProverJobRequeuer {
+ pub fn new(max_attempts: u32, processing_timeout: Duration) -> Self {
+ Self {
+ max_attempts,
+ processing_timeout,
+ }
+ }
+}
+
+#[async_trait]
+impl Task for ProverJobRequeuer {
+ async fn invoke(&self, connection: &mut Connection<'_, Prover>) -> anyhow::Result<()> {
+ let stuck_jobs = connection
+ .fri_prover_jobs_dal()
+ .requeue_stuck_jobs(self.processing_timeout, self.max_attempts)
+ .await;
+ let job_len = stuck_jobs.len();
+ for stuck_job in stuck_jobs {
+ tracing::info!("requeued circuit prover job {:?}", stuck_job);
+ }
+ PROVER_JOB_MONITOR_METRICS
+ .requeued_circuit_prover_jobs
+ .inc_by(job_len as u64);
+ Ok(())
+ }
+}
diff --git a/prover/crates/bin/prover_job_monitor/src/job_requeuer/witness_generator_job_requeuer.rs b/prover/crates/bin/prover_job_monitor/src/job_requeuer/witness_generator_job_requeuer.rs
new file mode 100644
index 00000000000..e7d89f7d25d
--- /dev/null
+++ b/prover/crates/bin/prover_job_monitor/src/job_requeuer/witness_generator_job_requeuer.rs
@@ -0,0 +1,90 @@
+use async_trait::async_trait;
+use zksync_config::configs::fri_witness_generator::WitnessGenerationTimeouts;
+use zksync_prover_dal::{Connection, Prover, ProverDal};
+use zksync_types::prover_dal::StuckJobs;
+
+use crate::{
+ metrics::{WitnessType, PROVER_JOB_MONITOR_METRICS},
+ task_wiring::Task,
+};
+
+/// `WitnessGeneratorJobRequeuer` is a task that requeues witness generator jobs that have not made progress in a given unit of time.
+#[derive(Debug)]
+pub struct WitnessGeneratorJobRequeuer {
+ /// max attempts before giving up on the job
+ max_attempts: u32,
+ /// the amount of time that must have passed before a job is considered to have not made progress
+ processing_timeouts: WitnessGenerationTimeouts,
+}
+
+impl WitnessGeneratorJobRequeuer {
+ pub fn new(max_attempts: u32, processing_timeouts: WitnessGenerationTimeouts) -> Self {
+ Self {
+ max_attempts,
+ processing_timeouts,
+ }
+ }
+
+ fn emit_telemetry(&self, witness_type: WitnessType, stuck_jobs: &Vec<StuckJobs>) {
+ for stuck_job in stuck_jobs {
+ tracing::info!("requeued {:?} {:?}", witness_type, stuck_job);
+ }
+ PROVER_JOB_MONITOR_METRICS.requeued_witness_generator_jobs[&witness_type]
+ .inc_by(stuck_jobs.len() as u64);
+ }
+
+ async fn requeue_stuck_basic_jobs(&self, connection: &mut Connection<'_, Prover>) {
+ let stuck_jobs = connection
+ .fri_witness_generator_dal()
+ .requeue_stuck_basic_jobs(self.processing_timeouts.basic(), self.max_attempts)
+ .await;
+ self.emit_telemetry(WitnessType::BasicWitnessGenerator, &stuck_jobs);
+ }
+
+ async fn requeue_stuck_leaf_jobs(&self, connection: &mut Connection<'_, Prover>) {
+ let stuck_jobs = connection
+ .fri_witness_generator_dal()
+ .requeue_stuck_leaf_jobs(self.processing_timeouts.leaf(), self.max_attempts)
+ .await;
+ self.emit_telemetry(WitnessType::LeafWitnessGenerator, &stuck_jobs);
+ }
+
+ async fn requeue_stuck_node_jobs(&self, connection: &mut Connection<'_, Prover>) {
+ let stuck_jobs = connection
+ .fri_witness_generator_dal()
+ .requeue_stuck_node_jobs(self.processing_timeouts.node(), self.max_attempts)
+ .await;
+ self.emit_telemetry(WitnessType::NodeWitnessGenerator, &stuck_jobs);
+ }
+
+ async fn requeue_stuck_recursion_tip_jobs(&self, connection: &mut Connection<'_, Prover>) {
+ let stuck_jobs = connection
.fri_witness_generator_dal() + .requeue_stuck_recursion_tip_jobs( + self.processing_timeouts.recursion_tip(), + self.max_attempts, + ) + .await; + self.emit_telemetry(WitnessType::RecursionTipWitnessGenerator, &stuck_jobs); + } + + async fn requeue_stuck_scheduler_jobs(&self, connection: &mut Connection<'_, Prover>) { + let stuck_jobs = connection + .fri_witness_generator_dal() + .requeue_stuck_scheduler_jobs(self.processing_timeouts.scheduler(), self.max_attempts) + .await; + self.emit_telemetry(WitnessType::SchedulerWitnessGenerator, &stuck_jobs); + } +} + +#[async_trait] +impl Task for WitnessGeneratorJobRequeuer { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + self.requeue_stuck_basic_jobs(connection).await; + self.requeue_stuck_leaf_jobs(connection).await; + self.requeue_stuck_node_jobs(connection).await; + self.requeue_stuck_recursion_tip_jobs(connection).await; + self.requeue_stuck_scheduler_jobs(connection).await; + Ok(()) + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/lib.rs b/prover/crates/bin/prover_job_monitor/src/lib.rs new file mode 100644 index 00000000000..60d8be297cf --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/lib.rs @@ -0,0 +1,6 @@ +pub mod archiver; +pub mod job_requeuer; +pub(crate) mod metrics; +pub mod queue_reporter; +pub mod task_wiring; +pub mod witness_job_queuer; diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs new file mode 100644 index 00000000000..e585c06ad77 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/main.rs @@ -0,0 +1,201 @@ +use anyhow::Context as _; +use clap::Parser; +use tokio::{ + sync::{oneshot, watch}, + task::JoinHandle, +}; +use zksync_config::configs::{ + fri_prover_group::FriProverGroupConfig, FriProofCompressorConfig, FriProverConfig, + FriWitnessGeneratorConfig, ProverJobMonitorConfig, +}; +use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_job_monitor::{ + archiver::{GpuProverArchiver, ProverJobsArchiver}, + job_requeuer::{ProofCompressorJobRequeuer, ProverJobRequeuer, WitnessGeneratorJobRequeuer}, + queue_reporter::{ + ProofCompressorQueueReporter, ProverQueueReporter, WitnessGeneratorQueueReporter, + }, + task_wiring::TaskRunner, + witness_job_queuer::WitnessJobQueuer, +}; +use zksync_utils::wait_for_tasks::ManagedTasks; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +#[derive(Debug, Parser)] +#[command(author = "Matter Labs", version)] +pub(crate) struct CliOpts { + #[arg(long)] + pub(crate) config_path: Option, + #[arg(long)] + pub(crate) secrets_path: Option, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let opt = CliOpts::parse(); + + let general_config = load_general_config(opt.config_path).context("general config")?; + + println!("general_config = {general_config:?}"); + let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; + + let observability_config = general_config + .observability + .context("observability config")?; + let _observability_guard = observability_config.install()?; + + let prover_job_monitor_config = general_config + .prover_job_monitor_config + .context("prover_job_monitor_config")?; + let proof_compressor_config = general_config + .proof_compressor_config + .context("proof_compressor_config")?; + let prover_config = general_config.prover_config.context("prover_config")?; + let 
witness_generator_config = general_config + .witness_generator_config + .context("witness_generator_config")?; + let prover_group_config = general_config + .prover_group_config + .context("fri_prover_group_config")?; + let exporter_config = PrometheusExporterConfig::pull(prover_job_monitor_config.prometheus_port); + + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(sender) = stop_signal_sender.take() { + sender.send(()).ok(); + } + }) + .context("Error setting Ctrl+C handler")?; + + let (stop_sender, stop_receiver) = watch::channel(false); + + tracing::info!("Starting ProverJobMonitoring"); + + let connection_pool = ConnectionPool::::builder( + database_secrets.prover_url()?, + prover_job_monitor_config.max_db_connections, + ) + .build() + .await + .context("failed to build a connection pool")?; + + let graceful_shutdown_timeout = prover_job_monitor_config.graceful_shutdown_timeout(); + + let mut tasks = vec![tokio::spawn(exporter_config.run(stop_receiver.clone()))]; + + tasks.extend(get_tasks( + connection_pool, + prover_job_monitor_config, + proof_compressor_config, + prover_config, + witness_generator_config, + prover_group_config, + stop_receiver, + )?); + let mut tasks = ManagedTasks::new(tasks); + + tokio::select! { + _ = tasks.wait_single() => {}, + _ = stop_signal_receiver => { + tracing::info!("Stop signal received, shutting down"); + } + } + stop_sender.send(true).ok(); + tasks.complete(graceful_shutdown_timeout).await; + + Ok(()) +} + +fn get_tasks( + connection_pool: ConnectionPool, + prover_job_monitor_config: ProverJobMonitorConfig, + proof_compressor_config: FriProofCompressorConfig, + prover_config: FriProverConfig, + witness_generator_config: FriWitnessGeneratorConfig, + prover_group_config: FriProverGroupConfig, + stop_receiver: watch::Receiver, +) -> anyhow::Result>>> { + let mut task_runner = TaskRunner::new(connection_pool); + + // archivers + let gpu_prover_archiver = + GpuProverArchiver::new(prover_job_monitor_config.archive_gpu_prover_duration()); + task_runner.add( + "GpuProverArchiver", + prover_job_monitor_config.gpu_prover_archiver_run_interval(), + gpu_prover_archiver, + ); + + let prover_jobs_archiver = + ProverJobsArchiver::new(prover_job_monitor_config.archive_prover_jobs_duration()); + task_runner.add( + "ProverJobsArchiver", + prover_job_monitor_config.prover_jobs_archiver_run_interval(), + prover_jobs_archiver, + ); + + // job requeuers + let proof_compressor_job_requeuer = ProofCompressorJobRequeuer::new( + proof_compressor_config.max_attempts, + proof_compressor_config.generation_timeout(), + ); + task_runner.add( + "ProofCompressorJobRequeuer", + prover_job_monitor_config.proof_compressor_job_requeuer_run_interval(), + proof_compressor_job_requeuer, + ); + + let prover_job_requeuer = ProverJobRequeuer::new( + prover_config.max_attempts, + prover_config.proof_generation_timeout(), + ); + task_runner.add( + "ProverJobRequeuer", + prover_job_monitor_config.prover_job_requeuer_run_interval(), + prover_job_requeuer, + ); + + let witness_generator_job_requeuer = WitnessGeneratorJobRequeuer::new( + witness_generator_config.max_attempts, + witness_generator_config.witness_generation_timeouts(), + ); + task_runner.add( + "WitnessGeneratorJobRequeuer", + prover_job_monitor_config.witness_generator_job_requeuer_run_interval(), + witness_generator_job_requeuer, + ); + + // queue reporters + let proof_compressor_queue_reporter = 
ProofCompressorQueueReporter {}; + task_runner.add( + "ProofCompressorQueueReporter", + prover_job_monitor_config.proof_compressor_queue_reporter_run_interval(), + proof_compressor_queue_reporter, + ); + + let prover_queue_reporter = ProverQueueReporter::new(prover_group_config); + task_runner.add( + "ProverQueueReporter", + prover_job_monitor_config.prover_queue_reporter_run_interval(), + prover_queue_reporter, + ); + + let witness_generator_queue_reporter = WitnessGeneratorQueueReporter {}; + task_runner.add( + "WitnessGeneratorQueueReporter", + prover_job_monitor_config.witness_generator_queue_reporter_run_interval(), + witness_generator_queue_reporter, + ); + + // witness job queuer + let witness_job_queuer = WitnessJobQueuer {}; + task_runner.add( + "WitnessJobQueuer", + prover_job_monitor_config.witness_job_queuer_run_interval(), + witness_job_queuer, + ); + + Ok(task_runner.spawn(stop_receiver)) +} diff --git a/prover/crates/bin/prover_job_monitor/src/metrics.rs b/prover/crates/bin/prover_job_monitor/src/metrics.rs new file mode 100644 index 00000000000..fa5e22111ae --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/metrics.rs @@ -0,0 +1,98 @@ +use vise::{Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, LabeledFamily, Metrics}; +use zksync_types::protocol_version::ProtocolSemanticVersion; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_job_monitor")] +pub(crate) struct ProverJobMonitorMetrics { + // archivers + /// number of dead GPU provers archived + pub archived_gpu_provers: Counter, + /// number of finished prover job archived + pub archived_prover_jobs: Counter, + + // job requeuers + /// number of proof compressor jobs that have been requeued for execution + pub requeued_proof_compressor_jobs: Counter, + /// number of circuit prover jobs that have been requeued for execution + pub requeued_circuit_prover_jobs: Counter, + /// number of witness generator jobs that have been requeued for execution + pub requeued_witness_generator_jobs: Family>, + + // queues reporters + /// number of proof compressor jobs that are queued/in_progress per protocol version + #[metrics(labels = ["type", "protocol_version"])] + pub proof_compressor_jobs: LabeledFamily<(JobStatus, String), Gauge, 2>, + /// the oldest batch that has not been compressed yet + pub oldest_uncompressed_batch: Gauge, + /// number of prover jobs per circuit, per round, per protocol version, per status + /// Sets a specific value for a struct as follows: + /// { + /// status: Queued, + /// circuit_id: 1, + /// round: 0, + /// group_id: + /// protocol_version: 0.24.2, + /// } + pub prover_jobs: Family>, + /// the oldest batch that has not been proven yet, per circuit id and aggregation round + #[metrics(labels = ["circuit_id", "aggregation_round"])] + pub oldest_unprocessed_batch: LabeledFamily<(String, String), Gauge, 2>, + /// number of witness generator jobs per "round" + #[metrics(labels = ["type", "round", "protocol_version"])] + pub witness_generator_jobs_by_round: LabeledFamily<(JobStatus, String, String), Gauge, 3>, + + // witness job queuer + /// number of jobs queued per type of witness generator + pub queued_witness_generator_jobs: Family>, +} + +impl ProverJobMonitorMetrics { + pub fn report_prover_jobs( + &self, + status: JobStatus, + circuit_id: u8, + round: u8, + group_id: u8, + protocol_version: ProtocolSemanticVersion, + amount: u64, + ) { + self.prover_jobs[&ProverJobsLabels { + status, + circuit_id: circuit_id.to_string(), + round: round.to_string(), + group_id: 
group_id.to_string(), + protocol_version: protocol_version.to_string(), + }] + .set(amount); + } +} +#[vise::register] +pub(crate) static PROVER_JOB_MONITOR_METRICS: vise::Global = + vise::Global::new(); + +#[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)] +pub(crate) struct ProverJobsLabels { + pub status: JobStatus, + pub circuit_id: String, + pub round: String, + pub group_id: String, + pub protocol_version: String, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "type", rename_all = "snake_case")] +#[allow(clippy::enum_variant_names)] +pub(crate) enum WitnessType { + BasicWitnessGenerator, + LeafWitnessGenerator, + NodeWitnessGenerator, + RecursionTipWitnessGenerator, + SchedulerWitnessGenerator, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] +#[metrics(rename_all = "snake_case")] +pub enum JobStatus { + Queued, + InProgress, +} diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/mod.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/mod.rs new file mode 100644 index 00000000000..f325f1fcba7 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/mod.rs @@ -0,0 +1,7 @@ +pub use proof_compressor_queue_reporter::ProofCompressorQueueReporter; +pub use prover_queue_reporter::ProverQueueReporter; +pub use witness_generator_queue_reporter::WitnessGeneratorQueueReporter; + +mod proof_compressor_queue_reporter; +mod prover_queue_reporter; +mod witness_generator_queue_reporter; diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/proof_compressor_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/proof_compressor_queue_reporter.rs new file mode 100644 index 00000000000..f31af8e247a --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/proof_compressor_queue_reporter.rs @@ -0,0 +1,68 @@ +use std::collections::HashMap; + +use async_trait::async_trait; +use zksync_prover_dal::{Connection, Prover, ProverDal}; +use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::JobCountStatistics}; + +use crate::{ + metrics::{JobStatus, PROVER_JOB_MONITOR_METRICS}, + task_wiring::Task, +}; + +/// `ProofCompressorQueueReporter` is a task that reports compression jobs status. +/// Note: these values will be used for auto-scaling proof compressor. 
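+/// On every run it sets the `proof_compressor_jobs` gauge (queued / in progress, per protocol version)
+/// and the `oldest_uncompressed_batch` gauge.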
+#[derive(Debug)] +pub struct ProofCompressorQueueReporter {} + +impl ProofCompressorQueueReporter { + async fn get_job_statistics( + connection: &mut Connection<'_, Prover>, + ) -> HashMap { + connection.fri_proof_compressor_dal().get_jobs_stats().await + } +} + +#[async_trait] +impl Task for ProofCompressorQueueReporter { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + let stats = Self::get_job_statistics(connection).await; + + for (protocol_version, stats) in &stats { + if stats.queued > 0 { + tracing::info!( + "Found {} queued proof compressor jobs for protocol version {}.", + stats.queued, + protocol_version + ); + } + if stats.in_progress > 0 { + tracing::info!( + "Found {} in progress proof compressor jobs for protocol version {}.", + stats.in_progress, + protocol_version + ); + } + + PROVER_JOB_MONITOR_METRICS.proof_compressor_jobs + [&(JobStatus::Queued, protocol_version.to_string())] + .set(stats.queued as u64); + + PROVER_JOB_MONITOR_METRICS.proof_compressor_jobs + [&(JobStatus::InProgress, protocol_version.to_string())] + .set(stats.in_progress as u64); + } + + let oldest_not_compressed_batch = connection + .fri_proof_compressor_dal() + .get_oldest_not_compressed_batch() + .await; + + if let Some(l1_batch_number) = oldest_not_compressed_batch { + PROVER_JOB_MONITOR_METRICS + .oldest_uncompressed_batch + .set(l1_batch_number.0 as u64); + } + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/prover_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/prover_queue_reporter.rs new file mode 100644 index 00000000000..365000acb59 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/prover_queue_reporter.rs @@ -0,0 +1,83 @@ +use async_trait::async_trait; +use zksync_config::configs::fri_prover_group::FriProverGroupConfig; +use zksync_prover_dal::{Connection, Prover, ProverDal}; +use zksync_types::{basic_fri_types::CircuitIdRoundTuple, prover_dal::JobCountStatistics}; + +use crate::{ + metrics::{JobStatus, PROVER_JOB_MONITOR_METRICS}, + task_wiring::Task, +}; + +/// `ProverQueueReporter` is a task that reports prover jobs status. +/// Note: these values will be used for auto-scaling provers and Witness Vector Generators. 
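+/// On every run it reports queued / in-progress counts per (circuit id, aggregation round, group id,
+/// protocol version) via `report_prover_jobs`, plus the `oldest_unprocessed_batch` lag per circuit and round.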
+#[derive(Debug)] +pub struct ProverQueueReporter { + config: FriProverGroupConfig, +} + +impl ProverQueueReporter { + pub fn new(config: FriProverGroupConfig) -> Self { + Self { config } + } +} + +#[async_trait] +impl Task for ProverQueueReporter { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + let stats = connection + .fri_prover_jobs_dal() + .get_prover_jobs_stats() + .await; + + for (protocol_semantic_version, circuit_prover_stats) in stats { + for (tuple, stat) in circuit_prover_stats { + let CircuitIdRoundTuple { + circuit_id, + aggregation_round, + } = tuple; + let JobCountStatistics { + queued, + in_progress, + } = stat; + let group_id = self + .config + .get_group_id_for_circuit_id_and_aggregation_round( + circuit_id, + aggregation_round, + ) + .unwrap_or(u8::MAX); + + PROVER_JOB_MONITOR_METRICS.report_prover_jobs( + JobStatus::Queued, + circuit_id, + aggregation_round, + group_id, + protocol_semantic_version, + queued as u64, + ); + + PROVER_JOB_MONITOR_METRICS.report_prover_jobs( + JobStatus::InProgress, + circuit_id, + aggregation_round, + group_id, + protocol_semantic_version, + in_progress as u64, + ) + } + } + + let lag_by_circuit_type = connection + .fri_prover_jobs_dal() + .min_unproved_l1_batch_number() + .await; + + for ((circuit_id, aggregation_round), l1_batch_number) in lag_by_circuit_type { + PROVER_JOB_MONITOR_METRICS.oldest_unprocessed_batch + [&(circuit_id.to_string(), aggregation_round.to_string())] + .set(l1_batch_number.0 as u64); + } + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs new file mode 100644 index 00000000000..0d222f129d3 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs @@ -0,0 +1,71 @@ +use async_trait::async_trait; +use zksync_prover_dal::{Connection, Prover, ProverDal}; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::JobCountStatistics, +}; + +use crate::{ + metrics::{JobStatus, PROVER_JOB_MONITOR_METRICS}, + task_wiring::Task, +}; + +/// `WitnessGeneratorQueueReporter` is a task that reports witness generator jobs status. +/// Note: these values will be used for auto-scaling witness generators (Basic, Leaf, Node, Recursion Tip and Scheduler). 
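The nested loops in `ProverQueueReporter::invoke` only assume that `get_prover_jobs_stats()` yields a protocol-version → (circuit, round) → counters mapping; the concrete `ProtocolVersionedCircuitProverStats` type lives in the basic-types crate and is not defined in this diff, so its real shape may differ. A rough sketch of that shape using plain `HashMap`s, with a hypothetical `total_queued` helper purely for illustration:

```rust
use std::collections::HashMap;

use zksync_types::{
    basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion,
    prover_dal::JobCountStatistics,
};

// Assumed shape only: the reporter just needs nested-map iteration semantics.
type StatsShape = HashMap<ProtocolSemanticVersion, HashMap<CircuitIdRoundTuple, JobCountStatistics>>;

// Hypothetical helper, shown to illustrate how the counters roll up across circuits and versions.
fn total_queued(stats: &StatsShape) -> u64 {
    stats
        .values()
        .flat_map(|per_circuit| per_circuit.values())
        .map(|counts| counts.queued as u64)
        .sum()
}
```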
+#[derive(Debug)] +pub struct WitnessGeneratorQueueReporter; + +impl WitnessGeneratorQueueReporter { + fn emit_metrics_for_round( + round: AggregationRound, + protocol_version: ProtocolSemanticVersion, + stats: &JobCountStatistics, + ) { + if stats.queued > 0 { + tracing::info!( + "Found {} queued {} witness generator jobs for protocol version {}.", + stats.queued, + round, + protocol_version + ); + } + if stats.in_progress > 0 { + tracing::info!( + "Found {} in progress {} witness generator jobs for protocol version {}.", + stats.in_progress, + round, + protocol_version + ); + } + + PROVER_JOB_MONITOR_METRICS.witness_generator_jobs_by_round[&( + JobStatus::Queued, + round.to_string(), + protocol_version.to_string(), + )] + .set(stats.queued as u64); + PROVER_JOB_MONITOR_METRICS.witness_generator_jobs_by_round[&( + JobStatus::InProgress, + round.to_string(), + protocol_version.to_string(), + )] + .set(stats.in_progress as u64); + } +} + +#[async_trait] +impl Task for WitnessGeneratorQueueReporter { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + for round in AggregationRound::ALL_ROUNDS { + let stats = connection + .fri_witness_generator_dal() + .get_witness_jobs_stats(round) + .await; + for ((round, semantic_protocol_version), job_stats) in stats { + Self::emit_metrics_for_round(round, semantic_protocol_version, &job_stats); + } + } + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/task_wiring.rs b/prover/crates/bin/prover_job_monitor/src/task_wiring.rs new file mode 100644 index 00000000000..d6539141b1d --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/task_wiring.rs @@ -0,0 +1,86 @@ +use std::time::Duration; + +use anyhow::Context; +use tracing::Instrument; +use zksync_prover_dal::{Connection, ConnectionPool, Prover}; + +/// Task trait to be run in ProverJobMonitor. +#[async_trait::async_trait] +pub trait Task { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()>; +} + +/// Wrapper for Task with a periodic interface. Holds information about the task and provides DB connectivity. +struct PeriodicTask { + job: Box, + name: String, + interval: Duration, +} + +impl PeriodicTask { + async fn run( + &self, + mut stop_receiver: tokio::sync::watch::Receiver, + connection_pool: ConnectionPool, + ) -> anyhow::Result<()> { + tracing::info!( + "Started Task {} with run interval: {:?}", + self.name, + self.interval + ); + + let mut interval = tokio::time::interval(self.interval); + + while !*stop_receiver.borrow_and_update() { + interval.tick().await; + let mut connection = connection_pool + .connection() + .await + .context("failed to get database connection")?; + self.job + .invoke(&mut connection) + .instrument(tracing::info_span!("run", service_name = %self.name)) + .await + .context("failed to invoke task")?; + } + tracing::info!("Stop signal received; Task {} is shut down", self.name); + Ok(()) + } +} + +/// Wrapper on a vector of task. Makes adding/spawning tasks and sharing resources ergonomic. 
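As a usage sketch for the `TaskRunner` defined immediately below, the snippet wires the queuer and two reporters from this PR and spawns them against a prover connection pool. The function name, intervals, and shutdown handling are invented for illustration, and generic parameters such as `ConnectionPool<Prover>` are assumed where they appear stripped in this paste; `ProverQueueReporter` would be added the same way once a `FriProverGroupConfig` is at hand.

```rust
use std::time::Duration;

use tokio::sync::watch;
use zksync_prover_dal::{ConnectionPool, Prover};

use crate::{
    queue_reporter::{ProofCompressorQueueReporter, WitnessGeneratorQueueReporter},
    task_wiring::TaskRunner,
    witness_job_queuer::WitnessJobQueuer,
};

// Hypothetical wiring sketch; only `TaskRunner::new`, `add`, and `spawn` from below are used.
async fn run_monitor(pool: ConnectionPool<Prover>) -> anyhow::Result<()> {
    let (stop_sender, stop_receiver) = watch::channel(false);

    let mut runner = TaskRunner::new(pool);
    runner.add("WitnessJobQueuer", Duration::from_secs(10), WitnessJobQueuer);
    runner.add(
        "WitnessGeneratorQueueReporter",
        Duration::from_secs(10),
        WitnessGeneratorQueueReporter,
    );
    runner.add(
        "ProofCompressorQueueReporter",
        Duration::from_secs(10),
        ProofCompressorQueueReporter {},
    );

    let handles = runner.spawn(stop_receiver);

    // ... wait for a shutdown signal here, then stop the tasks and join them:
    stop_sender.send(true).ok();
    for handle in handles {
        handle.await??;
    }
    Ok(())
}
```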
+pub struct TaskRunner { + pool: ConnectionPool, + tasks: Vec, +} + +impl TaskRunner { + pub fn new(pool: ConnectionPool) -> Self { + Self { + pool, + tasks: Vec::new(), + } + } + + pub fn add(&mut self, name: &str, interval: Duration, job: T) { + self.tasks.push(PeriodicTask { + name: name.into(), + interval, + job: Box::new(job), + }); + } + + pub fn spawn( + self, + stop_receiver: tokio::sync::watch::Receiver, + ) -> Vec>> { + self.tasks + .into_iter() + .map(|task| { + let pool = self.pool.clone(); + let receiver = stop_receiver.clone(); + tokio::spawn(async move { task.run(receiver, pool).await }) + }) + .collect() + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/witness_job_queuer.rs b/prover/crates/bin/prover_job_monitor/src/witness_job_queuer.rs new file mode 100644 index 00000000000..d8d12df4abe --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/witness_job_queuer.rs @@ -0,0 +1,121 @@ +use async_trait::async_trait; +use zksync_prover_dal::{Connection, Prover, ProverDal}; + +use crate::{ + metrics::{WitnessType, PROVER_JOB_MONITOR_METRICS}, + task_wiring::Task, +}; + +/// `WitnessJobQueuer` is a task that moves witness generator jobs from 'waiting_for_proofs' to 'queued'. +/// Note: this task is the backbone of scheduling/getting ready witness jobs to execute. +#[derive(Debug)] +pub struct WitnessJobQueuer; + +impl WitnessJobQueuer { + /// Marks leaf witness jobs as queued. + /// The trigger condition is all prover jobs on round 0 for a given circuit, per batch, have been completed. + async fn queue_leaf_jobs(&self, connection: &mut Connection<'_, Prover>) { + let l1_batch_numbers = connection + .fri_witness_generator_dal() + .move_leaf_aggregation_jobs_from_waiting_to_queued() + .await; + let len = l1_batch_numbers.len(); + for (l1_batch_number, circuit_id) in l1_batch_numbers { + tracing::info!( + "Marked leaf job for l1_batch {} and circuit_id {} as queued.", + l1_batch_number, + circuit_id + ); + } + + PROVER_JOB_MONITOR_METRICS.queued_witness_generator_jobs + [&WitnessType::LeafWitnessGenerator] + .inc_by(len as u64); + } + + async fn move_node_aggregation_jobs_from_waiting_to_queued( + &self, + connection: &mut Connection<'_, Prover>, + ) -> Vec<(i64, u8, u16)> { + let mut jobs = connection + .fri_witness_generator_dal() + .move_depth_zero_node_aggregation_jobs() + .await; + jobs.extend( + connection + .fri_witness_generator_dal() + .move_depth_non_zero_node_aggregation_jobs() + .await, + ); + jobs + } + + /// Marks node witness jobs as queued. + /// The trigger condition is all prover jobs on round 1 (or 2 if recursing) for a given circuit, per batch, have been completed. + async fn queue_node_jobs(&self, connection: &mut Connection<'_, Prover>) { + let l1_batch_numbers = self + .move_node_aggregation_jobs_from_waiting_to_queued(connection) + .await; + let len = l1_batch_numbers.len(); + for (l1_batch_number, circuit_id, depth) in l1_batch_numbers { + tracing::info!( + "Marked node job for l1_batch {} and circuit_id {} at depth {} as queued.", + l1_batch_number, + circuit_id, + depth + ); + } + PROVER_JOB_MONITOR_METRICS.queued_witness_generator_jobs + [&WitnessType::NodeWitnessGenerator] + .inc_by(len as u64); + } + + /// Marks recursion tip witness jobs as queued. + /// The trigger condition is all final node proving jobs for the batch have been completed. 
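To make the ordering encoded in the `WitnessJobQueuer` comments explicit, here is a compact, purely illustrative restatement of which completed proofs unblock each witness generator stage; `ProofStage` and `unblocked_by` are invented names, not API from this PR, and they only mirror the trigger conditions described in the surrounding doc comments.

```rust
use crate::metrics::WitnessType;

// Sketch only: illustrative names, derived from the doc comments around `WitnessJobQueuer`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ProofStage {
    /// Round-0 (basic circuit) prover jobs for the batch.
    BasicCircuitProofs,
    /// Leaf aggregation proofs (or previous-depth node proofs when recursing deeper).
    LeafOrPreviousNodeProofs,
    /// The final node proofs of the batch.
    FinalNodeProofs,
    /// The single recursion tip proof of the batch.
    RecursionTipProof,
}

/// Which completed stage lets the queuer move a witness job from `waiting_for_proofs` to `queued`.
fn unblocked_by(witness: WitnessType) -> Option<ProofStage> {
    match witness {
        // Basic witness generation is queued as soon as the batch enters the prover subsystem.
        WitnessType::BasicWitnessGenerator => None,
        WitnessType::LeafWitnessGenerator => Some(ProofStage::BasicCircuitProofs),
        WitnessType::NodeWitnessGenerator => Some(ProofStage::LeafOrPreviousNodeProofs),
        WitnessType::RecursionTipWitnessGenerator => Some(ProofStage::FinalNodeProofs),
        WitnessType::SchedulerWitnessGenerator => Some(ProofStage::RecursionTipProof),
    }
}
```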
+ async fn queue_recursion_tip_jobs(&self, connection: &mut Connection<'_, Prover>) { + let l1_batch_numbers = connection + .fri_witness_generator_dal() + .move_recursion_tip_jobs_from_waiting_to_queued() + .await; + for l1_batch_number in &l1_batch_numbers { + tracing::info!( + "Marked recursion tip job for l1_batch {} as queued.", + l1_batch_number, + ); + } + PROVER_JOB_MONITOR_METRICS.queued_witness_generator_jobs + [&WitnessType::RecursionTipWitnessGenerator] + .inc_by(l1_batch_numbers.len() as u64); + } + + /// Marks scheduler witness jobs as queued. + /// The trigger condition is the recursion tip proving job for the batch has been completed. + async fn queue_scheduler_jobs(&self, connection: &mut Connection<'_, Prover>) { + let l1_batch_numbers = connection + .fri_witness_generator_dal() + .move_scheduler_jobs_from_waiting_to_queued() + .await; + for l1_batch_number in &l1_batch_numbers { + tracing::info!( + "Marked scheduler job for l1_batch {} as queued.", + l1_batch_number, + ); + } + PROVER_JOB_MONITOR_METRICS.queued_witness_generator_jobs + [&WitnessType::SchedulerWitnessGenerator] + .inc_by(l1_batch_numbers.len() as u64); + } +} + +#[async_trait] +impl Task for WitnessJobQueuer { + async fn invoke(&self, connection: &mut Connection) -> anyhow::Result<()> { + // Note that there's no basic jobs here; basic witness generation is ready by the time it reaches prover subsystem. + // It doesn't need to wait for any proof to start, as it is the process that maps the future execution (how many proofs and future witness generators). + self.queue_leaf_jobs(connection).await; + self.queue_node_jobs(connection).await; + self.queue_recursion_tip_jobs(connection).await; + self.queue_scheduler_jobs(connection).await; + Ok(()) + } +} diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt b/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt new file mode 100644 index 00000000000..7e50d86cb4f --- /dev/null +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt @@ -0,0 +1,9 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc ca181a7669a6e07b68bce71c8c723efcb8fd2a4e895fc962ca1d33ce5f8188f7 # shrinks to circuit_id = 1 +cc ce71957c410fa7af30e04b3e85423555a8e1bbd26b4682b748fa67162bc5687f # shrinks to circuit_id = 1 +cc 6d3b0c60d8a5e7d7dc3bb4a2a21cce97461827583ae01b2414345175a02a1221 # shrinks to key = ProverServiceDataKey { circuit_id: 1, round: BasicCircuits } diff --git a/prover/crates/bin/witness_generator/src/basic_circuits.rs b/prover/crates/bin/witness_generator/src/basic_circuits.rs index 75326ace7f6..00a4d99ba9a 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits.rs @@ -8,13 +8,15 @@ use std::{ use anyhow::Context as _; use async_trait::async_trait; use circuit_definitions::{ - circuit_definitions::base_layer::ZkSyncBaseLayerStorage, + circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, }; use tokio::sync::Semaphore; use tracing::Instrument; -use zkevm_test_harness::geometry_config::get_geometry_config; +use zkevm_test_harness::{ + geometry_config::get_geometry_config, witness::oracle::WitnessGenerationArtifact, +}; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_multivm::{ interface::storage::StorageView, @@ -34,7 +36,7 @@ use zksync_prover_fri_types::{ }, get_current_pod_name, keys::ClosedFormInputKey, - AuxOutputWitnessWrapper, + AuxOutputWitnessWrapper, CircuitAuxData, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_prover_interface::inputs::WitnessInputData; @@ -49,8 +51,8 @@ use crate::{ precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider, storage_oracle::StorageOracle, utils::{ - expand_bootloader_contents, save_circuit, ClosedFormInputWrapper, - SchedulerPartialInputWrapper, KZG_TRUSTED_SETUP_FILE, + expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness, + ClosedFormInputWrapper, SchedulerPartialInputWrapper, KZG_TRUSTED_SETUP_FILE, }, witness::WitnessStorage, }; @@ -432,6 +434,8 @@ async fn generate_witness( let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1); let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1); + let (ram_permutation_queue_sender, mut ram_permutation_queue_receiver) = + tokio::sync::mpsc::channel(1); let make_circuits_span = tracing::info_span!("make_circuits"); let make_circuits_span_copy = make_circuits_span.clone(); @@ -457,6 +461,29 @@ async fn generate_witness( .to_str() .expect("Path to KZG trusted setup is not a UTF-8 string"); + let artifacts_callback = |artifact: WitnessGenerationArtifact| match artifact { + WitnessGenerationArtifact::BaseLayerCircuit(circuit) => { + let parent_span = span.clone(); + tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { + circuit_sender + .blocking_send(circuit) + .expect("failed to send circuit from harness"); + }); + } + WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender + .blocking_send((a as u8, b, c)) + .expect("failed to send recursion queue from harness"), + a @ WitnessGenerationArtifact::MemoryQueueWitness(_) => { + let parent_span = span.clone(); + tracing::info_span!(parent: parent_span, "send_ram_permutation_queue_witness") + .in_scope(|| { + ram_permutation_queue_sender + .blocking_send(a) + .expect("failed to send ram permutation queue sitness from harness"); + }); + } + }; + let (scheduler_witness, block_witness) = 
zkevm_test_harness::external_calls::run( Address::zero(), BOOTLOADER_ADDRESS, @@ -474,24 +501,14 @@ async fn generate_witness( tree, path, input.eip_4844_blobs.blobs(), - |circuit| { - let parent_span = span.clone(); - tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { - circuit_sender - .blocking_send(circuit) - .expect("failed to send circuit from harness"); - }); - }, - |a, b, c| { - queue_sender - .blocking_send((a as u8, b, c)) - .expect("failed to send recursion queue from harness") - }, + artifacts_callback, ); (scheduler_witness, block_witness) }) .instrument(make_circuits_span); + let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); + let mut save_circuit_handles = vec![]; let save_circuits_span = tracing::info_span!("save_circuits"); @@ -503,7 +520,7 @@ async fn generate_witness( // If the order is tampered with, proving will fail (as the proof would be computed for a different sequence of instruction). let mut circuit_sequence = 0; - let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); + let mut ram_circuit_sequence = 0; while let Some(circuit) = circuit_receiver .recv() @@ -518,9 +535,27 @@ async fn generate_witness( .acquire_owned() .await .expect("failed to get permit for running save circuit task"); + + let partial_circuit_aux_data = match &circuit { + ZkSyncBaseLayerCircuit::RAMPermutation(_) => { + let circuit_subsequence_number = ram_circuit_sequence; + ram_circuit_sequence += 1; + Some(CircuitAuxData { + circuit_subsequence_number, + }) + } + _ => None, + }; + save_circuit_handles.push(tokio::task::spawn(async move { - let (circuit_id, circuit_url) = - save_circuit(block_number, circuit, sequence, object_store).await; + let (circuit_id, circuit_url) = save_circuit( + block_number, + circuit, + sequence, + partial_circuit_aux_data, + object_store, + ) + .await; drop(permit); (circuit_id, circuit_url) })); @@ -528,6 +563,57 @@ async fn generate_witness( } .instrument(save_circuits_span); + let mut save_ram_queue_witness_handles = vec![]; + + let save_ram_queue_witness_span = tracing::info_span!("save_circuits"); + + // Future which receives part of RAM permutation circuits witnesses and saves them async. 
+ // Uses semaphore because these artifacts are of significant size + let ram_queue_witness_receiver_handle = async { + let mut sorted_sequence = 0; + let mut unsorted_sequence = 0; + + while let Some(witness_artifact) = ram_permutation_queue_receiver + .recv() + .instrument(tracing::info_span!("wait_for_ram_witness")) + .await + { + let object_store = object_store.clone(); + let semaphore = semaphore.clone(); + let permit = semaphore + .acquire_owned() + .await + .expect("failed to get permit for running save ram permutation queue witness task"); + let (is_sorted, witness, sequence) = match witness_artifact { + WitnessGenerationArtifact::MemoryQueueWitness((witness, sorted)) => { + let sequence = if sorted { + let sequence = sorted_sequence; + sorted_sequence += 1; + sequence + } else { + let sequence = unsorted_sequence; + unsorted_sequence += 1; + sequence + }; + (sorted, witness, sequence) + } + _ => panic!("Invalid artifact received"), + }; + save_ram_queue_witness_handles.push(tokio::task::spawn(async move { + let _ = save_ram_premutation_queue_witness( + block_number, + sequence, + is_sorted, + witness, + object_store, + ) + .await; + drop(permit); + })); + } + } + .instrument(save_ram_queue_witness_span); + let mut save_queue_handles = vec![]; let save_queues_span = tracing::info_span!("save_queues"); @@ -553,10 +639,11 @@ async fn generate_witness( } .instrument(save_queues_span); - let (witnesses, _, _) = tokio::join!( + let (witnesses, _, _, _) = tokio::join!( make_circuits_handle, circuit_receiver_handle, - queue_receiver_handle + queue_receiver_handle, + ram_queue_witness_receiver_handle ); let (mut scheduler_witness, block_aux_witness) = witnesses.unwrap(); @@ -581,6 +668,12 @@ async fn generate_witness( .filter(|(circuit_id, _, _)| circuits_present.contains(circuit_id)) .collect(); + let _: Vec<_> = futures::future::join_all(save_ram_queue_witness_handles) + .await + .into_iter() + .map(|result| result.expect("failed to save ram permutation queue witness")) + .collect(); + scheduler_witness.previous_block_meta_hash = input.previous_batch_metadata.meta_hash.0; scheduler_witness.previous_block_aux_hash = input.previous_batch_metadata.aux_hash.0; diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs index d8cad84e777..2cfae160028 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; -use tokio::sync::Semaphore; +use tokio::{runtime::Handle, sync::Semaphore}; use zkevm_test_harness::{ witness::recursive_aggregation::{ compute_leaf_params, create_leaf_witness, split_recursion_queue, @@ -298,44 +298,48 @@ pub async fn process_leaf_aggregation_job( let base_vk = job.base_vk.clone(); let leaf_params = (circuit_id, job.leaf_params.clone()); - let handle = tokio::task::spawn(async move { - let _permit = semaphore - .acquire() + let handle = tokio::task::spawn_blocking(move || { + let async_task = async { + let _permit = semaphore + .acquire() + .await + .expect("failed to get permit to process queues chunk"); + + let proofs = load_proofs_for_job_ids(&proofs_ids_for_queue, &*object_store).await; + let base_proofs = proofs + .into_iter() + .map(|wrapper| match wrapper { + 
FriProofWrapper::Base(base_proof) => base_proof, + FriProofWrapper::Recursive(_) => { + panic!( + "Expected only base proofs for leaf agg {} {}", + job.circuit_id, job.block_number + ); + } + }) + .collect(); + + let (_, circuit) = create_leaf_witness( + circuit_id.into(), + queue, + base_proofs, + &base_vk, + &leaf_params, + ); + + save_recursive_layer_prover_input_artifacts( + job.block_number, + circuit_idx, + vec![circuit], + AggregationRound::LeafAggregation, + 0, + &*object_store, + None, + ) .await - .expect("failed to get permit to process queues chunk"); - - let proofs = load_proofs_for_job_ids(&proofs_ids_for_queue, &*object_store).await; - let base_proofs = proofs - .into_iter() - .map(|wrapper| match wrapper { - FriProofWrapper::Base(base_proof) => base_proof, - FriProofWrapper::Recursive(_) => { - panic!( - "Expected only base proofs for leaf agg {} {}", - job.circuit_id, job.block_number - ); - } - }) - .collect(); - - let (_, circuit) = create_leaf_witness( - circuit_id.into(), - queue, - base_proofs, - &base_vk, - &leaf_params, - ); - - save_recursive_layer_prover_input_artifacts( - job.block_number, - circuit_idx, - vec![circuit], - AggregationRound::LeafAggregation, - 0, - &*object_store, - None, - ) - .await + }; + + Handle::current().block_on(async_task) }); handles.push(handle); diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index e914d3742b5..a88dd8726d3 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -79,7 +79,7 @@ async fn main() -> anyhow::Result<()> { ); let store_factory = ObjectStoreFactory::new(object_store_config.0); let config = general_config - .witness_generator + .witness_generator_config .context("witness generator config")?; let prometheus_config = general_config.prometheus_config; diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation.rs index c9d5ab32bc5..4f396fd4b5a 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation.rs @@ -3,12 +3,12 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::RECURSION_ARITY; -use tokio::sync::Semaphore; +use tokio::{runtime::Handle, sync::Semaphore}; use zkevm_test_harness::witness::recursive_aggregation::{ compute_node_vk_commitment, create_node_witness, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -34,7 +34,7 @@ use crate::{ metrics::WITNESS_GENERATOR_METRICS, utils::{ load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, AggregationWrapper, AggregationWrapperLegacy, + save_recursive_layer_prover_input_artifacts, AggregationWrapper, }, }; @@ -138,51 +138,56 @@ impl NodeAggregationWitnessGenerator { let vk = vk.clone(); let all_leafs_layer_params = job.all_leafs_layer_params.clone(); - let handle = tokio::task::spawn(async move { - let _permit = semaphore - .acquire() - .await - .expect("failed to get permit to process queues chunk"); - - let proofs = load_proofs_for_job_ids(&proofs_ids_for_chunk, &*object_store).await; - let mut recursive_proofs = 
vec![]; - for wrapper in proofs { - match wrapper { - FriProofWrapper::Base(_) => { - panic!( - "Expected only recursive proofs for node agg {} {}", - job.circuit_id, job.block_number - ); - } - FriProofWrapper::Recursive(recursive_proof) => { - recursive_proofs.push(recursive_proof) + let handle = tokio::task::spawn_blocking(move || { + let async_task = async { + let _permit = semaphore + .acquire() + .await + .expect("failed to get permit to process queues chunk"); + + let proofs = + load_proofs_for_job_ids(&proofs_ids_for_chunk, &*object_store).await; + let mut recursive_proofs = vec![]; + for wrapper in proofs { + match wrapper { + FriProofWrapper::Base(_) => { + panic!( + "Expected only recursive proofs for node agg {} {}", + job.circuit_id, job.block_number + ); + } + FriProofWrapper::Recursive(recursive_proof) => { + recursive_proofs.push(recursive_proof) + } } } - } - - let (result_circuit_id, recursive_circuit, input_queue) = create_node_witness( - &chunk, - recursive_proofs, - &vk, - node_vk_commitment, - &all_leafs_layer_params, - ); - - let recursive_circuit_id_and_url = save_recursive_layer_prover_input_artifacts( - job.block_number, - circuit_idx, - vec![recursive_circuit], - AggregationRound::NodeAggregation, - job.depth + 1, - &*object_store, - Some(job.circuit_id), - ) - .await; - ( - (result_circuit_id, input_queue), - recursive_circuit_id_and_url, - ) + let (result_circuit_id, recursive_circuit, input_queue) = create_node_witness( + &chunk, + recursive_proofs, + &vk, + node_vk_commitment, + &all_leafs_layer_params, + ); + + let recursive_circuit_id_and_url = save_recursive_layer_prover_input_artifacts( + job.block_number, + circuit_idx, + vec![recursive_circuit], + AggregationRound::NodeAggregation, + job.depth + 1, + &*object_store, + Some(job.circuit_id), + ) + .await; + + ( + (result_circuit_id, input_queue), + recursive_circuit_id_and_url, + ) + }; + + Handle::current().block_on(async_task) }); handles.push(handle); @@ -444,27 +449,12 @@ async fn get_artifacts( circuit_id: metadata.circuit_id, depth: metadata.depth, }; - let result = object_store.get(key).await; - - // TODO: remove after transition - return match result { - Ok(aggregation_wrapper) => aggregation_wrapper, - Err(error) => { - // probably legacy struct is saved in GCS - if let ObjectStoreError::Serialization(serialization_error) = error { - let legacy_wrapper: AggregationWrapperLegacy = - object_store.get(key).await.unwrap_or_else(|inner_error| { - panic!( - "node aggregation job artifacts getting error. Key: {:?}, errors: {:?} {:?}", - key, serialization_error, inner_error - ) - }); - AggregationWrapper(legacy_wrapper.0.into_iter().map(|x| (x.0, x.1)).collect()) - } else { - panic!("node aggregation job artifacts missing: {:?}", key) - } - } - }; + object_store.get(key).await.unwrap_or_else(|error| { + panic!( + "node aggregation job artifacts getting error. 
Key: {:?}, error: {:?}", + key, error + ) + }) } #[tracing::instrument( diff --git a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index a21aabc5d6d..f8656ac90f4 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -4,9 +4,12 @@ use std::{ sync::Arc, }; -use circuit_definitions::circuit_definitions::{ - base_layer::ZkSyncBaseLayerCircuit, - recursion_layer::{ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof}, +use circuit_definitions::{ + circuit_definitions::{ + base_layer::ZkSyncBaseLayerCircuit, + recursion_layer::{ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof}, + }, + encodings::memory_query::MemoryQueueStateWitnesses, }; use once_cell::sync::Lazy; use zkevm_test_harness::{ @@ -28,8 +31,8 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness, }, - keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey}, - CircuitWrapper, FriProofWrapper, + keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey, RamPermutationQueueWitnessKey}, + CircuitAuxData, CircuitWrapper, FriProofWrapper, RamPermutationQueueWitness, }; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber, ProtocolVersionId, U256}; @@ -97,32 +100,6 @@ impl StoredObject for AggregationWrapper { serialize_using_bincode!(); } -/// TODO: remove after transition -#[derive(serde::Serialize, serde::Deserialize)] -pub struct AggregationWrapperLegacy( - pub Vec<( - u64, - RecursionQueueSimulator, - ZkSyncRecursiveLayerCircuit, - )>, -); - -impl StoredObject for AggregationWrapperLegacy { - const BUCKET: Bucket = Bucket::NodeAggregationWitnessJobsFri; - type Key<'a> = AggregationsKey; - - fn encode_key(key: Self::Key<'_>) -> String { - let AggregationsKey { - block_number, - circuit_id, - depth, - } = key; - format!("aggregations_{block_number}_{circuit_id}_{depth}.bin") - } - - serialize_using_bincode!(); -} - #[derive(serde::Serialize, serde::Deserialize)] pub struct SchedulerPartialInputWrapper( pub SchedulerCircuitInstanceWitness< @@ -151,6 +128,7 @@ pub async fn save_circuit( block_number: L1BatchNumber, circuit: ZkSyncBaseLayerCircuit, sequence_number: usize, + aux_data_for_partial_circuit: Option, object_store: Arc, ) -> (u8, String) { let circuit_id = circuit.numeric_circuit_type(); @@ -161,13 +139,46 @@ pub async fn save_circuit( aggregation_round: AggregationRound::BasicCircuits, depth: 0, }; - let blob_url = object_store - .put(circuit_key, &CircuitWrapper::Base(circuit)) - .await - .unwrap(); + + let blob_url = if let Some(aux_data_for_partial_circuit) = aux_data_for_partial_circuit { + object_store + .put( + circuit_key, + &CircuitWrapper::BasePartial((circuit, aux_data_for_partial_circuit)), + ) + .await + .unwrap() + } else { + object_store + .put(circuit_key, &CircuitWrapper::Base(circuit)) + .await + .unwrap() + }; (circuit_id, blob_url) } +#[tracing::instrument( + skip_all, + fields(l1_batch = %block_number) +)] +pub async fn save_ram_premutation_queue_witness( + block_number: L1BatchNumber, + circuit_subsequence_number: usize, + is_sorted: bool, + witness: MemoryQueueStateWitnesses, + object_store: Arc, +) -> String { + let witness_key = RamPermutationQueueWitnessKey { + block_number, + circuit_subsequence_number, + is_sorted, + }; + object_store + .put(witness_key, &RamPermutationQueueWitness { witness }) + .await + .unwrap() +} + #[tracing::instrument( skip_all, 
fields(l1_batch = %block_number) @@ -227,11 +238,15 @@ pub async fn load_proofs_for_job_ids( job_ids: &[u32], object_store: &dyn ObjectStore, ) -> Vec { - let mut proofs = Vec::with_capacity(job_ids.len()); + let mut handles = Vec::with_capacity(job_ids.len()); for job_id in job_ids { - proofs.push(object_store.get(*job_id).await.unwrap()); + handles.push(object_store.get(*job_id)); } - proofs + futures::future::join_all(handles) + .await + .into_iter() + .map(|x| x.unwrap()) + .collect() } /// Loads all proofs for a given recursion tip's job ids. diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs index e26173067fb..800931f5d7c 100644 --- a/prover/crates/bin/witness_vector_generator/src/generator.rs +++ b/prover/crates/bin/witness_vector_generator/src/generator.rs @@ -79,6 +79,9 @@ impl WitnessVectorGenerator { CircuitWrapper::Recursive(recursive_circuit) => { recursive_circuit.synthesis::(&finalization_hints) } + CircuitWrapper::BasePartial(_) => { + panic!("Invalid circuit wrapper received for witness vector generation"); + } }; Ok(WitnessVectorArtifacts::new(cs.witness.unwrap(), job)) } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458.json b/prover/crates/lib/prover_dal/.sqlx/query-102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8.json similarity index 68% rename from prover/crates/lib/prover_dal/.sqlx/query-860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458.json rename to prover/crates/lib/prover_dal/.sqlx/query-102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8.json index f3ed6e34148..f912d06de81 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n status,\n attempts\n ", + "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -12,6 +12,16 @@ "ordinal": 1, "name": "attempts", "type_info": "Int2" + }, + { + "ordinal": 2, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "picked_by", + "type_info": "Text" } ], "parameters": { @@ -22,8 +32,10 @@ }, "nullable": [ false, - false + false, + true, + true ] }, - "hash": "860846c9bcad1edd1a2906542c178815e29440592b2bb00adacf02730b526458" + "hash": "102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json b/prover/crates/lib/prover_dal/.sqlx/query-216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a.json similarity index 71% rename from 
prover/crates/lib/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json rename to prover/crates/lib/prover_dal/.sqlx/query-216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a.json index 56d8b1fa995..ec503eabee0 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts\n ", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -17,6 +17,16 @@ "ordinal": 2, "name": "attempts", "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" } ], "parameters": { @@ -28,8 +38,10 @@ "nullable": [ false, false, - false + false, + true, + true ] }, - "hash": "3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64" + "hash": "216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11.json b/prover/crates/lib/prover_dal/.sqlx/query-2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9.json similarity index 76% rename from prover/crates/lib/prover_dal/.sqlx/query-8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11.json rename to prover/crates/lib/prover_dal/.sqlx/query-2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9.json index 6493053b122..14b64e8122e 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id\n ", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -22,6 +22,16 @@ "ordinal": 3, "name": "circuit_id", "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "picked_by", + 
"type_info": "Text" } ], "parameters": { @@ -34,8 +44,10 @@ false, false, false, - false + false, + true, + true ] }, - "hash": "8719c090a9ad2488d556e495238cdce6412e2725cf5162ce7a733f6dceaecb11" + "hash": "2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json b/prover/crates/lib/prover_dal/.sqlx/query-5f18efe2fb3a16cdf3c23379f36536b9704e8a76de95811cb23e3aa9f2512ade.json similarity index 65% rename from prover/crates/lib/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json rename to prover/crates/lib/prover_dal/.sqlx/query-5f18efe2fb3a16cdf3c23379f36536b9704e8a76de95811cb23e3aa9f2512ade.json index ff49f615ab5..a9c675855ba 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-5f18efe2fb3a16cdf3c23379f36536b9704e8a76de95811cb23e3aa9f2512ade.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n proof_compression_jobs_fri\n WHERE\n status <> 'successful'\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n l1_batch_number\n FROM\n proof_compression_jobs_fri\n WHERE\n status <> 'successful'\n AND status <> 'sent_to_server'\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a" + "hash": "5f18efe2fb3a16cdf3c23379f36536b9704e8a76de95811cb23e3aa9f2512ade" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec.json b/prover/crates/lib/prover_dal/.sqlx/query-6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec.json deleted file mode 100644 index 02b7862517f..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH deleted AS (\n DELETE FROM prover_jobs_fri\n WHERE\n status NOT IN ('queued', 'in_progress', 'in_gpu_proof', 'failed')\n AND updated_at < NOW() - $1::INTERVAL\n RETURNING *\n ),\n inserted_count AS (\n INSERT INTO prover_jobs_fri_archive\n SELECT * FROM deleted\n )\n SELECT COUNT(*) FROM deleted\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Interval" - ] - }, - "nullable": [ - null - ] - }, - "hash": "6cfc59d2fc039c706f30ae91b7d9d0c658093dede5eb61489205aa751ad5b8ec" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af.json b/prover/crates/lib/prover_dal/.sqlx/query-8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b.json similarity index 71% rename from prover/crates/lib/prover_dal/.sqlx/query-a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af.json rename to prover/crates/lib/prover_dal/.sqlx/query-8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b.json index f718a93a590..54fba3bbeac 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE 
recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts\n ", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -17,6 +17,16 @@ "ordinal": 2, "name": "attempts", "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" } ], "parameters": { @@ -28,8 +38,10 @@ "nullable": [ false, false, - false + false, + true, + true ] }, - "hash": "a0f60a97f09b2467ca73bb6fbebb210d65149cdd4a3411a79b717aadbffb43af" + "hash": "8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff.json b/prover/crates/lib/prover_dal/.sqlx/query-9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e.json similarity index 74% rename from prover/crates/lib/prover_dal/.sqlx/query-e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff.json rename to prover/crates/lib/prover_dal/.sqlx/query-9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e.json index 0264238ee48..90ea9994206 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id\n ", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -22,6 +22,16 @@ "ordinal": 3, "name": "circuit_id", "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "picked_by", + "type_info": "Text" } ], "parameters": { @@ -34,8 +44,10 @@ false, false, false, - false + false, + true, + true ] }, - "hash": "e3194873d24e67f8d0e98bf8bf2d4f9a3b98458746972c9860fb9473947d59ff" + "hash": "9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-a9e9399edfcaf7569869d5ac72ae8e0ed14ad1f42ffd0b383fbfb38e78df8ae3.json b/prover/crates/lib/prover_dal/.sqlx/query-a9e9399edfcaf7569869d5ac72ae8e0ed14ad1f42ffd0b383fbfb38e78df8ae3.json new file mode 100644 index 00000000000..ea6e6c23e6a --- /dev/null +++ 
b/prover/crates/lib/prover_dal/.sqlx/query-a9e9399edfcaf7569869d5ac72ae8e0ed14ad1f42ffd0b383fbfb38e78df8ae3.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH deleted AS (\n DELETE FROM prover_jobs_fri AS p\n USING proof_compression_jobs_fri AS c\n WHERE\n p.status NOT IN ('queued', 'in_progress', 'in_gpu_proof', 'failed')\n AND p.updated_at < NOW() - $1::INTERVAL\n AND p.l1_batch_number = c.l1_batch_number\n AND c.status = 'sent_to_server'\n RETURNING p.*\n ),\n inserted_count AS (\n INSERT INTO prover_jobs_fri_archive\n SELECT * FROM deleted\n )\n SELECT COUNT(*) FROM deleted\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Interval" + ] + }, + "nullable": [ + null + ] + }, + "hash": "a9e9399edfcaf7569869d5ac72ae8e0ed14ad1f42ffd0b383fbfb38e78df8ae3" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json b/prover/crates/lib/prover_dal/.sqlx/query-bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a.json similarity index 63% rename from prover/crates/lib/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json rename to prover/crates/lib/prover_dal/.sqlx/query-bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a.json index 550cb5ec743..ab1c2dd6552 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'in_gpu_proof'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -17,6 +17,16 @@ "ordinal": 2, "name": "attempts", "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" } ], "parameters": { @@ -28,8 +38,10 @@ "nullable": [ false, false, - false + false, + true, + true ] }, - "hash": "bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660" + "hash": "bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13.json b/prover/crates/lib/prover_dal/.sqlx/query-d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def.json similarity index 50% rename from prover/crates/lib/prover_dal/.sqlx/query-c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13.json rename to prover/crates/lib/prover_dal/.sqlx/query-d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def.json index 60f8a0df709..3943480b896 100644 --- 
a/prover/crates/lib/prover_dal/.sqlx/query-c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id IN (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'in_gpu_proof'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id\n ", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id IN (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n (\n status IN ('in_progress', 'in_gpu_proof')\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -22,6 +22,16 @@ "ordinal": 3, "name": "circuit_id", "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "picked_by", + "type_info": "Text" } ], "parameters": { @@ -34,8 +44,10 @@ false, false, false, - false + false, + true, + true ] }, - "hash": "c156004a0e5ad5bcc33d3b894fd69718349ac4fc08b455c7f4265d7443f2ec13" + "hash": "d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e.json b/prover/crates/lib/prover_dal/.sqlx/query-d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50.json similarity index 76% rename from prover/crates/lib/prover_dal/.sqlx/query-e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e.json rename to prover/crates/lib/prover_dal/.sqlx/query-d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50.json index 3a8362d2866..9df8f1c849c 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id\n ", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -22,6 +22,16 @@ "ordinal": 3, "name": "circuit_id", "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 5, + "name": "picked_by", + 
"type_info": "Text" } ], "parameters": { @@ -34,8 +44,10 @@ false, false, false, - false + false, + true, + true ] }, - "hash": "e32c0d85cb2841efb0b7cea6b049bae42849574731d33539bfdcca21c9b64f4e" + "hash": "d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json b/prover/crates/lib/prover_dal/.sqlx/query-eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96.json similarity index 75% rename from prover/crates/lib/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json rename to prover/crates/lib/prover_dal/.sqlx/query-eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96.json index 4958f38f535..27680c0bb46 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts\n ", + "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -17,6 +17,16 @@ "ordinal": 2, "name": "attempts", "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" } ], "parameters": { @@ -28,8 +38,10 @@ "nullable": [ false, false, - false + false, + true, + true ] }, - "hash": "5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f" + "hash": "eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96" } diff --git a/prover/crates/lib/prover_dal/src/cli_test_dal.rs b/prover/crates/lib/prover_dal/src/cli_test_dal.rs new file mode 100644 index 00000000000..069fa9c6a41 --- /dev/null +++ b/prover/crates/lib/prover_dal/src/cli_test_dal.rs @@ -0,0 +1,215 @@ +use zksync_basic_types::{ + prover_dal::{ProofCompressionJobStatus, ProverJobStatus, WitnessJobStatus}, + L1BatchNumber, +}; +use zksync_db_connection::connection::Connection; + +use crate::Prover; + +#[derive(Debug)] +pub struct CliTestDal<'a, 'c> { + pub storage: &'a mut Connection<'c, Prover>, +} + +impl CliTestDal<'_, '_> { + pub async fn update_prover_job( + &mut self, + status: ProverJobStatus, + circuit_id: u8, + aggregation_round: i64, + batch_number: L1BatchNumber, + sequence_number: usize, + ) { + sqlx::query(&format!( + "UPDATE prover_jobs_fri SET status = '{}' + WHERE l1_batch_number = {} + AND sequence_number = {} + AND aggregation_round = {} + AND circuit_id = {}", + status, batch_number.0, sequence_number, aggregation_round, circuit_id, + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn insert_lwg_job( + &mut self, + status: WitnessJobStatus, + batch_number: L1BatchNumber, + circuit_id: u8, + ) { + sqlx::query(&format!( + " + 
INSERT INTO + leaf_aggregation_witness_jobs_fri ( + l1_batch_number, + circuit_id, + status, + number_of_basic_circuits, + created_at, + updated_at + ) + VALUES + ({}, {}, 'waiting_for_proofs', 2, NOW(), NOW()) + ON CONFLICT (l1_batch_number, circuit_id) DO + UPDATE + SET status = '{}' + ", + batch_number.0, circuit_id, status + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn insert_nwg_job( + &mut self, + status: WitnessJobStatus, + batch_number: L1BatchNumber, + circuit_id: u8, + ) { + sqlx::query(&format!( + " + INSERT INTO + node_aggregation_witness_jobs_fri ( + l1_batch_number, + circuit_id, + status, + created_at, + updated_at + ) + VALUES + ({}, {}, 'waiting_for_proofs', NOW(), NOW()) + ON CONFLICT (l1_batch_number, circuit_id, depth) DO + UPDATE + SET status = '{}' + ", + batch_number.0, circuit_id, status, + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn insert_rt_job(&mut self, status: WitnessJobStatus, batch_number: L1BatchNumber) { + sqlx::query(&format!( + " + INSERT INTO + recursion_tip_witness_jobs_fri ( + l1_batch_number, + status, + number_of_final_node_jobs, + created_at, + updated_at + ) + VALUES + ({}, 'waiting_for_proofs',1, NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET status = '{}' + ", + batch_number.0, status, + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn insert_scheduler_job( + &mut self, + status: WitnessJobStatus, + batch_number: L1BatchNumber, + ) { + sqlx::query(&format!( + " + INSERT INTO + scheduler_witness_jobs_fri ( + l1_batch_number, + scheduler_partial_input_blob_url, + status, + created_at, + updated_at + ) + VALUES + ({}, '', 'waiting_for_proofs', NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET status = '{}' + ", + batch_number.0, status, + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn insert_compressor_job( + &mut self, + status: ProofCompressionJobStatus, + batch_number: L1BatchNumber, + ) { + sqlx::query(&format!( + " + INSERT INTO + proof_compression_jobs_fri ( + l1_batch_number, + status, + created_at, + updated_at + ) + VALUES + ({}, '{}', NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET status = '{}' + ", + batch_number.0, status, status, + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn update_attempts_prover_job( + &mut self, + status: ProverJobStatus, + attempts: u8, + circuit_id: u8, + aggregation_round: i64, + batch_number: L1BatchNumber, + sequence_number: usize, + ) { + sqlx::query(&format!( + "UPDATE prover_jobs_fri + SET status = '{}', attempts = {} + WHERE l1_batch_number = {} + AND sequence_number = {} + AND aggregation_round = {} + AND circuit_id = {}", + status, attempts, batch_number.0, sequence_number, aggregation_round, circuit_id, + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } + + pub async fn update_attempts_lwg( + &mut self, + status: ProverJobStatus, + attempts: u8, + circuit_id: u8, + batch_number: L1BatchNumber, + ) { + sqlx::query(&format!( + "UPDATE leaf_aggregation_witness_jobs_fri + SET status = '{}', attempts = {} + WHERE l1_batch_number = {} + AND circuit_id = {}", + status, attempts, batch_number.0, circuit_id, + )) + .execute(self.storage.conn()) + .await + .unwrap(); + } +} diff --git a/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs b/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs index 753b65b4ef0..aa4810ad2f6 100644 --- 
a/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs @@ -198,9 +198,8 @@ impl FriGpuProverQueueDal<'_, '_> { .map(|row| GpuProverInstanceStatus::from_str(&row.instance_status).unwrap()) } - pub async fn archive_old_provers(&mut self, archive_prover_after_secs: u64) -> usize { - let prover_max_age = - pg_interval_from_duration(Duration::from_secs(archive_prover_after_secs)); + pub async fn archive_old_provers(&mut self, archive_prover_after: Duration) -> usize { + let prover_max_age = pg_interval_from_duration(archive_prover_after); sqlx::query_scalar!( r#" diff --git a/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs b/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs index 7adc08b680d..31b121e51e4 100644 --- a/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs @@ -288,6 +288,7 @@ impl FriProofCompressorDal<'_, '_> { proof_compression_jobs_fri WHERE status <> 'successful' + AND status <> 'sent_to_server' ORDER BY l1_batch_number ASC LIMIT @@ -329,7 +330,9 @@ impl FriProofCompressorDal<'_, '_> { RETURNING l1_batch_number, status, - attempts + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -343,6 +346,8 @@ impl FriProofCompressorDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: None, + error: row.error, + picked_by: row.picked_by, }) .collect() } @@ -431,7 +436,9 @@ impl FriProofCompressorDal<'_, '_> { ) RETURNING status, - attempts + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts as i32, @@ -445,6 +452,8 @@ impl FriProofCompressorDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: None, + error: row.error, + picked_by: row.picked_by, }) .collect() } diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index f6efc6afa6a..c2dadae58d0 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -2,11 +2,12 @@ use std::{collections::HashMap, convert::TryFrom, str::FromStr, time::Duration}; use zksync_basic_types::{ - basic_fri_types::{AggregationRound, CircuitIdRoundTuple, JobIdentifiers}, - protocol_version::{ProtocolSemanticVersion, ProtocolVersionId}, - prover_dal::{ - FriProverJobMetadata, JobCountStatistics, ProverJobFriInfo, ProverJobStatus, StuckJobs, + basic_fri_types::{ + AggregationRound, CircuitIdRoundTuple, CircuitProverStatsEntry, + ProtocolVersionedCircuitProverStats, }, + protocol_version::{ProtocolSemanticVersion, ProtocolVersionId}, + prover_dal::{FriProverJobMetadata, ProverJobFriInfo, ProverJobStatus, StuckJobs}, L1BatchNumber, }; use zksync_db_connection::{ @@ -310,12 +311,7 @@ impl FriProverDal<'_, '_> { prover_jobs_fri WHERE ( - status = 'in_progress' - AND processing_started_at <= NOW() - $1::INTERVAL - AND attempts < $2 - ) - OR ( - status = 'in_gpu_proof' + status IN ('in_progress', 'in_gpu_proof') AND processing_started_at <= NOW() - $1::INTERVAL AND attempts < $2 ) @@ -330,7 +326,9 @@ impl FriProverDal<'_, '_> { id, status, attempts, - circuit_id + circuit_id, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -344,6 +342,8 @@ impl FriProverDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: Some(row.circuit_id as u32), + error: row.error, + picked_by: row.picked_by, }) .collect() } @@ -400,9 +400,9 @@ impl 
FriProverDal<'_, '_> { .unwrap(); } - pub async fn get_prover_jobs_stats(&mut self) -> HashMap<JobIdentifiers, JobCountStatistics> { + pub async fn get_prover_jobs_stats(&mut self) -> ProtocolVersionedCircuitProverStats { { - let rows = sqlx::query!( + sqlx::query!( r#" SELECT COUNT(*) AS "count!", @@ -429,27 +429,19 @@ impl FriProverDal<'_, '_> { ) .fetch_all(self.storage.conn()) .await - .unwrap(); - - let mut result = HashMap::new(); - - for row in &rows { - let stats: &mut JobCountStatistics = result - .entry(JobIdentifiers { - circuit_id: row.circuit_id as u8, - aggregation_round: row.aggregation_round as u8, - protocol_version: row.protocol_version as u16, - protocol_version_patch: row.protocol_version_patch as u32, - }) - .or_default(); - match row.status.as_ref() { - "queued" => stats.queued = row.count as usize, - "in_progress" => stats.in_progress = row.count as usize, - _ => (), - } - } - - result + .unwrap() + .iter() + .map(|row| { + CircuitProverStatsEntry::new( + row.circuit_id, + row.aggregation_round, + row.protocol_version, + row.protocol_version_patch, + &row.status, + row.count, + ) + }) + .collect() } } @@ -577,19 +569,20 @@ impl FriProverDal<'_, '_> { .ok()? .map(|row| row.id as u32) } - - pub async fn archive_old_jobs(&mut self, archiving_interval_secs: u64) -> usize { - let archiving_interval_secs = - pg_interval_from_duration(Duration::from_secs(archiving_interval_secs)); + pub async fn archive_old_jobs(&mut self, archiving_interval: Duration) -> usize { + let archiving_interval_secs = pg_interval_from_duration(archiving_interval); sqlx::query_scalar!( r#" WITH deleted AS ( - DELETE FROM prover_jobs_fri + DELETE FROM prover_jobs_fri AS p + USING proof_compression_jobs_fri AS c WHERE - status NOT IN ('queued', 'in_progress', 'in_gpu_proof', 'failed') - AND updated_at < NOW() - $1::INTERVAL - RETURNING * + p.status NOT IN ('queued', 'in_progress', 'in_gpu_proof', 'failed') + AND p.updated_at < NOW() - $1::INTERVAL + AND p.l1_batch_number = c.l1_batch_number + AND c.status = 'sent_to_server' + RETURNING p.* ), inserted_count AS ( INSERT INTO prover_jobs_fri_archive @@ -744,7 +737,9 @@ impl FriProverDal<'_, '_> { id, status, attempts, - circuit_id + circuit_id, + error, + picked_by "#, i64::from(block_number.0), max_attempts as i32, @@ -758,6 +753,8 @@ impl FriProverDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: Some(row.circuit_id as u32), + error: row.error, + picked_by: row.picked_by, }) .collect() } diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 488d5b3a5ec..65d490ee4e0 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -248,7 +248,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(); } - pub async fn requeue_stuck_jobs( + pub async fn requeue_stuck_basic_jobs( &mut self, processing_timeout: Duration, max_attempts: u32, @@ -267,11 +267,6 @@ impl FriWitnessGeneratorDal<'_, '_> { AND processing_started_at <= NOW() - $1::INTERVAL AND attempts < $2 ) - OR ( - status = 'in_gpu_proof' - AND processing_started_at <= NOW() - $1::INTERVAL - AND attempts < $2 - ) OR ( status = 'failed' AND attempts < $2 @@ -279,7 +274,9 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING l1_batch_number, status, - attempts + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -293,6 +290,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.status, attempts: row.attempts as u64,
circuit_id: None, + error: row.error, + picked_by: row.picked_by, }) .collect() } @@ -928,15 +927,15 @@ impl FriWitnessGeneratorDal<'_, '_> { "#, AggregationRound::RecursionTip as i64, ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number as u64)) - .collect() + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number as u64)) + .collect() } - pub async fn requeue_stuck_leaf_aggregations_jobs( + pub async fn requeue_stuck_leaf_jobs( &mut self, processing_timeout: Duration, max_attempts: u32, @@ -963,7 +962,9 @@ impl FriWitnessGeneratorDal<'_, '_> { id, status, attempts, - circuit_id + circuit_id, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -977,11 +978,13 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: Some(row.circuit_id as u32), + error: row.error, + picked_by: row.picked_by, }) .collect() } - pub async fn requeue_stuck_node_aggregations_jobs( + pub async fn requeue_stuck_node_jobs( &mut self, processing_timeout: Duration, max_attempts: u32, @@ -1008,7 +1011,9 @@ impl FriWitnessGeneratorDal<'_, '_> { id, status, attempts, - circuit_id + circuit_id, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -1022,6 +1027,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: Some(row.circuit_id as u32), + error: row.error, + picked_by: row.picked_by, }) .collect() } @@ -1052,7 +1059,9 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING l1_batch_number, status, - attempts + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -1066,6 +1075,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: None, + error: row.error, + picked_by: row.picked_by, }) .collect() } @@ -1164,7 +1175,9 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING l1_batch_number, status, - attempts + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -1178,6 +1191,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.status, attempts: row.attempts as u64, circuit_id: None, + error: row.error, + picked_by: row.picked_by, }) .collect() } @@ -1708,7 +1723,9 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING l1_batch_number, status, - attempts + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts @@ -1723,6 +1740,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.get("status"), attempts: row.get::<i16, &str>("attempts") as u64, circuit_id: None, + error: row.get("error"), + picked_by: row.get("picked_by"), }) .collect() } @@ -1772,7 +1791,9 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING l1_batch_number, status, - attempts + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts @@ -1787,6 +1808,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.get("status"), attempts: row.get::<i16, &str>("attempts") as u64, circuit_id: None, + error: row.get("error"), + picked_by: row.get("picked_by"), }) .collect() } @@ -1810,7 +1833,9 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING l1_batch_number, status, - attempts + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts @@ -1825,6 +1850,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.get("status"), attempts: row.get::<i16, &str>("attempts") as u64, circuit_id: None, + error: row.get("error"), + picked_by: row.get("picked_by"), }) .collect() } @@ -1852,7 +1879,9 @@ impl
FriWitnessGeneratorDal<'_, '_> { {}, status, attempts, - circuit_id + circuit_id, + error, + picked_by "#, table_name, i64::from(block_number.0), @@ -1869,6 +1898,8 @@ impl FriWitnessGeneratorDal<'_, '_> { status: row.get("status"), attempts: row.get::<i16, &str>("attempts") as u64, circuit_id: Some(row.get::<i16, &str>("circuit_id") as u32), + error: row.get("error"), + picked_by: row.get("picked_by"), }) .collect() } diff --git a/prover/crates/lib/prover_dal/src/lib.rs b/prover/crates/lib/prover_dal/src/lib.rs index bb552b899e9..85fcc260aa8 100644 --- a/prover/crates/lib/prover_dal/src/lib.rs +++ b/prover/crates/lib/prover_dal/src/lib.rs @@ -6,12 +6,13 @@ pub use zksync_db_connection::{ }; use crate::{ - fri_gpu_prover_queue_dal::FriGpuProverQueueDal, + cli_test_dal::CliTestDal, fri_gpu_prover_queue_dal::FriGpuProverQueueDal, fri_proof_compressor_dal::FriProofCompressorDal, fri_protocol_versions_dal::FriProtocolVersionsDal, fri_prover_dal::FriProverDal, fri_witness_generator_dal::FriWitnessGeneratorDal, }; +pub mod cli_test_dal; pub mod fri_gpu_prover_queue_dal; pub mod fri_proof_compressor_dal; pub mod fri_protocol_versions_dal; @@ -29,6 +30,8 @@ pub trait ProverDal<'a>: private::Sealed where Self: 'a, { + fn cli_test_dal(&mut self) -> CliTestDal<'_, 'a>; + fn fri_witness_generator_dal(&mut self) -> FriWitnessGeneratorDal<'_, 'a>; fn fri_prover_jobs_dal(&mut self) -> FriProverDal<'_, 'a>; @@ -68,4 +71,7 @@ impl<'a> ProverDal<'a> for Connection<'a, Prover> { fn fri_proof_compressor_dal(&mut self) -> FriProofCompressorDal<'_, 'a> { FriProofCompressorDal { storage: self } } + fn cli_test_dal(&mut self) -> CliTestDal<'_, 'a> { + CliTestDal { storage: self } + } } diff --git a/prover/crates/lib/prover_fri_types/src/keys.rs b/prover/crates/lib/prover_fri_types/src/keys.rs index 729db754178..2948fc5f84e 100644 --- a/prover/crates/lib/prover_fri_types/src/keys.rs +++ b/prover/crates/lib/prover_fri_types/src/keys.rs @@ -35,3 +35,11 @@ pub struct CircuitKey<'a> { pub circuit_type: &'a str, pub aggregation_round: AggregationRound, } + +/// Storage key for a [`RamPermutationQueueWitness`].
+#[derive(Debug, Clone, Copy)] +pub struct RamPermutationQueueWitnessKey { + pub block_number: L1BatchNumber, + pub circuit_subsequence_number: usize, + pub is_sorted: bool, +} diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs index 423be1f88fa..c14bc190563 100644 --- a/prover/crates/lib/prover_fri_types/src/lib.rs +++ b/prover/crates/lib/prover_fri_types/src/lib.rs @@ -9,10 +9,12 @@ use circuit_definitions::{ ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, ZkSyncRecursiveLayerCircuit, }, }, + encodings::memory_query::MemoryQueueStateWitnesses, zkevm_circuits::scheduler::{ aux::BaseLayerCircuitType, block_header::BlockAuxilaryOutputWitness, }, }; +use keys::RamPermutationQueueWitnessKey; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ basic_fri_types::AggregationRound, @@ -33,11 +35,17 @@ pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSe patch: PROVER_PROTOCOL_PATCH, }; +#[derive(serde::Serialize, serde::Deserialize, Clone)] +pub struct CircuitAuxData { + pub circuit_subsequence_number: u32, +} + #[derive(serde::Serialize, serde::Deserialize, Clone)] #[allow(clippy::large_enum_variant)] pub enum CircuitWrapper { Base(ZkSyncBaseLayerCircuit), Recursive(ZkSyncRecursiveLayerCircuit), + BasePartial((ZkSyncBaseLayerCircuit, CircuitAuxData)), } impl StoredObject for CircuitWrapper { @@ -214,3 +222,27 @@ impl StoredObject for AuxOutputWitnessWrapper { pub fn get_current_pod_name() -> String { env::var("POD_NAME").unwrap_or("UNKNOWN_POD".to_owned()) } + +#[derive(serde::Serialize, serde::Deserialize)] +pub struct RamPermutationQueueWitness { + pub witness: MemoryQueueStateWitnesses, +} + +impl StoredObject for RamPermutationQueueWitness { + const BUCKET: Bucket = Bucket::ProverJobsFri; + type Key<'a> = RamPermutationQueueWitnessKey; + + fn encode_key(key: Self::Key<'_>) -> String { + let RamPermutationQueueWitnessKey { + block_number, + circuit_subsequence_number, + is_sorted, + } = key; + format!( + "queue_witness_{block_number}_{circuit_subsequence_number}_{}.bin", + is_sorted as u64 + ) + } + + serialize_using_bincode!(); +} diff --git a/prover/crates/lib/prover_fri_utils/src/lib.rs b/prover/crates/lib/prover_fri_utils/src/lib.rs index 0873d505628..02c6da3d5f5 100644 --- a/prover/crates/lib/prover_fri_utils/src/lib.rs +++ b/prover/crates/lib/prover_fri_utils/src/lib.rs @@ -4,14 +4,18 @@ use zksync_object_store::ObjectStore; use zksync_prover_dal::{Connection, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ - circuit_definitions::recursion_layer::{ - base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, + boojum::gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerCircuit, + recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, + }, }, zkevm_circuits::scheduler::aux::BaseLayerCircuitType, }, get_current_pod_name, - keys::FriCircuitKey, - CircuitWrapper, ProverJob, ProverServiceDataKey, + keys::{FriCircuitKey, RamPermutationQueueWitnessKey}, + CircuitWrapper, ProverJob, ProverServiceDataKey, RamPermutationQueueWitness, }; use zksync_types::{ basic_fri_types::{AggregationRound, CircuitIdRoundTuple}, @@ -61,10 +65,52 @@ pub async fn fetch_next_circuit( depth: prover_job.depth, }; let started_at = Instant::now(); - let input = blob_store + let circuit_wrapper = blob_store 
.get(circuit_key) .await .unwrap_or_else(|err| panic!("{err:?}")); + let input = match circuit_wrapper { + a @ CircuitWrapper::Base(_) => a, + a @ CircuitWrapper::Recursive(_) => a, + CircuitWrapper::BasePartial((circuit, aux_data)) => { + // inject additional data + if let ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance) = circuit { + let sorted_witness_key = RamPermutationQueueWitnessKey { + block_number: prover_job.block_number, + circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, + is_sorted: true, + }; + + let sorted_witness_handle = blob_store.get(sorted_witness_key); + + let unsorted_witness_key = RamPermutationQueueWitnessKey { + block_number: prover_job.block_number, + circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, + is_sorted: false, + }; + + let unsorted_witness_handle = blob_store.get(unsorted_witness_key); + + let unsorted_witness: RamPermutationQueueWitness = + unsorted_witness_handle.await.unwrap(); + let sorted_witness: RamPermutationQueueWitness = + sorted_witness_handle.await.unwrap(); + + let mut witness = circuit_instance.witness.take().unwrap(); + witness.unsorted_queue_witness = FullStateCircuitQueueRawWitness { + elements: unsorted_witness.witness.into(), + }; + witness.sorted_queue_witness = FullStateCircuitQueueRawWitness { + elements: sorted_witness.witness.into(), + }; + circuit_instance.witness.store(Some(witness)); + + CircuitWrapper::Base(ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance)) + } else { + panic!("Unexpected circuit received with partial witness"); + } + } + }; let label = CircuitLabels { circuit_type: prover_job.circuit_id, @@ -97,7 +143,9 @@ pub fn get_base_layer_circuit_id_for_recursive_layer(recursive_layer_circuit_id: pub fn get_numeric_circuit_id(circuit_wrapper: &CircuitWrapper) -> u8 { match circuit_wrapper { - CircuitWrapper::Base(circuit) => circuit.numeric_circuit_type(), + CircuitWrapper::Base(circuit) | CircuitWrapper::BasePartial((circuit, _)) => { + circuit.numeric_circuit_type() + } CircuitWrapper::Recursive(circuit) => circuit.numeric_circuit_type(), } } diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 04a29f5b0f4..41b972a4cef 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -6337,9 +6337,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a463106f37cfa589896e6a165b5bb0533013377990e19f10e8c4894346a62e8b" +checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" dependencies = [ "anyhow", "once_cell", @@ -6371,9 +6371,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "612920e56dcb99f227bc23e1254f4dabc7cb4c5cd1a9ec400ceba0ec6fa77c1e" +checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" dependencies = [ "anyhow", "rand", @@ -6422,9 +6422,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d82fd63f27681b9c01f0e01e3060e71b72809db8e21d9130663ee92bd1e391" +checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" dependencies = [ "anyhow", "bit-vec", @@ -6443,9 +6443,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.10" +version = 
"0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3c158ab4d211053886371d4a00514bdf8ebdf826d40ee03b98fee2e0d1605e" +checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index ab850d82770..ef2aed7c99c 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,7 +30,7 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_protobuf = "=0.1.0-rc.10" +zksync_protobuf = "=0.1.0-rc.11" # External dependencies anyhow = "1.0.82" diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zk_toolbox/crates/common/src/ethereum.rs index c035d588370..93393f8a59c 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zk_toolbox/crates/common/src/ethereum.rs @@ -1,6 +1,7 @@ -use std::{ops::Add, time::Duration}; +use std::{ops::Add, sync::Arc, time::Duration}; use ethers::{ + contract::abigen, core::k256::ecdsa::SigningKey, middleware::MiddlewareBuilder, prelude::{Http, LocalWallet, Provider, Signer, SignerMiddleware}, @@ -8,7 +9,7 @@ use ethers::{ types::{Address, TransactionRequest, H256}, }; -use crate::wallets::Wallet; +use crate::{logger, wallets::Wallet}; pub fn create_ethers_client( private_key: H256, @@ -53,3 +54,51 @@ pub async fn distribute_eth( futures::future::join_all(pending_txs).await; Ok(()) } + +abigen!( + TokenContract, + r"[ + function mint(address to, uint256 amount) + ]" +); + +pub async fn mint_token( + main_wallet: Wallet, + token_address: Address, + addresses: Vec
<Address>, + l1_rpc: String, + chain_id: u64, + amount: u128, +) -> anyhow::Result<()> { + let client = Arc::new(create_ethers_client( + main_wallet.private_key.unwrap(), + l1_rpc, + Some(chain_id), + )?); + + let contract = TokenContract::new(token_address, client); + // contract + for address in addresses { + if let Err(err) = mint(&contract, address, amount).await { + logger::warn(format!("Failed to mint {err}")) + } + } + + Ok(()) +} + +async fn mint( + contract: &TokenContract, + address: Address, + amount: u128, +) -> anyhow::Result<()> { + contract + .mint(address, amount.into()) + .send() + .await? + // It's safe to set such low number of confirmations and low interval for localhost + .confirmations(1) + .interval(Duration::from_millis(30)) + .await?; + Ok(()) +} diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index 76d8a0c45b2..8ce4b733c26 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -13,11 +13,14 @@ use zksync_basic_types::L2ChainId; use crate::{ consts::{ CONFIGS_PATH, CONFIG_NAME, CONTRACTS_FILE, ECOSYSTEM_PATH, ERA_CHAIN_ID, - ERC20_DEPLOYMENT_FILE, INITIAL_DEPLOYMENT_FILE, L1_CONTRACTS_FOUNDRY, LOCAL_DB_PATH, - WALLETS_FILE, + ERC20_CONFIGS_FILE, ERC20_DEPLOYMENT_FILE, INITIAL_DEPLOYMENT_FILE, L1_CONTRACTS_FOUNDRY, + LOCAL_DB_PATH, WALLETS_FILE, }, create_localhost_wallets, - forge_interface::deploy_ecosystem::input::{Erc20DeploymentConfig, InitialDeploymentConfig}, + forge_interface::deploy_ecosystem::{ + input::{Erc20DeploymentConfig, InitialDeploymentConfig}, + output::{ERC20Tokens, Erc20Token}, + }, traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, ChainConfig, ChainConfigInternal, ContractsConfig, WalletsConfig, }; @@ -169,6 +172,11 @@ impl EcosystemConfig { pub fn get_erc20_deployment_config(&self) -> anyhow::Result<Erc20DeploymentConfig> { Erc20DeploymentConfig::read(self.get_shell(), self.config.join(ERC20_DEPLOYMENT_FILE)) } + pub fn get_erc20_tokens(&self) -> Vec<Erc20Token> { + ERC20Tokens::read(self.get_shell(), self.config.join(ERC20_CONFIGS_FILE)) + .map(|tokens| tokens.tokens.values().cloned().collect()) + .unwrap_or_default() + } pub fn get_wallets(&self) -> anyhow::Result<WalletsConfig> { let path = self.config.join(WALLETS_FILE); diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 0dc117ae8cd..30ec0eeb9c4 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -92,13 +92,6 @@ impl Default for Erc20DeploymentConfig { implementation: String::from("TestnetERC20Token.sol"), mint: U256::from_str("9000000000000000000000").unwrap(), }, - Erc20DeploymentTokensConfig { - name: String::from("Wrapped Ether"), - symbol: String::from("WETH"), - decimals: 18, - implementation: String::from("WETH9.sol"), - mint: U256::zero(), - }, ], } } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs index 77f21211491..bf9292e9ba3 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -79,7 +79,7 @@ pub struct L1StateTransitionOutput { } #[derive(Debug, Deserialize, Serialize, Clone)] -pub struct TokenDeployErc20Output { +pub struct Erc20Token { pub address: Address, pub name:
String, pub symbol: String, @@ -89,12 +89,12 @@ pub struct TokenDeployErc20Output { } #[derive(Debug, Deserialize, Serialize, Clone)] -pub struct DeployErc20Output { - pub tokens: HashMap, +pub struct ERC20Tokens { + pub tokens: HashMap, } -impl FileConfigWithDefaultName for DeployErc20Output { +impl FileConfigWithDefaultName for ERC20Tokens { const FILE_NAME: &'static str = ERC20_CONFIGS_FILE; } -impl ZkToolboxConfig for DeployErc20Output {} +impl ZkToolboxConfig for ERC20Tokens {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 091d1893661..4dfc6c17470 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -14,6 +14,7 @@ use crate::{ pub struct RocksDbs { pub state_keeper: PathBuf, pub merkle_tree: PathBuf, + pub protective_reads: PathBuf, } pub fn set_rocks_db_config(config: &mut GeneralConfig, rocks_dbs: RocksDbs) -> anyhow::Result<()> { @@ -28,6 +29,11 @@ pub fn set_rocks_db_config(config: &mut GeneralConfig, rocks_dbs: RocksDbs) -> a .context("DB config is not presented")? .merkle_tree .path = rocks_dbs.merkle_tree.to_str().unwrap().to_string(); + config + .protective_reads_writer_config + .as_mut() + .context("Protective reads config is not presented")? + .db_path = rocks_dbs.protective_reads.to_str().unwrap().to_string(); Ok(()) } diff --git a/zk_toolbox/crates/types/src/base_token.rs b/zk_toolbox/crates/types/src/base_token.rs index f3b01185da6..12a079e9abd 100644 --- a/zk_toolbox/crates/types/src/base_token.rs +++ b/zk_toolbox/crates/types/src/base_token.rs @@ -1,7 +1,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct BaseToken { pub address: Address, pub nominator: u64, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs index 3ccc737acc4..65f80928789 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs @@ -3,6 +3,7 @@ use std::{path::PathBuf, str::FromStr}; use anyhow::{bail, Context}; use clap::{Parser, ValueEnum}; use common::{Prompt, PromptConfirm, PromptSelect}; +use config::forge_interface::deploy_ecosystem::output::Erc20Token; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; use strum::{Display, EnumIter, IntoEnumIterator}; @@ -71,6 +72,7 @@ impl ChainCreateArgs { self, number_of_chains: u32, l1_network: &L1Network, + possible_erc20: Vec, ) -> anyhow::Result { let mut chain_name = self .chain_name @@ -151,14 +153,24 @@ impl ChainCreateArgs { && self.base_token_price_denominator.is_none() && self.base_token_price_nominator.is_none() { - let base_token_selection = - PromptSelect::new(MSG_BASE_TOKEN_SELECTION_PROMPT, BaseTokenSelection::iter()) - .ask(); + let mut token_selection: Vec<_> = + BaseTokenSelection::iter().map(|a| a.to_string()).collect(); - match base_token_selection { - BaseTokenSelection::Eth => BaseToken::eth(), - BaseTokenSelection::Custom => { - let address = Prompt::new(MSG_BASE_TOKEN_ADDRESS_PROMPT).ask(); + let erc20_tokens = &mut (possible_erc20 + .iter() + .map(|t| format!("{:?}", t.address)) + .collect()); + token_selection.append(erc20_tokens); + let base_token_selection = + PromptSelect::new(MSG_BASE_TOKEN_SELECTION_PROMPT, token_selection).ask(); + match base_token_selection.as_str() { + "Eth" => BaseToken::eth(), + 
other => { + let address = if other == "Custom" { + Prompt::new(MSG_BASE_TOKEN_ADDRESS_PROMPT).ask() + } else { + H160::from_str(other)? + }; let nominator = Prompt::new(MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT) .validate_with(number_validator) .ask(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs index 7e20ae449a8..9e109094cbe 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs @@ -27,10 +27,12 @@ fn create( ecosystem_config: &mut EcosystemConfig, shell: &Shell, ) -> anyhow::Result<()> { + let tokens = ecosystem_config.get_erc20_tokens(); let args = args .fill_values_with_prompt( ecosystem_config.list_of_chains().len() as u32, &ecosystem_config.l1_network, + tokens, ) .context(MSG_ARGS_VALIDATOR_ERR)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index 17a993a86ac..b3b43c75c36 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -15,6 +15,7 @@ use config::{ traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, update_from_chain_config, ChainConfig, ContractsConfig, EcosystemConfig, }; +use types::{BaseToken, L1Network, WalletCreation}; use xshell::Shell; use crate::{ @@ -24,10 +25,11 @@ use crate::{ deploy_l2_contracts, deploy_paymaster, genesis::genesis, }, + consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, - MSG_CHAIN_NOT_FOUND_ERR, MSG_GENESIS_DATABASE_ERR, MSG_REGISTERING_CHAIN_SPINNER, - MSG_SELECTED_CONFIG, + MSG_CHAIN_NOT_FOUND_ERR, MSG_DISTRIBUTING_ETH_SPINNER, MSG_GENESIS_DATABASE_ERR, + MSG_MINT_BASE_TOKEN_SPINNER, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, }, utils::forge::{check_the_balance, fill_forge_private_key}, }; @@ -67,12 +69,9 @@ pub async fn init( contracts_config.l1.base_token_addr = chain_config.base_token.address; contracts_config.save_with_base_path(shell, &chain_config.configs)?; - crate::commands::ecosystem::init::distribute_eth( - ecosystem_config, - chain_config, - init_args.l1_rpc_url.clone(), - ) - .await?; + distribute_eth(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; + mint_base_token(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; + let mut secrets = chain_config.get_secrets_config()?; set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; secrets.save_with_base_path(shell, &chain_config.configs)?; @@ -160,3 +159,66 @@ async fn register_chain( contracts.set_chain_contracts(®ister_chain_output); Ok(()) } + +// Distribute eth to the chain wallets for localhost environment +pub async fn distribute_eth( + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, + l1_rpc_url: String, +) -> anyhow::Result<()> { + if chain_config.wallet_creation == WalletCreation::Localhost + && ecosystem_config.l1_network == L1Network::Localhost + { + let spinner = Spinner::new(MSG_DISTRIBUTING_ETH_SPINNER); + let wallets = ecosystem_config.get_wallets()?; + let chain_wallets = chain_config.get_wallets_config()?; + let mut addresses = vec![ + chain_wallets.operator.address, + chain_wallets.blob_operator.address, + chain_wallets.governor.address, + ]; + if let Some(deployer) = chain_wallets.deployer { + addresses.push(deployer.address) + } + common::ethereum::distribute_eth( + 
wallets.operator, + addresses, + l1_rpc_url, + ecosystem_config.l1_network.chain_id(), + AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, + ) + .await?; + spinner.finish(); + } + Ok(()) +} + +pub async fn mint_base_token( + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, + l1_rpc_url: String, +) -> anyhow::Result<()> { + if chain_config.wallet_creation == WalletCreation::Localhost + && ecosystem_config.l1_network == L1Network::Localhost + && chain_config.base_token != BaseToken::eth() + { + let spinner = Spinner::new(MSG_MINT_BASE_TOKEN_SPINNER); + let wallets = ecosystem_config.get_wallets()?; + let chain_wallets = chain_config.get_wallets_config()?; + let base_token = &chain_config.base_token; + let addresses = vec![wallets.governor.address, chain_wallets.governor.address]; + let amount = AMOUNT_FOR_DISTRIBUTION_TO_WALLETS * base_token.nominator as u128 + / base_token.denominator as u128; + common::ethereum::mint_token( + wallets.operator, + base_token.address, + addresses, + l1_rpc_url, + ecosystem_config.l1_network.chain_id(), + amount, + ) + .await?; + spinner.finish(); + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs index 4063f4ccdcd..2e5c50f4538 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs @@ -71,7 +71,7 @@ impl EcosystemCreateArgs { // Make the only chain as a default one self.chain.set_as_default = Some(true); - let chain = self.chain.fill_values_with_prompt(0, &l1_network)?; + let chain = self.chain.fill_values_with_prompt(0, &l1_network, vec![])?; let start_containers = self.start_containers.unwrap_or_else(|| { PromptConfirm::new(MSG_START_CONTAINERS_PROMPT) diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 9afc674a5da..fc4a3c9b320 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -18,7 +18,7 @@ use config::{ input::{ DeployErc20Config, DeployL1Config, Erc20DeploymentConfig, InitialDeploymentConfig, }, - output::{DeployErc20Output, DeployL1Output}, + output::{DeployL1Output, ERC20Tokens}, }, script_params::{DEPLOY_ECOSYSTEM_SCRIPT_PARAMS, DEPLOY_ERC20_SCRIPT_PARAMS}, }, @@ -26,9 +26,9 @@ use config::{ FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig, SaveConfigWithBasePath, }, - ChainConfig, ContractsConfig, EcosystemConfig, GenesisConfig, + ContractsConfig, EcosystemConfig, GenesisConfig, }; -use types::{L1Network, ProverMode, WalletCreation}; +use types::{L1Network, ProverMode}; use xshell::{cmd, Shell}; use super::{ @@ -43,14 +43,13 @@ use crate::{ create_erc20_deployment_config, create_initial_deployments_config, }, }, - consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, messages::{ msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, - MSG_DEPLOYING_ERC20_SPINNER, MSG_DISTRIBUTING_ETH_SPINNER, - MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, - MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER, + MSG_DEPLOYING_ERC20_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, + MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, + MSG_INTALLING_DEPS_SPINNER, }, 
utils::forge::{check_the_balance, fill_forge_private_key}, }; @@ -136,39 +135,6 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { Ok(()) } -// Distribute eth to the chain wallets for localhost environment -pub async fn distribute_eth( - ecosystem_config: &EcosystemConfig, - chain_config: &ChainConfig, - l1_rpc_url: String, -) -> anyhow::Result<()> { - if chain_config.wallet_creation == WalletCreation::Localhost - && ecosystem_config.l1_network == L1Network::Localhost - { - let spinner = Spinner::new(MSG_DISTRIBUTING_ETH_SPINNER); - let wallets = ecosystem_config.get_wallets()?; - let chain_wallets = chain_config.get_wallets_config()?; - let mut addresses = vec![ - chain_wallets.operator.address, - chain_wallets.blob_operator.address, - chain_wallets.governor.address, - ]; - if let Some(deployer) = chain_wallets.deployer { - addresses.push(deployer.address) - } - common::ethereum::distribute_eth( - wallets.operator, - addresses, - l1_rpc_url, - ecosystem_config.l1_network.chain_id(), - AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, - ) - .await?; - spinner.finish(); - } - Ok(()) -} - async fn init( init_args: &mut EcosystemInitArgsFinal, shell: &Shell, @@ -199,7 +165,7 @@ async fn deploy_erc20( contracts_config: &ContractsConfig, forge_args: ForgeScriptArgs, l1_rpc_url: String, -) -> anyhow::Result<DeployErc20Output> { +) -> anyhow::Result<ERC20Tokens> { let deploy_config_path = DEPLOY_ERC20_SCRIPT_PARAMS.input(&ecosystem_config.link_to_code); let wallets = ecosystem_config.get_wallets()?; DeployErc20Config::new( @@ -229,7 +195,7 @@ async fn deploy_erc20( forge.run(shell)?; spinner.finish(); - let result = DeployErc20Output::read( + let result = ERC20Tokens::read( shell, DEPLOY_ERC20_SCRIPT_PARAMS.output(&ecosystem_config.link_to_code), )?; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs index fcbde71b012..34b0eeae419 100644 --- a/zk_toolbox/crates/zk_inception/src/defaults.rs +++ b/zk_toolbox/crates/zk_inception/src/defaults.rs @@ -11,6 +11,7 @@ lazy_static!
{ pub const ROCKS_DB_STATE_KEEPER: &str = "state_keeper"; pub const ROCKS_DB_TREE: &str = "tree"; +pub const ROCKS_DB_PROTECTIVE_READS: &str = "protective_reads"; pub const EN_ROCKS_DB_PREFIX: &str = "en"; pub const MAIN_ROCKS_DB_PREFIX: &str = "main"; diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 2b524aebd38..420dd6706e3 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -67,6 +67,8 @@ pub(super) const MSG_INITIALIZING_ECOSYSTEM: &str = "Initializing ecosystem"; pub(super) const MSG_DEPLOYING_ERC20: &str = "Deploying ERC20 contracts"; pub(super) const MSG_CHAIN_INITIALIZED: &str = "Chain initialized successfully"; pub(super) const MSG_DISTRIBUTING_ETH_SPINNER: &str = "Distributing eth..."; +pub(super) const MSG_MINT_BASE_TOKEN_SPINNER: &str = + "Minting base token to the governance addresses..."; pub(super) const MSG_INTALLING_DEPS_SPINNER: &str = "Installing and building dependencies..."; pub(super) const MSG_DEPLOYING_ERC20_SPINNER: &str = "Deploying ERC20 contracts..."; pub(super) const MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER: &str = diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs index fc80aca100b..17cffa66e39 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs @@ -4,7 +4,8 @@ use config::RocksDbs; use xshell::Shell; use crate::defaults::{ - EN_ROCKS_DB_PREFIX, MAIN_ROCKS_DB_PREFIX, ROCKS_DB_STATE_KEEPER, ROCKS_DB_TREE, + EN_ROCKS_DB_PREFIX, MAIN_ROCKS_DB_PREFIX, ROCKS_DB_PROTECTIVE_READS, ROCKS_DB_STATE_KEEPER, + ROCKS_DB_TREE, }; pub enum RocksDBDirOption { @@ -32,8 +33,13 @@ pub fn recreate_rocksdb_dirs( shell.remove_path(&state_keeper)?; let merkle_tree = rocks_db_path.join(option.prefix()).join(ROCKS_DB_TREE); shell.remove_path(&merkle_tree)?; + let protective_reads = rocks_db_path + .join(option.prefix()) + .join(ROCKS_DB_PROTECTIVE_READS); + shell.remove_path(&protective_reads)?; Ok(RocksDbs { state_keeper: shell.create_dir(state_keeper)?, merkle_tree: shell.create_dir(merkle_tree)?, + protective_reads: shell.create_dir(protective_reads)?, }) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs new file mode 100644 index 00000000000..bbad72f6537 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs @@ -0,0 +1,189 @@ +use clap::{Parser, ValueEnum}; +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use strum::EnumIter; +use xshell::{cmd, Shell}; + +use crate::messages::{ + msg_running_linter_for_extension_spinner, msg_running_linters_for_files, + MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, +}; + +const IGNORED_DIRS: [&str; 18] = [ + "target", + "node_modules", + "volumes", + "build", + "dist", + ".git", + "generated", + "grafonnet-lib", + "prettier-config", + "lint-config", + "cache", + "artifacts", + "typechain", + "binaryen", + "system-contracts", + "artifacts-zk", + "cache-zk", + // Ignore directories with OZ and forge submodules. 
+ "contracts/l1-contracts/lib", +]; + +const IGNORED_FILES: [&str; 4] = [ + "KeysWithPlonkVerifier.sol", + "TokenInit.sol", + ".tslintrc.js", + ".prettierrc.js", +]; + +const CONFIG_PATH: &str = "etc/lint-config"; + +#[derive(Debug, Parser)] +pub struct LintArgs { + #[clap(long, short = 'c')] + pub check: bool, + #[clap(long, short = 'e')] + pub extensions: Vec, +} + +#[derive(Debug, ValueEnum, EnumIter, strum::Display, PartialEq, Eq, Clone)] +#[strum(serialize_all = "lowercase")] +pub enum Extension { + Rs, + Md, + Sol, + Js, + Ts, +} + +pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { + let extensions = if args.extensions.is_empty() { + vec![ + Extension::Rs, + Extension::Md, + Extension::Sol, + Extension::Js, + Extension::Ts, + ] + } else { + args.extensions.clone() + }; + + logger::info(msg_running_linters_for_files(&extensions)); + + let ecosystem = EcosystemConfig::from_file(shell)?; + + for extension in extensions { + match extension { + Extension::Rs => lint_rs(shell, &ecosystem)?, + Extension::Sol => lint_contracts(shell, &ecosystem, args.check)?, + ext => lint(shell, &ecosystem, &ext, args.check)?, + } + } + + Ok(()) +} + +fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::Result<()> { + let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(&Extension::Rs)); + + let link_to_code = &ecosystem.link_to_code; + let lint_to_prover = &ecosystem.link_to_code.join("prover"); + let link_to_toolbox = &ecosystem.link_to_code.join("zk_toolbox"); + let paths = vec![link_to_code, lint_to_prover, link_to_toolbox]; + + for path in paths { + let _dir_guard = shell.push_dir(path); + Cmd::new(cmd!( + shell, + "cargo clippy --locked -- -D warnings -D unstable_features" + )) + .run()?; + } + + spinner.finish(); + + Ok(()) +} + +fn get_linter(extension: &Extension) -> Vec { + match extension { + Extension::Rs => vec!["cargo".to_string(), "clippy".to_string()], + Extension::Md => vec!["markdownlint".to_string()], + Extension::Sol => vec!["solhint".to_string()], + Extension::Js => vec!["eslint".to_string()], + Extension::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], + } +} + +fn lint( + shell: &Shell, + ecosystem: &EcosystemConfig, + extension: &Extension, + check: bool, +) -> anyhow::Result<()> { + let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(extension)); + let _dir_guard = shell.push_dir(&ecosystem.link_to_code); + let files = get_unignored_files(shell, extension)?; + + let cmd = cmd!(shell, "yarn"); + let config_path = ecosystem.link_to_code.join(CONFIG_PATH); + let config_path = config_path.join(format!("{}.js", extension)); + let config_path = config_path + .to_str() + .expect(MSG_LINT_CONFIG_PATH_ERR) + .to_string(); + + let linter = get_linter(extension); + + let fix_option = if check { + vec![] + } else { + vec!["--fix".to_string()] + }; + + let args = [ + linter.as_slice(), + &fix_option, + &["--config".to_string(), config_path], + files.as_slice(), + ] + .concat(); + + Cmd::new(cmd.args(&args)).run()?; + spinner.finish(); + Ok(()) +} + +fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> { + lint(shell, ecosystem, &Extension::Sol, check)?; + + let spinner = Spinner::new(MSG_RUNNING_CONTRACTS_LINTER_SPINNER); + let _dir_guard = shell.push_dir(&ecosystem.link_to_code); + let cmd = cmd!(shell, "yarn"); + let linter = if check { "lint:check" } else { "lint:fix" }; + let args = ["--cwd", "contracts", linter]; + Cmd::new(cmd.args(&args)).run()?; + 
spinner.finish(); + + Ok(()) +} + +fn get_unignored_files(shell: &Shell, extension: &Extension) -> anyhow::Result<Vec<String>> { + let mut files = Vec::new(); + let output = cmd!(shell, "git ls-files").read()?; + + for line in output.lines() { + let path = line.to_string(); + if !IGNORED_DIRS.iter().any(|dir| path.contains(dir)) + && !IGNORED_FILES.contains(&path.as_str()) + && path.ends_with(&format!(".{}", extension)) + { + files.push(path); + } + } + + Ok(files) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index cc2b0a12b33..b7a6a54f121 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -1,4 +1,5 @@ pub mod clean; pub mod database; +pub mod lint; pub mod snapshot; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 2976fb55418..51b8f00ef37 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -1,5 +1,7 @@ use clap::{Parser, Subcommand}; -use commands::{database::DatabaseCommands, snapshot::SnapshotCommands, test::TestCommands}; +use commands::{ + database::DatabaseCommands, lint::LintArgs, snapshot::SnapshotCommands, test::TestCommands, +}; use common::{ check_general_prerequisites, config::{global_config, init_global_config, GlobalConfig}, @@ -9,7 +11,7 @@ use common::{ use config::EcosystemConfig; use messages::{ msg_global_chain_does_not_exist, MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, - MSG_SUBCOMMAND_TESTS_ABOUT, + MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, }; use xshell::Shell; @@ -38,6 +40,8 @@ enum SupervisorSubcommands { Clean(CleanCommands), #[command(subcommand, about = "Snapshots creator")] Snapshot(SnapshotCommands), + #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] + Lint(LintArgs), #[command(hide = true)] Markdown, } @@ -94,6 +98,7 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { SupervisorSubcommands::Markdown => { clap_markdown::print_help_markdown::<Supervisor>(); } + SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?, } Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index bb58b0983e7..6368cb4e3d5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -1,3 +1,5 @@ +use crate::commands::lint::Extension; + // Ecosystem related messages pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; @@ -9,6 +11,7 @@ pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &st pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts"; +pub(super) const MSG_SUBCOMMAND_LINT_ABOUT: &str = "Lint code"; // Database related messages pub(super) const MSG_NO_DATABASES_SELECTED: &str = "No databases selected"; @@ -135,3 +138,19 @@ pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str = /// Snapshot creator related messages pub(super) const MSG_RUNNING_SNAPSHOT_CREATOR: &str = "Running snapshot creator"; + +// Lint related messages +pub(super) fn msg_running_linters_for_files(extensions: &[Extension]) -> String { + let extensions: Vec<String> = extensions.iter().map(|e| format!(".{}", e)).collect(); + format!( + "Running linters for files with
extensions: {:?}", + extensions + ) +} + +pub(super) fn msg_running_linter_for_extension_spinner(extension: &Extension) -> String { + format!("Running linter for files with extension: .{}", extension) +} + +pub(super) const MSG_LINT_CONFIG_PATH_ERR: &str = "Lint config path error"; +pub(super) const MSG_RUNNING_CONTRACTS_LINTER_SPINNER: &str = "Running contracts linter..";