diff --git a/Cargo.lock b/Cargo.lock index 323984689a0..38f489dd7a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -78,15 +78,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array", -] - [[package]] name = "aead" version = "0.5.2" @@ -121,31 +112,17 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "aes-gcm" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.4", - "subtle", -] - [[package]] name = "aes-gcm" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", + "aead", "aes 0.8.4", "cipher 0.4.4", "ctr 0.9.2", - "ghash 0.5.1", + "ghash", "subtle", ] @@ -178,9 +155,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" -version = "0.3.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4177d135789e282e925092be8939d421b701c6d92c0a16679faa659d9166289d" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -210,9 +187,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.3.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "499ee14d296a133d142efd215eb36bf96124829fe91cf8f5d4e5ccdd381eae00" +checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -227,9 +204,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb865df835f851b367ae439d6c82b117ded971628c8888b24fed411a290e38a" +checksum = "411aff151f2a73124ee473708e82ed51b2535f68928b6a1caa8bc1246ae6f7cd" dependencies = [ "alloy-rlp", "arbitrary", @@ -345,9 +322,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arbitrary" @@ -505,9 +482,9 @@ checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -584,7 +561,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.36", + "rustix 0.38.37", "slab", "tracing", "windows-sys 0.59.0", @@ -1141,9 +1118,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = 
"1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1224,9 +1201,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1271,7 +1248,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.5.2", + "aead", "chacha20", "cipher 0.4.4", "poly1305", @@ -1742,15 +1719,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.8.0" @@ -2206,13 +2174,15 @@ dependencies = [ [[package]] name = "discv5" -version = "0.4.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac33cb3f99889a57e56a8c6ccb77aaf0cfc7787602b7af09783f736d77314e1" +checksum = "f569b8c367554666c8652305621e8bae3634a2ff5c6378081d5bd8c399c99f23" dependencies = [ - "aes 0.7.5", - "aes-gcm 0.9.2", + "aes 0.8.4", + "aes-gcm", + "alloy-rlp", "arrayvec", + "ctr 0.9.2", "delay_map 0.3.0", "enr", "fnv", @@ -2221,12 +2191,12 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p 0.53.2", + "libp2p-identity", "lru", "more-asserts", + "multiaddr", "parking_lot 0.11.2", "rand", - "rlp", "smallvec", "socket2 0.4.10", "tokio", @@ -2411,10 +2381,11 @@ dependencies = [ [[package]] name = "enr" -version = "0.10.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" +checksum = "972070166c68827e64bd1ebc8159dd8e32d9bc2da7ebe8f20b61308f7974ad30" dependencies = [ + "alloy-rlp", "base64 0.21.7", "bytes", "ed25519-dalek", @@ -2422,7 +2393,6 @@ dependencies = [ "k256 0.13.3", "log", "rand", - "rlp", "serde", "sha3 0.10.8", "zeroize", @@ -2430,11 +2400,11 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.77", @@ -3355,7 +3325,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", ] @@ -3460,16 +3430,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug", - "polyval 0.5.3", -] - [[package]] name = "ghash" version = "0.5.1" @@ -3477,7 
+3437,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", - "polyval 0.6.2", + "polyval", ] [[package]] @@ -3529,7 +3489,7 @@ dependencies = [ "getrandom", "hashlink 0.9.1", "hex_fmt", - "libp2p 0.54.1", + "libp2p", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -4051,9 +4011,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-util", @@ -4066,9 +4026,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4432,9 +4392,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4605,29 +4565,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "libp2p" -version = "0.53.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99" -dependencies = [ - "bytes", - "either", - "futures", - "futures-timer", - "getrandom", - "instant", - "libp2p-allow-block-list 0.3.0", - "libp2p-connection-limits 0.3.1", - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", - "multiaddr", - "pin-project", - "rw-stream-sink", - "thiserror", -] - [[package]] name = "libp2p" version = "0.54.1" @@ -4639,9 +4576,9 @@ dependencies = [ "futures", "futures-timer", "getrandom", - "libp2p-allow-block-list 0.4.0", - "libp2p-connection-limits 0.4.0", - "libp2p-core 0.42.0", + "libp2p-allow-block-list", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-identify", "libp2p-identity", @@ -4650,7 +4587,7 @@ dependencies = [ "libp2p-noise", "libp2p-plaintext", "libp2p-quic", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", "libp2p-yamux", @@ -4660,39 +4597,15 @@ dependencies = [ "thiserror", ] -[[package]] -name = "libp2p-allow-block-list" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" -dependencies = [ - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", - "void", -] - [[package]] name = "libp2p-allow-block-list" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", - "void", -] - -[[package]] -name = "libp2p-connection-limits" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" -dependencies = [ - "libp2p-core 0.41.3", - "libp2p-identity", - "libp2p-swarm 0.44.2", + "libp2p-swarm", "void", ] @@ -4702,38 +4615,10 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ - "libp2p-core 0.42.0", - "libp2p-identity", - "libp2p-swarm 0.45.1", - "void", -] - -[[package]] -name = "libp2p-core" -version = "0.41.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5a8920cbd8540059a01950c1e5c96ea8d89eb50c51cd366fc18bdf540a6e48f" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", + "libp2p-core", "libp2p-identity", - "multiaddr", - "multihash", - "multistream-select", - "once_cell", - "parking_lot 0.12.3", - "pin-project", - "quick-protobuf", - "rand", - "rw-stream-sink", - "smallvec", - "thiserror", - "tracing", - "unsigned-varint 0.8.0", + "libp2p-swarm", "void", - "web-time", ] [[package]] @@ -4773,7 +4658,7 @@ dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "parking_lot 0.12.3", "smallvec", @@ -4791,9 +4676,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "lru", "quick-protobuf", "quick-protobuf-codec", @@ -4836,9 +4721,9 @@ dependencies = [ "futures", "hickory-proto", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "rand", "smallvec", "socket2 0.5.7", @@ -4854,10 +4739,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identify", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-swarm", "pin-project", "prometheus-client", "web-time", @@ -4872,7 +4757,7 @@ dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "nohash-hasher", "parking_lot 0.12.3", @@ -4892,7 +4777,7 @@ dependencies = [ "bytes", "curve25519-dalek", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "multiaddr", "multihash", @@ -4917,7 +4802,7 @@ dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "quick-protobuf", "quick-protobuf-codec", @@ -4934,42 +4819,20 @@ dependencies = [ "futures", "futures-timer", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-tls", "parking_lot 0.12.3", "quinn", "rand", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", "tracing", ] -[[package]] -name = "libp2p-swarm" -version = "0.44.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "instant", - "libp2p-core 0.41.3", - "libp2p-identity", - "lru", - "multistream-select", - "once_cell", - "rand", - "smallvec", - "tracing", - "void", -] - [[package]] name = "libp2p-swarm" version = "0.45.1" @@ -4980,7 +4843,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", "lru", @@ -5016,7 
+4879,7 @@ dependencies = [ "futures-timer", "if-watch", "libc", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "socket2 0.5.7", "tokio", @@ -5031,11 +4894,11 @@ checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -5051,8 +4914,8 @@ dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.42.0", - "libp2p-swarm 0.45.1", + "libp2p-core", + "libp2p-swarm", "tokio", "tracing", "void", @@ -5066,7 +4929,7 @@ checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ "either", "futures", - "libp2p-core 0.42.0", + "libp2p-core", "thiserror", "tracing", "yamux 0.12.1", @@ -5221,7 +5084,7 @@ dependencies = [ "gossipsub", "hex", "itertools 0.10.5", - "libp2p 0.54.1", + "libp2p", "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", @@ -5764,6 +5627,7 @@ name = "network" version = "0.2.0" dependencies = [ "alloy-primitives", + "alloy-rlp", "anyhow", "async-channel", "beacon_chain", @@ -5789,7 +5653,6 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "rand", - "rlp", "slog", "slog-async", "slog-term", @@ -6225,7 +6088,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "smallvec", "windows-targets 0.52.6", ] @@ -6434,7 +6297,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.36", + "rustix 0.38.37", "tracing", "windows-sys 0.59.0", ] @@ -6447,19 +6310,7 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug", - "universal-hash 0.5.1", -] - -[[package]] -name = "polyval" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash 0.4.0", + "universal-hash", ] [[package]] @@ -6471,7 +6322,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -6494,9 +6345,9 @@ dependencies = [ [[package]] name = "postgres-types" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02048d9e032fb3cc3413bbf7b83a15d84a5d419778e2628751896d856498eee9" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" dependencies = [ "bytes", "fallible-iterator", @@ -6520,9 +6371,9 @@ dependencies = [ [[package]] name = "pq-sys" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24ff9e4cf6945c988f0db7005d87747bf72864965c3529d259ad155ac41d584" +checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" dependencies = [ "vcpkg", ] @@ -6587,7 +6438,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -6782,7 +6633,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -6799,7 
+6650,7 @@ dependencies = [ "rand", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -6934,9 +6785,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58323dc32ea52a8ae105ff94bc0460c5d906307533ba3401aa63db3cbe491fe5" +checksum = "e4760ad04a88ef77075ba86ba9ea79b919e6bab29c1764c5747237cd6eaedcaa" dependencies = [ "libc", ] @@ -6952,9 +6803,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -7308,9 +7159,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.36" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -7340,21 +7191,21 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -7396,9 +7247,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7808,9 +7659,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -8104,7 +7955,7 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.10.3", + "aes-gcm", "blake2", "chacha20poly1305", "curve25519-dalek", @@ -8250,6 +8101,7 @@ dependencies = [ "lighthouse_metrics", "lru", "parking_lot 0.12.3", + "safe_arith", "serde", "slog", "sloggers", @@ -8473,7 +8325,7 @@ dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.36", + "rustix 0.38.37", "windows-sys 0.59.0", ] @@ -8503,7 +8355,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.36", + "rustix 0.38.37", "windows-sys 0.48.0", ] @@ -8755,9 +8607,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.11" +version = "0.7.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "03adcf0147e203b6032c0b2d30be1415ba03bc348901f3ff1cc0df6a733e60c3" +checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" dependencies = [ "async-trait", "byteorder", @@ -8845,7 +8697,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -8870,9 +8722,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -9167,15 +9019,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -9192,16 +9044,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "universal-hash" version = "0.5.1" @@ -9683,7 +9525,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "wasite", "web-sys", ] @@ -10078,9 +9920,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmltree" diff --git a/Cargo.toml b/Cargo.toml index 34c4ca9f55b..fff3addb1e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,7 +116,7 @@ derivative = "2" dirs = "3" either = "1.9" rust_eth_kzg = "0.5.1" -discv5 = { version = "0.4.1", features = ["libp2p"] } +discv5 = { version = "0.7", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" ethereum_hashing = "0.7.0" diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index d48a83130e6..87b7384ea68 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -11,7 +11,7 @@ use state_processing::per_epoch_processing::altair::{ }; use state_processing::per_epoch_processing::base::rewards_and_penalties::{ get_attestation_component_delta, get_attestation_deltas_all, get_attestation_deltas_subset, - 
get_inactivity_penalty_delta, get_inclusion_delay_delta, + get_inactivity_penalty_delta, get_inclusion_delay_delta, ProposerRewardCalculation, }; use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo; use state_processing::per_epoch_processing::base::{ @@ -81,13 +81,24 @@ impl BeaconChain { self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?; let indices_to_attestation_delta = if validators.is_empty() { - get_attestation_deltas_all(&state, &validator_statuses, spec)? - .into_iter() - .enumerate() - .collect() + get_attestation_deltas_all( + &state, + &validator_statuses, + ProposerRewardCalculation::Exclude, + spec, + )? + .into_iter() + .enumerate() + .collect() } else { let validator_indices = Self::validators_ids_to_indices(&mut state, validators)?; - get_attestation_deltas_subset(&state, &validator_statuses, &validator_indices, spec)? + get_attestation_deltas_subset( + &state, + &validator_statuses, + ProposerRewardCalculation::Exclude, + &validator_indices, + spec, + )? }; let mut total_rewards = vec![]; diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 33567001e3c..e0bb79bf38b 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -1,20 +1,25 @@ -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, StateSkipConfig}; +use attesting_indices_base::get_attesting_indices; use eth2::lighthouse::StandardBlockReward; -use operation_pool::RewardCache; use safe_arith::SafeArith; use slog::error; +use state_processing::common::attesting_indices_base; use state_processing::{ - common::{get_attestation_participation_flag_indices, get_attesting_indices_from_state}, + common::{ + base::{self, SqrtTotalActiveBalance}, + get_attestation_participation_flag_indices, get_attesting_indices_from_state, + }, epoch_cache::initialize_epoch_cache, per_block_processing::{ altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices, }, }; +use std::collections::HashSet; use store::{ consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, RelativeEpoch, }; -use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Hash256}; +use types::{AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, EthSpec}; type BeaconBlockSubRewardValue = u64; @@ -22,7 +27,6 @@ impl BeaconChain { pub fn compute_beacon_block_reward>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, state: &mut BeaconState, ) -> Result { if block.slot() != state.slot() { @@ -33,7 +37,7 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; initialize_epoch_cache(state, &self.spec)?; - self.compute_beacon_block_reward_with_cache(block, block_root, state) + self.compute_beacon_block_reward_with_cache(block, state) } // This should only be called after a committee cache has been built @@ -41,7 +45,6 @@ impl BeaconChain { fn compute_beacon_block_reward_with_cache>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, state: &BeaconState, ) -> Result { let proposer_index = block.proposer_index(); @@ -72,7 +75,7 @@ impl BeaconChain { })?; let block_attestation_reward = if let BeaconState::Base(_) = state { - self.compute_beacon_block_attestation_reward_base(block, block_root, state) + 
self.compute_beacon_block_attestation_reward_base(block, state) .map_err(|e| { error!( self.log, @@ -169,19 +172,85 @@ impl BeaconChain { fn compute_beacon_block_attestation_reward_base>( &self, block: BeaconBlockRef<'_, T::EthSpec, Payload>, - block_root: Hash256, state: &BeaconState, ) -> Result { - // Call compute_block_reward in the base case - // Since base does not have sync aggregate, we only grab attesation portion of the returned - // value - let mut reward_cache = RewardCache::default(); - let block_attestation_reward = self - .compute_block_reward(block, block_root, state, &mut reward_cache, true)? - .attestation_rewards - .total; - - Ok(block_attestation_reward) + // In phase0, rewards for including attestations are awarded at epoch boundaries when the corresponding + // attestations are contained in state.previous_epoch_attestations. So, if an attestation within this block has + // target = previous_epoch, it is directly inserted into previous_epoch_attestations and we need the state at + // the end of this epoch, or the attestation has target = current_epoch and thus we need the state at the end + // of the next epoch. + // We fetch these lazily, as only one might be needed depending on the block's content. + let mut current_epoch_end = None; + let mut next_epoch_end = None; + + let epoch = block.epoch(); + let mut block_reward = 0; + + let mut rewarded_attesters = HashSet::new(); + + for attestation in block.body().attestations() { + let processing_epoch_end = if attestation.data().target.epoch == epoch { + let next_epoch_end = match &mut next_epoch_end { + Some(next_epoch_end) => next_epoch_end, + None => { + let state = self.state_at_slot( + epoch.safe_add(1)?.end_slot(T::EthSpec::slots_per_epoch()), + StateSkipConfig::WithoutStateRoots, + )?; + next_epoch_end.get_or_insert(state) + } + }; + + // If the next epoch end is no longer phase0, no proposer rewards are awarded, as Altair epoch boundary + // processing kicks in. We check this here, as we know that current_epoch_end will always be phase0. + if !matches!(next_epoch_end, BeaconState::Base(_)) { + continue; + } + + next_epoch_end + } else if attestation.data().target.epoch == epoch.safe_sub(1)? { + match &mut current_epoch_end { + Some(current_epoch_end) => current_epoch_end, + None => { + let state = self.state_at_slot( + epoch.end_slot(T::EthSpec::slots_per_epoch()), + StateSkipConfig::WithoutStateRoots, + )?; + current_epoch_end.get_or_insert(state) + } + } + } else { + return Err(BeaconChainError::BlockRewardAttestationError); + }; + + let inclusion_delay = state.slot().safe_sub(attestation.data().slot)?.as_u64(); + let sqrt_total_active_balance = + SqrtTotalActiveBalance::new(processing_epoch_end.get_total_active_balance()?); + for attester in get_attesting_indices_from_state(state, attestation)? { + let validator = processing_epoch_end.get_validator(attester as usize)?; + if !validator.slashed + && !rewarded_attesters.contains(&attester) + && !has_earlier_attestation( + state, + processing_epoch_end, + inclusion_delay, + attester, + )?
+ { + let base_reward = base::get_base_reward( + validator.effective_balance, + sqrt_total_active_balance, + &self.spec, + )?; + let proposer_reward = + base_reward.safe_div(self.spec.proposer_reward_quotient)?; + block_reward.safe_add_assign(proposer_reward)?; + rewarded_attesters.insert(attester); + } + } + } + + Ok(block_reward) } fn compute_beacon_block_attestation_reward_altair_deneb< @@ -244,3 +313,25 @@ impl BeaconChain { Ok(total_proposer_reward) } } + +fn has_earlier_attestation( + state: &BeaconState, + processing_epoch_end: &BeaconState, + inclusion_delay: u64, + attester: u64, +) -> Result { + if inclusion_delay > 1 { + for epoch_att in processing_epoch_end.previous_epoch_attestations()? { + if epoch_att.inclusion_delay < inclusion_delay { + let committee = + state.get_beacon_committee(epoch_att.data.slot, epoch_att.data.index)?; + let earlier_attesters = + get_attesting_indices::(committee.committee, &epoch_att.aggregation_bits)?; + if earlier_attesters.contains(&attester) { + return Ok(true); + } + } + } + } + Ok(false) +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 322a2caa673..d83955854d8 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5639,7 +5639,7 @@ impl BeaconChain { let mut ctxt = ConsensusContext::new(block.slot()); let consensus_block_value = self - .compute_beacon_block_reward(block.message(), Hash256::zero(), &mut state) + .compute_beacon_block_reward(block.message(), &mut state) .map(|reward| reward.total) .unwrap_or(0); @@ -6987,32 +6987,18 @@ impl BeaconChain { &self, block_root: &Hash256, ) -> Result, ForkName)>, Error> { - let Some(block) = self.get_blinded_block(block_root)? else { - return Ok(None); - }; - - let (state_root, slot) = (block.state_root(), block.slot()); - - let Some(mut state) = self.get_state(&state_root, Some(slot))? 
else { - return Ok(None); - }; + let head_state = &self.head().snapshot.beacon_state; + let finalized_period = head_state + .finalized_checkpoint() + .epoch + .sync_committee_period(&self.spec)?; - let fork_name = state - .fork_name(&self.spec) - .map_err(Error::InconsistentFork)?; - - match fork_name { - ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb - | ForkName::Electra => { - LightClientBootstrap::from_beacon_state(&mut state, &block, &self.spec) - .map(|bootstrap| Some((bootstrap, fork_name))) - .map_err(Error::LightClientError) - } - ForkName::Base => Err(Error::UnsupportedFork), - } + self.light_client_server_cache.get_light_client_bootstrap( + &self.store, + block_root, + finalized_period, + &self.spec, + ) } pub fn metrics(&self) -> BeaconChainMetrics { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 4db3f0ebb41..994ac79af7e 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -216,7 +216,8 @@ pub enum BeaconChainError { UnableToPublish, UnableToBuildColumnSidecar(String), AvailabilityCheckError(AvailabilityCheckError), - LightClientError(LightClientError), + LightClientUpdateError(LightClientUpdateError), + LightClientBootstrapError(String), UnsupportedFork, MilhouseError(MilhouseError), EmptyRpcCustodyColumns, @@ -250,7 +251,7 @@ easy_from_to!(BlockReplayError, BeaconChainError); easy_from_to!(InconsistentFork, BeaconChainError); easy_from_to!(AvailabilityCheckError, BeaconChainError); easy_from_to!(EpochCacheError, BeaconChainError); -easy_from_to!(LightClientError, BeaconChainError); +easy_from_to!(LightClientUpdateError, BeaconChainError); easy_from_to!(MilhouseError, BeaconChainError); easy_from_to!(AttestationError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index efc746675dc..ca015d0365a 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,23 +1,25 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; +use eth2::types::light_client_update::CurrentSyncCommitteeProofLen; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slog::{debug, Logger}; use ssz::Decode; -use ssz::Encode; use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; +use tree_hash::TreeHash; use types::light_client_update::{ - FinalizedRootProofLen, NextSyncCommitteeProofLen, FINALIZED_ROOT_INDEX, - NEXT_SYNC_COMMITTEE_INDEX, + FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX, + FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX, }; use types::non_zero_usize::new_non_zero_usize; use types::{ - BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, Slot, SyncAggregate, SyncCommittee, + BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256, + LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, + LightClientUpdate, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. 
Items in the @@ -28,7 +30,6 @@ const PREV_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(32); /// This cache computes light client messages ahead of time, required to satisfy p2p and API /// requests. These messages include proofs on historical states, so on-demand computation is /// expensive. -/// pub struct LightClientServerCache { /// Tracks a single global latest finality update out of all imported blocks. /// @@ -41,6 +42,8 @@ pub struct LightClientServerCache { latest_optimistic_update: RwLock>>, /// Caches the most recent light client update latest_light_client_update: RwLock>>, + /// Caches the current sync committee, + latest_written_current_sync_committee: RwLock>>>, /// Caches state proofs by block root prev_block_cache: Mutex>>, } @@ -51,6 +54,7 @@ impl LightClientServerCache { latest_finality_update: None.into(), latest_optimistic_update: None.into(), latest_light_client_update: None.into(), + latest_written_current_sync_committee: None.into(), prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(), } } @@ -96,6 +100,10 @@ impl LightClientServerCache { let signature_slot = block_slot; let attested_block_root = block_parent_root; + let sync_period = block_slot + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + let attested_block = store.get_blinded_block(attested_block_root)?.ok_or( BeaconChainError::DBInconsistent(format!( "Block not available {:?}", @@ -110,6 +118,18 @@ impl LightClientServerCache { attested_block.slot(), )?; + let finalized_period = cached_parts + .finalized_checkpoint + .epoch + .sync_committee_period(chain_spec)?; + + store.store_sync_committee_branch( + attested_block.message().tree_hash_root(), + &cached_parts.current_sync_committee_branch, + )?; + + self.store_current_sync_committee(&store, &cached_parts, sync_period, finalized_period)?; + let attested_slot = attested_block.slot(); let maybe_finalized_block = store.get_blinded_block(&cached_parts.finalized_block_root)?; @@ -178,57 +198,57 @@ impl LightClientServerCache { // Spec: Full nodes SHOULD provide the best derivable LightClientUpdate (according to is_better_update) // for each sync committee period - let prev_light_client_update = match &self.latest_light_client_update.read().clone() { - Some(prev_light_client_update) => Some(prev_light_client_update.clone()), - None => self.get_light_client_update(&store, sync_period, chain_spec)?, - }; + let prev_light_client_update = + self.get_light_client_update(&store, sync_period, chain_spec)?; let should_persist_light_client_update = if let Some(prev_light_client_update) = prev_light_client_update { - let prev_sync_period = prev_light_client_update - .signature_slot() - .epoch(T::EthSpec::slots_per_epoch()) - .sync_committee_period(chain_spec)?; - - if sync_period != prev_sync_period { - true - } else { - prev_light_client_update - .is_better_light_client_update(&new_light_client_update, chain_spec)? - } + prev_light_client_update + .is_better_light_client_update(&new_light_client_update, chain_spec)? 
} else { true }; if should_persist_light_client_update { - self.store_light_client_update(&store, sync_period, &new_light_client_update)?; + store.store_light_client_update(sync_period, &new_light_client_update)?; + *self.latest_light_client_update.write() = Some(new_light_client_update); } Ok(()) } - fn store_light_client_update( + fn store_current_sync_committee( &self, store: &BeaconStore, + cached_parts: &LightClientCachedData, sync_committee_period: u64, - light_client_update: &LightClientUpdate, + finalized_period: u64, ) -> Result<(), BeaconChainError> { - let column = DBColumn::LightClientUpdate; - - store.hot_db.put_bytes( - column.into(), - &sync_committee_period.to_le_bytes(), - &light_client_update.as_ssz_bytes(), - )?; + if let Some(latest_sync_committee) = + self.latest_written_current_sync_committee.read().clone() + { + if latest_sync_committee == cached_parts.current_sync_committee { + return Ok(()); + } + }; - *self.latest_light_client_update.write() = Some(light_client_update.clone()); + if finalized_period + 1 >= sync_committee_period { + store.store_sync_committee( + sync_committee_period, + &cached_parts.current_sync_committee, + )?; + *self.latest_written_current_sync_committee.write() = + Some(cached_parts.current_sync_committee.clone()); + } Ok(()) } - // Used to fetch the most recently persisted "best" light client update. - // Should not be used outside the light client server, as it also caches the fetched - // light client update. + /// Used to fetch the most recently persisted light client update for the given `sync_committee_period`. + /// It first checks the `latest_light_client_update` cache before querying the db. + /// + /// Note: Should not be used outside the light client server, as it also caches the fetched + /// light client update. fn get_light_client_update( &self, store: &BeaconStore, @@ -245,21 +265,7 @@ impl LightClientServerCache { } } - let column = DBColumn::LightClientUpdate; - let res = store - .hot_db - .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; - - if let Some(light_client_update_bytes) = res { - let epoch = sync_committee_period - .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; - - let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); - - let light_client_update = - LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name) - .map_err(store::errors::Error::SszDecodeError)?; - + if let Some(light_client_update) = store.get_light_client_update(sync_committee_period)? { *self.latest_light_client_update.write() = Some(light_client_update.clone()); return Ok(Some(light_client_update)); } @@ -340,6 +346,65 @@ impl LightClientServerCache { pub fn get_latest_optimistic_update(&self) -> Option> { self.latest_optimistic_update.read().clone() } + + /// Fetches a light client bootstrap for a given finalized checkpoint `block_root`. We eagerly persist + /// `sync_committee_branch` and `sync_committee` to allow for a more efficient bootstrap construction. + /// + /// Note: It should be the case that a `sync_committee_branch` and `sync_committee` exist in the db + /// for a finalized checkpoint block root. However, we currently have no backfill mechanism for these values. + /// Therefore, `sync_committee_branch` and `sync_committee` are only persisted while a node is synced.
+ #[allow(clippy::type_complexity)] + pub fn get_light_client_bootstrap( + &self, + store: &BeaconStore, + block_root: &Hash256, + finalized_period: u64, + chain_spec: &ChainSpec, + ) -> Result, ForkName)>, BeaconChainError> { + let Some(block) = store.get_blinded_block(block_root)? else { + return Err(BeaconChainError::LightClientBootstrapError(format!( + "Block root {block_root} not found" + ))); + }; + + let (_, slot) = (block.state_root(), block.slot()); + + let fork_name = chain_spec.fork_name_at_slot::(slot); + + let sync_committee_period = block + .slot() + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + + let Some(current_sync_committee_branch) = store.get_sync_committee_branch(block_root)? + else { + return Err(BeaconChainError::LightClientBootstrapError(format!( + "Sync committee branch for block root {:?} not found", + block_root + ))); + }; + + if sync_committee_period > finalized_period { + return Err(BeaconChainError::LightClientBootstrapError( + format!("The block's sync committee period {sync_committee_period} is greater than the current finalized period {finalized_period}"), + )); + } + + let Some(current_sync_committee) = store.get_sync_committee(sync_committee_period)? else { + return Err(BeaconChainError::LightClientBootstrapError(format!( + "Sync committee for period {sync_committee_period} not found" + ))); + }; + + let light_client_bootstrap = LightClientBootstrap::new( + &block, + Arc::new(current_sync_committee), + current_sync_committee_branch, + chain_spec, + )?; + + Ok(Some((light_client_bootstrap, fork_name))) + } } impl Default for LightClientServerCache { @@ -350,23 +415,32 @@ impl Default for LightClientServerCache { type FinalityBranch = FixedVector; type NextSyncCommitteeBranch = FixedVector; +type CurrentSyncCommitteeBranch = FixedVector; #[derive(Clone)] struct LightClientCachedData { + finalized_checkpoint: Checkpoint, finality_branch: FinalityBranch, next_sync_committee_branch: NextSyncCommitteeBranch, + current_sync_committee_branch: CurrentSyncCommitteeBranch, next_sync_committee: Arc>, + current_sync_committee: Arc>, finalized_block_root: Hash256, } impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { Ok(Self { + finalized_checkpoint: state.finalized_checkpoint(), finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), next_sync_committee: state.next_sync_committee()?.clone(), + current_sync_committee: state.current_sync_committee()?.clone(), next_sync_committee_branch: state .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? .into(), + current_sync_committee_branch: state + .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?
+ .into(), finalized_block_root: state.finalized_checkpoint().root, }) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 79b2fc592b2..f15b46fc4bf 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1651,7 +1651,7 @@ pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = Laz try_create_histogram_vec_with_buckets( "data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", - Ok(vec![0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]), + Ok(vec![0.1, 0.15, 0.25, 0.35, 0.5, 0.7, 1.0, 2.5, 5.0, 10.0]), &["blob_count"], ) }); diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index fcb8fb1c897..f83df7b4468 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -676,6 +676,7 @@ impl, Cold: ItemStore> BackgroundMigrator, + slot: Slot, + block_root: Hash256, + ) { + let fork_name = state.fork_name(&self.spec).unwrap(); + if !fork_name.altair_enabled() { + return; + } + + let log = self.logger(); + let contributions = + self.make_sync_contributions(state, block_root, slot, RelativeSyncCommittee::Current); + + for (_, contribution_and_proof) in contributions { + let Some(contribution_and_proof) = contribution_and_proof else { + continue; + }; + let contribution = contribution_and_proof.message.contribution; + self.chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + self.chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + let Some(sync_aggregate) = self.chain.op_pool.get_sync_aggregate(state).unwrap() else { + return; + }; + + let _ = self + .chain + .light_client_server_cache + .recompute_and_cache_updates( + self.chain.store.clone(), + slot, + &block_root, + &sync_aggregate, + log, + &self.spec, + ); + } + + pub async fn add_attested_blocks_at_slots_with_lc_data( + &self, + mut state: BeaconState, + state_root: Hash256, + slots: &[Slot], + validators: &[usize], + mut latest_block_hash: Option, + sync_committee_strategy: SyncCommitteeStrategy, + ) -> AddBlocksResult { + assert!( + slots.windows(2).all(|w| w[0] <= w[1]), + "Slots have to be sorted" + ); // slice.is_sorted() isn't stabilized at the moment of writing this + let mut block_hash_from_slot: HashMap = HashMap::new(); + let mut state_hash_from_slot: HashMap = HashMap::new(); + for slot in slots { + let (block_hash, new_state) = self + .add_attested_block_at_slot_with_sync( + *slot, + state, + state_root, + validators, + sync_committee_strategy, + ) + .await + .unwrap(); + + state = new_state; + + self.update_light_client_server_cache(&state, *slot, block_hash.into()); + + block_hash_from_slot.insert(*slot, block_hash); + state_hash_from_slot.insert(*slot, state.canonical_root().unwrap().into()); + latest_block_hash = Some(block_hash); + } + ( + block_hash_from_slot, + state_hash_from_slot, + latest_block_hash.unwrap(), + state, + ) + } + async fn add_attested_blocks_at_slots_given_lbh( &self, mut state: BeaconState, @@ -2250,7 +2346,9 @@ where ) .await .unwrap(); + state = new_state; + block_hash_from_slot.insert(*slot, block_hash); state_hash_from_slot.insert(*slot, state.canonical_root().unwrap().into()); latest_block_hash = Some(block_hash); @@ -2459,6 +2557,23 @@ where block_strategy, attestation_strategy, SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await + } + + pub async fn 
extend_chain_with_light_client_data( + &self, + num_blocks: usize, + block_strategy: BlockStrategy, + attestation_strategy: AttestationStrategy, + ) -> Hash256 { + self.extend_chain_with_sync( + num_blocks, + block_strategy, + attestation_strategy, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Enabled, ) .await } @@ -2469,6 +2584,7 @@ where block_strategy: BlockStrategy, attestation_strategy: AttestationStrategy, sync_committee_strategy: SyncCommitteeStrategy, + light_client_strategy: LightClientStrategy, ) -> Hash256 { let (mut state, slots) = match block_strategy { BlockStrategy::OnCanonicalHead => { @@ -2500,15 +2616,30 @@ where }; let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, last_produced_block_hash, _) = self - .add_attested_blocks_at_slots_with_sync( - state, - state_root, - &slots, - &validators, - sync_committee_strategy, - ) - .await; + let (_, _, last_produced_block_hash, _) = match light_client_strategy { + LightClientStrategy::Enabled => { + self.add_attested_blocks_at_slots_with_lc_data( + state, + state_root, + &slots, + &validators, + None, + sync_committee_strategy, + ) + .await + } + LightClientStrategy::Disabled => { + self.add_attested_blocks_at_slots_with_sync( + state, + state_root, + &slots, + &validators, + sync_committee_strategy, + ) + .await + } + }; + last_produced_block_hash.into() } diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index f04f4062f1c..323f4f38eb2 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -1,20 +1,22 @@ #![cfg(test)] -use std::collections::HashMap; -use std::sync::LazyLock; - +use beacon_chain::block_verification_types::AsBlock; use beacon_chain::test_utils::{ generate_deterministic_keypairs, BeaconChainHarness, EphemeralHarnessType, }; use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, + BlockError, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; use eth2::lighthouse::attestation_rewards::TotalAttestationRewards; use eth2::lighthouse::StandardAttestationRewards; use eth2::types::ValidatorId; -use types::beacon_state::Error as BeaconStateError; -use types::{BeaconState, ChainSpec, ForkName, Slot}; +use state_processing::{BlockReplayError, BlockReplayer}; +use std::array::IntoIter; +use std::collections::HashMap; +use std::sync::{Arc, LazyLock}; +use types::{ChainSpec, ForkName, Slot}; pub const VALIDATOR_COUNT: usize = 64; @@ -24,10 +26,16 @@ static KEYPAIRS: LazyLock> = LazyLock::new(|| generate_deterministic_keypairs(VALIDATOR_COUNT)); fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..Default::default() + }; + let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .keypairs(KEYPAIRS.to_vec()) .fresh_ephemeral_store() + .chain_config(chain_config) .build(); harness.advance_slot(); @@ -37,9 +45,7 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { #[tokio::test] async fn test_sync_committee_rewards() { - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec); let num_block_produced = E::slots_per_epoch(); @@ -126,123 +132,65 @@ async fn test_sync_committee_rewards() { } #[tokio::test] -async fn test_verify_attestation_rewards_base() { - let harness = 
get_harness(E::default_spec()); +async fn test_rewards_base() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let initial_balances = harness.get_current_state().balances().to_vec(); - // epoch 0 (N), only two thirds of validators vote. - let two_thirds = (VALIDATOR_COUNT / 3) * 2; - let two_thirds_validators: Vec = (0..two_thirds).collect(); harness - .extend_chain( - E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(two_thirds_validators), - ) + .extend_slots(E::slots_per_epoch() as usize * 2 - 1) .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); - - // extend slots to beginning of epoch N + 2 - harness.extend_slots(E::slots_per_epoch() as usize).await; - - // compute reward deltas for all validators in epoch N - let StandardAttestationRewards { - ideal_rewards, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(0), vec![]) - .unwrap(); - - // assert no inactivity penalty for both ideal rewards and individual validators - assert!(ideal_rewards.iter().all(|reward| reward.inactivity == 0)); - assert!(total_rewards.iter().all(|reward| reward.inactivity == 0)); - - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - - // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().to_vec(); - assert_eq!(expected_balances, balances); + check_all_base_rewards(&harness, initial_balances).await; } #[tokio::test] -async fn test_verify_attestation_rewards_base_inactivity_leak() { - let spec = E::default_spec(); +async fn test_rewards_base_inactivity_leak() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); + let initial_balances = harness.get_current_state().balances().to_vec(); let half = VALIDATOR_COUNT / 2; let half_validators: Vec = (0..half).collect(); // target epoch is the epoch where the chain enters inactivity leak let target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; - // advance until beginning of epoch N + 1 and get balances - harness - .extend_chain( - (E::slots_per_epoch() * (target_epoch + 1)) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(half_validators.clone()), - ) - .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); - - // extend slots to beginning of epoch N + 2 - harness.advance_slot(); + // advance until end of target epoch harness - .extend_chain( - E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(half_validators), + .extend_slots_some_validators( + ((E::slots_per_epoch() * target_epoch) - 1) as usize, + half_validators.clone(), ) .await; - let _slot = harness.get_current_slot(); - - // compute reward deltas for all validators in epoch N - let StandardAttestationRewards { - ideal_rewards, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) - .unwrap(); - - // assert inactivity penalty for both ideal rewards and individual validators - assert!(ideal_rewards.iter().all(|reward| reward.inactivity < 0)); - assert!(total_rewards.iter().all(|reward| reward.inactivity < 0)); - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - - // verify expected 
balances against actual balances - let balances: Vec = harness.get_current_state().balances().to_vec(); - assert_eq!(expected_balances, balances); + check_all_base_rewards(&harness, initial_balances).await; } #[tokio::test] -async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoch() { - let spec = E::default_spec(); +async fn test_rewards_base_inactivity_leak_justification_epoch() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); + let initial_balances = harness.get_current_state().balances().to_vec(); let half = VALIDATOR_COUNT / 2; let half_validators: Vec = (0..half).collect(); // target epoch is the epoch where the chain enters inactivity leak - let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2; + let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 1; - // advance until beginning of epoch N + 2 + // advance until end of target epoch harness .extend_chain( - (E::slots_per_epoch() * (target_epoch + 1)) as usize, + ((E::slots_per_epoch() * target_epoch) - 1) as usize, BlockStrategy::OnCanonicalHead, AttestationStrategy::SomeValidators(half_validators.clone()), ) .await; - // advance to create first justification epoch and get initial balances + // advance to create first justification epoch harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); - //assert previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning + // assert previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning assert_eq!( 0, harness @@ -252,10 +200,12 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc .as_u64() ); - // extend slots to beginning of epoch N + 1 + // extend slots to end of epoch target_epoch + 2 harness.extend_slots(E::slots_per_epoch() as usize).await; - //assert target epoch and previous_justified_checkpoint match + check_all_base_rewards(&harness, initial_balances).await; + + // assert target epoch and previous_justified_checkpoint match assert_eq!( target_epoch, harness @@ -264,31 +214,94 @@ async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoc .epoch .as_u64() ); +} - // compute reward deltas for all validators in epoch N - let StandardAttestationRewards { - ideal_rewards, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) - .unwrap(); +#[tokio::test] +async fn test_rewards_base_slashings() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let mut initial_balances = harness.get_current_state().balances().to_vec(); - // assert we successfully get ideal rewards for justified epoch out of inactivity leak - assert!(ideal_rewards - .iter() - .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + harness + .extend_slots(E::slots_per_epoch() as usize - 1) + .await; - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + harness.add_attester_slashing(vec![0]).unwrap(); + let slashed_balance = initial_balances.get_mut(0).unwrap(); + *slashed_balance -= *slashed_balance / harness.spec.min_slashing_penalty_quotient; - // verify expected balances against actual balances - let balances: Vec = harness.get_current_state().balances().to_vec(); - assert_eq!(expected_balances, 
balances); + harness.extend_slots(E::slots_per_epoch() as usize).await; + + check_all_base_rewards(&harness, initial_balances).await; +} + +#[tokio::test] +async fn test_rewards_base_multi_inclusion() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let initial_balances = harness.get_current_state().balances().to_vec(); + + harness.extend_slots(2).await; + + let prev_block = harness.chain.head_beacon_block(); + + harness.extend_slots(1).await; + + harness.advance_slot(); + let slot = harness.get_current_slot(); + let mut block = + // pin to reduce stack size for clippy + Box::pin( + harness.make_block_with_modifier(harness.get_current_state(), slot, |block| { + // add one attestation from the same block + let attestations = &mut block.body_base_mut().unwrap().attestations; + attestations + .push(attestations.first().unwrap().clone()) + .unwrap(); + + // add one attestation from the previous block + let attestation = prev_block + .as_block() + .message_base() + .unwrap() + .body + .attestations + .first() + .unwrap() + .clone(); + attestations.push(attestation).unwrap(); + }), + ) + .await + .0; + + // funky hack: on first try, the state root will mismatch due to our modification + // thankfully, the correct state root is reported back, so we just take that one :^) + // there probably is a better way... + let Err(BlockError::StateRootMismatch { local, .. }) = harness + .process_block(slot, block.0.canonical_root(), block.clone()) + .await + else { + panic!("unexpected match of state root"); + }; + let mut new_block = block.0.message_base().unwrap().clone(); + new_block.state_root = local; + block.0 = Arc::new(harness.sign_beacon_block(new_block.into(), &harness.get_current_state())); + harness + .process_block(slot, block.0.canonical_root(), block.clone()) + .await + .unwrap(); + + harness + .extend_slots(E::slots_per_epoch() as usize * 2 - 4) + .await; + + // pin to reduce stack size for clippy + Box::pin(check_all_base_rewards(&harness, initial_balances)).await; } #[tokio::test] -async fn test_verify_attestation_rewards_altair() { +async fn test_rewards_altair() { let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); let target_epoch = 0; @@ -297,11 +310,11 @@ async fn test_verify_attestation_rewards_altair() { harness .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + let mut expected_balances = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map - let mut proposal_rewards_map: HashMap = HashMap::new(); - let mut sync_committee_rewards_map: HashMap = HashMap::new(); + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); for _ in 0..E::slots_per_epoch() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -311,19 +324,13 @@ async fn test_verify_attestation_rewards_altair() { harness.make_block_return_pre_state(state, slot).await; let beacon_block_reward = harness .chain - .compute_beacon_block_reward( - signed_block.message(), - signed_block.canonical_root(), - &mut state, - ) + .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); let total_proposer_reward = proposal_rewards_map - .get(&beacon_block_reward.proposer_index) - .unwrap_or(&0u64) - + beacon_block_reward.total; - - 
proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; // calculate sync committee rewards / penalties let reward_payload = harness @@ -331,13 +338,12 @@ async fn test_verify_attestation_rewards_altair() { .compute_sync_committee_rewards(signed_block.message(), &mut state) .unwrap(); - reward_payload.iter().for_each(|reward| { - let mut amount = *sync_committee_rewards_map - .get(&reward.validator_index) - .unwrap_or(&0); - amount += reward.reward; - sync_committee_rewards_map.insert(reward.validator_index, amount); - }); + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } harness.extend_slots(1).await; } @@ -357,10 +363,9 @@ async fn test_verify_attestation_rewards_altair() { .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); // apply attestation, proposal, and sync committee rewards and penalties to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); - let expected_balances = - apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); // verify expected balances against actual balances let balances: Vec = harness.get_current_state().balances().to_vec(); @@ -369,7 +374,7 @@ async fn test_verify_attestation_rewards_altair() { } #[tokio::test] -async fn test_verify_attestation_rewards_altair_inactivity_leak() { +async fn test_rewards_altair_inactivity_leak() { let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); @@ -385,11 +390,11 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { half_validators.clone(), ) .await; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + let mut expected_balances = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map - let mut proposal_rewards_map: HashMap = HashMap::new(); - let mut sync_committee_rewards_map: HashMap = HashMap::new(); + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); for _ in 0..E::slots_per_epoch() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -399,19 +404,13 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { harness.make_block_return_pre_state(state, slot).await; let beacon_block_reward = harness .chain - .compute_beacon_block_reward( - signed_block.message(), - signed_block.canonical_root(), - &mut state, - ) + .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); let total_proposer_reward = proposal_rewards_map - .get(&beacon_block_reward.proposer_index) - .unwrap_or(&0u64) - + beacon_block_reward.total; - - proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + .entry(beacon_block_reward.proposer_index) + .or_insert(0i64); + *total_proposer_reward += beacon_block_reward.total as i64; // calculate sync committee rewards / penalties let 
reward_payload = harness @@ -419,13 +418,12 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { .compute_sync_committee_rewards(signed_block.message(), &mut state) .unwrap(); - reward_payload.iter().for_each(|reward| { - let mut amount = *sync_committee_rewards_map - .get(&reward.validator_index) - .unwrap_or(&0); - amount += reward.reward; - sync_committee_rewards_map.insert(reward.validator_index, amount); - }); + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } harness .extend_slots_some_validators(1, half_validators.clone()) @@ -451,10 +449,9 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { .all(|reward| reward.inactivity < 0)); // apply attestation, proposal, and sync committee rewards and penalties to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); - let expected_balances = - apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); // verify expected balances against actual balances let balances: Vec = harness.get_current_state().balances().to_vec(); @@ -463,7 +460,7 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { } #[tokio::test] -async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_epoch() { +async fn test_rewards_altair_inactivity_leak_justification_epoch() { let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); let harness = get_harness(spec.clone()); @@ -491,11 +488,11 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep // advance for first justification epoch and get balances harness.extend_slots(E::slots_per_epoch() as usize).await; target_epoch += 1; - let initial_balances: Vec = harness.get_current_state().balances().to_vec(); + let mut expected_balances = harness.get_current_state().balances().to_vec(); // advance until epoch N + 2 and build proposal rewards map - let mut proposal_rewards_map: HashMap = HashMap::new(); - let mut sync_committee_rewards_map: HashMap = HashMap::new(); + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); for _ in 0..E::slots_per_epoch() { let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); @@ -505,19 +502,13 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep harness.make_block_return_pre_state(state, slot).await; let beacon_block_reward = harness .chain - .compute_beacon_block_reward( - signed_block.message(), - signed_block.canonical_root(), - &mut state, - ) + .compute_beacon_block_reward(signed_block.message(), &mut state) .unwrap(); let total_proposer_reward = proposal_rewards_map - .get(&beacon_block_reward.proposer_index) - .unwrap_or(&0u64) - + beacon_block_reward.total; - - proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; // calculate sync committee rewards / penalties let reward_payload = harness @@ -525,13 +516,12 @@ async fn 
test_verify_attestation_rewards_altair_inactivity_leak_justification_ep .compute_sync_committee_rewards(signed_block.message(), &mut state) .unwrap(); - reward_payload.iter().for_each(|reward| { - let mut amount = *sync_committee_rewards_map - .get(&reward.validator_index) - .unwrap_or(&0); - amount += reward.reward; - sync_committee_rewards_map.insert(reward.validator_index, amount); - }); + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } harness.extend_slots(1).await; } @@ -561,10 +551,9 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); // apply attestation, proposal, and sync committee rewards and penalties to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); - let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); - let expected_balances = - apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); // verify expected balances against actual balances let balances: Vec = harness.get_current_state().balances().to_vec(); @@ -572,109 +561,130 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_ep } #[tokio::test] -async fn test_verify_attestation_rewards_base_subset_only() { - let harness = get_harness(E::default_spec()); +async fn test_rewards_base_subset_only() { + let spec = ForkName::Base.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec); + let initial_balances = harness.get_current_state().balances().to_vec(); + + // a subset of validators to compute attestation rewards for + let validators_subset = (0..16).chain(56..64).collect::>(); // epoch 0 (N), only two thirds of validators vote. 
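Throughout the reworked reward tests above, the older `get(..).unwrap_or(..)` plus `insert(..)` round trips are replaced with `HashMap::entry(..).or_insert(0)`. A minimal, self-contained sketch of that accumulation idiom (illustrative names only; plain `(validator_index, delta)` pairs stand in for the harness's proposer and sync-committee reward payloads):

```rust
use std::collections::HashMap;

/// Accumulate signed reward deltas per validator index, mirroring the
/// `entry(..).or_insert(0)` pattern used for `proposal_rewards_map` and
/// `sync_committee_rewards_map` in the refactored tests.
fn accumulate(deltas: &[(u64, i64)]) -> HashMap<u64, i64> {
    let mut totals: HashMap<u64, i64> = HashMap::new();
    for &(validator_index, delta) in deltas {
        // `or_insert` hands back `&mut i64`, so the running total is updated
        // in place without a separate lookup-then-insert round trip.
        *totals.entry(validator_index).or_insert(0) += delta;
    }
    totals
}

fn main() {
    let totals = accumulate(&[(0, 10), (3, -2), (0, 5)]);
    assert_eq!(totals[&0], 15);
    assert_eq!(totals[&3], -2);
}
```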
let two_thirds = (VALIDATOR_COUNT / 3) * 2; let two_thirds_validators: Vec = (0..two_thirds).collect(); harness - .extend_chain( - E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(two_thirds_validators), - ) + .extend_slots_some_validators(E::slots_per_epoch() as usize, two_thirds_validators.clone()) .await; - // a small subset of validators to compute attestation rewards for - let validators_subset = [0, VALIDATOR_COUNT / 2, VALIDATOR_COUNT - 1]; - - // capture balances before transitioning to N + 2 - let initial_balances = get_validator_balances(harness.get_current_state(), &validators_subset); + check_all_base_rewards_for_subset(&harness, initial_balances, validators_subset).await; } - // extend slots to beginning of epoch N + 2 - harness.extend_slots(E::slots_per_epoch() as usize).await; +async fn check_all_base_rewards( + harness: &BeaconChainHarness>, + balances: Vec, +) { + check_all_base_rewards_for_subset(harness, balances, vec![]).await; +} - let validators_subset_ids: Vec = validators_subset - .into_iter() - .map(|idx| ValidatorId::Index(idx as u64)) +async fn check_all_base_rewards_for_subset( + harness: &BeaconChainHarness>, + mut balances: Vec, + validator_subset: Vec, +) { + let validator_subset_ids: Vec = validator_subset + .iter() + .map(|&idx| ValidatorId::Index(idx)) .collect(); - // compute reward deltas for the subset of validators in epoch N - let StandardAttestationRewards { - ideal_rewards: _, - total_rewards, - } = harness - .chain - .compute_attestation_rewards(Epoch::new(0), validators_subset_ids) - .unwrap(); + // capture the amount of epochs generated by the caller + let epochs = harness.get_current_slot().epoch(E::slots_per_epoch()) + 1; + + // advance two empty epochs to ensure balances are updated by the epoch boundaries + for _ in 0..E::slots_per_epoch() * 2 { + harness.advance_slot(); + } + // fill one slot to ensure state is updated + harness.extend_slots(1).await; + + // calculate proposal rewards + let mut proposal_rewards_map = HashMap::new(); + for slot in 1..(E::slots_per_epoch() * epochs.as_u64()) { + if let Some(block) = harness + .chain + .block_at_slot(Slot::new(slot), WhenSlotSkipped::None) + .unwrap() + { + let parent_state = harness + .chain + .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots) + .unwrap(); + + let mut pre_state = BlockReplayer::>::new( + parent_state, + &harness.spec, + ) + .no_signature_verification() + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .unwrap() + .into_state(); + + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward(block.message(), &mut pre_state) + .unwrap(); + let total_proposer_reward = proposal_rewards_map + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; + } + } + apply_other_rewards(&mut balances, &proposal_rewards_map); - // apply attestation rewards to initial balances - let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + for epoch in 0..epochs.as_u64() { + // compute reward deltas in epoch + let total_rewards = harness + .chain + .compute_attestation_rewards(Epoch::new(epoch), validator_subset_ids.clone()) + .unwrap() + .total_rewards; + + // apply attestation rewards to balances + apply_attestation_rewards(&mut balances, total_rewards); + } // verify expected balances against actual balances - let balances = get_validator_balances(harness.get_current_state(), 
&validators_subset); - assert_eq!(expected_balances, balances); + let actual_balances: Vec = harness.get_current_state().balances().to_vec(); + if validator_subset.is_empty() { + assert_eq!(balances, actual_balances); + } else { + for validator in validator_subset { + assert_eq!( + balances[validator as usize], + actual_balances[validator as usize] + ); + } + } } /// Apply a vec of `TotalAttestationRewards` to initial balances, and return fn apply_attestation_rewards( - initial_balances: &[u64], + balances: &mut [u64], attestation_rewards: Vec, -) -> Vec { - initial_balances - .iter() - .zip(attestation_rewards) - .map(|(&initial_balance, rewards)| { - let expected_balance = initial_balance as i64 - + rewards.head - + rewards.source - + rewards.target - + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 - + rewards.inactivity; - expected_balance as u64 - }) - .collect::>() -} - -fn get_validator_balances(state: BeaconState, validators: &[usize]) -> Vec { - validators - .iter() - .flat_map(|&id| { - state - .balances() - .get(id) - .cloned() - .ok_or(BeaconStateError::BalancesOutOfBounds(id)) - }) - .collect() -} - -fn apply_beacon_block_rewards( - proposal_rewards_map: &HashMap, - expected_balances: Vec, -) -> Vec { - let calculated_balances = expected_balances - .iter() - .enumerate() - .map(|(i, balance)| balance + proposal_rewards_map.get(&(i as u64)).unwrap_or(&0u64)) - .collect(); - - calculated_balances +) { + for rewards in attestation_rewards { + let balance = balances.get_mut(rewards.validator_index as usize).unwrap(); + *balance = (*balance as i64 + + rewards.head + + rewards.source + + rewards.target + + rewards.inclusion_delay.map(|q| q.value).unwrap_or(0) as i64 + + rewards.inactivity) as u64; + } } -fn apply_sync_committee_rewards( - sync_committee_rewards_map: &HashMap, - expected_balances: Vec, -) -> Vec { - let calculated_balances = expected_balances - .iter() - .enumerate() - .map(|(i, balance)| { - (*balance as i64 + sync_committee_rewards_map.get(&(i as u64)).unwrap_or(&0i64)) - .unsigned_abs() - }) - .collect(); - - calculated_balances +fn apply_other_rewards(balances: &mut [u64], rewards_map: &HashMap) { + for (i, balance) in balances.iter_mut().enumerate() { + *balance = balance.saturating_add_signed(*rewards_map.get(&(i as u64)).unwrap_or(&0)); + } } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 95bf7f1ce84..1b1e5ea5149 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5,7 +5,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::test_utils::RelativeSyncCommittee; +use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, KZG, @@ -104,6 +104,142 @@ fn get_harness_generic( harness } +#[tokio::test] +async fn light_client_bootstrap_test() { + let spec = test_spec::(); + let Some(_) = spec.altair_fork_epoch else { + // No-op prior to Altair. 
+ return; + }; + + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 6); + let db_path = tempdir().unwrap(); + let log = test_logger(); + + let seconds_per_slot = spec.seconds_per_slot; + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + let num_initial_slots = E::slots_per_epoch() * 7; + let slots: Vec = (1..num_initial_slots).map(Slot::new).collect(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots_with_lc_data( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + None, + SyncCommitteeStrategy::NoValidators, + ) + .await; + + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_state = store + .get_state(&wss_state_root, Some(checkpoint_slot)) + .unwrap() + .unwrap(); + + let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let mock = + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .execution_layer(Some(mock.el)) + .kzg(kzg) + .build() + .expect("should build"); + + let current_state = harness.get_current_state(); + + if ForkName::Electra == current_state.fork_name_unchecked() { + // TODO(electra) fix beacon state `compute_merkle_proof` + return; + } + + let finalized_checkpoint = beacon_chain + .canonical_head + .cached_head() + .finalized_checkpoint(); + + let block_root = finalized_checkpoint.root; + + let (lc_bootstrap, _) = harness + .chain + .get_light_client_bootstrap(&block_root) + .unwrap() + .unwrap(); + + let bootstrap_slot = match lc_bootstrap { + LightClientBootstrap::Altair(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Capella(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Deneb(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Electra(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + }; + + 
assert_eq!( + bootstrap_slot.epoch(E::slots_per_epoch()), + finalized_checkpoint.epoch + ); +} + #[tokio::test] async fn light_client_updates_test() { let spec = test_spec::(); @@ -170,7 +306,7 @@ async fn light_client_updates_test() { harness.advance_slot(); harness - .extend_chain( + .extend_chain_with_light_client_data( num_final_blocks as usize, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, @@ -224,53 +360,6 @@ async fn light_client_updates_test() { return; } - let block_root = *current_state - .get_block_root(current_state.slot() - Slot::new(1)) - .unwrap(); - - let contributions = harness.make_sync_contributions( - ¤t_state, - block_root, - current_state.slot() - Slot::new(1), - RelativeSyncCommittee::Current, - ); - - // generate sync aggregates - for (_, contribution_and_proof) in contributions { - let contribution = contribution_and_proof - .expect("contribution exists for committee") - .message - .contribution; - beacon_chain - .op_pool - .insert_sync_contribution(contribution.clone()) - .unwrap(); - beacon_chain - .op_pool - .insert_sync_contribution(contribution) - .unwrap(); - } - - // check that we can fetch the newly generated sync aggregate - let sync_aggregate = beacon_chain - .op_pool - .get_sync_aggregate(¤t_state) - .unwrap() - .unwrap(); - - // cache light client data - beacon_chain - .light_client_server_cache - .recompute_and_cache_updates( - store.clone(), - current_state.slot() - Slot::new(1), - &block_root, - &sync_aggregate, - &log, - &spec, - ) - .unwrap(); - // calculate the sync period from the previous slot let sync_period = (current_state.slot() - Slot::new(1)) .epoch(E::slots_per_epoch()) @@ -291,61 +380,13 @@ async fn light_client_updates_test() { } harness - .extend_chain( + .extend_chain_with_light_client_data( num_final_blocks as usize, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, ) .await; - let current_state = harness.get_current_state(); - - let block_root = *current_state - .get_block_root(current_state.slot() - Slot::new(1)) - .unwrap(); - - let contributions = harness.make_sync_contributions( - ¤t_state, - block_root, - current_state.slot() - Slot::new(1), - RelativeSyncCommittee::Current, - ); - - // generate new sync aggregates from this new state - for (_, contribution_and_proof) in contributions { - let contribution = contribution_and_proof - .expect("contribution exists for committee") - .message - .contribution; - beacon_chain - .op_pool - .insert_sync_contribution(contribution.clone()) - .unwrap(); - beacon_chain - .op_pool - .insert_sync_contribution(contribution) - .unwrap(); - } - - let sync_aggregate = beacon_chain - .op_pool - .get_sync_aggregate(¤t_state) - .unwrap() - .unwrap(); - - // cache new light client data - beacon_chain - .light_client_server_cache - .recompute_and_cache_updates( - store.clone(), - current_state.slot() - Slot::new(1), - &block_root, - &sync_aggregate, - &log, - &spec, - ) - .unwrap(); - // we should now have two light client updates in the db let lc_updates = beacon_chain .get_light_client_updates(sync_period, 100) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index f506f0bb94d..cd5a1d6cff0 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -57,6 +57,7 @@ use std::pin::Pin; use std::sync::Arc; use std::task::Context; use std::time::Duration; +use strum::IntoStaticStr; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; 
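The `use strum::IntoStaticStr;` import added above supports the hunks that follow, which replace the hand-maintained `pub const ...: &str` work identifiers with a `WorkType` enum deriving strum's `IntoStaticStr`. A minimal sketch of that derive, assuming the `strum` crate with its `derive` feature enabled (the enum below is a stand-in, not the real `WorkType`):

```rust
use strum::IntoStaticStr;

// Stand-in enum: `serialize_all = "snake_case"` makes the derived
// `From<WorkKind> for &'static str` produce the same snake_case labels that
// the removed string constants spelled out by hand.
#[derive(IntoStaticStr, Debug, Clone, Copy, PartialEq, Eq)]
#[strum(serialize_all = "snake_case")]
enum WorkKind {
    GossipAttestation,
    RpcBlock,
    ChainSegmentBackfill,
}

fn main() {
    let label: &'static str = WorkKind::GossipAttestation.into();
    assert_eq!(label, "gossip_attestation");
    assert_eq!(
        <&'static str>::from(WorkKind::ChainSegmentBackfill),
        "chain_segment_backfill"
    );
}
```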
@@ -219,46 +220,6 @@ const DEFAULT_MAX_GOSSIP_AGGREGATE_BATCH_SIZE: usize = 64; /// Unique IDs used for metrics and testing. pub const WORKER_FREED: &str = "worker_freed"; pub const NOTHING_TO_DO: &str = "nothing_to_do"; -pub const GOSSIP_ATTESTATION: &str = "gossip_attestation"; -pub const GOSSIP_ATTESTATION_BATCH: &str = "gossip_attestation_batch"; -pub const GOSSIP_AGGREGATE: &str = "gossip_aggregate"; -pub const GOSSIP_AGGREGATE_BATCH: &str = "gossip_aggregate_batch"; -pub const GOSSIP_BLOCK: &str = "gossip_block"; -pub const GOSSIP_BLOBS_SIDECAR: &str = "gossip_blobs_sidecar"; -pub const GOSSIP_BLOBS_COLUMN_SIDECAR: &str = "gossip_blobs_column_sidecar"; -pub const DELAYED_IMPORT_BLOCK: &str = "delayed_import_block"; -pub const GOSSIP_VOLUNTARY_EXIT: &str = "gossip_voluntary_exit"; -pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; -pub const GOSSIP_ATTESTER_SLASHING: &str = "gossip_attester_slashing"; -pub const GOSSIP_SYNC_SIGNATURE: &str = "gossip_sync_signature"; -pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; -pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; -pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; -pub const RPC_BLOCK: &str = "rpc_block"; -pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; -pub const RPC_BLOBS: &str = "rpc_blob"; -pub const RPC_CUSTODY_COLUMN: &str = "rpc_custody_column"; -pub const RPC_VERIFY_DATA_COLUMNS: &str = "rpc_verify_data_columns"; -pub const SAMPLING_RESULT: &str = "sampling_result"; -pub const CHAIN_SEGMENT: &str = "chain_segment"; -pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; -pub const STATUS_PROCESSING: &str = "status_processing"; -pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; -pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; -pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; -pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request"; -pub const DATA_COLUMNS_BY_ROOTS_REQUEST: &str = "data_columns_by_roots_request"; -pub const DATA_COLUMNS_BY_RANGE_REQUEST: &str = "data_columns_by_range_request"; -pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; -pub const LIGHT_CLIENT_FINALITY_UPDATE_REQUEST: &str = "light_client_finality_update_request"; -pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST: &str = "light_client_optimistic_update_request"; -pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation"; -pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate"; -pub const UNKNOWN_LIGHT_CLIENT_UPDATE: &str = "unknown_light_client_update"; -pub const UNKNOWN_BLOCK_SAMPLING_REQUEST: &str = "unknown_block_sampling_request"; -pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change"; -pub const API_REQUEST_P0: &str = "api_request_p0"; -pub const API_REQUEST_P1: &str = "api_request_p1"; #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] pub struct BeaconProcessorConfig { @@ -454,9 +415,14 @@ pub struct WorkEvent { } impl WorkEvent { + /// Get a representation of the type of work this `WorkEvent` contains. + pub fn work_type(&self) -> WorkType { + self.work.to_type() + } + /// Get a `str` representation of the type of work this `WorkEvent` contains. 
- pub fn work_type(&self) -> &'static str { - self.work.str_id() + pub fn work_type_str(&self) -> &'static str { + self.work_type().into() } } @@ -555,7 +521,7 @@ impl BeaconProcessorSend { Err(e) => { metrics::inc_counter_vec( &metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE, - &[work_type], + &[work_type.into()], ); Err(e) } @@ -651,54 +617,109 @@ pub enum Work { impl fmt::Debug for Work { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.str_id()) + write!(f, "{}", Into::<&'static str>::into(self.to_type())) } } +#[derive(IntoStaticStr, PartialEq, Eq, Debug)] +#[strum(serialize_all = "snake_case")] +pub enum WorkType { + GossipAttestation, + UnknownBlockAttestation, + GossipAttestationBatch, + GossipAggregate, + UnknownBlockAggregate, + UnknownLightClientOptimisticUpdate, + UnknownBlockSamplingRequest, + GossipAggregateBatch, + GossipBlock, + GossipBlobSidecar, + GossipDataColumnSidecar, + DelayedImportBlock, + GossipVoluntaryExit, + GossipProposerSlashing, + GossipAttesterSlashing, + GossipSyncSignature, + GossipSyncContribution, + GossipLightClientFinalityUpdate, + GossipLightClientOptimisticUpdate, + RpcBlock, + RpcBlobs, + RpcCustodyColumn, + RpcVerifyDataColumn, + SamplingResult, + IgnoredRpcBlock, + ChainSegment, + ChainSegmentBackfill, + Status, + BlocksByRangeRequest, + BlocksByRootsRequest, + BlobsByRangeRequest, + BlobsByRootsRequest, + DataColumnsByRootsRequest, + DataColumnsByRangeRequest, + GossipBlsToExecutionChange, + LightClientBootstrapRequest, + LightClientOptimisticUpdateRequest, + LightClientFinalityUpdateRequest, + ApiRequestP0, + ApiRequestP1, +} + impl Work { - /// Provides a `&str` that uniquely identifies each enum variant. fn str_id(&self) -> &'static str { + self.to_type().into() + } + + /// Provides a `&str` that uniquely identifies each enum variant. + fn to_type(&self) -> WorkType { match self { - Work::GossipAttestation { .. } => GOSSIP_ATTESTATION, - Work::GossipAttestationBatch { .. } => GOSSIP_ATTESTATION_BATCH, - Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, - Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, - Work::GossipBlock(_) => GOSSIP_BLOCK, - Work::GossipBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, - Work::GossipDataColumnSidecar(_) => GOSSIP_BLOBS_COLUMN_SIDECAR, - Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, - Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, - Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, - Work::GossipAttesterSlashing(_) => GOSSIP_ATTESTER_SLASHING, - Work::GossipSyncSignature(_) => GOSSIP_SYNC_SIGNATURE, - Work::GossipSyncContribution(_) => GOSSIP_SYNC_CONTRIBUTION, - Work::GossipLightClientFinalityUpdate(_) => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, - Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, - Work::RpcBlock { .. } => RPC_BLOCK, - Work::RpcBlobs { .. } => RPC_BLOBS, - Work::RpcCustodyColumn { .. } => RPC_CUSTODY_COLUMN, - Work::RpcVerifyDataColumn(_) => RPC_VERIFY_DATA_COLUMNS, - Work::SamplingResult(_) => SAMPLING_RESULT, - Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, - Work::ChainSegment { .. 
} => CHAIN_SEGMENT, - Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, - Work::Status(_) => STATUS_PROCESSING, - Work::BlocksByRangeRequest(_) => BLOCKS_BY_RANGE_REQUEST, - Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, - Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST, - Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST, - Work::DataColumnsByRootsRequest(_) => DATA_COLUMNS_BY_ROOTS_REQUEST, - Work::DataColumnsByRangeRequest(_) => DATA_COLUMNS_BY_RANGE_REQUEST, - Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, - Work::LightClientOptimisticUpdateRequest(_) => LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST, - Work::LightClientFinalityUpdateRequest(_) => LIGHT_CLIENT_FINALITY_UPDATE_REQUEST, - Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION, - Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE, - Work::UnknownLightClientOptimisticUpdate { .. } => UNKNOWN_LIGHT_CLIENT_UPDATE, - Work::UnknownBlockSamplingRequest { .. } => UNKNOWN_BLOCK_SAMPLING_REQUEST, - Work::GossipBlsToExecutionChange(_) => GOSSIP_BLS_TO_EXECUTION_CHANGE, - Work::ApiRequestP0 { .. } => API_REQUEST_P0, - Work::ApiRequestP1 { .. } => API_REQUEST_P1, + Work::GossipAttestation { .. } => WorkType::GossipAttestation, + Work::GossipAttestationBatch { .. } => WorkType::GossipAttestationBatch, + Work::GossipAggregate { .. } => WorkType::GossipAggregate, + Work::GossipAggregateBatch { .. } => WorkType::GossipAggregateBatch, + Work::GossipBlock(_) => WorkType::GossipBlock, + Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, + Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, + Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, + Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, + Work::GossipAttesterSlashing(_) => WorkType::GossipAttesterSlashing, + Work::GossipSyncSignature(_) => WorkType::GossipSyncSignature, + Work::GossipSyncContribution(_) => WorkType::GossipSyncContribution, + Work::GossipLightClientFinalityUpdate(_) => WorkType::GossipLightClientFinalityUpdate, + Work::GossipLightClientOptimisticUpdate(_) => { + WorkType::GossipLightClientOptimisticUpdate + } + Work::GossipBlsToExecutionChange(_) => WorkType::GossipBlsToExecutionChange, + Work::RpcBlock { .. } => WorkType::RpcBlock, + Work::RpcBlobs { .. } => WorkType::RpcBlobs, + Work::RpcCustodyColumn { .. } => WorkType::RpcCustodyColumn, + Work::RpcVerifyDataColumn { .. } => WorkType::RpcVerifyDataColumn, + Work::SamplingResult { .. } => WorkType::SamplingResult, + Work::IgnoredRpcBlock { .. } => WorkType::IgnoredRpcBlock, + Work::ChainSegment { .. 
} => WorkType::ChainSegment, + Work::ChainSegmentBackfill(_) => WorkType::ChainSegmentBackfill, + Work::Status(_) => WorkType::Status, + Work::BlocksByRangeRequest(_) => WorkType::BlocksByRangeRequest, + Work::BlocksByRootsRequest(_) => WorkType::BlocksByRootsRequest, + Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, + Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, + Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, + Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, + Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, + Work::LightClientOptimisticUpdateRequest(_) => { + WorkType::LightClientOptimisticUpdateRequest + } + Work::LightClientFinalityUpdateRequest(_) => WorkType::LightClientFinalityUpdateRequest, + Work::UnknownBlockAttestation { .. } => WorkType::UnknownBlockAttestation, + Work::UnknownBlockAggregate { .. } => WorkType::UnknownBlockAggregate, + Work::UnknownBlockSamplingRequest { .. } => WorkType::UnknownBlockSamplingRequest, + Work::UnknownLightClientOptimisticUpdate { .. } => { + WorkType::UnknownLightClientOptimisticUpdate + } + Work::ApiRequestP0 { .. } => WorkType::ApiRequestP0, + Work::ApiRequestP1 { .. } => WorkType::ApiRequestP1, } } } @@ -987,7 +1008,7 @@ impl BeaconProcessor { .map_or(false, |event| event.drop_during_sync); let idle_tx = idle_tx.clone(); - match work_event { + let modified_queue_id = match work_event { // There is no new work event, but we are able to spawn a new worker. // // We don't check the `work.drop_during_sync` here. We assume that if it made @@ -995,38 +1016,40 @@ impl BeaconProcessor { None if can_spawn => { // Check for chain segments first, they're the most efficient way to get // blocks into the system. - if let Some(item) = chain_segment_queue.pop() { - self.spawn_worker(item, idle_tx); + let work_event: Option> = if let Some(item) = + chain_segment_queue.pop() + { + Some(item) // Check sync blocks before gossip blocks, since we've already explicitly // requested these blocks. } else if let Some(item) = rpc_block_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = rpc_blob_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = rpc_custody_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // TODO(das): decide proper prioritization for sampling columns } else if let Some(item) = rpc_custody_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = rpc_verify_data_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = sampling_result_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. } else if let Some(item) = delayed_block_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check gossip blocks before gossip attestations, since a block might be // required to verify some attestations. } else if let Some(item) = gossip_block_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_blob_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_data_column_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check the priority 0 API requests after blocks and blobs, but before attestations. 
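The scheduler loop being rewritten here changes the long `if let Some(item) = ..._queue.pop() { self.spawn_worker(item, idle_tx) }` chain so that every branch merely yields `Some(work)`; a single tail then spawns the worker once and remembers which queue was drained so that queue's length can be reported to the new metric afterwards. A simplified sketch of that control-flow shape, with toy types standing in for the real `Work` and `BeaconProcessor`:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Kind {
    Block,
    Attestation,
}

struct Work(Kind);

fn spawn_worker(_work: Work) {
    // stand-in for `self.spawn_worker(work, idle_tx)`
}

/// Pop the highest-priority item, spawn it once, and report which kind of
/// queue was modified so the caller can record that queue's length.
fn schedule(block_queue: &mut Vec<Work>, att_queue: &mut Vec<Work>) -> Option<Kind> {
    // Every branch now just selects the work item...
    let work = if let Some(item) = block_queue.pop() {
        Some(item)
    } else if let Some(item) = att_queue.pop() {
        Some(item)
    } else {
        None
    };

    // ...and the single tail does the spawning plus the bookkeeping.
    work.map(|w| {
        let kind = w.0;
        spawn_worker(w);
        kind
    })
}

fn main() {
    let mut blocks = vec![Work(Kind::Block)];
    let mut atts = vec![Work(Kind::Attestation)];
    assert_eq!(schedule(&mut blocks, &mut atts), Some(Kind::Block));
    assert_eq!(schedule(&mut blocks, &mut atts), Some(Kind::Attestation));
    assert_eq!(schedule(&mut blocks, &mut atts), None);
}
```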
} else if let Some(item) = api_request_p0_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check the aggregates, *then* the unaggregates since we assume that // aggregates are more valuable to local validators and effectively give us // more information with less signature verification time. @@ -1038,9 +1061,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single aggregate is in the queue, process it individually. - if let Some(item) = aggregate_queue.pop() { - self.spawn_worker(item, idle_tx); - } + aggregate_queue.pop() } else { // Collect two or more aggregates into a batch, so they can take // advantage of batch signature verification. @@ -1071,13 +1092,10 @@ impl BeaconProcessor { if let Some(process_batch) = process_batch_opt { // Process all aggregates with a single worker. - self.spawn_worker( - Work::GossipAggregateBatch { - aggregates, - process_batch, - }, - idle_tx, - ) + Some(Work::GossipAggregateBatch { + aggregates, + process_batch, + }) } else { // There is no good reason for this to // happen, it is a serious logic error. @@ -1085,6 +1103,7 @@ impl BeaconProcessor { // work items exist, we should always have a // work closure at this point. crit!(self.log, "Missing aggregate work"); + None } } // Check the unaggregated attestation queue. @@ -1098,9 +1117,7 @@ impl BeaconProcessor { if batch_size < 2 { // One single attestation is in the queue, process it individually. - if let Some(item) = attestation_queue.pop() { - self.spawn_worker(item, idle_tx); - } + attestation_queue.pop() } else { // Collect two or more attestations into a batch, so they can take // advantage of batch signature verification. @@ -1132,13 +1149,10 @@ impl BeaconProcessor { if let Some(process_batch) = process_batch_opt { // Process all attestations with a single worker. - self.spawn_worker( - Work::GossipAttestationBatch { - attestations, - process_batch, - }, - idle_tx, - ) + Some(Work::GossipAttestationBatch { + attestations, + process_batch, + }) } else { // There is no good reason for this to // happen, it is a serious logic error. @@ -1146,71 +1160,72 @@ impl BeaconProcessor { // work items exist, we should always have a // work closure at this point. crit!(self.log, "Missing attestations work"); + None } } // Check sync committee messages after attestations as their rewards are lesser // and they don't influence fork choice. } else if let Some(item) = sync_contribution_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = sync_message_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Aggregates and unaggregates queued for re-processing are older and we // care about fresher ones, so check those first. } else if let Some(item) = unknown_block_aggregate_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = unknown_block_attestation_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check RPC methods next. 
Status messages are needed for sync so // prioritize them over syncing requests from other peers (BlocksByRange // and BlocksByRoot) } else if let Some(item) = status_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = bbrange_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = bbroots_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = blbrange_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = blbroots_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = dcbroots_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = dcbrange_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Prioritize sampling requests after block syncing requests } else if let Some(item) = unknown_block_sampling_request_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check slashings after all other consensus messages so we prioritize // following head. // // Check attester slashings before proposer slashings since they have the // potential to slash multiple validators at once. } else if let Some(item) = gossip_attester_slashing_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_proposer_slashing_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check exits and address changes late since our validators don't get // rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = gossip_bls_to_execution_change_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Check the priority 1 API requests after we've // processed all the interesting things from the network // and things required for us to stay in good repute // with our P2P peers. } else if let Some(item) = api_request_p1_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Handle backfill sync chain segments. } else if let Some(item) = backfill_chain_segment.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // Handle light client requests. } else if let Some(item) = lc_bootstrap_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = lc_optimistic_update_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) } else if let Some(item) = lc_finality_update_queue.pop() { - self.spawn_worker(item, idle_tx); + Some(item) // This statement should always be the final else statement. } else { // Let the journal know that a worker is freed and there's nothing else @@ -1220,6 +1235,15 @@ impl BeaconProcessor { // during testing. let _ = work_journal_tx.try_send(NOTHING_TO_DO); } + None + }; + + if let Some(work_event) = work_event { + let work_type = work_event.to_type(); + self.spawn_worker(work_event, idle_tx); + Some(work_type) + } else { + None } } // There is no new work event and we are unable to spawn a new worker. @@ -1231,6 +1255,7 @@ impl BeaconProcessor { "Unexpected gossip processor condition"; "msg" => "no new work and cannot spawn worker" ); + None } // The chain is syncing and this event should be dropped during sync. Some(work_event) @@ -1248,11 +1273,13 @@ impl BeaconProcessor { "msg" => "chain is syncing", "work_id" => work_id ); + None } // There is a new work event and the chain is not syncing. Process it or queue // it. Some(WorkEvent { work, .. 
}) => { let work_id = work.str_id(); + let work_type = work.to_type(); match work { _ if can_spawn => self.spawn_worker(work, idle_tx), @@ -1371,94 +1398,76 @@ impl BeaconProcessor { Work::ApiRequestP1 { .. } => { api_request_p1_queue.push(work, work_id, &self.log) } - } + }; + Some(work_type) } - } + }; metrics::set_gauge( &metrics::BEACON_PROCESSOR_WORKERS_ACTIVE_TOTAL, self.current_workers as i64, ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL, - attestation_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL, - aggregate_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL, - sync_message_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL, - sync_contribution_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL, - gossip_block_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL, - gossip_blob_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_QUEUE_TOTAL, - gossip_data_column_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, - rpc_block_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL, - rpc_blob_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_CUSTODY_COLUMN_QUEUE_TOTAL, - rpc_custody_column_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_RPC_VERIFY_DATA_COLUMN_QUEUE_TOTAL, - rpc_verify_data_column_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_SAMPLING_RESULT_QUEUE_TOTAL, - sampling_result_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, - chain_segment_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL, - backfill_chain_segment.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_EXIT_QUEUE_TOTAL, - gossip_voluntary_exit_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL, - gossip_proposer_slashing_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL, - gossip_attester_slashing_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL, - gossip_bls_to_execution_change_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL, - api_request_p0_queue.len() as i64, - ); - metrics::set_gauge( - &metrics::BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL, - api_request_p1_queue.len() as i64, - ); + + if let Some(modified_queue_id) = modified_queue_id { + let queue_len = match modified_queue_id { + WorkType::GossipAttestation => aggregate_queue.len(), + WorkType::UnknownBlockAttestation => unknown_block_attestation_queue.len(), + WorkType::GossipAttestationBatch => 0, // No queue + WorkType::GossipAggregate => aggregate_queue.len(), + WorkType::UnknownBlockAggregate => unknown_block_aggregate_queue.len(), + WorkType::UnknownLightClientOptimisticUpdate => { + unknown_light_client_update_queue.len() + } + WorkType::UnknownBlockSamplingRequest => { + unknown_block_sampling_request_queue.len() + } + WorkType::GossipAggregateBatch => 0, // No 
queue + WorkType::GossipBlock => gossip_block_queue.len(), + WorkType::GossipBlobSidecar => gossip_blob_queue.len(), + WorkType::GossipDataColumnSidecar => gossip_data_column_queue.len(), + WorkType::DelayedImportBlock => delayed_block_queue.len(), + WorkType::GossipVoluntaryExit => gossip_voluntary_exit_queue.len(), + WorkType::GossipProposerSlashing => gossip_proposer_slashing_queue.len(), + WorkType::GossipAttesterSlashing => gossip_attester_slashing_queue.len(), + WorkType::GossipSyncSignature => sync_message_queue.len(), + WorkType::GossipSyncContribution => sync_contribution_queue.len(), + WorkType::GossipLightClientFinalityUpdate => finality_update_queue.len(), + WorkType::GossipLightClientOptimisticUpdate => { + optimistic_update_queue.len() + } + WorkType::RpcBlock => rpc_block_queue.len(), + WorkType::RpcBlobs | WorkType::IgnoredRpcBlock => rpc_blob_queue.len(), + WorkType::RpcCustodyColumn => rpc_custody_column_queue.len(), + WorkType::RpcVerifyDataColumn => rpc_verify_data_column_queue.len(), + WorkType::SamplingResult => sampling_result_queue.len(), + WorkType::ChainSegment => chain_segment_queue.len(), + WorkType::ChainSegmentBackfill => backfill_chain_segment.len(), + WorkType::Status => status_queue.len(), + WorkType::BlocksByRangeRequest => blbrange_queue.len(), + WorkType::BlocksByRootsRequest => blbroots_queue.len(), + WorkType::BlobsByRangeRequest => bbrange_queue.len(), + WorkType::BlobsByRootsRequest => bbroots_queue.len(), + WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), + WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), + WorkType::GossipBlsToExecutionChange => { + gossip_bls_to_execution_change_queue.len() + } + WorkType::LightClientBootstrapRequest => lc_bootstrap_queue.len(), + WorkType::LightClientOptimisticUpdateRequest => { + lc_optimistic_update_queue.len() + } + WorkType::LightClientFinalityUpdateRequest => { + lc_finality_update_queue.len() + } + WorkType::ApiRequestP0 => api_request_p0_queue.len(), + WorkType::ApiRequestP1 => api_request_p1_queue.len(), + }; + metrics::observe_vec( + &metrics::BEACON_PROCESSOR_QUEUE_LENGTH, + &[modified_queue_id.into()], + queue_len as f64, + ); + } if aggregate_queue.is_full() && aggregate_debounce.elapsed() { error!( diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 8bc03cee6c7..0a7bdba18d1 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -62,163 +62,16 @@ pub static BEACON_PROCESSOR_EVENT_HANDLING_SECONDS: LazyLock> "Time spent handling a new message and allocating it to a queue or worker.", ) }); -// Gossip blocks. -pub static BEACON_PROCESSOR_GOSSIP_BLOCK_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_gossip_block_queue_total", - "Count of blocks from gossip waiting to be verified.", - ) - }); -// Gossip blobs. -pub static BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_gossip_blob_queue_total", - "Count of blobs from gossip waiting to be verified.", - ) - }); -// Gossip data column sidecars. -pub static BEACON_PROCESSOR_GOSSIP_DATA_COLUMN_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_gossip_data_column_queue_total", - "Count of data column sidecars from gossip waiting to be verified.", - ) - }); -// Gossip Exits. 
-pub static BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_exit_queue_total", - "Count of exits from gossip waiting to be verified.", +pub static BEACON_PROCESSOR_QUEUE_LENGTH: LazyLock> = LazyLock::new(|| { + try_create_histogram_vec_with_buckets( + "beacon_processor_work_event_queue_length", + "Count of work events in queue waiting to be processed.", + Ok(vec![ + 0.0, 1.0, 4.0, 16.0, 64.0, 256.0, 1024.0, 4096.0, 16384.0, 65536.0, + ]), + &["type"], ) }); -// Gossip proposer slashings. -pub static BEACON_PROCESSOR_PROPOSER_SLASHING_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_proposer_slashing_queue_total", - "Count of proposer slashings from gossip waiting to be verified.", - ) - }); -// Gossip attester slashings. -pub static BEACON_PROCESSOR_ATTESTER_SLASHING_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_attester_slashing_queue_total", - "Count of attester slashings from gossip waiting to be verified.", - ) - }); -// Gossip BLS to execution changes. -pub static BEACON_PROCESSOR_BLS_TO_EXECUTION_CHANGE_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_bls_to_execution_change_queue_total", - "Count of address changes from gossip waiting to be verified.", - ) - }); -// Rpc blocks. -pub static BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_block_queue_total", - "Count of blocks from the rpc waiting to be verified.", - ) - }); -// Rpc blobs. -pub static BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_blob_queue_total", - "Count of blobs from the rpc waiting to be verified.", - ) - }); -// Rpc custody data columns. -pub static BEACON_PROCESSOR_RPC_CUSTODY_COLUMN_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_custody_column_queue_total", - "Count of custody columns from the rpc waiting to be imported.", - ) - }); -// Rpc verify data columns -pub static BEACON_PROCESSOR_RPC_VERIFY_DATA_COLUMN_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_rpc_verify_data_column_queue_total", - "Count of data columns from the rpc waiting to be verified.", - ) - }); -// Sampling result -pub static BEACON_PROCESSOR_SAMPLING_RESULT_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_sampling_result_queue_total", - "Count of sampling results waiting to be processed.", - ) - }); -// Chain segments. -pub static BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_chain_segment_queue_total", - "Count of chain segments from the rpc waiting to be verified.", - ) - }); -pub static BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_backfill_chain_segment_queue_total", - "Count of backfill chain segments from the rpc waiting to be verified.", - ) - }); -// Unaggregated attestations. -pub static BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_unaggregated_attestation_queue_total", - "Count of unagg. attestations waiting to be processed.", - ) - }); -// Aggregated attestations. 
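The metrics hunk here collapses the many per-queue `IntGauge`s into the single `beacon_processor_work_event_queue_length` histogram labelled by work type, built above with `try_create_histogram_vec_with_buckets` and fed via `metrics::observe_vec`. A sketch of the same shape using the plain `prometheus` crate directly (an illustration only; Lighthouse's own metrics helpers wrap this, and registry registration is omitted):

```rust
use prometheus::{HistogramOpts, HistogramVec};

fn main() {
    // One labelled histogram replaces the per-queue gauges; the queue name is
    // carried in the "type" label and the buckets match those in the patch.
    let opts = HistogramOpts::new(
        "beacon_processor_work_event_queue_length",
        "Count of work events in queue waiting to be processed.",
    )
    .buckets(vec![
        0.0, 1.0, 4.0, 16.0, 64.0, 256.0, 1024.0, 4096.0, 16384.0, 65536.0,
    ]);
    let queue_length = HistogramVec::new(opts, &["type"]).unwrap();

    // After servicing a queue, observe its current length under its label.
    queue_length.with_label_values(&["gossip_block"]).observe(3.0);
    queue_length.with_label_values(&["rpc_block"]).observe(0.0);
}
```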
-pub static BEACON_PROCESSOR_AGGREGATED_ATTESTATION_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_aggregated_attestation_queue_total", - "Count of agg. attestations waiting to be processed.", - ) - }); -// Sync committee messages. -pub static BEACON_PROCESSOR_SYNC_MESSAGE_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_sync_message_queue_total", - "Count of sync committee messages waiting to be processed.", - ) - }); -// Sync contribution. -pub static BEACON_PROCESSOR_SYNC_CONTRIBUTION_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_sync_contribution_queue_total", - "Count of sync committee contributions waiting to be processed.", - ) - }); -// HTTP API requests. -pub static BEACON_PROCESSOR_API_REQUEST_P0_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_api_request_p0_queue_total", - "Count of P0 HTTP requesets waiting to be processed.", - ) - }); -pub static BEACON_PROCESSOR_API_REQUEST_P1_QUEUE_TOTAL: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "beacon_processor_api_request_p1_queue_total", - "Count of P1 HTTP requesets waiting to be processed.", - ) - }); /* * Attestation reprocessing queue metrics. diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 22e9931043e..998114f565e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -31,7 +31,7 @@ mod validator_inclusion; mod validators; mod version; -use crate::light_client::get_light_client_updates; +use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::fork_versioned_response; use beacon_chain::{ @@ -2411,40 +2411,7 @@ pub fn serve( block_root: Hash256, accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { - let (bootstrap, fork_name) = match chain.get_light_client_bootstrap(&block_root) - { - Ok(Some(res)) => res, - Ok(None) => { - return Err(warp_utils::reject::custom_not_found( - "Light client bootstrap unavailable".to_string(), - )); - } - Err(e) => { - return Err(warp_utils::reject::custom_server_error(format!( - "Unable to obtain LightClientBootstrap instance: {e:?}" - ))); - } - }; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(bootstrap.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json(&ForkVersionedResponse { - version: Some(fork_name), - metadata: EmptyMetadata {}, - data: bootstrap, - }) - .into_response()), - } - .map(|resp| add_consensus_version_header(resp, fork_name)) + get_light_client_bootstrap::(chain, &block_root, accept_header) }) }, ); diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index a6543114b85..ac8c08581ca 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -1,18 +1,20 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::version::{ + add_consensus_version_header, add_ssz_content_type_header, fork_versioned_response, V1, +}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ self as api_types, ChainSpec, 
    ForkVersionedResponse, LightClientUpdate, LightClientUpdateResponseChunk,
     LightClientUpdateSszResponse, LightClientUpdatesQuery,
 };
 use ssz::Encode;
 use std::sync::Arc;
+use types::{ForkName, Hash256, LightClientBootstrap};
 use warp::{
     hyper::{Body, Response},
     reply::Reply,
     Rejection,
 };
 
-use crate::version::{add_ssz_content_type_header, fork_versioned_response, V1};
-
 const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128;
 
 pub fn get_light_client_updates(
@@ -62,6 +64,44 @@ pub fn get_light_client_updates(
     }
 }
 
+pub fn get_light_client_bootstrap<T: BeaconChainTypes>(
+    chain: Arc<BeaconChain<T>>,
+    block_root: &Hash256,
+    accept_header: Option<api_types::Accept>,
+) -> Result<Response<Body>, Rejection> {
+    let (light_client_bootstrap, fork_name) = chain
+        .get_light_client_bootstrap(block_root)
+        .map_err(|err| {
+            let error_message = if let BeaconChainError::LightClientBootstrapError(err) = err {
+                err
+            } else {
+                "No LightClientBootstrap found".to_string()
+            };
+            warp_utils::reject::custom_not_found(error_message)
+        })?
+        .ok_or(warp_utils::reject::custom_not_found(
+            "No LightClientBootstrap found".to_string(),
+        ))?;
+
+    match accept_header {
+        Some(api_types::Accept::Ssz) => Response::builder()
+            .status(200)
+            .body(light_client_bootstrap.as_ssz_bytes().into())
+            .map(|res: Response<Body>| add_consensus_version_header(res, fork_name))
+            .map(|res: Response<Body>| add_ssz_content_type_header(res))
+            .map_err(|e| {
+                warp_utils::reject::custom_server_error(format!("failed to create response: {}", e))
+            }),
+        _ => {
+            let fork_versioned_response = map_light_client_bootstrap_to_json_response::<T>(
+                fork_name,
+                light_client_bootstrap,
+            )?;
+            Ok(warp::reply::json(&fork_versioned_response).into_response())
+        }
+    }
+}
+
 pub fn validate_light_client_updates_request<T: BeaconChainTypes>(
     chain: &BeaconChain<T>,
     query: &LightClientUpdatesQuery,
@@ -131,6 +171,13 @@ fn map_light_client_update_to_ssz_chunk(
     }
 }
 
+fn map_light_client_bootstrap_to_json_response<T: BeaconChainTypes>(
+    fork_name: ForkName,
+    light_client_bootstrap: LightClientBootstrap<T::EthSpec>,
+) -> Result<ForkVersionedResponse<LightClientBootstrap<T::EthSpec>>, Rejection> {
+    fork_versioned_response(V1, fork_name, light_client_bootstrap)
+}
+
 fn map_light_client_update_to_json_response<T: BeaconChainTypes>(
     chain: &BeaconChain<T>,
     light_client_update: LightClientUpdate<T::EthSpec>,
diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs
index 97e5a87fd3a..1ab75374ea8 100644
--- a/beacon_node/http_api/src/standard_block_rewards.rs
+++ b/beacon_node/http_api/src/standard_block_rewards.rs
@@ -15,12 +15,10 @@ pub fn compute_beacon_block_rewards(
 
     let block_ref = block.message();
 
-    let block_root = block.canonical_root();
-
     let mut state = get_state_before_applying_block(chain.clone(), &block)?;
 
     let rewards = chain
-        .compute_beacon_block_reward(block_ref, block_root, &mut state)
+        .compute_beacon_block_reward(block_ref, &mut state)
         .map_err(beacon_chain_error)?;
 
     Ok((rewards, execution_optimistic, finalized))
diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs
index 9ff411cf1c9..5034492e250 100644
--- a/beacon_node/http_api/tests/interactive_tests.rs
+++ b/beacon_node/http_api/tests/interactive_tests.rs
@@ -1,7 +1,7 @@ //!
Generic tests that make use of the (newer) `InteractiveApiTester` use beacon_chain::{ chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, - test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, ChainConfig, }; use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; @@ -88,6 +88,7 @@ async fn state_by_root_pruned_from_fork_choice() { BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, ) .await; @@ -469,6 +470,7 @@ pub async fn proposer_boost_re_org_test( BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, SyncCommitteeStrategy::AllValidators, + LightClientStrategy::Disabled, ) .await; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 8f962995300..01731530d36 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -1,6 +1,6 @@ //! Tests related to the beacon node's sync status use beacon_chain::{ - test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + test_utils::{AttestationStrategy, BlockStrategy, LightClientStrategy, SyncCommitteeStrategy}, BlockError, }; use eth2::StatusCode; @@ -37,6 +37,7 @@ async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> Interactiv BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, SyncCommitteeStrategy::AllValidators, + LightClientStrategy::Disabled, ) .await; tester diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6e6f72b6c08..19a01a91c50 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -153,7 +153,7 @@ impl ApiTester { if !SKIPPED_SLOTS.contains(&slot) { harness - .extend_chain( + .extend_chain_with_light_client_data( 1, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, @@ -1926,6 +1926,7 @@ impl ApiTester { ) .unwrap(); + assert_eq!(1, expected.len()); assert_eq!(result.clone().unwrap().len(), expected.len()); self } @@ -1933,19 +1934,26 @@ impl ApiTester { pub async fn test_get_beacon_light_client_bootstrap(self) -> Self { let block_id = BlockId(CoreBlockId::Finalized); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); - let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let result = match self .client .get_light_client_bootstrap::(block_root) .await { - Ok(result) => result.unwrap().data, + Ok(result) => result, Err(e) => panic!("query failed incorrectly: {e:?}"), }; - let expected = block.slot(); - assert_eq!(result.get_slot(), expected); + assert!(result.is_some()); + + let expected = self + .chain + .light_client_server_cache + .get_light_client_bootstrap(&self.chain.store, &block_root, 1u64, &self.chain.spec); + + assert!(expected.is_ok()); + + assert_eq!(result.unwrap().data, expected.unwrap().unwrap().0); self } diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 73552e0197f..6aa4e232d2f 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -45,22 +45,24 @@ pub trait Eth2Enr { impl Eth2Enr for Enr { fn attestation_bitfield(&self) -> Result, &'static str> { - let bitfield_bytes = self - .get(ATTESTATION_BITFIELD_ENR_KEY) - .ok_or("ENR attestation bitfield non-existent")?; + let 
bitfield_bytes: Vec = self + .get_decodable(ATTESTATION_BITFIELD_ENR_KEY) + .ok_or("ENR attestation bitfield non-existent")? + .map_err(|_| "Invalid RLP Encoding")?; - BitVector::::from_ssz_bytes(bitfield_bytes) + BitVector::::from_ssz_bytes(&bitfield_bytes) .map_err(|_| "Could not decode the ENR attnets bitfield") } fn sync_committee_bitfield( &self, ) -> Result, &'static str> { - let bitfield_bytes = self - .get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) - .ok_or("ENR sync committee bitfield non-existent")?; + let bitfield_bytes: Vec = self + .get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + .ok_or("ENR sync committee bitfield non-existent")? + .map_err(|_| "Invalid RLP Encoding")?; - BitVector::::from_ssz_bytes(bitfield_bytes) + BitVector::::from_ssz_bytes(&bitfield_bytes) .map_err(|_| "Could not decode the ENR syncnets bitfield") } @@ -78,9 +80,12 @@ impl Eth2Enr for Enr { } fn eth2(&self) -> Result { - let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?; + let eth2_bytes: Vec = self + .get_decodable(ETH2_ENR_KEY) + .ok_or("ENR has no eth2 field")? + .map_err(|_| "Invalid RLP Encoding")?; - EnrForkId::from_ssz_bytes(eth2_bytes).map_err(|_| "Could not decode EnrForkId") + EnrForkId::from_ssz_bytes(ð2_bytes).map_err(|_| "Could not decode EnrForkId") } } @@ -270,16 +275,16 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && local_enr.quic4() == disk_enr.quic4() && local_enr.quic6() == disk_enr.quic6() // must match on the same fork - && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) + && local_enr.get_decodable::>(ETH2_ENR_KEY) == disk_enr.get_decodable(ETH2_ENR_KEY) // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and // PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will // likely only be true for non-validating nodes. - && local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) - && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) - && local_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + && local_enr.get_decodable::>(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) + && local_enr.get_decodable::>(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + && local_enr.get_decodable::>(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index c92a8bd2b45..3356dd3cf78 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1072,10 +1072,7 @@ impl NetworkBehaviour for Discovery { // NOTE: We assume libp2p itself can keep track of IP changes and we do // not inform it about IP changes found via discovery. } - discv5::Event::EnrAdded { .. } - | discv5::Event::TalkRequest(_) - | discv5::Event::NodeInserted { .. } - | discv5::Event::SessionEstablished { .. 
} => {} // Ignore all other discv5 server events + _ => {} // Ignore all other discv5 server events } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index f6b63e6de22..08d9e5209c8 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,5 +1,5 @@ use crate::discovery::enr::PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY; -use crate::discovery::CombinedKey; +use crate::discovery::{peer_id_to_node_id, CombinedKey}; use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Gossipsub, PeerId}; use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; @@ -723,6 +723,17 @@ impl PeerDB { .map(|csc| csc.into()) .collect(); peer_info.set_custody_subnets(all_subnets); + } else { + let peer_info = self.peers.get_mut(&peer_id).expect("peer exists"); + let node_id = peer_id_to_node_id(&peer_id).expect("convert peer_id to node_id"); + let subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id.raw(), + spec.custody_requirement, + spec, + ) + .expect("should compute custody subnets") + .collect(); + peer_info.set_custody_subnets(subnets); } peer_id diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 6f338ebc8be..08e55e50c9c 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -4,7 +4,7 @@ use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol}; -use super::{RPCReceived, RPCSend, ReqId}; +use super::{RPCReceived, RPCResponse, RPCSend, ReqId}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; @@ -14,7 +14,8 @@ use libp2p::swarm::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; -use libp2p::swarm::Stream; +use libp2p::swarm::{ConnectionId, Stream}; +use libp2p::PeerId; use slog::{crit, debug, trace}; use smallvec::SmallVec; use std::{ @@ -88,6 +89,12 @@ pub struct RPCHandler where E: EthSpec, { + /// This `ConnectionId`. + id: ConnectionId, + + /// The matching `PeerId` of this connection. + peer_id: PeerId, + /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, ()>, @@ -218,12 +225,16 @@ where E: EthSpec, { pub fn new( + id: ConnectionId, + peer_id: PeerId, listen_protocol: SubstreamProtocol, ()>, fork_context: Arc, log: &slog::Logger, resp_timeout: Duration, ) -> Self { RPCHandler { + id, + peer_id, listen_protocol, events_out: SmallVec::new(), dial_queue: SmallVec::new(), @@ -892,6 +903,15 @@ where self.shutdown(None); } + // If we received a Ping, we queue a Pong response. 
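+        // Answering directly in the connection handler replaces the service-level `pong()`
+        // helper (removed from `service/mod.rs` in this change); the request is still pushed
+        // to `events_out` below so the peer manager can record the peer's sequence number.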
+ if let InboundRequest::Ping(ping) = req { + trace!(self.log, "Received Ping, queueing Pong";"connection_id" => %self.id, "peer_id" => %self.peer_id); + self.send_response( + self.current_inbound_substream_id, + RPCCodedResponse::Success(RPCResponse::Pong(ping)), + ); + } + self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( self.current_inbound_substream_id, req, diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index a96b9d1b166..6e1ba9cd302 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -81,7 +81,7 @@ pub struct StatusMessage { } /// The PING request/response message. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] +#[derive(Encode, Decode, Copy, Clone, Debug, PartialEq)] pub struct Ping { /// The metadata sequence number. pub data: u64, diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index cd591554a36..eae206e022d 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -106,7 +106,7 @@ pub struct RPCMessage { /// Handler managing this message. pub conn_id: ConnectionId, /// The message that was sent. - pub event: HandlerEvent, + pub message: Result, HandlerErr>, } type BehaviourAction = ToSwarm, RPCSend>; @@ -245,6 +245,8 @@ where .log .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( + connection_id, + peer_id, protocol, self.fork_context.clone(), &log, @@ -278,6 +280,8 @@ where .new(slog::o!("peer_id" => peer_id.to_string(), "connection_id" => connection_id.to_string())); let handler = RPCHandler::new( + connection_id, + peer_id, protocol, self.fork_context.clone(), &log, @@ -311,7 +315,7 @@ where let error_msg = ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id: connection_id, - event: HandlerEvent::Err(HandlerErr::Outbound { + message: Err(HandlerErr::Outbound { id, proto, error: RPCError::Disconnected, @@ -332,7 +336,7 @@ where *event = ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id: connection_id, - event: HandlerEvent::Err(HandlerErr::Outbound { + message: Err(HandlerErr::Outbound { id: *request_id, proto: req.versioned_protocol().protocol(), error: RPCError::Disconnected, @@ -351,16 +355,16 @@ where event: ::ToBehaviour, ) { match event { - HandlerEvent::Ok(RPCReceived::Request(ref id, ref req)) => { + HandlerEvent::Ok(RPCReceived::Request(id, req)) => { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota - match limiter.allows(&peer_id, req) { + match limiter.allows(&peer_id, &req) { Ok(()) => { // send the event to the user self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, - event, + message: Ok(RPCReceived::Request(id, req)), })) } Err(RateLimitedErr::TooLarge) => { @@ -384,7 +388,7 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, *id), + (conn_id, id), RPCCodedResponse::Error( RPCResponseErrorCode::RateLimited, "Rate limited. 
Request too large".into(), @@ -398,7 +402,7 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, *id), + (conn_id, id), RPCCodedResponse::Error( RPCResponseErrorCode::RateLimited, format!("Wait {:?}", wait_time).into(), @@ -411,24 +415,31 @@ where self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, - event, + message: Ok(RPCReceived::Request(id, req)), })) } } - HandlerEvent::Close(_) => { - // Handle the close event here. - self.events.push(ToSwarm::CloseConnection { + HandlerEvent::Ok(rpc) => { + self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, - connection: CloseConnection::All, - }); + conn_id, + message: Ok(rpc), + })); } - _ => { + HandlerEvent::Err(err) => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, - event, + message: Err(err), })); } + HandlerEvent::Close(_) => { + // Handle the close event here. + self.events.push(ToSwarm::CloseConnection { + peer_id, + connection: CloseConnection::All, + }); + } } } @@ -463,8 +474,8 @@ where serializer: &mut dyn slog::Serializer, ) -> slog::Result { serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?; - match &self.event { - HandlerEvent::Ok(received) => { + match &self.message { + Ok(received) => { let (msg_kind, protocol) = match received { RPCReceived::Request(_, req) => { ("request", req.versioned_protocol().protocol()) @@ -485,7 +496,7 @@ where serializer.emit_str("msg_kind", msg_kind)?; serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; } - HandlerEvent::Err(error) => { + Err(error) => { let (msg_kind, protocol) = match &error { HandlerErr::Inbound { proto, .. } => ("inbound_err", *proto), HandlerErr::Outbound { proto, .. } => ("outbound_err", *proto), @@ -493,9 +504,6 @@ where serializer.emit_str("msg_kind", msg_kind)?; serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; } - HandlerEvent::Close(err) => { - serializer.emit_arguments("handler_close", &format_args!("{}", err))?; - } }; slog::Result::Ok(()) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index d97b52f79f1..a97157ff0a4 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -10,7 +10,11 @@ use crate::peer_manager::{ }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; -use crate::rpc::*; +use crate::rpc::{ + methods, BlocksByRangeRequest, GoodbyeReason, HandlerErr, InboundRequest, NetworkParams, + OutboundRequest, Protocol, RPCCodedResponse, RPCError, RPCMessage, RPCReceived, RPCResponse, + RPCResponseErrorCode, ResponseTermination, RPC, +}; use crate::service::behaviour::BehaviourEvent; pub use crate::service::behaviour::Gossipsub; use crate::types::{ @@ -1128,16 +1132,6 @@ impl Network { .send_request(peer_id, id, OutboundRequest::Ping(ping)); } - /// Sends a Pong response to the peer. - fn pong(&mut self, id: PeerRequestId, peer_id: PeerId) { - let ping = crate::rpc::Ping { - data: *self.network_globals.local_metadata.read().seq_number(), - }; - trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => %peer_id); - let event = RPCCodedResponse::Success(RPCResponse::Pong(ping)); - self.eth2_rpc_mut().send_response(peer_id, id, event); - } - /// Sends a METADATA request to a peer. 
fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = if self.fork_context.spec.is_peer_das_scheduled() { @@ -1406,10 +1400,7 @@ impl Network { let peer_id = event.peer_id; // Do not permit Inbound events from peers that are being disconnected, or RPC requests. - if !self.peer_manager().is_connected(&peer_id) - && (matches!(event.event, HandlerEvent::Err(HandlerErr::Inbound { .. })) - || matches!(event.event, HandlerEvent::Ok(RPCReceived::Request(..)))) - { + if !self.peer_manager().is_connected(&peer_id) { debug!( self.log, "Ignoring rpc message of disconnecting peer"; @@ -1420,8 +1411,8 @@ impl Network { let handler_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated - match event.event { - HandlerEvent::Err(handler_err) => { + match event.message { + Err(handler_err) => { match handler_err { HandlerErr::Inbound { id: _, @@ -1456,15 +1447,13 @@ impl Network { } } } - HandlerEvent::Ok(RPCReceived::Request(id, request)) => { + Ok(RPCReceived::Request(id, request)) => { let peer_request_id = (handler_id, id); match request { /* Behaviour managed protocols: Ping and Metadata */ InboundRequest::Ping(ping) => { // inform the peer manager and send the response self.peer_manager_mut().ping_request(&peer_id, ping.data); - // send a ping response - self.pong(peer_request_id, peer_id); None } InboundRequest::MetaData(req) => { @@ -1587,7 +1576,7 @@ impl Network { } } } - HandlerEvent::Ok(RPCReceived::Response(id, resp)) => { + Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ RPCResponse::Pong(ping) => { @@ -1640,7 +1629,7 @@ impl Network { ), } } - HandlerEvent::Ok(RPCReceived::EndOfStream(id, termination)) => { + Ok(RPCReceived::EndOfStream(id, termination)) => { let response = match termination { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), @@ -1651,10 +1640,6 @@ impl Network { }; self.build_response(id, peer_id, response) } - HandlerEvent::Close(_) => { - // NOTE: This is handled in the RPC behaviour. 
- None - } } } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 192fdd644c3..6a81eb33f08 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -33,7 +33,7 @@ tokio-stream = { workspace = true } smallvec = { workspace = true } rand = { workspace = true } fnv = { workspace = true } -rlp = "0.5.0" +alloy-rlp = { workspace = true } lighthouse_metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index dde6f2e3130..0c98f5c17e5 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -394,7 +394,7 @@ impl NetworkBeaconProcessor { Ok(Some((bootstrap, _))) => Ok(Arc::new(bootstrap)), Ok(None) => Err(( RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not available", + "Bootstrap not available".to_string(), )), Err(e) => { error!(self.log, "Error getting LightClientBootstrap instance"; @@ -404,7 +404,7 @@ impl NetworkBeaconProcessor { ); Err(( RPCResponseErrorCode::ResourceUnavailable, - "Bootstrap not available", + format!("{:?}", e), )) } }, @@ -429,7 +429,7 @@ impl NetworkBeaconProcessor { Some(update) => Ok(Arc::new(update)), None => Err(( RPCResponseErrorCode::ResourceUnavailable, - "Latest optimistic update not available", + "Latest optimistic update not available".to_string(), )), }, Response::LightClientOptimisticUpdate, @@ -453,7 +453,7 @@ impl NetworkBeaconProcessor { Some(update) => Ok(Arc::new(update)), None => Err(( RPCResponseErrorCode::ResourceUnavailable, - "Latest finality update not available", + "Latest finality update not available".to_string(), )), }, Response::LightClientFinalityUpdate, @@ -1081,7 +1081,7 @@ impl NetworkBeaconProcessor { &self, peer_id: PeerId, request_id: PeerRequestId, - result: Result, + result: Result, into_response: F, ) { match result { @@ -1096,7 +1096,7 @@ impl NetworkBeaconProcessor { }); } Err((error_code, reason)) => { - self.send_error_response(peer_id, error_code, reason.into(), request_id); + self.send_error_response(peer_id, error_code, reason, request_id); } } } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 40c69a0baa5..391175ccd41 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -467,10 +467,11 @@ impl TestRig { /// /// Given the described logic, `expected` must not contain `WORKER_FREED` or `NOTHING_TO_DO` /// events. 
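+    /// Expected entries are given as `WorkType` variants and are converted to their journal
+    /// string form internally.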
- pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { - assert!(expected + pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[WorkType]) { + let expected = expected .iter() - .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); + .map(|ev| ev.into()) + .collect::>(); let mut events = Vec::with_capacity(expected.len()); let mut worker_freed_remaining = expected.len(); @@ -517,6 +518,18 @@ impl TestRig { .await } + pub async fn assert_event_journal_completes(&mut self, expected: &[WorkType]) { + self.assert_event_journal( + &expected + .iter() + .map(|ev| Into::<&'static str>::into(ev)) + .chain(std::iter::once(WORKER_FREED)) + .chain(std::iter::once(NOTHING_TO_DO)) + .collect::>(), + ) + .await + } + /// Assert that the `BeaconProcessor` event journal is as `expected`. /// /// ## Note @@ -587,13 +600,13 @@ async fn import_gossip_block_acceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlobSidecar]) .await; } @@ -611,7 +624,7 @@ async fn import_gossip_block_acceptably_early() { "block not yet imported" ); - rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::DelayedImportBlock]) .await; assert_eq!( @@ -644,7 +657,7 @@ async fn import_gossip_block_unacceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; // Waiting for 5 seconds is a bit arbitrary, however it *should* be long enough to ensure the @@ -670,7 +683,7 @@ async fn import_gossip_block_at_current_slot() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; let num_blobs = rig @@ -682,7 +695,7 @@ async fn import_gossip_block_at_current_slot() { for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - rig.assert_event_journal(&[GOSSIP_BLOBS_SIDECAR, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipBlobSidecar]) .await; } @@ -702,7 +715,7 @@ async fn import_gossip_attestation() { rig.enqueue_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttestation]) .await; assert_eq!( @@ -728,7 +741,7 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttestation]) .await; assert_eq!( @@ -747,23 +760,23 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - events.push(GOSSIP_BLOCK); + events.push(WorkType::GossipBlock); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - events.push(GOSSIP_BLOBS_SIDECAR); + events.push(WorkType::GossipBlobSidecar); } } BlockImportMethod::Rpc => { 
rig.enqueue_rpc_block(); - events.push(RPC_BLOCK); + events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); - events.push(RPC_BLOBS); + events.push(WorkType::RpcBlobs); } } }; - events.push(UNKNOWN_BLOCK_ATTESTATION); + events.push(WorkType::UnknownBlockAttestation); rig.assert_event_journal_contains_ordered(&events).await; @@ -809,7 +822,7 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAggregate]) .await; assert_eq!( @@ -828,23 +841,23 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod match import_method { BlockImportMethod::Gossip => { rig.enqueue_gossip_block(); - events.push(GOSSIP_BLOCK); + events.push(WorkType::GossipBlock); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - events.push(GOSSIP_BLOBS_SIDECAR); + events.push(WorkType::GossipBlobSidecar); } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); - events.push(RPC_BLOCK); + events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); - events.push(RPC_BLOBS); + events.push(WorkType::RpcBlobs); } } }; - events.push(UNKNOWN_BLOCK_AGGREGATE); + events.push(WorkType::UnknownBlockAggregate); rig.assert_event_journal_contains_ordered(&events).await; @@ -887,7 +900,7 @@ async fn requeue_unknown_block_gossip_attestation_without_import() { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttestation]) .await; assert_eq!( @@ -899,7 +912,11 @@ async fn requeue_unknown_block_gossip_attestation_without_import() { // Ensure that the attestation is received back but not imported. rig.assert_event_journal_with_timeout( - &[UNKNOWN_BLOCK_ATTESTATION, WORKER_FREED, NOTHING_TO_DO], + &[ + WorkType::UnknownBlockAttestation.into(), + WORKER_FREED, + NOTHING_TO_DO, + ], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, ) .await; @@ -923,7 +940,7 @@ async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAggregate]) .await; assert_eq!( @@ -935,7 +952,11 @@ async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { // Ensure that the attestation is received back but not imported. 
rig.assert_event_journal_with_timeout( - &[UNKNOWN_BLOCK_AGGREGATE, WORKER_FREED, NOTHING_TO_DO], + &[ + WorkType::UnknownBlockAggregate.into(), + WORKER_FREED, + NOTHING_TO_DO, + ], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, ) .await; @@ -961,7 +982,7 @@ async fn import_misc_gossip_ops() { rig.enqueue_gossip_attester_slashing(); - rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipAttesterSlashing]) .await; assert_eq!( @@ -978,7 +999,7 @@ async fn import_misc_gossip_ops() { rig.enqueue_gossip_proposer_slashing(); - rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipProposerSlashing]) .await; assert_eq!( @@ -995,7 +1016,7 @@ async fn import_misc_gossip_ops() { rig.enqueue_gossip_voluntary_exit(); - rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::GossipVoluntaryExit]) .await; assert_eq!( @@ -1014,12 +1035,12 @@ async fn test_rpc_block_reprocessing() { // Insert the next block into the duplicate cache manually let handle = rig.duplicate_cache.check_and_insert(next_block_root); rig.enqueue_single_lookup_rpc_block(); - rig.assert_event_journal(&[RPC_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::RpcBlock]) .await; rig.enqueue_single_lookup_rpc_blobs(); if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 { - rig.assert_event_journal(&[RPC_BLOBS, WORKER_FREED, NOTHING_TO_DO]) + rig.assert_event_journal_completes(&[WorkType::RpcBlobs]) .await; } @@ -1033,7 +1054,7 @@ async fn test_rpc_block_reprocessing() { // the specified delay. tokio::time::sleep(QUEUED_RPC_BLOCK_DELAY).await; - rig.assert_event_journal(&[RPC_BLOCK]).await; + rig.assert_event_journal(&[WorkType::RpcBlock.into()]).await; // Add an extra delay for block processing tokio::time::sleep(Duration::from_millis(10)).await; // head should update to next block now since the duplicate @@ -1055,7 +1076,11 @@ async fn test_backfill_sync_processing() { rig.assert_no_events_for(Duration::from_millis(100)).await; // A new batch should be processed within a slot. 
rig.assert_event_journal_with_timeout( - &[CHAIN_SEGMENT_BACKFILL, WORKER_FREED, NOTHING_TO_DO], + &[ + WorkType::ChainSegmentBackfill.into(), + WORKER_FREED, + NOTHING_TO_DO, + ], rig.chain.slot_clock.slot_duration(), ) .await; @@ -1075,9 +1100,9 @@ async fn test_backfill_sync_processing_rate_limiting_disabled() { // ensure all batches are processed rig.assert_event_journal_with_timeout( &[ - CHAIN_SEGMENT_BACKFILL, - CHAIN_SEGMENT_BACKFILL, - CHAIN_SEGMENT_BACKFILL, + WorkType::ChainSegmentBackfill.into(), + WorkType::ChainSegmentBackfill.into(), + WorkType::ChainSegmentBackfill.into(), ], Duration::from_millis(100), ) diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index e1085c4f0c1..522ff0536eb 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -45,14 +45,23 @@ impl StoreItem for PersistedDht { } fn as_store_bytes(&self) -> Vec { - rlp::encode_list(&self.enrs).to_vec() + let mut buffer = Vec::::new(); + alloy_rlp::encode_list(&self.enrs, &mut buffer); + buffer } fn from_store_bytes(bytes: &[u8]) -> Result { - let rlp = rlp::Rlp::new(bytes); - let enrs: Vec = rlp - .as_list() - .map_err(|e| StoreError::RlpError(format!("{}", e)))?; + let mut enrs: Vec = Vec::new(); + let mut rlp = alloy_rlp::Rlp::new(bytes) + .map_err(|e| StoreError::RlpError(format!("Failed to decode RLP: {}", e)))?; + loop { + match rlp.get_next() { + Ok(Some(enr)) => enrs.push(enr), + Ok(None) => break, // No more list elements + Err(e) => return Err(StoreError::RlpError(format!("{}", e))), + } + } + Ok(PersistedDht { enrs }) } } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index a5e27f582af..26c1d14f020 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -710,7 +710,7 @@ impl Router { if let Err(e) = result { let work_type = match &e { mpsc::error::TrySendError::Closed(work) | mpsc::error::TrySendError::Full(work) => { - work.work_type() + work.work_type_str() } }; diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 6d852b2572d..5b4f17ac0dd 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1032,17 +1032,17 @@ impl TestRig { match response_type { ResponseType::Block => self .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::RPC_BLOCK).then_some(()) + (ev.work_type() == beacon_processor::WorkType::RpcBlock).then_some(()) }) .unwrap_or_else(|e| panic!("Expected block work event: {e}")), ResponseType::Blob => self .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::RPC_BLOBS).then_some(()) + (ev.work_type() == beacon_processor::WorkType::RpcBlobs).then_some(()) }) .unwrap_or_else(|e| panic!("Expected blobs work event: {e}")), ResponseType::CustodyColumn => self .pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::RPC_CUSTODY_COLUMN).then_some(()) + (ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn).then_some(()) }) .unwrap_or_else(|e| panic!("Expected column work event: {e}")), } @@ -1050,7 +1050,7 @@ impl TestRig { fn expect_rpc_custody_column_work_event(&mut self) { self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::RPC_CUSTODY_COLUMN { + if ev.work_type() == beacon_processor::WorkType::RpcCustodyColumn { Some(()) } else { None @@ -1061,7 +1061,7 @@ impl TestRig { fn 
expect_rpc_sample_verify_work_event(&mut self) { self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::RPC_VERIFY_DATA_COLUMNS { + if ev.work_type() == beacon_processor::WorkType::RpcVerifyDataColumn { Some(()) } else { None @@ -1072,7 +1072,7 @@ impl TestRig { fn expect_sampling_result_work(&mut self) { self.pop_received_processor_event(|ev| { - if ev.work_type() == beacon_processor::SAMPLING_RESULT { + if ev.work_type() == beacon_processor::WorkType::SamplingResult { Some(()) } else { None @@ -1103,7 +1103,7 @@ impl TestRig { match self.beacon_processor_rx.try_recv() { Ok(work) => { // Parent chain sends blocks one by one - assert_eq!(work.work_type(), beacon_processor::RPC_BLOCK); + assert_eq!(work.work_type(), beacon_processor::WorkType::RpcBlock); } other => panic!( "Expected rpc_block from chain segment process, found {:?}", @@ -1972,12 +1972,13 @@ fn sampling_happy_path() { } #[test] -#[ignore] // Ignoring due to flakiness https://github.com/sigp/lighthouse/issues/6319 fn sampling_with_retries() { let Some(mut r) = TestRig::test_setup_after_peerdas() else { return; }; r.new_connected_peers_for_peerdas(); + // Add another supernode to ensure that the node can retry. + r.new_connected_supernode_peer(); let (block, data_columns) = r.rand_block_and_data_columns(); let block_root = block.canonical_root(); r.trigger_sample_block(block_root, block.slot()); diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 94eecff42d3..0c2f59d143f 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,18 +1,14 @@ -use beacon_chain::get_block_root; -use lighthouse_network::{ - rpc::{methods::BlobsByRootRequest, BlocksByRootRequest}, - PeerId, -}; -use std::sync::Arc; use strum::IntoStaticStr; -use types::{ - blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, -}; +use types::Hash256; +pub use blobs_by_root::{ActiveBlobsByRootRequest, BlobsByRootSingleBlockRequest}; +pub use blocks_by_root::{ActiveBlocksByRootRequest, BlocksByRootSingleRequest}; pub use data_columns_by_root::{ ActiveDataColumnsByRootRequest, DataColumnsByRootSingleBlockRequest, }; +mod blobs_by_root; +mod blocks_by_root; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -25,148 +21,3 @@ pub enum LookupVerifyError { InvalidInclusionProof, DuplicateData, } - -pub struct ActiveBlocksByRootRequest { - request: BlocksByRootSingleRequest, - resolved: bool, - pub(crate) peer_id: PeerId, -} - -impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { - Self { - request, - resolved: false, - peer_id, - } - } - - /// Append a response to the single chunk request. If the chunk is valid, the request is - /// resolved immediately. 
- /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - block: Arc>, - ) -> Result>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - - let block_root = get_block_root(&block); - if self.request.0 != block_root { - return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); - } - - // Valid data, blocks by root expects a single response - self.resolved = true; - Ok(block) - } - - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NoResponseReturned) - } - } -} - -#[derive(Debug, Copy, Clone)] -pub struct BlocksByRootSingleRequest(pub Hash256); - -impl BlocksByRootSingleRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![self.0], spec) - } -} - -#[derive(Debug, Clone)] -pub struct BlobsByRootSingleBlockRequest { - pub block_root: Hash256, - pub indices: Vec, -} - -impl BlobsByRootSingleBlockRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest { - BlobsByRootRequest::new( - self.indices - .into_iter() - .map(|index| BlobIdentifier { - block_root: self.block_root, - index, - }) - .collect(), - spec, - ) - } -} - -pub struct ActiveBlobsByRootRequest { - request: BlobsByRootSingleBlockRequest, - blobs: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, -} - -impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { - Self { - request, - blobs: vec![], - resolved: false, - peer_id, - } - } - - /// Appends a chunk to this multi-item request. If all expected chunks are received, this - /// method returns `Some`, resolving the request before the stream terminator. - /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - blob: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - - let block_root = blob.block_root(); - if self.request.block_root != block_root { - return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); - } - if !blob.verify_blob_sidecar_inclusion_proof() { - return Err(LookupVerifyError::InvalidInclusionProof); - } - if !self.request.indices.contains(&blob.index) { - return Err(LookupVerifyError::UnrequestedIndex(blob.index)); - } - if self.blobs.iter().any(|b| b.index == blob.index) { - return Err(LookupVerifyError::DuplicateData); - } - - self.blobs.push(blob); - if self.blobs.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.blobs))) - } else { - Ok(None) - } - } - - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.blobs.len(), - }) - } - } - - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) - } -} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs new file mode 100644 index 00000000000..cb2b1a42ec4 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -0,0 +1,96 @@ +use lighthouse_network::{rpc::methods::BlobsByRootRequest, PeerId}; +use std::sync::Arc; +use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; + +use super::LookupVerifyError; + +#[derive(Debug, Clone)] +pub struct BlobsByRootSingleBlockRequest { + pub block_root: Hash256, + pub indices: Vec, +} + +impl BlobsByRootSingleBlockRequest { + pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest { + BlobsByRootRequest::new( + self.indices + .into_iter() + .map(|index| BlobIdentifier { + block_root: self.block_root, + index, + }) + .collect(), + spec, + ) + } +} + +pub struct ActiveBlobsByRootRequest { + request: BlobsByRootSingleBlockRequest, + blobs: Vec>>, + resolved: bool, + pub(crate) peer_id: PeerId, +} + +impl ActiveBlobsByRootRequest { + pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { + Self { + request, + blobs: vec![], + resolved: false, + peer_id, + } + } + + /// Appends a chunk to this multi-item request. If all expected chunks are received, this + /// method returns `Some`, resolving the request before the stream terminator. + /// The active request SHOULD be dropped after `add_response` returns an error + pub fn add_response( + &mut self, + blob: Arc>, + ) -> Result>>>, LookupVerifyError> { + if self.resolved { + return Err(LookupVerifyError::TooManyResponses); + } + + let block_root = blob.block_root(); + if self.request.block_root != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + if !blob.verify_blob_sidecar_inclusion_proof() { + return Err(LookupVerifyError::InvalidInclusionProof); + } + if !self.request.indices.contains(&blob.index) { + return Err(LookupVerifyError::UnrequestedIndex(blob.index)); + } + if self.blobs.iter().any(|b| b.index == blob.index) { + return Err(LookupVerifyError::DuplicateData); + } + + self.blobs.push(blob); + if self.blobs.len() >= self.request.indices.len() { + // All expected chunks received, return result early + self.resolved = true; + Ok(Some(std::mem::take(&mut self.blobs))) + } else { + Ok(None) + } + } + + pub fn terminate(self) -> Result<(), LookupVerifyError> { + if self.resolved { + Ok(()) + } else { + Err(LookupVerifyError::NotEnoughResponsesReturned { + expected: self.request.indices.len(), + actual: self.blobs.len(), + }) + } + } + + /// Mark request as resolved (= has returned something downstream) while marking this status as + /// true for future calls. 
+ pub fn resolve(&mut self) -> bool { + std::mem::replace(&mut self.resolved, true) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs new file mode 100644 index 00000000000..a15d4e39353 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -0,0 +1,60 @@ +use beacon_chain::get_block_root; +use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; +use std::sync::Arc; +use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; + +use super::LookupVerifyError; + +#[derive(Debug, Copy, Clone)] +pub struct BlocksByRootSingleRequest(pub Hash256); + +impl BlocksByRootSingleRequest { + pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![self.0], spec) + } +} + +pub struct ActiveBlocksByRootRequest { + request: BlocksByRootSingleRequest, + resolved: bool, + pub(crate) peer_id: PeerId, +} + +impl ActiveBlocksByRootRequest { + pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { + Self { + request, + resolved: false, + peer_id, + } + } + + /// Append a response to the single chunk request. If the chunk is valid, the request is + /// resolved immediately. + /// The active request SHOULD be dropped after `add_response` returns an error + pub fn add_response( + &mut self, + block: Arc>, + ) -> Result>, LookupVerifyError> { + if self.resolved { + return Err(LookupVerifyError::TooManyResponses); + } + + let block_root = get_block_root(&block); + if self.request.0 != block_root { + return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); + } + + // Valid data, blocks by root expects a single response + self.resolved = true; + Ok(block) + } + + pub fn terminate(self) -> Result<(), LookupVerifyError> { + if self.resolved { + Ok(()) + } else { + Err(LookupVerifyError::NoResponseReturned) + } + } +} diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index beb04fac28b..28dea8e4b5e 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -652,7 +652,10 @@ mod tests { fn expect_empty_processor(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - panic!("Expected empty processor. Instead got {}", work.work_type()); + panic!( + "Expected empty processor. 
Instead got {}", + work.work_type_str() + ); } Err(e) => match e { mpsc::error::TryRecvError::Empty => {} @@ -665,7 +668,7 @@ mod tests { fn expect_chain_segment(&mut self) { match self.beacon_processor_rx.try_recv() { Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::CHAIN_SEGMENT); + assert_eq!(work.work_type(), beacon_processor::WorkType::ChainSegment); } other => panic!("Expected chain segment process, found {:?}", other), } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index b26eb2bb91b..cdb18b3b9cb 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -16,6 +16,7 @@ itertools = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } types = { workspace = true } +safe_arith = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index e3b2d327b0a..c543a9c4e4a 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -59,6 +59,7 @@ pub enum Error { state_root: Hash256, slot: Slot, }, + ArithError(safe_arith::ArithError), } pub trait HandleUnavailable { @@ -129,6 +130,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: safe_arith::ArithError) -> Error { + Error::ArithError(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index a53b697ea77..bd87cdcfee6 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -27,6 +27,7 @@ use itertools::process_results; use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; +use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; @@ -36,13 +37,14 @@ use state_processing::{ SlotProcessingError, }; use std::cmp::min; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; +use types::light_client_update::CurrentSyncCommitteeProofLen; use types::*; /// On-disk database that stores finalized states efficiently. @@ -634,6 +636,143 @@ impl, Cold: ItemStore> HotColdDB .map(|payload| payload.is_some()) } + /// Get the sync committee branch for the given block root + /// Note: we only persist sync committee branches for checkpoint slots + pub fn get_sync_committee_branch( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + let column = DBColumn::SyncCommitteeBranch; + + if let Some(bytes) = self + .hot_db + .get_bytes(column.into(), &block_root.as_ssz_bytes())? + { + let sync_committee_branch: FixedVector = + FixedVector::from_ssz_bytes(&bytes)?; + return Ok(Some(sync_committee_branch)); + } + + Ok(None) + } + + /// Fetch sync committee by sync committee period + pub fn get_sync_committee( + &self, + sync_committee_period: u64, + ) -> Result>, Error> { + let column = DBColumn::SyncCommittee; + + if let Some(bytes) = self + .hot_db + .get_bytes(column.into(), &sync_committee_period.as_ssz_bytes())? 
+ { + let sync_committee: SyncCommittee = SyncCommittee::from_ssz_bytes(&bytes)?; + return Ok(Some(sync_committee)); + } + + Ok(None) + } + + pub fn store_sync_committee_branch( + &self, + block_root: Hash256, + sync_committee_branch: &FixedVector, + ) -> Result<(), Error> { + let column = DBColumn::SyncCommitteeBranch; + self.hot_db.put_bytes( + column.into(), + &block_root.as_ssz_bytes(), + &sync_committee_branch.as_ssz_bytes(), + )?; + Ok(()) + } + + pub fn store_sync_committee( + &self, + sync_committee_period: u64, + sync_committee: &SyncCommittee, + ) -> Result<(), Error> { + let column = DBColumn::SyncCommittee; + self.hot_db.put_bytes( + column.into(), + &sync_committee_period.to_le_bytes(), + &sync_committee.as_ssz_bytes(), + )?; + + Ok(()) + } + + pub fn get_light_client_update( + &self, + sync_committee_period: u64, + ) -> Result>, Error> { + let column = DBColumn::LightClientUpdate; + let res = self + .hot_db + .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; + + if let Some(light_client_update_bytes) = res { + let epoch = sync_committee_period + .safe_mul(self.spec.epochs_per_sync_committee_period.into())?; + + let fork_name = self.spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name)?; + + return Ok(Some(light_client_update)); + } + + Ok(None) + } + + pub fn get_light_client_updates( + &self, + start_period: u64, + count: u64, + ) -> Result>, Error> { + let column = DBColumn::LightClientUpdate; + let mut light_client_updates = vec![]; + for res in self + .hot_db + .iter_column_from::>(column, &start_period.to_le_bytes()) + { + let (sync_committee_bytes, light_client_update_bytes) = res?; + let sync_committee_period = u64::from_ssz_bytes(&sync_committee_bytes)?; + let epoch = sync_committee_period + .safe_mul(self.spec.epochs_per_sync_committee_period.into())?; + + let fork_name = self.spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name)?; + + light_client_updates.push(light_client_update); + + if sync_committee_period >= start_period + count { + break; + } + } + Ok(light_client_updates) + } + + pub fn store_light_client_update( + &self, + sync_committee_period: u64, + light_client_update: &LightClientUpdate, + ) -> Result<(), Error> { + let column = DBColumn::LightClientUpdate; + + self.hot_db.put_bytes( + column.into(), + &sync_committee_period.to_le_bytes(), + &light_client_update.as_ssz_bytes(), + )?; + + Ok(()) + } + /// Check if the blobs for a block exists on disk. 
pub fn blobs_exist(&self, block_root: &Hash256) -> Result { self.blobs_db @@ -1037,6 +1176,14 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); } + StoreOp::DeleteSyncCommitteeBranch(block_root) => { + let key = get_key_for_col( + DBColumn::SyncCommitteeBranch.into(), + block_root.as_slice(), + ); + key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + } + StoreOp::KeyValueOp(kv_op) => { key_value_batch.push(kv_op); } @@ -1182,6 +1329,8 @@ impl, Cold: ItemStore> HotColdDB StoreOp::DeleteExecutionPayload(_) => (), + StoreOp::DeleteSyncCommitteeBranch(_) => (), + StoreOp::KeyValueOp(_) => (), } } @@ -2816,12 +2965,16 @@ pub fn migrate_database, Cold: ItemStore>( .into()); } + // finalized_state.slot() must be at an epoch boundary + // else we may introduce bugs to the migration/pruning logic if finalized_state.slot() % E::slots_per_epoch() != 0 { return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } let mut hot_db_ops = vec![]; let mut cold_db_ops = vec![]; + let mut epoch_boundary_blocks = HashSet::new(); + let mut non_checkpoint_block_roots = HashSet::new(); // Chunk writer for the linear block roots in the freezer DB. // Start at the new upper limit because we iterate backwards. @@ -2849,6 +3002,22 @@ pub fn migrate_database, Cold: ItemStore>( hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); } + // At a missed slot, `state_root_iter` will return the block root + // from the previous non-missed slot. This ensures that the block root at an + // epoch boundary is always a checkpoint block root. We keep track of block roots + // at epoch boundaries by storing them in the `epoch_boundary_blocks` hash set. + // We then ensure that block roots at the epoch boundary aren't included in the + // `non_checkpoint_block_roots` hash set. + if slot % E::slots_per_epoch() == 0 { + epoch_boundary_blocks.insert(block_root); + } else { + non_checkpoint_block_roots.insert(block_root); + } + + if epoch_boundary_blocks.contains(&block_root) { + non_checkpoint_block_roots.remove(&block_root); + } + // Delete the old summary, and the full state if we lie on an epoch boundary. hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); @@ -2888,6 +3057,19 @@ pub fn migrate_database, Cold: ItemStore>( } } + // Prune sync committee branch data for all non checkpoint block roots. + // Note that `non_checkpoint_block_roots` should only contain non checkpoint block roots + // as long as `finalized_state.slot()` is at an epoch boundary. If this were not the case + // we risk the chance of pruning a `sync_committee_branch` for a checkpoint block root. + // E.g. if `current_split_slot` = (Epoch A slot 0) and `finalized_state.slot()` = (Epoch C slot 31) + // and (Epoch D slot 0) is a skipped slot, we will have pruned a `sync_committee_branch` + // for a checkpoint block root. + non_checkpoint_block_roots + .into_iter() + .for_each(|block_root| { + hot_db_ops.push(StoreOp::DeleteSyncCommitteeBranch(block_root)); + }); + // Finish writing the block roots and commit the remaining cold DB ops. block_root_writer.write(&mut cold_db_ops)?; store.cold_db.do_atomically(cold_db_ops)?; @@ -2904,7 +3086,6 @@ pub fn migrate_database, Cold: ItemStore>( // Flush to disk all the states that have just been migrated to the cold store. 
     store.cold_db.sync()?;

-    {
     let mut split_guard = store.split.write();
     let latest_split_slot = split_guard.slot;
diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs
index 60dddeb1760..1d02bfbb3cc 100644
--- a/beacon_node/store/src/lib.rs
+++ b/beacon_node/store/src/lib.rs
@@ -241,6 +241,7 @@ pub enum StoreOp<'a, E: EthSpec> {
     DeleteDataColumns(Hash256, Vec<ColumnIndex>),
     DeleteState(Hash256, Option<Slot>),
     DeleteExecutionPayload(Hash256),
+    DeleteSyncCommitteeBranch(Hash256),
     KeyValueOp(KeyValueStoreOp),
 }
@@ -303,6 +304,12 @@ pub enum DBColumn {
     /// For persisting eagerly computed light client data
     #[strum(serialize = "lcu")]
     LightClientUpdate,
+    /// For helping persist eagerly computed light client bootstrap data
+    #[strum(serialize = "scb")]
+    SyncCommitteeBranch,
+    /// For helping persist eagerly computed light client bootstrap data
+    #[strum(serialize = "scm")]
+    SyncCommittee,
 }

 /// A block from the database, which might have an execution payload or not.
@@ -346,6 +353,8 @@ impl DBColumn {
             | Self::BeaconHistoricalRoots
             | Self::BeaconHistoricalSummaries
             | Self::BeaconRandaoMixes
+            | Self::SyncCommittee
+            | Self::SyncCommitteeBranch
             | Self::LightClientUpdate => 8,
             Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE,
         }
diff --git a/boot_node/src/server.rs b/boot_node/src/server.rs
index 286fa9e0f0f..00738462e0d 100644
--- a/boot_node/src/server.rs
+++ b/boot_node/src/server.rs
@@ -145,16 +145,13 @@ pub async fn run<E: EthSpec>(
             Some(event) = event_stream.recv() => {
                 match event {
                     discv5::Event::Discovered(_enr) => {
-                        // An ENR has bee obtained by the server
+                        // An ENR has been obtained by the server
                         // Ignore these events here
                     }
-                    discv5::Event::EnrAdded { .. } => {} // Ignore
-                    discv5::Event::TalkRequest(_) => {} // Ignore
-                    discv5::Event::NodeInserted { .. } => {} // Ignore
                     discv5::Event::SocketUpdated(socket_addr) => {
                         info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr);
                     }
-                    discv5::Event::SessionEstablished{ .. } => {} // Ignore
+                    _ => {} // Ignore
                 }
             }
         }
diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs
index f52913dd001..2a1e99defaf 100644
--- a/common/lighthouse_metrics/src/lib.rs
+++ b/common/lighthouse_metrics/src/lib.rs
@@ -283,6 +283,12 @@ pub fn stop_timer(timer: Option<HistogramTimer>) {
     }
 }

+pub fn observe_vec(vec: &Result<HistogramVec>, name: &[&str], value: f64) {
+    if let Some(h) = get_histogram(vec, name) {
+        h.observe(value)
+    }
+}
+
 pub fn inc_counter(counter: &Result<IntCounter>) {
     if let Ok(counter) = counter {
         counter.inc();
diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs
index ecea0b554e0..a316c55bef3 100644
--- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs
+++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs
@@ -45,6 +45,12 @@ impl AttestationDelta {
     }
 }

+#[derive(Debug)]
+pub enum ProposerRewardCalculation {
+    Include,
+    Exclude,
+}
+
 /// Apply attester and proposer rewards.
 pub fn process_rewards_and_penalties<E: EthSpec>(
     state: &mut BeaconState<E>,
@@ -62,7 +68,12 @@ pub fn process_rewards_and_penalties<E: EthSpec>(
         return Err(Error::ValidatorStatusesInconsistent);
     }

-    let deltas = get_attestation_deltas_all(state, validator_statuses, spec)?;
+    let deltas = get_attestation_deltas_all(
+        state,
+        validator_statuses,
+        ProposerRewardCalculation::Include,
+        spec,
+    )?;

     // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0
     // instead).
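Side note on the `ProposerRewardCalculation` change above: the enum turns a previously implicit behaviour (the proposer inclusion-delay reward was always folded in) into an explicit argument threaded through `get_attestation_deltas`. The sketch below is not Lighthouse code; `Delta`, `apply_proposer_delta`, and the reward values are invented purely to illustrate how the `Include`/`Exclude` choice gates that one component of the deltas.

// Standalone sketch, not Lighthouse code. Types and values are invented.
#[derive(Debug)]
pub enum ProposerRewardCalculation {
    Include,
    Exclude,
}

#[derive(Debug, Default, Clone)]
struct Delta {
    inclusion_delay_reward: u64,
}

fn apply_proposer_delta(
    deltas: &mut [Delta],
    proposer_delta: Option<(usize, u64)>,
    proposer_reward: ProposerRewardCalculation,
) {
    // Mirrors the `if let ProposerRewardCalculation::Include` guard added to
    // `get_attestation_deltas`: the proposer component is skipped entirely
    // when the caller asks for `Exclude`.
    if let ProposerRewardCalculation::Include = proposer_reward {
        if let Some((proposer_index, reward)) = proposer_delta {
            if let Some(delta) = deltas.get_mut(proposer_index) {
                delta.inclusion_delay_reward += reward;
            }
        }
    }
}

fn main() {
    let mut deltas = vec![Delta::default(); 4];
    apply_proposer_delta(&mut deltas, Some((2, 40)), ProposerRewardCalculation::Include);
    assert_eq!(deltas[2].inclusion_delay_reward, 40);

    apply_proposer_delta(&mut deltas, Some((3, 40)), ProposerRewardCalculation::Exclude);
    assert_eq!(deltas[3].inclusion_delay_reward, 0);
}

Using a two-variant enum rather than a bool keeps call sites such as `get_attestation_deltas_all(state, statuses, ProposerRewardCalculation::Include, spec)` self-describing.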
@@ -79,9 +90,10 @@ pub fn process_rewards_and_penalties<E: EthSpec>(
 pub fn get_attestation_deltas_all<E: EthSpec>(
     state: &BeaconState<E>,
     validator_statuses: &ValidatorStatuses,
+    proposer_reward: ProposerRewardCalculation,
     spec: &ChainSpec,
 ) -> Result<Vec<AttestationDelta>, Error> {
-    get_attestation_deltas(state, validator_statuses, None, spec)
+    get_attestation_deltas(state, validator_statuses, proposer_reward, None, spec)
 }

 /// Apply rewards for participation in attestations during the previous epoch, and only compute
@@ -89,10 +101,18 @@ pub fn get_attestation_deltas_all<E: EthSpec>(
 pub fn get_attestation_deltas_subset<E: EthSpec>(
     state: &BeaconState<E>,
     validator_statuses: &ValidatorStatuses,
+    proposer_reward: ProposerRewardCalculation,
     validators_subset: &Vec<usize>,
     spec: &ChainSpec,
 ) -> Result<Vec<(usize, AttestationDelta)>, Error> {
-    get_attestation_deltas(state, validator_statuses, Some(validators_subset), spec).map(|deltas| {
+    get_attestation_deltas(
+        state,
+        validator_statuses,
+        proposer_reward,
+        Some(validators_subset),
+        spec,
+    )
+    .map(|deltas| {
         deltas
             .into_iter()
             .enumerate()
@@ -109,6 +129,7 @@ pub fn get_attestation_deltas_subset<E: EthSpec>(
 fn get_attestation_deltas<E: EthSpec>(
     state: &BeaconState<E>,
     validator_statuses: &ValidatorStatuses,
+    proposer_reward: ProposerRewardCalculation,
     maybe_validators_subset: Option<&Vec<usize>>,
     spec: &ChainSpec,
 ) -> Result<Vec<AttestationDelta>, Error> {
@@ -169,13 +190,15 @@ fn get_attestation_deltas<E: EthSpec>(
                 .combine(inactivity_penalty_delta)?;
         }

-        if let Some((proposer_index, proposer_delta)) = proposer_delta {
-            if include_validator_delta(proposer_index) {
-                deltas
-                    .get_mut(proposer_index)
-                    .ok_or(Error::ValidatorStatusesInconsistent)?
-                    .inclusion_delay_delta
-                    .combine(proposer_delta)?;
+        if let ProposerRewardCalculation::Include = proposer_reward {
+            if let Some((proposer_index, proposer_delta)) = proposer_delta {
+                if include_validator_delta(proposer_index) {
+                    deltas
+                        .get_mut(proposer_index)
+                        .ok_or(Error::ValidatorStatusesInconsistent)?
+                        .inclusion_delay_delta
+                        .combine(proposer_delta)?;
+                }
             }
         }
     }
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index afc64e86a82..281a84d8592 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -197,7 +197,7 @@ pub use crate::light_client_optimistic_update::{
     LightClientOptimisticUpdateElectra,
 };
 pub use crate::light_client_update::{
-    Error as LightClientError, LightClientUpdate, LightClientUpdateAltair,
+    Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair,
     LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra,
 };
 pub use crate::participation_flags::ParticipationFlags;
diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs
index f06a94adce9..7c716e6bb2d 100644
--- a/consensus/types/src/light_client_bootstrap.rs
+++ b/consensus/types/src/light_client_bootstrap.rs
@@ -112,6 +112,42 @@ impl<E: EthSpec> LightClientBootstrap<E> {
         fixed_len + LightClientHeader::<E>::ssz_max_var_len_for_fork(fork_name)
     }

+    pub fn new(
+        block: &SignedBlindedBeaconBlock<E>,
+        current_sync_committee: Arc<SyncCommittee<E>>,
+        current_sync_committee_branch: FixedVector<Hash256, CurrentSyncCommitteeProofLen>,
+        chain_spec: &ChainSpec,
+    ) -> Result<Self, Error> {
+        let light_client_bootstrap = match block
+            .fork_name(chain_spec)
+            .map_err(|_| Error::InconsistentFork)?
+        {
+            ForkName::Base => return Err(Error::AltairForkNotActive),
+            ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair {
+                header: LightClientHeaderAltair::block_to_light_client_header(block)?,
+                current_sync_committee,
+                current_sync_committee_branch,
+            }),
+            ForkName::Capella => Self::Capella(LightClientBootstrapCapella {
+                header: LightClientHeaderCapella::block_to_light_client_header(block)?,
+                current_sync_committee,
+                current_sync_committee_branch,
+            }),
+            ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb {
+                header: LightClientHeaderDeneb::block_to_light_client_header(block)?,
+                current_sync_committee,
+                current_sync_committee_branch,
+            }),
+            ForkName::Electra => Self::Electra(LightClientBootstrapElectra {
+                header: LightClientHeaderElectra::block_to_light_client_header(block)?,
+                current_sync_committee,
+                current_sync_committee_branch,
+            }),
+        };
+
+        Ok(light_client_bootstrap)
+    }
+
     pub fn from_beacon_state(
         beacon_state: &mut BeaconState<E>,
         block: &SignedBlindedBeaconBlock<E>,
diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs
index ea75c69c35f..c5879f5c9cc 100644
--- a/testing/ef_tests/src/cases/rewards.rs
+++ b/testing/ef_tests/src/cases/rewards.rs
@@ -5,6 +5,7 @@ use compare_fields_derive::CompareFields;
 use serde::Deserialize;
 use ssz::four_byte_option_impl;
 use ssz_derive::{Decode, Encode};
+use state_processing::per_epoch_processing::base::rewards_and_penalties::ProposerRewardCalculation;
 use state_processing::{
     per_epoch_processing::{
         altair,
@@ -130,6 +131,7 @@ impl<E: EthSpec> Case for RewardsTest<E> {
             let deltas = base::rewards_and_penalties::get_attestation_deltas_all(
                 &state,
                 &validator_statuses,
+                ProposerRewardCalculation::Include,
                 spec,
             )?;
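A note on the pruning rule added to `migrate_database` in `hot_cold_store.rs` earlier in this diff: sync committee branches are deleted only for block roots that never occur at an epoch-boundary slot, because boundary roots are the checkpoint roots that light client bootstraps are built from, and the state-root iterator repeats the previous block root across skipped slots. The sketch below is not Lighthouse code; it models block roots as plain `u64` values and hard-codes `SLOTS_PER_EPOCH = 32` to show how the two hash sets interact.

// Standalone sketch, not Lighthouse code. Roots are plain u64s for brevity.
use std::collections::HashSet;

const SLOTS_PER_EPOCH: u64 = 32;

/// Returns the block roots whose sync committee branch may be pruned.
fn prunable_roots(roots_by_slot: &[(u64, u64)]) -> HashSet<u64> {
    let mut epoch_boundary_blocks = HashSet::new();
    let mut non_checkpoint_block_roots = HashSet::new();

    for &(slot, block_root) in roots_by_slot {
        // A root seen at an epoch-boundary slot is a checkpoint root.
        if slot % SLOTS_PER_EPOCH == 0 {
            epoch_boundary_blocks.insert(block_root);
        } else {
            non_checkpoint_block_roots.insert(block_root);
        }

        // Never treat a checkpoint root as prunable.
        if epoch_boundary_blocks.contains(&block_root) {
            non_checkpoint_block_roots.remove(&block_root);
        }
    }

    non_checkpoint_block_roots
}

fn main() {
    // Root 7 is the block at slot 31 and also fills boundary slot 32 (skipped),
    // so it keeps its branch; roots 5 and 9 only appear mid-epoch and are pruned.
    let roots = [(30, 5), (31, 7), (32, 7), (33, 9)];
    let prunable = prunable_roots(&roots);
    assert!(prunable.contains(&5));
    assert!(prunable.contains(&9));
    assert!(!prunable.contains(&7));
}

Because the membership check runs after every insertion, a root first seen at a non-boundary slot and only later at a boundary slot (the skipped-slot case) still ends up protected, regardless of iteration direction.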