diff --git a/.clippy.toml b/.clippy.toml index 6c16172bd..a670f5023 100644 --- a/.clippy.toml +++ b/.clippy.toml @@ -1,2 +1,14 @@ allow-print-in-tests = true -allow-dbg-in-tests = true \ No newline at end of file +allow-dbg-in-tests = true + +# https://rust-lang.github.io/rust-clippy/master/index.html#disallowed_methods +disallowed-methods = [ + { path = "std::fs::File::create", reason = "use tycho_storage::FileDb instead" }, + { path = "std::fs::File::create_new", reason = "use tycho_storage::FileDb instead" }, + { path = "std::fs::File::open", reason = "use tycho_storage::FileDb instead" }, + { path = "std::fs::File::options", reason = "use tycho_storage::FileDb instead" }, +] + +disallowed-types = [ + { path = "std::fs::OpenOptions", reason = "use tycho_storage::FileDb instead" }, +] \ No newline at end of file diff --git a/.gitignore b/.gitignore index 638826373..3866526a4 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,11 @@ perf.data* zerostate.json zerostate.boc + +config.json +global-config.json +keys.json +logger.json + +db/ +.temp/ diff --git a/Cargo.lock b/Cargo.lock index 9da4679ac..c49328228 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -92,6 +92,9 @@ name = "anyhow" version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +dependencies = [ + "backtrace", +] [[package]] name = "arc-swap" @@ -338,7 +341,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim", + "strsim 0.11.1", ] [[package]] @@ -347,7 +350,7 @@ version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.60", @@ -481,6 +484,41 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "darling" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.9.3", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" +dependencies = [ + "darling_core", + "quote", + "syn 1.0.109", +] + [[package]] name = "dashmap" version = "5.5.3" @@ -533,6 +571,31 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "derive_builder" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2658621297f2cf68762a6f7dc0bb7e1ff2cfd6583daef8ee0fed6f7ec468ec0" +dependencies = [ + "darling", + "derive_builder_core", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "digest" version = "0.10.7" @@ -554,6 +617,18 @@ dependencies = [ "syn 2.0.60", ] +[[package]] +name = "dns-lookup" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872" +dependencies = [ + "cfg-if", + "libc", + "socket2 0.4.10", + "winapi", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -570,6 +645,24 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +[[package]] +name = "endian-type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" + +[[package]] +name = "enum-as-inner" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "equivalent" version = "1.0.1" @@ -604,7 +697,7 @@ dependencies = [ [[package]] name = "everscale-types" version = "0.1.0-rc.6" -source = "git+https://github.com/broxus/everscale-types.git?branch=tycho#640ed863dd20e38964798ec7d9ae2bada5b2b20a" +source = "git+https://github.com/broxus/everscale-types.git?branch=tycho#98544f3ef1fde79846d1d9312048241e70374853" dependencies = [ "ahash", "base64 0.21.7", @@ -624,7 +717,7 @@ dependencies = [ [[package]] name = "everscale-types-proc" version = "0.1.4" -source = "git+https://github.com/broxus/everscale-types.git?branch=tycho#640ed863dd20e38964798ec7d9ae2bada5b2b20a" +source = "git+https://github.com/broxus/everscale-types.git?branch=tycho#98544f3ef1fde79846d1d9312048241e70374853" dependencies = [ "proc-macro2", "quote", @@ -668,12 +761,69 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" 
+version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + [[package]] name = "futures-core" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + [[package]] name = "futures-macro" version = "0.3.30" @@ -703,10 +853,13 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ + "futures-channel", "futures-core", + "futures-io", "futures-macro", "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", "slab", @@ -751,6 +904,12 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" @@ -769,12 +928,110 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "hyper" +version = "0.14.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.6", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-system-resolver" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6eea26c5d0b6ab9d72219f65000af310f042a740926f7b2fa3553e774036e2e7" +dependencies = [ + "derive_builder", + "dns-lookup", + "hyper", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "indexmap" version = "2.2.6" @@ -785,6 +1042,12 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + [[package]] name = "itertools" version = "0.12.1" @@ -915,6 +1178,12 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "matches" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" + [[package]] name = "memchr" version = "2.7.2" @@ -977,6 +1246,15 @@ dependencies = [ "uuid", ] +[[package]] +name = "nibble_vec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" +dependencies = [ + "smallvec", +] + [[package]] name = "nom" version = "7.1.3" @@ -1123,6 +1401,12 @@ dependencies = [ "serde", ] +[[package]] +name = "percent-encoding" +version = 
"2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + [[package]] name = "pest" version = "2.7.9" @@ -1178,6 +1462,26 @@ dependencies = [ "indexmap", ] +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", +] + [[package]] name = "pin-project-lite" version = "0.2.14" @@ -1249,6 +1553,27 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "public-ip" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4c40db5262d93298c363a299f8bc1b3a956a78eecddba3bc0e58b76e2f419a" +dependencies = [ + "dns-lookup", + "futures-core", + "futures-util", + "http", + "hyper", + "hyper-system-resolver", + "pin-project-lite", + "thiserror", + "tokio", + "tracing", + "tracing-futures", + "trust-dns-client", + "trust-dns-proto", +] + [[package]] name = "quanta" version = "0.12.3" @@ -1318,7 +1643,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2", + "socket2 0.5.6", "tracing", "windows-sys 0.48.0", ] @@ -1332,6 +1657,16 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radix_trie" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" +dependencies = [ + "endian-type", + "nibble_vec", +] + [[package]] name = "rand" version = "0.8.5" @@ -1714,6 +2049,16 @@ version = "1.13.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "socket2" version = "0.5.6" @@ -1752,6 +2097,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "strsim" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" + [[package]] name = "strsim" version = "0.11.1" @@ -1991,7 +2342,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -2021,6 +2372,12 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + [[package]] name = "tracing" version = "0.1.40" @@ -2065,6 +2422,18 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "futures", + "futures-task", + "pin-project", + "tracing", +] + [[package]] name = "tracing-log" version = "0.2.0" @@ -2117,6 +2486,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "trait-variant" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + 
"proc-macro2", + "quote", + "syn 2.0.60", +] + [[package]] name = "triomphe" version = "0.1.11" @@ -2127,6 +2507,57 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "trust-dns-client" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b4ef9b9bde0559b78a4abb00339143750085f05e5a453efb7b8bef1061f09dc" +dependencies = [ + "cfg-if", + "data-encoding", + "futures-channel", + "futures-util", + "lazy_static", + "log", + "radix_trie", + "rand", + "thiserror", + "time", + "tokio", + "trust-dns-proto", +] + +[[package]] +name = "trust-dns-proto" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.2.3", + "ipnet", + "lazy_static", + "log", + "rand", + "smallvec", + "thiserror", + "tinyvec", + "tokio", + "url", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + [[package]] name = "tycho-block-util" version = "0.0.1" @@ -2152,17 +2583,27 @@ dependencies = [ "clap", "everscale-crypto", "everscale-types", + "futures-util", "hex", + "libc", + "public-ip", "rand", "rustc_version", "serde", "serde_json", "serde_path_to_error", "sha2", + "tempfile", "thiserror", "tikv-jemallocator", "tokio", + "tracing", + "tracing-subscriber", + "tycho-block-util", + "tycho-collator", + "tycho-core", "tycho-network", + "tycho-storage", "tycho-util", ] @@ -2186,7 +2627,9 @@ dependencies = [ "tracing", "tracing-subscriber", "tracing-test", + "trait-variant", "tycho-block-util", + "tycho-collator", "tycho-consensus", "tycho-core", "tycho-network", @@ -2283,7 +2726,7 @@ dependencies = [ "rustls-webpki", "serde", "serde_json", - 
"socket2", + "socket2 0.5.6", "thiserror", "tl-proto", "tokio", @@ -2306,6 +2749,7 @@ dependencies = [ "rand", "serde", "serde_json", + "tycho-util", ] [[package]] @@ -2325,6 +2769,7 @@ dependencies = [ "hex", "humantime", "libc", + "metrics", "num-traits", "parking_lot", "parking_lot_core", @@ -2344,6 +2789,7 @@ dependencies = [ "tracing-test", "triomphe", "tycho-block-util", + "tycho-storage", "tycho-util", "weedb", ] @@ -2353,6 +2799,7 @@ name = "tycho-util" version = "0.0.1" dependencies = [ "ahash", + "anyhow", "castaway", "dashmap", "everscale-crypto", @@ -2362,6 +2809,8 @@ dependencies = [ "libc", "rand", "serde", + "serde_json", + "serde_path_to_error", "thiserror", "tokio", "tracing", @@ -2380,12 +2829,27 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-normalization" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-xid" version = "0.2.4" @@ -2404,6 +2868,17 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", 
+ "idna 0.5.0", + "percent-encoding", +] + [[package]] name = "utf8parse" version = "0.2.1" @@ -2437,6 +2912,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index 08b642cab..7dbda4d33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,6 +52,7 @@ num-traits = "0.2.18" parking_lot = "0.12.1" parking_lot_core = "0.9.9" pin-project-lite = "0.2" +public-ip = "0.2" pkcs8 = "0.10" quick_cache = "0.4.1" quinn = { version = "0.10", default-features = false, features = ["runtime-tokio", "tls-rustls"] } @@ -83,6 +84,7 @@ tracing = "0.1" tracing-appender = "0.2.3" tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-test = "0.2" +trait-variant = "0.1.2" triomphe = "0.1.11" weedb = "0.1.1" x509-parser = "0.15" @@ -114,6 +116,7 @@ char_lit_as_u8 = "warn" checked_conversions = "warn" dbg_macro = "warn" debug_assert_with_mut_call = "warn" +disallowed_methods = "deny" doc_markdown = "warn" empty_enum = "warn" enum_glob_use = "warn" diff --git a/README.md b/README.md index b0509229f..35ecc2cb8 100644 --- a/README.md +++ b/README.md @@ -4,11 +4,22 @@ - [rust](https://rustup.rs/) - [just](https://pkgs.org/download/just) +- [zstd](https://pkgs.org/download/zstd) +- [clang](https://pkgs.org/download/clang) +- [llvm](https://pkgs.org/download/llvm) +- [fzf](https://github.com/junegunn/fzf?tab=readme-ov-file#using-linux-package-managers) + for just help menu # testing To run tests from ci: +```bash +just ci +``` + +To interactivly choose what to run: + ```bash just ``` \ No newline at end of file diff --git 
a/block-util/src/block/block_stuff.rs b/block-util/src/block/block_stuff.rs index e02c6c437..4c7c8a456 100644 --- a/block-util/src/block/block_stuff.rs +++ b/block-util/src/block/block_stuff.rs @@ -39,7 +39,7 @@ impl BlockStuff { let cell = CellBuilder::build_from(&block).unwrap(); let root_hash = *cell.repr_hash(); - let file_hash = sha2::Sha256::digest(Boc::encode(&cell)).into(); + let file_hash = Boc::file_hash(Boc::encode(&cell)); let block_id = BlockId { shard: block_info.shard, diff --git a/block-util/src/block/top_blocks.rs b/block-util/src/block/top_blocks.rs index 965f38403..0266fa87f 100644 --- a/block-util/src/block/top_blocks.rs +++ b/block-util/src/block/top_blocks.rs @@ -64,7 +64,7 @@ impl ShardHeights { self.0.is_empty() } - pub fn iter(&self) -> impl Iterator + ExactSizeIterator + Clone + '_ { + pub fn iter(&self) -> impl ExactSizeIterator + Clone + '_ { self.0 .iter() .map(|(shard, seqno)| BlockIdShort::from((*shard, *seqno))) diff --git a/block-util/src/config/mod.rs b/block-util/src/config/mod.rs index 36aec4d3b..a06280207 100644 --- a/block-util/src/config/mod.rs +++ b/block-util/src/config/mod.rs @@ -1,6 +1,6 @@ use anyhow::Result; - -use everscale_types::{dict::Dict, models::BlockchainConfig}; +use everscale_types::dict::Dict; +use everscale_types::models::BlockchainConfig; pub trait BlockchainConfigExt { /// Check that config is valid. 
@@ -17,8 +17,8 @@ impl BlockchainConfigExt for BlockchainConfig { _relax_par0: bool, _mandatory_params: Option>, ) -> Result { - //TODO: refer to https://github.com/everx-labs/ever-block/blob/master/src/config_params.rs#L452 - //STUB: currently should not be invoked in prototype + // TODO: refer to https://github.com/everx-labs/ever-block/blob/master/src/config_params.rs#L452 + // STUB: currently should not be invoked in prototype todo!() } } diff --git a/block-util/src/state/shard_state_stuff.rs b/block-util/src/state/shard_state_stuff.rs index 3dacd2ccc..82909005d 100644 --- a/block-util/src/state/shard_state_stuff.rs +++ b/block-util/src/state/shard_state_stuff.rs @@ -23,15 +23,27 @@ impl ShardStateStuff { .map_err(From::from) } - pub fn new(block_id: BlockId, root: Cell, tracker: &MinRefMcStateTracker) -> Result { - let shard_state = root.parse::()?; - + pub fn from_root( + block_id: &BlockId, + root: Cell, + tracker: &MinRefMcStateTracker, + ) -> Result { + let shard_state = root.parse::>()?; Self::from_state_and_root(block_id, shard_state, root, tracker) } + pub fn from_state( + block_id: &BlockId, + shard_state: Box, + tracker: &MinRefMcStateTracker, + ) -> Result { + let root = CellBuilder::build_from(&shard_state)?; + ShardStateStuff::from_state_and_root(block_id, shard_state, root, tracker) + } + pub fn from_state_and_root( - block_id: BlockId, - shard_state: ShardStateUnsplit, + block_id: &BlockId, + shard_state: Box, root: Cell, tracker: &MinRefMcStateTracker, ) -> Result { @@ -48,7 +60,7 @@ impl ShardStateStuff { let handle = tracker.insert(shard_state.min_ref_mc_seqno); Ok(Self { inner: Arc::new(Inner { - block_id, + block_id: *block_id, shard_state_extra: shard_state.load_custom()?, shard_state, root, @@ -57,28 +69,27 @@ impl ShardStateStuff { }) } - pub fn deserialize_zerostate(id: BlockId, bytes: &[u8]) -> Result { - anyhow::ensure!(id.seqno == 0, "given id has a non-zero seqno"); + pub fn deserialize_zerostate(zerostate_id: &BlockId, bytes: &[u8]) 
-> Result { + anyhow::ensure!(zerostate_id.seqno == 0, "given id has a non-zero seqno"); let file_hash = sha2::Sha256::digest(bytes); anyhow::ensure!( - id.file_hash.as_slice() == file_hash.as_slice(), + zerostate_id.file_hash.as_slice() == file_hash.as_slice(), "file_hash mismatch. Expected: {}, got: {}", hex::encode(file_hash), - id.file_hash, + zerostate_id.file_hash, ); let root = Boc::decode(bytes)?; anyhow::ensure!( - &id.root_hash == root.repr_hash(), - "root_hash mismatch for {id}. Expected: {expected}, got: {got}", - id = id, - expected = id.root_hash, + &zerostate_id.root_hash == root.repr_hash(), + "root_hash mismatch for {zerostate_id}. Expected: {expected}, got: {got}", + expected = zerostate_id.root_hash, got = root.repr_hash(), ); - Self::new( - id, + Self::from_root( + zerostate_id, root, ZEROSTATE_REFS.get_or_init(MinRefMcStateTracker::new), ) @@ -118,7 +129,7 @@ impl ShardStateStuff { struct Inner { block_id: BlockId, - shard_state: ShardStateUnsplit, + shard_state: Box, shard_state_extra: Option, handle: RefMcStateHandle, root: Cell, diff --git a/cli/Cargo.toml b/cli/Cargo.toml index ce4df01fc..ca0dd06ef 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -20,21 +20,31 @@ base64 = { workspace = true } clap = { workspace = true } everscale-crypto = { workspace = true } everscale-types = { workspace = true } +futures-util = { workspace = true } hex = { workspace = true } +libc = { workspace = true } +public-ip = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_path_to_error = { workspace = true } serde_json = { workspace = true, features = ["preserve_order"] } sha2 = { workspace = true } +tempfile = { workspace = true } thiserror = { workspace = true } tikv-jemallocator = { workspace = true, features = [ "unprefixed_malloc_on_supported_platforms", "background_threads", ], optional = true } -tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio = { workspace = true, features = ["macros", 
"rt-multi-thread", "signal"] } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } # local deps +tycho-block-util = { workspace = true } +tycho-collator = { workspace = true } +tycho-core = { workspace = true } tycho-network = { workspace = true } +tycho-storage = { workspace = true } tycho-util = { workspace = true } [build-dependencies] diff --git a/cli/src/main.rs b/cli/src/main.rs index f9d1ecf49..d18b11d1e 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -11,6 +11,7 @@ mod tools { pub mod gen_zerostate; } +mod node; mod util; #[cfg(feature = "jemalloc")] @@ -50,9 +51,8 @@ impl App { #[derive(Subcommand)] enum Cmd { - Init(InitCmd), - - Run(RunCmd), + #[clap(subcommand)] + Node(NodeCmd), #[clap(subcommand)] Tool(ToolCmd), @@ -61,20 +61,25 @@ enum Cmd { impl Cmd { fn run(self) -> Result<()> { match self { - Cmd::Init(_cmd) => Ok(()), // todo - Cmd::Run(_cmd) => Ok(()), // todo + Cmd::Node(cmd) => cmd.run(), Cmd::Tool(cmd) => cmd.run(), } } } -/// Initialize a node environment -#[derive(Parser)] -struct InitCmd {} +/// Node commands +#[derive(Subcommand)] +enum NodeCmd { + Run(node::CmdRun), +} -/// Run a node -#[derive(Parser)] -struct RunCmd {} +impl NodeCmd { + fn run(self) -> Result<()> { + match self { + NodeCmd::Run(cmd) => cmd.run(), + } + } +} /// A collection of tools #[derive(Subcommand)] diff --git a/cli/src/node/config.rs b/cli/src/node/config.rs new file mode 100644 index 000000000..02f55336e --- /dev/null +++ b/cli/src/node/config.rs @@ -0,0 +1,90 @@ +use std::net::Ipv4Addr; +use std::path::Path; + +use anyhow::Result; +use everscale_crypto::ed25519; +use everscale_types::cell::HashBytes; +use serde::{Deserialize, Serialize}; +use tycho_core::block_strider::BlockchainBlockProviderConfig; +use tycho_core::blockchain_rpc::BlockchainRpcServiceConfig; +use tycho_core::overlay_client::PublicOverlayClientConfig; +use tycho_network::{DhtConfig, NetworkConfig, OverlayConfig, PeerResolverConfig}; +use tycho_storage::StorageConfig; 
+ +#[derive(Debug, Deserialize)] +pub struct NodeKeys { + pub secret: HashBytes, +} + +impl NodeKeys { + pub fn from_file>(path: P) -> Result { + tycho_util::serde_helpers::load_json_from_file(path) + } + + pub fn as_secret(&self) -> ed25519::SecretKey { + ed25519::SecretKey::from_bytes(self.secret.0) + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(default)] +pub struct NodeConfig { + /// Public IP address of the node. + /// + /// Default: resolved automatically. + pub public_ip: Option, + + /// Ip address to listen on. + /// + /// Default: 0.0.0.0 + pub local_ip: Ipv4Addr, + + /// Default: 30000. + pub port: u16, + + pub network: NetworkConfig, + + pub dht: DhtConfig, + + pub peer_resolver: PeerResolverConfig, + + pub overlay: OverlayConfig, + + pub public_overlay_client: PublicOverlayClientConfig, + + pub storage: StorageConfig, + + pub blockchain_rpc_service: BlockchainRpcServiceConfig, + + pub blockchain_block_provider: BlockchainBlockProviderConfig, +} + +impl Default for NodeConfig { + fn default() -> Self { + Self { + public_ip: None, + local_ip: Ipv4Addr::UNSPECIFIED, + port: 30000, + network: NetworkConfig::default(), + dht: DhtConfig::default(), + peer_resolver: PeerResolverConfig::default(), + overlay: OverlayConfig::default(), + public_overlay_client: PublicOverlayClientConfig::default(), + storage: StorageConfig::default(), + blockchain_rpc_service: BlockchainRpcServiceConfig::default(), + blockchain_block_provider: BlockchainBlockProviderConfig::default(), + } + } +} + +impl NodeConfig { + pub fn from_file>(path: P) -> Result { + tycho_util::serde_helpers::load_json_from_file(path) + } + + pub fn save_to_file>(&self, path: P) -> Result<()> { + let data = serde_json::to_string_pretty(self)?; + std::fs::write(path, data)?; + Ok(()) + } +} diff --git a/cli/src/node/mod.rs b/cli/src/node/mod.rs new file mode 100644 index 000000000..9b5e55e08 --- /dev/null +++ b/cli/src/node/mod.rs @@ -0,0 +1,675 @@ +use std::net::{Ipv4Addr, SocketAddr}; +use 
std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Result; +use clap::Parser; +use everscale_crypto::ed25519; +use everscale_types::models::*; +use everscale_types::prelude::*; +use futures_util::future::BoxFuture; +use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use tycho_collator::collator::CollatorStdImplFactory; +use tycho_collator::manager::CollationManager; +use tycho_collator::mempool::MempoolAdapterStdImpl; +use tycho_collator::msg_queue::MessageQueueAdapterStdImpl; +use tycho_collator::state_node::{StateNodeAdapter, StateNodeAdapterStdImpl}; +use tycho_collator::types::{CollationConfig, ValidatorNetwork}; +use tycho_collator::validator::config::ValidatorConfig; +use tycho_collator::validator::validator::ValidatorStdImplFactory; +use tycho_core::block_strider::{ + BlockProvider, BlockStrider, BlockchainBlockProvider, BlockchainBlockProviderConfig, + OptionalBlockStuff, PersistentBlockStriderState, StateSubscriber, StateSubscriberContext, + StorageBlockProvider, +}; +use tycho_core::blockchain_rpc::{BlockchainRpcClient, BlockchainRpcService}; +use tycho_core::global_config::{GlobalConfig, ZerostateId}; +use tycho_core::overlay_client::PublicOverlayClient; +use tycho_network::{ + DhtClient, DhtService, Network, OverlayService, PeerResolver, PublicOverlay, Router, +}; +use tycho_storage::{BlockMetaData, Storage}; +use tycho_util::FastHashMap; + +use self::config::{NodeConfig, NodeKeys}; +use crate::util::error::ResultExt; +use crate::util::logger::LoggerConfig; +use crate::util::signal; + +mod config; + +const SERVICE_NAME: &str = "tycho-node"; + +/// Run a Tycho node. 
+#[derive(Parser)] +pub struct CmdRun { + /// dump the template of the zero state config + #[clap( + short = 'i', + long, + conflicts_with_all = ["config", "global_config", "keys", "logger_config", "import_zerostate"] + )] + init_config: Option, + + /// overwrite the existing config + #[clap(short, long)] + force: bool, + + /// path to the node config + #[clap(long, required_unless_present = "init_config")] + config: Option, + + /// path to the global config + #[clap(long, required_unless_present = "init_config")] + global_config: Option, + + /// path to the node keys + #[clap(long, required_unless_present = "init_config")] + keys: Option, + + /// path to the logger config + #[clap(long)] + logger_config: Option, + + /// list of zerostate files to import + #[clap(long)] + import_zerostate: Option>, +} + +impl CmdRun { + pub fn run(self) -> Result<()> { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build()? + .block_on(async move { + let run_fut = tokio::spawn(self.run_impl()); + let stop_fut = signal::any_signal(signal::TERMINATION_SIGNALS); + tokio::select! 
{ + res = run_fut => res.unwrap(), + signal = stop_fut => match signal { + Ok(signal) => { + tracing::info!(?signal, "received termination signal"); + Ok(()) + } + Err(e) => Err(e.into()), + } + } + }) + } + + async fn run_impl(self) -> Result<()> { + if let Some(init_config_path) = self.init_config { + return NodeConfig::default() + .save_to_file(init_config_path) + .wrap_err("failed to save node config"); + } + + init_logger(self.logger_config)?; + + let node = { + let node_config = NodeConfig::from_file(self.config.unwrap()) + .wrap_err("failed to load node config")?; + + let global_config = GlobalConfig::from_file(self.global_config.unwrap()) + .wrap_err("failed to load global config")?; + + let keys = config::NodeKeys::from_file(&self.keys.unwrap()) + .wrap_err("failed to load node keys")?; + + let public_ip = resolve_public_ip(node_config.public_ip).await?; + let socket_addr = SocketAddr::new(public_ip.into(), node_config.port); + + Node::new(socket_addr, keys, node_config, global_config)? + }; + + let init_block_id = node + .try_init(self.import_zerostate) + .await + .wrap_err("failed to init node")?; + tracing::info!(%init_block_id, "node initialized"); + + node.run(&init_block_id).await?; + + Ok(()) + } +} + +fn init_logger(logger_config: Option) -> Result<()> { + use tracing_subscriber::layer::SubscriberExt; + use tracing_subscriber::{fmt, reload, EnvFilter}; + + let try_make_filter = { + let logger_config = logger_config.clone(); + move || { + Ok::<_, anyhow::Error>(match &logger_config { + None => EnvFilter::builder() + .with_default_directive(tracing::Level::INFO.into()) + .from_env_lossy(), + Some(path) => LoggerConfig::load_from(path) + .wrap_err("failed to load logger config")? 
+ .build_subscriber(), + }) + } + }; + + let (layer, handle) = reload::Layer::new(try_make_filter()?); + + let subscriber = tracing_subscriber::registry() + .with(layer) + .with(fmt::layer()); + tracing::subscriber::set_global_default(subscriber).unwrap(); + + if let Some(logger_config) = logger_config { + tokio::spawn(async move { + tracing::info!( + logger_config = %logger_config.display(), + "started watching for changes in logger config" + ); + + let get_metadata = move || { + std::fs::metadata(&logger_config) + .ok() + .and_then(|m| m.modified().ok()) + }; + + let mut last_modified = get_metadata(); + + let mut interval = tokio::time::interval(Duration::from_secs(10)); + loop { + interval.tick().await; + + let modified = get_metadata(); + if last_modified == modified { + continue; + } + last_modified = modified; + + match try_make_filter() { + Ok(filter) => { + if handle.reload(filter).is_err() { + break; + } + tracing::info!("reloaded logger config"); + } + Err(e) => tracing::error!(%e, "failed to reload logger config"), + } + } + + tracing::info!("stopped watching for changes in logger config"); + }); + } + + std::panic::set_hook(Box::new(|info| { + use std::io::Write; + + tracing::error!("PANIC: {}", info); + std::io::stderr().flush().ok(); + std::io::stdout().flush().ok(); + std::process::exit(1); + })); + + Ok(()) +} + +async fn resolve_public_ip(ip: Option) -> Result { + match ip { + Some(address) => Ok(address), + None => match public_ip::addr_v4().await { + Some(address) => Ok(address), + None => anyhow::bail!("failed to resolve public IP address"), + }, + } +} + +pub struct Node { + pub keypair: Arc, + + pub zerostate: ZerostateId, + + pub network: Network, + pub dht_client: DhtClient, + pub peer_resolver: PeerResolver, + pub overlay_service: OverlayService, + pub storage: Storage, + pub blockchain_rpc_client: BlockchainRpcClient, + + pub state_tracker: MinRefMcStateTracker, + pub blockchain_block_provider_config: BlockchainBlockProviderConfig, +} + 
+impl Node { + pub fn new( + public_addr: SocketAddr, + keys: NodeKeys, + node_config: NodeConfig, + global_config: GlobalConfig, + ) -> Result { + // Setup network + let keypair = Arc::new(ed25519::KeyPair::from(&keys.as_secret())); + let local_id = keypair.public_key.into(); + + let (dht_tasks, dht_service) = DhtService::builder(local_id) + .with_config(node_config.dht) + .build(); + + let (overlay_tasks, overlay_service) = OverlayService::builder(local_id) + .with_config(node_config.overlay) + .with_dht_service(dht_service.clone()) + .build(); + + let router = Router::builder() + .route(dht_service.clone()) + .route(overlay_service.clone()) + .build(); + + let local_addr = SocketAddr::from((node_config.local_ip, node_config.port)); + + let network = Network::builder() + .with_config(node_config.network) + .with_private_key(keys.secret.0) + .with_service_name(SERVICE_NAME) + .with_remote_addr(public_addr) + .build(local_addr, router) + .wrap_err("failed to build node network")?; + + dht_tasks.spawn(&network); + overlay_tasks.spawn(&network); + + let dht_client = dht_service.make_client(&network); + let peer_resolver = dht_service + .make_peer_resolver() + .with_config(node_config.peer_resolver) + .build(&network); + + let mut bootstrap_peers = 0usize; + for peer in global_config.bootstrap_peers { + let is_new = dht_client.add_peer(Arc::new(peer))?; + bootstrap_peers += is_new as usize; + } + + tracing::info!( + %local_id, + %local_addr, + %public_addr, + bootstrap_peers, + "initialized network" + ); + + // Setup storage + let storage = Storage::new(node_config.storage).wrap_err("failed to create storage")?; + tracing::info!( + root_dir = %storage.root().path().display(), + "initialized storage" + ); + + // Setup blockchain rpc + let blockchain_rpc_service = + BlockchainRpcService::new(storage.clone(), node_config.blockchain_rpc_service); + + let public_overlay = + PublicOverlay::builder(global_config.zerostate.compute_public_overlay_id()) + 
.with_peer_resolver(peer_resolver.clone()) + .build(blockchain_rpc_service); + overlay_service.add_public_overlay(&public_overlay); + + let blockchain_rpc_client = BlockchainRpcClient::new(PublicOverlayClient::new( + network.clone(), + public_overlay, + node_config.public_overlay_client, + )); + + tracing::info!( + overlay_id = %blockchain_rpc_client.overlay().overlay_id(), + "initialized blockchain rpc" + ); + + // Setup block strider + let state_tracker = MinRefMcStateTracker::default(); + + Ok(Self { + keypair, + zerostate: global_config.zerostate, + network, + dht_client, + peer_resolver, + overlay_service, + blockchain_rpc_client, + storage, + state_tracker, + blockchain_block_provider_config: node_config.blockchain_block_provider, + }) + } + + /// Initialize the node and return the init block id. + async fn try_init(&self, zerostates: Option>) -> Result { + let node_state = self.storage.node_state(); + + match node_state.load_last_mc_block_id() { + Some(block_id) => { + tracing::info!("warm init"); + Ok(block_id) + } + None => { + tracing::info!("cold init"); + + let zerostate_id = if let Some(zerostates) = zerostates { + let zerostate_id = self.import_zerostates(zerostates).await?; + node_state.store_init_mc_block_id(&zerostate_id); + node_state.store_last_mc_block_id(&zerostate_id); + zerostate_id + } else { + // TODO: Download zerostates + anyhow::bail!("zerostates not provided (STUB)"); + }; + Ok(zerostate_id) + } + } + } + + async fn import_zerostates(&self, paths: Vec) -> Result { + // Use a separate tracker for zerostates + let tracker = MinRefMcStateTracker::default(); + + // Read all zerostates + let mut zerostates = FastHashMap::default(); + for path in paths { + let state = load_zerostate(&tracker, &path) + .wrap_err_with(|| format!("failed to load zerostate {}", path.display()))?; + + if let Some(prev) = zerostates.insert(*state.block_id(), state) { + anyhow::bail!("duplicate zerostate {}", prev.block_id()); + } + } + + // Find the masterchain 
zerostate + let zerostate_id = self.zerostate.as_block_id(); + let Some(masterchain_zerostate) = zerostates.remove(&zerostate_id) else { + anyhow::bail!("missing mc zerostate for {zerostate_id}"); + }; + + // Prepare the list of zerostates to import + let mut to_import = vec![masterchain_zerostate.clone()]; + + let global_id = masterchain_zerostate.state().global_id; + let gen_utime = masterchain_zerostate.state().gen_utime; + + for entry in masterchain_zerostate.shards()?.iter() { + let (shard_ident, descr) = entry.wrap_err("invalid mc zerostate")?; + anyhow::ensure!(descr.seqno == 0, "invalid shard description {shard_ident}"); + + let block_id = BlockId { + shard: shard_ident, + seqno: 0, + root_hash: descr.root_hash, + file_hash: descr.file_hash, + }; + + let state = match zerostates.remove(&block_id) { + Some(existing) => { + tracing::debug!(block_id = %block_id, "using custom zerostate"); + existing + } + None => { + tracing::debug!(block_id = %block_id, "creating default zerostate"); + let state = + make_shard_state(&self.state_tracker, global_id, shard_ident, gen_utime) + .wrap_err("failed to create shard zerostate")?; + + anyhow::ensure!( + state.block_id() == &block_id, + "custom zerostate must be provided for {shard_ident}", + ); + + state + } + }; + + to_import.push(state); + } + + anyhow::ensure!( + zerostates.is_empty(), + "unused zerostates left: {}", + zerostates.len() + ); + + // Import all zerostates + let handle_storage = self.storage.block_handle_storage(); + let state_storage = self.storage.shard_state_storage(); + + for state in to_import { + let (handle, status) = handle_storage.create_or_load_handle( + state.block_id(), + BlockMetaData { + is_key_block: true, + gen_utime, + mc_ref_seqno: 0, + }, + ); + + let stored = state_storage + .store_state(&handle, &state) + .await + .wrap_err_with(|| { + format!("failed to import zerostate for {}", state.block_id().shard) + })?; + + tracing::debug!( + block_id = %state.block_id(), + handle_status = 
?status, + stored, + "importing zerostate" + ); + } + + tracing::info!("imported zerostates"); + Ok(zerostate_id) + } + + async fn run(&self, last_block_id: &BlockId) -> Result<()> { + // Ensure that there are some neighbours + tracing::info!("waiting for initial neighbours"); + self.blockchain_rpc_client + .overlay_client() + .neighbours() + .wait_for_peers(1) + .await; + tracing::info!("found initial neighbours"); + + // Create collator + tracing::info!("starting collator"); + + // TODO: move into config + let collation_config = CollationConfig { + key_pair: self.keypair.clone(), + mc_block_min_interval_ms: 10000, + max_mc_block_delta_from_bc_to_await_own: 2, + supported_block_version: 50, + supported_capabilities: supported_capabilities(), + max_collate_threads: 1, + test_validators_keypairs: vec![], + }; + + let collation_manager = CollationManager::start( + collation_config, + Arc::new(MessageQueueAdapterStdImpl::default()), + |listener| StateNodeAdapterStdImpl::new(listener, self.storage.clone()), + MempoolAdapterStdImpl::new, + ValidatorStdImplFactory { + network: ValidatorNetwork { + overlay_service: self.overlay_service.clone(), + peer_resolver: self.peer_resolver.clone(), + dht_client: self.dht_client.clone(), + }, + // TODO: Move into node config + config: ValidatorConfig { + base_loop_delay: Duration::from_millis(50), + max_loop_delay: Duration::from_secs(10), + }, + }, + CollatorStdImplFactory, + ); + + let collator_state_subscriber = CollatorStateSubscriber { + adapter: collation_manager.state_node_adapter().clone(), + }; + + { + // Force load last applied state + let mc_state = self + .storage + .shard_state_storage() + .load_state(&last_block_id) + .await?; + + collator_state_subscriber + .adapter + .handle_state(&mc_state) + .await?; + } + + tracing::info!("collator started"); + + // Create block strider + let blockchain_block_provider = BlockchainBlockProvider::new( + self.blockchain_rpc_client.clone(), + self.storage.clone(), + 
self.blockchain_block_provider_config.clone(), + ); + + let storage_block_provider = StorageBlockProvider::new(self.storage.clone()); + + let collator_block_provider = CollatorBlockProvider { + adapter: collation_manager.state_node_adapter().clone(), + }; + + let strider_state = + PersistentBlockStriderState::new(self.zerostate.as_block_id(), self.storage.clone()); + + let block_strider = BlockStrider::builder() + .with_provider(( + (blockchain_block_provider, storage_block_provider), + collator_block_provider, + )) + .with_state(strider_state) + .with_state_subscriber( + self.state_tracker.clone(), + self.storage.clone(), + collator_state_subscriber, + ) + .build(); + + // Run block strider + tracing::info!("block strider started"); + block_strider.run().await?; + tracing::info!("block strider finished"); + + Ok(()) + } +} + +struct CollatorStateSubscriber { + adapter: Arc, +} + +impl StateSubscriber for CollatorStateSubscriber { + type HandleStateFut<'a> = BoxFuture<'a, Result<()>>; + + fn handle_state<'a>(&'a self, cx: &'a StateSubscriberContext) -> Self::HandleStateFut<'a> { + self.adapter.handle_state(&cx.state) + } +} + +struct CollatorBlockProvider { + adapter: Arc, +} + +impl BlockProvider for CollatorBlockProvider { + type GetNextBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; + type GetBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; + + fn get_next_block<'a>(&'a self, prev_block_id: &'a BlockId) -> Self::GetNextBlockFut<'a> { + self.adapter.wait_for_block_next(prev_block_id) + } + + fn get_block<'a>(&'a self, block_id: &'a BlockId) -> Self::GetBlockFut<'a> { + self.adapter.wait_for_block(block_id) + } +} + +fn load_zerostate(tracker: &MinRefMcStateTracker, path: &PathBuf) -> Result { + let data = std::fs::read(path).wrap_err("failed to read file")?; + let file_hash = Boc::file_hash(&data); + + let root = Boc::decode(data).wrap_err("failed to decode BOC")?; + let root_hash = *root.repr_hash(); + + let state = root + .parse::() + .wrap_err("failed to 
parse state")?; + + anyhow::ensure!(state.seqno == 0, "not a zerostate"); + + let block_id = BlockId { + shard: state.shard_ident, + seqno: state.seqno, + root_hash, + file_hash, + }; + + ShardStateStuff::from_root(&block_id, root, &tracker) +} + +fn make_shard_state( + tracker: &MinRefMcStateTracker, + global_id: i32, + shard_ident: ShardIdent, + now: u32, +) -> Result { + let state = ShardStateUnsplit { + global_id, + shard_ident, + gen_utime: now, + min_ref_mc_seqno: u32::MAX, + ..Default::default() + }; + + let root = CellBuilder::build_from(&state)?; + let root_hash = *root.repr_hash(); + let file_hash = Boc::file_hash(Boc::encode(&root)); + + let block_id = BlockId { + shard: state.shard_ident, + seqno: state.seqno, + root_hash, + file_hash, + }; + + ShardStateStuff::from_root(&block_id, root, &tracker) +} + +fn supported_capabilities() -> u64 { + GlobalCapabilities::from([ + GlobalCapability::CapCreateStatsEnabled, + GlobalCapability::CapBounceMsgBody, + GlobalCapability::CapReportVersion, + GlobalCapability::CapShortDequeue, + GlobalCapability::CapInitCodeHash, + GlobalCapability::CapOffHypercube, + GlobalCapability::CapFixTupleIndexBug, + GlobalCapability::CapFastStorageStat, + GlobalCapability::CapMyCode, + GlobalCapability::CapFullBodyInBounced, + GlobalCapability::CapStorageFeeToTvm, + GlobalCapability::CapWorkchains, + GlobalCapability::CapStcontNewFormat, + GlobalCapability::CapFastStorageStatBugfix, + GlobalCapability::CapResolveMerkleCell, + GlobalCapability::CapFeeInGasUnits, + GlobalCapability::CapBounceAfterFailedAction, + GlobalCapability::CapSuspendedList, + GlobalCapability::CapsTvmBugfixes2022, + ]) + .into_inner() +} diff --git a/cli/src/tools/gen_zerostate.rs b/cli/src/tools/gen_zerostate.rs index cb38490a9..4aa883a3d 100644 --- a/cli/src/tools/gen_zerostate.rs +++ b/cli/src/tools/gen_zerostate.rs @@ -12,12 +12,17 @@ use serde::{Deserialize, Serialize}; use sha2::Digest; use crate::util::compute_storage_used; +use 
crate::util::error::ResultExt; /// Generate a zero state for a network. #[derive(clap::Parser)] pub struct Cmd { /// dump the template of the zero state config - #[clap(short = 'i', long, exclusive = true)] + #[clap( + short = 'i', + long, + conflicts_with_all = ["config", "output", "now"] + )] init_config: Option, /// path to the zero state config @@ -78,25 +83,23 @@ fn generate_zerostate( config .prepare_config_params(now) - .map_err(|e| GenError::new("validator config is invalid", e))?; + .wrap_err("validator config is invalid")?; config .add_required_accounts() - .map_err(|e| GenError::new("failed to add required accounts", e))?; + .wrap_err("failed to add required accounts")?; let state = config .build_masterchain_state(now) - .map_err(|e| GenError::new("failed to build masterchain zerostate", e))?; + .wrap_err("failed to build masterchain zerostate")?; - let boc = CellBuilder::build_from(&state) - .map_err(|e| GenError::new("failed to serialize zerostate", e))?; + let boc = CellBuilder::build_from(&state).wrap_err("failed to serialize zerostate")?; let root_hash = *boc.repr_hash(); let data = Boc::encode(&boc); let file_hash = HashBytes::from(sha2::Sha256::digest(&data)); - std::fs::write(output_path, data) - .map_err(|e| GenError::new("failed to write masterchain zerostate", e))?; + std::fs::write(output_path, data).wrap_err("failed to write masterchain zerostate")?; let hashes = serde_json::json!({ "root_hash": root_hash, @@ -297,12 +300,40 @@ impl ZerostateConfig { state.total_balance = state .total_balance .checked_add(&account.balance) - .map_err(|e| GenError::new("failed ot compute total balance", e))?; + .wrap_err("failed ot compute total balance")?; } } + let workchains = self.params.get::()?.unwrap(); + let mut shards = Vec::new(); + for entry in workchains.iter() { + let (workchain, descr) = entry?; + shards.push((ShardIdent::new_full(workchain), ShardDescription { + seqno: 0, + reg_mc_seqno: 0, + start_lt: 0, + end_lt: 0, + root_hash: 
descr.zerostate_root_hash, + file_hash: descr.zerostate_file_hash, + before_split: false, + before_merge: false, + want_split: false, + want_merge: false, + nx_cc_updated: true, + next_catchain_seqno: 0, + next_validator_shard: ShardIdent::PREFIX_FULL, + min_ref_mc_seqno: u32::MAX, + gen_utime: now, + split_merge_at: None, + fees_collected: CurrencyCollection::ZERO, + funds_created: CurrencyCollection::ZERO, + copyleft_rewards: Dict::new(), + proof_chain: None, + })); + } + state.custom = Some(Lazy::new(&McStateExtra { - shards: Default::default(), + shards: ShardHashes::from_shards(shards.iter().map(|(ident, descr)| (ident, descr)))?, config: BlockchainConfig { address: self.params.get::()?.unwrap(), params: self.params.clone(), @@ -408,24 +439,21 @@ fn make_default_params() -> Result { // Param 12 { let mut workchains = Dict::new(); - workchains.set( - 0, - WorkchainDescription { - enabled_since: 0, - actual_min_split: 0, - min_split: 0, - max_split: 3, - active: true, - accept_msgs: true, - zerostate_root_hash: HashBytes::ZERO, - zerostate_file_hash: HashBytes::ZERO, - version: 0, - format: WorkchainFormat::Basic(WorkchainFormatBasic { - vm_version: 0, - vm_mode: 0, - }), - }, - )?; + workchains.set(0, WorkchainDescription { + enabled_since: 0, + actual_min_split: 0, + min_split: 0, + max_split: 3, + active: true, + accept_msgs: true, + zerostate_root_hash: HashBytes::ZERO, + zerostate_file_hash: HashBytes::ZERO, + version: 0, + format: WorkchainFormat::Basic(WorkchainFormatBasic { + vm_version: 0, + vm_mode: 0, + }), + })?; params.set::(&workchains)?; } @@ -468,106 +496,88 @@ fn make_default_params() -> Result { }])?; // Param 20 (masterchain) - params.set_gas_prices( - true, - &GasLimitsPrices { - gas_price: 655360000, - gas_limit: 1000000, - special_gas_limit: 100000000, - gas_credit: 10000, - block_gas_limit: 11000000, - freeze_due_limit: 100000000, - delete_due_limit: 1000000000, - flat_gas_limit: 1000, - flat_gas_price: 10000000, - }, - )?; + 
params.set_gas_prices(true, &GasLimitsPrices { + gas_price: 655360000, + gas_limit: 1000000, + special_gas_limit: 100000000, + gas_credit: 10000, + block_gas_limit: 11000000, + freeze_due_limit: 100000000, + delete_due_limit: 1000000000, + flat_gas_limit: 1000, + flat_gas_price: 10000000, + })?; // Param 21 (basechain) - params.set_gas_prices( - false, - &GasLimitsPrices { - gas_price: 65536000, - gas_limit: 1000000, - special_gas_limit: 1000000, - gas_credit: 10000, - block_gas_limit: 10000000, - freeze_due_limit: 100000000, - delete_due_limit: 1000000000, - flat_gas_limit: 1000, - flat_gas_price: 1000000, - }, - )?; + params.set_gas_prices(false, &GasLimitsPrices { + gas_price: 65536000, + gas_limit: 1000000, + special_gas_limit: 1000000, + gas_credit: 10000, + block_gas_limit: 10000000, + freeze_due_limit: 100000000, + delete_due_limit: 1000000000, + flat_gas_limit: 1000, + flat_gas_price: 1000000, + })?; // Param 22 (masterchain) - params.set_block_limits( - true, - &BlockLimits { - bytes: BlockParamLimits { - underload: 131072, - soft_limit: 524288, - hard_limit: 1048576, - }, - gas: BlockParamLimits { - underload: 900000, - soft_limit: 1200000, - hard_limit: 2000000, - }, - lt_delta: BlockParamLimits { - underload: 1000, - soft_limit: 5000, - hard_limit: 10000, - }, + params.set_block_limits(true, &BlockLimits { + bytes: BlockParamLimits { + underload: 131072, + soft_limit: 524288, + hard_limit: 1048576, + }, + gas: BlockParamLimits { + underload: 900000, + soft_limit: 1200000, + hard_limit: 2000000, + }, + lt_delta: BlockParamLimits { + underload: 1000, + soft_limit: 5000, + hard_limit: 10000, }, - )?; + })?; // Param 23 (basechain) - params.set_block_limits( - false, - &BlockLimits { - bytes: BlockParamLimits { - underload: 131072, - soft_limit: 524288, - hard_limit: 1048576, - }, - gas: BlockParamLimits { - underload: 900000, - soft_limit: 1200000, - hard_limit: 2000000, - }, - lt_delta: BlockParamLimits { - underload: 1000, - soft_limit: 5000, - 
hard_limit: 10000, - }, + params.set_block_limits(false, &BlockLimits { + bytes: BlockParamLimits { + underload: 131072, + soft_limit: 524288, + hard_limit: 1048576, }, - )?; + gas: BlockParamLimits { + underload: 900000, + soft_limit: 1200000, + hard_limit: 2000000, + }, + lt_delta: BlockParamLimits { + underload: 1000, + soft_limit: 5000, + hard_limit: 10000, + }, + })?; // Param 24 (masterchain) - params.set_msg_forward_prices( - true, - &MsgForwardPrices { - lump_price: 10000000, - bit_price: 655360000, - cell_price: 65536000000, - ihr_price_factor: 98304, - first_frac: 21845, - next_frac: 21845, - }, - )?; + params.set_msg_forward_prices(true, &MsgForwardPrices { + lump_price: 10000000, + bit_price: 655360000, + cell_price: 65536000000, + ihr_price_factor: 98304, + first_frac: 21845, + next_frac: 21845, + })?; // Param 25 (basechain) - params.set_msg_forward_prices( - false, - &MsgForwardPrices { - lump_price: 1000000, - bit_price: 65536000, - cell_price: 6553600000, - ihr_price_factor: 98304, - first_frac: 21845, - next_frac: 21845, - }, - )?; + params.set_msg_forward_prices(false, &MsgForwardPrices { + lump_price: 1000000, + bit_price: 65536000, + cell_price: 6553600000, + ihr_price_factor: 98304, + first_frac: 21845, + next_frac: 21845, + })?; // Param 28 params.set_catchain_config(&CatchainConfig { @@ -647,7 +657,7 @@ fn build_elector_code(address: &HashBytes, balance: Tokens) -> Result { let code = Boc::decode(ELECTOR_CODE)?; let mut data = CellBuilder::new(); - data.store_small_uint(0, 3)?; //empty dict, empty dict, empty dict + data.store_small_uint(0, 3)?; // empty dict, empty dict, empty dict data.store_small_uint(0, 4)?; // tokens data.store_u32(0)?; // elections id data.store_zeros(256)?; // elections hash @@ -708,29 +718,12 @@ fn zero_public_key() -> &'static ed25519::PublicKey { KEY.get_or_init(|| ed25519::PublicKey::from_bytes([0; 32]).unwrap()) } -#[derive(thiserror::Error, Debug)] -#[error("{context}: {source}")] -struct GenError { - context: 
String, - #[source] - source: anyhow::Error, -} - -impl GenError { - fn new(context: impl Into, source: impl Into) -> Self { - Self { - context: context.into(), - source: source.into(), - } - } -} - mod serde_account_states { - use super::*; - use serde::de::Deserializer; use serde::ser::{SerializeMap, Serializer}; + use super::*; + pub fn serialize( value: &HashMap, serializer: S, diff --git a/cli/src/util/error.rs b/cli/src/util/error.rs new file mode 100644 index 000000000..3007f924d --- /dev/null +++ b/cli/src/util/error.rs @@ -0,0 +1,42 @@ +pub trait ResultExt { + fn wrap_err(self, context: impl Into) -> anyhow::Result; + + fn wrap_err_with(self, f: F) -> anyhow::Result + where + F: FnOnce() -> R, + R: Into; +} + +impl> ResultExt for Result { + fn wrap_err(self, context: impl Into) -> anyhow::Result { + self.map_err(|e| { + ErrorWithContext { + context: context.into(), + source: e.into(), + } + .into() + }) + } + + fn wrap_err_with(self, f: F) -> anyhow::Result + where + F: FnOnce() -> R, + R: Into, + { + self.map_err(|e| { + ErrorWithContext { + context: f().into(), + source: e.into(), + } + .into() + }) + } +} + +#[derive(thiserror::Error, Debug)] +#[error("{context}: {source}")] +pub struct ErrorWithContext { + context: String, + #[source] + source: anyhow::Error, +} diff --git a/cli/src/util/logger.rs b/cli/src/util/logger.rs new file mode 100644 index 000000000..190c886cf --- /dev/null +++ b/cli/src/util/logger.rs @@ -0,0 +1,60 @@ +use std::path::Path; + +use anyhow::Result; +use serde::de::Visitor; +use serde::{Deserialize, Deserializer}; +use tracing_subscriber::filter::Directive; + +pub struct LoggerConfig { + directives: Vec, +} + +impl LoggerConfig { + pub fn load_from>(path: P) -> Result { + tycho_util::serde_helpers::load_json_from_file(path) + } + + pub fn build_subscriber(&self) -> tracing_subscriber::filter::EnvFilter { + let mut builder = tracing_subscriber::filter::EnvFilter::default(); + for item in &self.directives { + builder = 
builder.add_directive(item.clone()); + } + builder + } +} + +impl<'de> Deserialize<'de> for LoggerConfig { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct LoggerVisitor; + + impl<'de> Visitor<'de> for LoggerVisitor { + type Value = LoggerConfig; + + fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("a list of targets") + } + + fn visit_map(self, mut map: A) -> Result + where + A: serde::de::MapAccess<'de>, + { + let mut directives = Vec::new(); + + while let Some((target, level)) = map.next_entry::()? { + let directive = format!("{}={}", target, level) + .parse::() + .map_err(serde::de::Error::custom)?; + + directives.push(directive); + } + + Ok(LoggerConfig { directives }) + } + } + + deserializer.deserialize_map(LoggerVisitor) + } +} diff --git a/cli/src/util/mod.rs b/cli/src/util/mod.rs index 0dad67020..6c3a7591c 100644 --- a/cli/src/util/mod.rs +++ b/cli/src/util/mod.rs @@ -5,6 +5,10 @@ use everscale_types::models::{Account, StorageUsed}; use everscale_types::num::VarUint56; use everscale_types::prelude::*; +pub mod error; +pub mod logger; +pub mod signal; + // TODO: move into types pub fn compute_storage_used(account: &Account) -> Result { let cell = { diff --git a/cli/src/util/signal.rs b/cli/src/util/signal.rs new file mode 100644 index 000000000..d49d72f4d --- /dev/null +++ b/cli/src/util/signal.rs @@ -0,0 +1,35 @@ +use tokio::signal::unix; + +pub const TERMINATION_SIGNALS: [libc::c_int; 5] = [ + libc::SIGINT, + libc::SIGTERM, + libc::SIGQUIT, + libc::SIGABRT, + 20, // SIGTSTP +]; + +pub fn any_signal(signals: I) -> tokio::sync::oneshot::Receiver +where + I: IntoIterator, + T: Into + Send + 'static, +{ + let (tx, rx) = tokio::sync::oneshot::channel(); + + let any_signal = futures_util::future::select_all(signals.into_iter().map(|signal| { + Box::pin(async move { + let signal = signal.into(); + unix::signal(signal) + .expect("Failed subscribing on unix signals") + .recv() + 
.await; + signal + }) + })); + + tokio::spawn(async move { + let signal = any_signal.await.0; + tx.send(signal).ok(); + }); + + rx +} diff --git a/collator/Cargo.toml b/collator/Cargo.toml index 4510802ae..1806fca6a 100644 --- a/collator/Cargo.toml +++ b/collator/Cargo.toml @@ -23,7 +23,7 @@ tokio = { workspace = true, features = ["macros", "rt", "signal"] } tokio-util = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } - +trait-variant = { workspace = true } everscale-types = { workspace = true } everscale-crypto = { workspace = true } @@ -33,17 +33,14 @@ tycho-network = { workspace = true } tycho-storage = { workspace = true } tycho-util = { workspace = true } tycho-block-util = { workspace = true } - -#TODO: should be here? tycho-consensus = {workspace = true} - - [dev-dependencies] tempfile = { workspace = true } tokio = { version = "1", features = ["rt-multi-thread"] } tracing-test = { workspace = true } tycho-block-util = { workspace = true, features = ["test"] } +tycho-collator = { workspace = true, features = ["test"] } tycho-core = { workspace = true, features = ["test"] } tycho-storage = { workspace = true, features = ["test"] } tycho-util = { workspace = true, features = ["test"] } diff --git a/collator/src/collator/build_block.rs b/collator/src/collator/build_block.rs index 7fc667282..daa7d2b68 100644 --- a/collator/src/collator/build_block.rs +++ b/collator/src/collator/build_block.rs @@ -8,11 +8,11 @@ use sha2::Digest; use tycho_block_util::config::BlockchainConfigExt; use tycho_block_util::state::ShardStateStuff; +use super::execution_manager::ExecutionManager; +use super::CollatorStdImpl; use crate::collator::types::{AccountBlocksDict, BlockCollationData, PrevData, ShardAccountStuff}; use crate::types::BlockCandidate; -use super::{execution_manager::ExecutionManager, CollatorStdImpl}; - impl CollatorStdImpl { pub(super) async fn finalize_block( &mut self, @@ -33,20 +33,20 @@ impl CollatorStdImpl { // drop 
sender to stop the task that process messages and force it to return updated shard account std::mem::drop(sender); let shard_acc_stuff = handle.await??; - //TODO: read account - //TODO: get updated blockchain config if it stored in account - //TODO: if have transactions, build AccountBlock and add to account_blocks + // TODO: read account + // TODO: get updated blockchain config if it stored in account + // TODO: if have transactions, build AccountBlock and add to account_blocks changed_accounts.insert(account_id, shard_acc_stuff); } - //TODO: update new_config_opt from hard fork + // TODO: update new_config_opt from hard fork // calc value flow - //TODO: init collation_data.value_flow + // TODO: init collation_data.value_flow let mut value_flow = collation_data.value_flow.clone(); - //TODO: init collation_data.in_msgs + // TODO: init collation_data.in_msgs value_flow.imported = collation_data.in_msgs.root_extra().value_imported.clone(); - //TODO: init collation_data.out_msgs + // TODO: init collation_data.out_msgs value_flow.exported = collation_data.out_msgs.root_extra().clone(); value_flow.fees_collected = account_blocks.root_extra().clone(); value_flow @@ -61,10 +61,10 @@ impl CollatorStdImpl { value_flow.to_next_block = shard_accounts.root_extra().balance.clone(); // build master state extra or get a ref to last applied master block - //TODO: extract min_ref_mc_seqno from processed_upto info when we have many shards + // TODO: extract min_ref_mc_seqno from processed_upto info when we have many shards let (out_msg_queue_info, _min_ref_mc_seqno) = collation_data.out_msg_queue_stuff.get_out_msg_queue_info(); - //collation_data.update_ref_min_mc_seqno(min_ref_mc_seqno); + // collation_data.update_ref_min_mc_seqno(min_ref_mc_seqno); let (mc_state_extra, master_ref) = if self.shard_id.is_masterchain() { let (extra, min_ref_mc_seqno) = self.create_mc_state_extra(collation_data, new_config_opt)?; @@ -81,7 +81,7 @@ impl CollatorStdImpl { }; 
new_block_info.set_prev_ref(&prev_shard_data.get_blocks_ref()?); - //TODO: should set when slpit/merge logic implemented + // TODO: should set when slpit/merge logic implemented // info.after_merge = false; // info.before_split = false; // info.after_split = false; @@ -115,7 +115,7 @@ impl CollatorStdImpl { // build new state let global_id = prev_shard_data.observable_states()[0].state().global_id; - let mut new_state = ShardStateUnsplit { + let mut new_state = Box::new(ShardStateUnsplit { global_id, shard_ident: new_block_info.shard, seqno: new_block_info.seqno, @@ -139,22 +139,27 @@ impl CollatorStdImpl { custom: mc_state_extra.as_ref().map(Lazy::new).transpose()?, #[cfg(feature = "venom")] shard_block_refs: None, - }; + }); + + new_state + .total_validator_fees + .checked_add(&value_flow.fees_collected)?; new_state .total_balance .try_add_assign(&value_flow.fees_collected)?; - new_state - .total_validator_fees - .checked_sub(&value_flow.recovered)?; + // TODO: + // new_state + // .total_validator_fees + // .try_sub_assign(&value_flow.recovered)?; if self.shard_id.is_masterchain() { new_state.libraries = self.update_public_libraries(exec_manager.libraries.clone(), &changed_accounts)?; } - //TODO: update smc on hard fork + // TODO: update smc on hard fork // calc merkle update let new_state_root = CellBuilder::build_from(&new_state)?; @@ -174,13 +179,13 @@ impl CollatorStdImpl { ..Default::default() }; - //TODO: fill created_by - //extra.created_by = self.created_by.clone(); + // TODO: fill created_by + // extra.created_by = self.created_by.clone(); if let Some(mc_state_extra) = mc_state_extra { let new_mc_block_extra = McBlockExtra { shards: mc_state_extra.shards.clone(), fees: collation_data.shard_fees.clone(), - //TODO: Signatures for previous blocks + // TODO: Signatures for previous blocks prev_block_signatures: Default::default(), mint_msg: collation_data .mint_msg @@ -222,7 +227,7 @@ impl CollatorStdImpl { file_hash: 
sha2::Sha256::digest(&new_block_boc).into(), }; - //TODO: build collated data from collation_data.shard_top_block_descriptors + // TODO: build collated data from collation_data.shard_top_block_descriptors let collated_data = vec![]; let block_candidate = BlockCandidate::new( @@ -237,7 +242,7 @@ impl CollatorStdImpl { ); let new_state_stuff = ShardStateStuff::from_state_and_root( - new_block_id, + &new_block_id, new_state, new_state_root, &self.state_tracker, @@ -296,8 +301,8 @@ impl CollatorStdImpl { // prev_state_extra.flags is checked in the McStateExtra::load_from // 5. update validator_info - //TODO: check `create_mc_state_extra()` for a reference implementation - //STUB: currently we do not use validator_info and just do nothing there + // TODO: check `create_mc_state_extra()` for a reference implementation + // STUB: currently we do not use validator_info and just do nothing there let validator_info = prev_state_extra.validator_info.clone(); // 6. update prev_blocks (add prev block's id to the dictionary) @@ -310,7 +315,7 @@ impl CollatorStdImpl { root_hash: prev_state.block_id().root_hash, file_hash: prev_state.block_id().file_hash, }; - //TODO: use AugDict::set when it be implemented + // TODO: use AugDict::set when it be implemented // prev_blocks.set( // &prev_state.block_id().seqno, // &KeyBlockRef { @@ -375,9 +380,9 @@ impl CollatorStdImpl { wc_set: &Dict, update_cc: bool, ) -> Result { - //TODO: here should be the split/merge logic, refer to old node impl + // TODO: here should be the split/merge logic, refer to old node impl - //STUB: just do nothing for now: no split/merge, no session rotation + // STUB: just do nothing for now: no split/merge, no session rotation let mut min_ref_mc_seqno = u32::max_value(); for (_shard_id, shard_descr) in collation_data.shards_mut()? 
{ min_ref_mc_seqno = std::cmp::min(min_ref_mc_seqno, shard_descr.min_ref_mc_seqno); @@ -391,8 +396,8 @@ impl CollatorStdImpl { collation_data: &BlockCollationData, block_create_stats: &mut Dict, ) -> Result<()> { - //TODO: implement if we really need it - //STUB: do not update anything + // TODO: implement if we really need it + // STUB: do not update anything Ok(()) } @@ -428,8 +433,6 @@ impl CollatorStdImpl { timer.elapsed().as_millis(), ); - // do not need to calc out_queue_updates - Ok(state_update) } } diff --git a/collator/src/collator/do_collate.rs b/collator/src/collator/do_collate.rs index 491271c37..d218c9bf6 100644 --- a/collator/src/collator/do_collate.rs +++ b/collator/src/collator/do_collate.rs @@ -20,7 +20,7 @@ impl CollatorStdImpl { next_chain_time: u64, top_shard_blocks_info: Vec<(BlockId, BlockInfo, ValueFlow)>, ) -> Result<()> { - //TODO: make real implementation + // TODO: make real implementation let mc_data = &self.working_state().mc_data; let prev_shard_data = &self.working_state().prev_shard_data; @@ -57,7 +57,7 @@ impl CollatorStdImpl { ); // prepare block collation data - //STUB: consider split/merge in future for taking prev_block_id + // STUB: consider split/merge in future for taking prev_block_id let prev_block_id = prev_shard_data.blocks_ids()[0]; let mut collation_data = BlockCollationData::default(); collation_data.block_id_short = BlockIdShort { @@ -84,7 +84,7 @@ impl CollatorStdImpl { } collation_data.set_shards(shards); - //TODO: setup ShardFees and update `collation_data.value_flow.fees_*` + // TODO: setup ShardFees and update `collation_data.value_flow.fees_*` } collation_data.update_ref_min_mc_seqno(mc_data.mc_state_stuff().state().seqno); @@ -97,11 +97,11 @@ impl CollatorStdImpl { )?; collation_data.max_lt = collation_data.start_lt + 1; - //TODO: should consider split/merge in future + // TODO: should consider split/merge in future let out_msg_queue_info = prev_shard_data.observable_states()[0] .state() 
.load_out_msg_queue_info() - .unwrap_or_default(); //TODO: should not fail there + .unwrap_or_default(); // TODO: should not fail there collation_data.out_msg_queue_stuff = OutMsgQueueInfoStuff { proc_info: out_msg_queue_info.proc_info, }; @@ -124,52 +124,50 @@ impl CollatorStdImpl { self.config.max_collate_threads, ); - //STUB: just remove fisrt anchor from cache + // STUB: just remove fisrt anchor from cache let _ext_msg = self.get_next_external(); self.has_pending_externals = false; - //STUB: do not execute transactions and produce empty block + // STUB: do not execute transactions and produce empty block // build block candidate and new state let (candidate, new_state_stuff) = self .finalize_block(&mut collation_data, exec_manager) .await?; - /* - //STUB: just send dummy block to collation manager - let prev_blocks_ids = prev_shard_data.blocks_ids().clone(); - let prev_block_id = prev_blocks_ids[0]; - let collated_block_id_short = BlockIdShort { - shard: prev_block_id.shard, - seqno: prev_block_id.seqno + 1, - }; - let mut builder = CellBuilder::new(); - builder.store_bit(collated_block_id_short.shard.workchain().is_negative())?; - builder.store_u32(collated_block_id_short.shard.workchain().unsigned_abs())?; - builder.store_u64(collated_block_id_short.shard.prefix())?; - builder.store_u32(collated_block_id_short.seqno)?; - let cell = builder.build()?; - let hash = cell.repr_hash(); - let collated_block_id = BlockId { - shard: collated_block_id_short.shard, - seqno: collated_block_id_short.seqno, - root_hash: *hash, - file_hash: *hash, - }; - let mut new_state = prev_shard_data.pure_states()[0] - .state() - .clone(); - new_state.seqno = collated_block_id.seqno; - let candidate = BlockCandidate::new( - collated_block_id, - prev_blocks_ids, - top_shard_blocks_ids, - vec![], - vec![], - collated_block_id.file_hash, - next_chain_time, - ); - */ + // STUB: just send dummy block to collation manager + // let prev_blocks_ids = prev_shard_data.blocks_ids().clone(); + // 
let prev_block_id = prev_blocks_ids[0]; + // let collated_block_id_short = BlockIdShort { + // shard: prev_block_id.shard, + // seqno: prev_block_id.seqno + 1, + // }; + // let mut builder = CellBuilder::new(); + // builder.store_bit(collated_block_id_short.shard.workchain().is_negative())?; + // builder.store_u32(collated_block_id_short.shard.workchain().unsigned_abs())?; + // builder.store_u64(collated_block_id_short.shard.prefix())?; + // builder.store_u32(collated_block_id_short.seqno)?; + // let cell = builder.build()?; + // let hash = cell.repr_hash(); + // let collated_block_id = BlockId { + // shard: collated_block_id_short.shard, + // seqno: collated_block_id_short.seqno, + // root_hash: *hash, + // file_hash: *hash, + // }; + // let mut new_state = prev_shard_data.pure_states()[0] + // .state() + // .clone(); + // new_state.seqno = collated_block_id.seqno; + // let candidate = BlockCandidate::new( + // collated_block_id, + // prev_blocks_ids, + // top_shard_blocks_ids, + // vec![], + // vec![], + // collated_block_id.file_hash, + // next_chain_time, + // ); let collation_result = BlockCollationResult { candidate, @@ -283,7 +281,7 @@ impl CollatorStdImpl { } else { collation_data.value_flow.created.tokens = mc_data.config().get_block_creation_reward(false)?; - //TODO: should check if it is good to cast `prefix_len` from u16 to u8 + // TODO: should check if it is good to cast `prefix_len` from u16 to u8 collation_data.value_flow.created.tokens >>= collation_data.block_id_short.shard.prefix_len() as u8; } @@ -297,7 +295,7 @@ impl CollatorStdImpl { } fn compute_minted_amount(&self, mc_data: &McData) -> Result { - //TODO: just copied from old node, needs to review + // TODO: just copied from old node, needs to review tracing::trace!("Collator ({}): compute_minted_amount", self.collator_descr); let mut to_mint = CurrencyCollection::default(); diff --git a/collator/src/collator/mod.rs b/collator/src/collator/mod.rs index b03bef3cc..6df211d60 100644 --- 
a/collator/src/collator/mod.rs +++ b/collator/src/collator/mod.rs @@ -9,16 +9,15 @@ use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; use self::types::{McData, PrevData, WorkingState}; use crate::mempool::{MempoolAdapter, MempoolAnchor, MempoolAnchorId}; -use crate::method_to_async_task_closure; use crate::msg_queue::MessageQueueAdapter; use crate::state_node::StateNodeAdapter; -use crate::tracing_targets; use crate::types::{ BlockCollationResult, CollationConfig, CollationSessionId, CollationSessionInfo, }; use crate::utils::async_queued_dispatcher::{ AsyncQueuedDispatcher, STANDARD_DISPATCHER_QUEUE_BUFFER_SIZE, }; +use crate::{method_to_async_task_closure, tracing_targets}; mod build_block; mod do_collate; @@ -412,7 +411,7 @@ impl CollatorStdImpl { prev_states: Vec, prev_blocks_ids: Vec, ) -> Result { - //TODO: make real implementation + // TODO: make real implementation let mc_data = McData::build(mc_state)?; Self::check_prev_states_and_master(&mc_data, &prev_states)?; @@ -432,7 +431,7 @@ impl CollatorStdImpl { _mc_data: &McData, _prev_states: &[ShardStateStuff], ) -> Result<()> { - //TODO: make real implementation + // TODO: make real implementation // refer to the old node impl: // Collator::unpack_last_state() Ok(()) @@ -442,9 +441,9 @@ impl CollatorStdImpl { /// 2. Await next anchor via mempool adapter /// 3. 
Store anchor in cache and return it async fn import_next_anchor(&mut self) -> Result> { - //TODO: make real implementation + // TODO: make real implementation - //STUB: take 0 as last imported without checking `externals_processed_upto` + // STUB: take 0 as last imported without checking `externals_processed_upto` let prev_anchor_id = self.last_imported_anchor_id.unwrap_or(0); let next_anchor = self.mpool_adapter.get_next_anchor(prev_anchor_id).await?; @@ -475,9 +474,9 @@ impl CollatorStdImpl { /// (TODO) Should consider parallel processing for different accounts fn get_next_external(&mut self) -> Option> { - //TODO: make real implementation + // TODO: make real implementation - //STUB: just remove first anchor from cache to force next anchor import on `try_collate` run + // STUB: just remove first anchor from cache to force next anchor import on `try_collate` run self.anchors_cache.pop_first(); None @@ -485,8 +484,8 @@ impl CollatorStdImpl { /// (TODO) TRUE - when internal messages queue has internals fn has_internals(&self) -> Result { - //TODO: make real implementation - //STUB: always return false emulating that all internals were processed in prev block + // TODO: make real implementation + // STUB: always return false emulating that all internals were processed in prev block Ok(false) } @@ -513,7 +512,7 @@ impl CollatorStdImpl { self.collator_descr(), ); - //TODO: fix the work with internals + // TODO: fix the work with internals // check internals let has_internals = self.has_internals()?; diff --git a/collator/src/collator/types.rs b/collator/src/collator/types.rs index 0b449fbca..641bb5b61 100644 --- a/collator/src/collator/types.rs +++ b/collator/src/collator/types.rs @@ -1,97 +1,91 @@ use std::collections::{BTreeMap, HashMap}; use anyhow::{anyhow, bail, Result}; - -use everscale_types::{ - cell::{Cell, HashBytes, UsageTree, UsageTreeMode}, - dict::{AugDict, Dict}, - models::{ - AccountBlock, AccountState, BlockId, BlockIdShort, BlockInfo, BlockRef, 
BlockchainConfig, - CurrencyCollection, ImportFees, InMsg, LibDescr, McStateExtra, OutMsg, OutMsgQueueInfo, - OwnedMessage, PrevBlockRef, ProcessedUpto, ShardAccount, ShardAccounts, ShardDescription, - ShardFees, ShardIdent, SimpleLib, ValueFlow, - }, +use everscale_types::cell::{Cell, HashBytes, UsageTree, UsageTreeMode}; +use everscale_types::dict::{AugDict, Dict}; +use everscale_types::models::{ + AccountBlock, AccountState, BlockId, BlockIdShort, BlockInfo, BlockRef, BlockchainConfig, + CurrencyCollection, ImportFees, InMsg, LibDescr, McStateExtra, OutMsg, OutMsgQueueInfo, + OwnedMessage, PrevBlockRef, ProcessedUpto, ShardAccount, ShardAccounts, ShardDescription, + ShardFees, ShardIdent, SimpleLib, ValueFlow, }; - use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; use crate::mempool::MempoolAnchorId; use crate::msg_queue::types::EnqueuedMessage; -/* -В текущем коллаторе перед коллацией блока импортируется: - - предыдущий мастер стейт - - предыдущие стейты шарды (их может быть 2, если мерж) -ImportedData { - mc_state: Arc, - prev_states: Vec>, - prev_ext_blocks_refs: Vec, - top_shard_blocks_descr: Vec>, -} -top_shard_blocks_descr - список верхних новых шардблоков с последнего мастера, если будем коллировать мастер - берутся из prev_states -prev_ext_blocks_refs - ссылки на предыдущие шард блоки, на момент которых загружаются стейты шарды, - они берутся на основании prev_blocks_ids коллатора, загружаются вместе с prev_states -для мастерчейна выполняется проверка на номер блока (надо в ней разобраться) - -Что входит в стейт шарды -ShardStateStuff { - block_id: BlockId, - shard_state: Option, - out_msg_queue: Option, - out_msg_queue_for: i32, - shard_state_extra: Option, - root: Cell -} - -Затем из этих данных методом prepare_data() готовится: McData, PrevData и CollatorData -pub struct McData { - mc_state_extra: McStateExtra, - prev_key_block_seqno: u32, - prev_key_block: Option, - state: Arc -} -pub struct PrevData { - states: Vec>, // 
предыдущие стейты с отслеживанием изменений через UsageTree - pure_states: Vec>, // исходные предыдущие стейты шарды без отслеживания посещений - state_root: Cell, // рутовая ячейка предыдущего стейта шарды (при мерже там будет объединенная ячейка из двух шард) - // без отслеживания изменений - accounts: ShardAccounts, // предыдущие аккаунты шарды с отслеживанием (получены с учетом сплита/мержа) - gen_utime: u32, - gen_lt: u64, - total_validator_fees: CurrencyCollection, - overload_history: u64, - underload_history: u64, - state_copyleft_rewards: CopyleftRewards, -} -pub struct CollatorData { - usage_tree: UsageTree, // дерево посещенный ячеек стейта для вычисления меркл пруфа -} - -Далее при коллации -При инициализации ExecutionManager стейты, ячейки и аккаунты не используются -При создании tick-tock транзакций McData используется для получения ИД контракта с конфигом и чтения конфига консенсуса -В коллацию интерналов McData не передается, передается PrevData и CollatorData - -PrevData и CollatorData передаются в execute. Там берется аккаунт из PrevData и передается в таску выполнения сообщения. -Там из аккаунта берется рутовая ячейка и передается в метод выполнения сообщения, где она изменяется. 
-Затем подменяется рут в аккаунте, а предыдущее состояние аккаунта сохраняется в prev_account_stuff, -то есть изменяемый аккаунт накапливает историю изменений -При завершении таски она возвращает актуальный обновленный аккаунт - это происходит при финализации блока - -В методе финализации блока -- запоминаем аккаунты из предыдущего стейта в new_accounts -- берем все измененные аккаунты shard_acc и перименяем их изменения в стейт аккаунтов new_accounts -- из измененного аккаунта делаем AccountBlock и сохраняем в accounts, если в нем есть транзакции -- так же кладем измененный аккаунт shard_acc в список changed_accounts -- создаем новый стейт шарды new_state с использованием обновленных аккаунтов new_accounts -- из нового стейта делаем новую рут ячейку new_ss_root -- вычисляем меркл апдейты -- завершаем создание блока с использованием accounts с транзакциями - -Метод коллации блока возвращает новый стейт шарды типа ShardStateUnsplit -из него можно собрать новый ShardStateStuff, который может использоваться для дальнейшей коллации -*/ +// В текущем коллаторе перед коллацией блока импортируется: +// - предыдущий мастер стейт +// - предыдущие стейты шарды (их может быть 2, если мерж) +// ImportedData { +// mc_state: Arc, +// prev_states: Vec>, +// prev_ext_blocks_refs: Vec, +// top_shard_blocks_descr: Vec>, +// } +// top_shard_blocks_descr - список верхних новых шардблоков с последнего мастера, если будем коллировать мастер +// берутся из prev_states +// prev_ext_blocks_refs - ссылки на предыдущие шард блоки, на момент которых загружаются стейты шарды, +// они берутся на основании prev_blocks_ids коллатора, загружаются вместе с prev_states +// для мастерчейна выполняется проверка на номер блока (надо в ней разобраться) +// +// Что входит в стейт шарды +// ShardStateStuff { +// block_id: BlockId, +// shard_state: Option, +// out_msg_queue: Option, +// out_msg_queue_for: i32, +// shard_state_extra: Option, +// root: Cell +// } +// +// Затем из этих данных методом 
prepare_data() готовится: McData, PrevData и CollatorData +// pub struct McData { +// mc_state_extra: McStateExtra, +// prev_key_block_seqno: u32, +// prev_key_block: Option, +// state: Arc +// } +// pub struct PrevData { +// states: Vec>, // предыдущие стейты с отслеживанием изменений через UsageTree +// pure_states: Vec>, // исходные предыдущие стейты шарды без отслеживания посещений +// state_root: Cell, // рутовая ячейка предыдущего стейта шарды (при мерже там будет объединенная ячейка из двух шард) +// без отслеживания изменений +// accounts: ShardAccounts, // предыдущие аккаунты шарды с отслеживанием (получены с учетом сплита/мержа) +// gen_utime: u32, +// gen_lt: u64, +// total_validator_fees: CurrencyCollection, +// overload_history: u64, +// underload_history: u64, +// state_copyleft_rewards: CopyleftRewards, +// } +// pub struct CollatorData { +// usage_tree: UsageTree, // дерево посещенный ячеек стейта для вычисления меркл пруфа +// } +// +// Далее при коллации +// При инициализации ExecutionManager стейты, ячейки и аккаунты не используются +// При создании tick-tock транзакций McData используется для получения ИД контракта с конфигом и чтения конфига консенсуса +// В коллацию интерналов McData не передается, передается PrevData и CollatorData +// +// PrevData и CollatorData передаются в execute. Там берется аккаунт из PrevData и передается в таску выполнения сообщения. +// Там из аккаунта берется рутовая ячейка и передается в метод выполнения сообщения, где она изменяется. 
+// Затем подменяется рут в аккаунте, а предыдущее состояние аккаунта сохраняется в prev_account_stuff, +// то есть изменяемый аккаунт накапливает историю изменений +// При завершении таски она возвращает актуальный обновленный аккаунт - это происходит при финализации блока +// +// В методе финализации блока +// - запоминаем аккаунты из предыдущего стейта в new_accounts +// - берем все измененные аккаунты shard_acc и перименяем их изменения в стейт аккаунтов new_accounts +// - из измененного аккаунта делаем AccountBlock и сохраняем в accounts, если в нем есть транзакции +// - так же кладем измененный аккаунт shard_acc в список changed_accounts +// - создаем новый стейт шарды new_state с использованием обновленных аккаунтов new_accounts +// - из нового стейта делаем новую рут ячейку new_ss_root +// - вычисляем меркл апдейты +// - завершаем создание блока с использованием accounts с транзакциями +// +// Метод коллации блока возвращает новый стейт шарды типа ShardStateUnsplit +// из него можно собрать новый ShardStateStuff, который может использоваться для дальнейшей коллации pub(super) struct WorkingState { pub mc_data: McData, @@ -192,7 +186,7 @@ pub(super) struct PrevData { } impl PrevData { pub fn build(prev_states: Vec) -> Result<(Self, UsageTree)> { - //TODO: make real implementation + // TODO: make real implementation // consider split/merge logic // Collator::prepare_data() // Collator::unpack_last_state() @@ -204,8 +198,8 @@ impl PrevData { let usage_tree = UsageTree::new(UsageTreeMode::OnDataAccess); let observable_root = usage_tree.track(pure_prev_state_root); let tracker = MinRefMcStateTracker::new(); - let observable_states = vec![ShardStateStuff::new( - *pure_prev_states[0].block_id(), + let observable_states = vec![ShardStateStuff::from_root( + pure_prev_states[0].block_id(), observable_root, &tracker, )?]; @@ -245,8 +239,8 @@ impl PrevData { } pub fn update_state(&mut self, new_blocks_ids: Vec) -> Result<()> { - //TODO: make real implementation - 
//STUB: currently have stub signature and implementation + // TODO: make real implementation + // STUB: currently have stub signature and implementation self.blocks_ids = new_blocks_ids; Ok(()) @@ -317,7 +311,7 @@ impl PrevData { #[derive(Debug, Default)] pub(super) struct BlockCollationData { - //block_descr: Arc, + // block_descr: Arc, pub block_id_short: BlockIdShort, pub chain_time: u32, @@ -340,7 +334,7 @@ pub(super) struct BlockCollationData { shards: Option>>, shards_max_end_lt: u64, - //TODO: setup update logic when ShardFees would be implemented + // TODO: setup update logic when ShardFees would be implemented pub shard_fees: ShardFees, pub mint_msg: Option, @@ -424,8 +418,8 @@ impl ShardAccountStuff { }; let new_libs = state_init.map(|v| v.libraries.clone()).unwrap_or_default(); if new_libs.root() != self.orig_libs.root() { - //TODO: implement when scan_diff be added - //STUB: just do nothing, no accounts, no libraries updates in prototype + // TODO: implement when scan_diff be added + // STUB: just do nothing, no accounts, no libraries updates in prototype // new_libs.scan_diff(&self.orig_libs, |key: UInt256, old, new| { // let old = old.unwrap_or_default(); // let new = new.unwrap_or_default(); @@ -448,10 +442,10 @@ pub(super) struct OutMsgQueueInfoStuff { } impl OutMsgQueueInfoStuff { - ///TODO: make real implementation + /// TODO: make real implementation pub fn get_out_msg_queue_info(&self) -> (OutMsgQueueInfo, u32) { let mut min_ref_mc_seqno = u32::MAX; - //STUB: just clone existing + // STUB: just clone existing let msg_queue_info = OutMsgQueueInfo { proc_info: self.proc_info.clone(), }; @@ -480,15 +474,15 @@ impl ShardDescriptionExt for ShardDescription { root_hash: block_id.root_hash, file_hash: block_id.file_hash, before_split: block_info.before_split, - before_merge: false, //TODO: by t-node, needs to review + before_merge: false, // TODO: by t-node, needs to review want_split: block_info.want_split, want_merge: block_info.want_merge, - 
nx_cc_updated: false, //TODO: by t-node, needs to review + nx_cc_updated: false, // TODO: by t-node, needs to review next_catchain_seqno: block_info.gen_catchain_seqno, - next_validator_shard: block_info.shard.prefix(), // eq to `shard_prefix_with_tag` in old node + next_validator_shard: block_info.shard.prefix(), /* eq to `shard_prefix_with_tag` in old node */ min_ref_mc_seqno: block_info.min_ref_mc_seqno, gen_utime: block_info.gen_utime, - split_merge_at: None, //TODO: check if we really should not use it here + split_merge_at: None, // TODO: check if we really should not use it here fees_collected: value_flow.fees_collected.clone(), funds_created: value_flow.created.clone(), copyleft_rewards: Default::default(), diff --git a/collator/src/internal_queue/iterator.rs b/collator/src/internal_queue/iterator.rs index 026ee909a..7851499c4 100644 --- a/collator/src/internal_queue/iterator.rs +++ b/collator/src/internal_queue/iterator.rs @@ -1,14 +1,16 @@ -use crate::internal_queue::error::QueueError; -use crate::internal_queue::snapshot::{IterRange, MessageWithSource, ShardRange, StateSnapshot}; -use crate::internal_queue::types::ext_types_stubs::{EnqueuedMessage, EnqueuedMessageKey, Lt}; -use crate::internal_queue::types::QueueDiff; -use everscale_types::cell::HashBytes; -use everscale_types::models::{BlockIdShort, ShardIdent}; use std::cmp::Reverse; use std::collections::{BinaryHeap, HashMap}; use std::str::FromStr; use std::sync::Arc; +use everscale_types::cell::HashBytes; +use everscale_types::models::{BlockIdShort, ShardIdent}; + +use crate::internal_queue::error::QueueError; +use crate::internal_queue::snapshot::{IterRange, MessageWithSource, ShardRange, StateSnapshot}; +use crate::internal_queue::types::ext_types_stubs::{EnqueuedMessage, EnqueuedMessageKey, Lt}; +use crate::internal_queue::types::QueueDiff; + pub trait QueueIterator { fn next(&mut self) -> Option; fn commit(&mut self); @@ -191,11 +193,12 @@ fn find_common_ancestor(shard1: ShardIdent, shard2: 
ShardIdent) -> Option Box { let shard_id = ShardIdent::new_full(0); diff --git a/collator/src/internal_queue/persistent/persistent_state.rs b/collator/src/internal_queue/persistent/persistent_state.rs index f1da22f68..88558cd4f 100644 --- a/collator/src/internal_queue/persistent/persistent_state.rs +++ b/collator/src/internal_queue/persistent/persistent_state.rs @@ -1,10 +1,13 @@ +use std::sync::Arc; + +use everscale_types::models::BlockIdShort; + use crate::internal_queue::persistent::persistent_state_snapshot::PersistentStateSnapshot; use crate::internal_queue::snapshot::StateSnapshot; use crate::internal_queue::types::ext_types_stubs::EnqueuedMessage; -use everscale_types::models::BlockIdShort; -use std::sync::Arc; -pub trait PersistentState +#[trait_variant::make(PersistentState: Send)] +pub trait LocalPersistentState where S: StateSnapshot, { diff --git a/collator/src/internal_queue/persistent/persistent_state_snapshot.rs b/collator/src/internal_queue/persistent/persistent_state_snapshot.rs index 20bb370e2..5a5d7c637 100644 --- a/collator/src/internal_queue/persistent/persistent_state_snapshot.rs +++ b/collator/src/internal_queue/persistent/persistent_state_snapshot.rs @@ -1,9 +1,11 @@ -use crate::internal_queue::error::QueueError; -use crate::internal_queue::snapshot::{MessageWithSource, ShardRange, StateSnapshot}; -use everscale_types::models::ShardIdent; use std::collections::HashMap; use std::sync::Arc; +use everscale_types::models::ShardIdent; + +use crate::internal_queue::error::QueueError; +use crate::internal_queue::snapshot::{MessageWithSource, ShardRange, StateSnapshot}; + pub struct PersistentStateSnapshot {} impl StateSnapshot for PersistentStateSnapshot { diff --git a/collator/src/internal_queue/queue.rs b/collator/src/internal_queue/queue.rs index caaf46a42..6524348a6 100644 --- a/collator/src/internal_queue/queue.rs +++ b/collator/src/internal_queue/queue.rs @@ -1,14 +1,17 @@ +use std::marker::PhantomData; +use std::sync::Arc; + +use 
everscale_types::models::{BlockIdShort, ShardIdent}; +use tokio::sync::{Mutex, RwLock}; + use crate::internal_queue::error::QueueError; use crate::internal_queue::persistent::persistent_state::PersistentState; use crate::internal_queue::session::session_state::SessionState; use crate::internal_queue::snapshot::StateSnapshot; use crate::internal_queue::types::QueueDiff; -use everscale_types::models::{BlockIdShort, ShardIdent}; -use std::marker::PhantomData; -use std::sync::Arc; -use tokio::sync::{Mutex, RwLock}; -pub trait Queue +#[trait_variant::make(Queue: Send)] +pub trait LocalQueue where SS: StateSnapshot + 'static, PS: StateSnapshot + 'static, @@ -24,6 +27,7 @@ where diff_id: &BlockIdShort, ) -> Result>, QueueError>; } + pub struct QueueImpl where S: SessionState, @@ -39,10 +43,10 @@ where impl Queue for QueueImpl where - S: SessionState, - P: PersistentState, - SS: StateSnapshot + 'static, - PS: StateSnapshot + 'static, + S: SessionState + Send, + P: PersistentState + Send + Sync, + SS: StateSnapshot + 'static + Send + Sync, + PS: StateSnapshot + 'static + Send + Sync, { fn new(base_shard: ShardIdent) -> Self { let session_state = Mutex::new(S::new(base_shard)); @@ -90,12 +94,13 @@ where #[cfg(test)] mod tests { + use everscale_types::models::ShardIdent; + use super::*; use crate::internal_queue::persistent::persistent_state::PersistentStateImpl; use crate::internal_queue::persistent::persistent_state_snapshot::PersistentStateSnapshot; use crate::internal_queue::session::session_state::SessionStateImpl; use crate::internal_queue::session::session_state_snapshot::SessionStateSnapshot; - use everscale_types::models::ShardIdent; #[tokio::test] async fn test_new_queue() { @@ -106,8 +111,8 @@ mod tests { PersistentStateImpl, SessionStateSnapshot, PersistentStateSnapshot, - > = QueueImpl::new(base_shard); + > = as Queue<_, _>>::new(base_shard); - queue.split_shard(&base_shard).await.unwrap(); + Queue::split_shard(&queue, &base_shard).await.unwrap(); } } diff --git 
a/collator/src/internal_queue/session/session_state.rs b/collator/src/internal_queue/session/session_state.rs index e9aa8086a..f68fa2690 100644 --- a/collator/src/internal_queue/session/session_state.rs +++ b/collator/src/internal_queue/session/session_state.rs @@ -1,14 +1,17 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use everscale_types::models::{BlockIdShort, ShardIdent}; +use tokio::sync::RwLock; + use crate::internal_queue::error::QueueError; use crate::internal_queue::session::session_state_snapshot::SessionStateSnapshot; use crate::internal_queue::shard::Shard; use crate::internal_queue::snapshot::StateSnapshot; use crate::internal_queue::types::QueueDiff; -use everscale_types::models::{BlockIdShort, ShardIdent}; -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::RwLock; -pub trait SessionState +#[trait_variant::make(SessionState: Send)] +pub trait LocalSessionState where S: StateSnapshot, { @@ -81,12 +84,16 @@ impl SessionState for SessionStateImpl { #[cfg(test)] mod tests { + use std::sync::Arc; + + use everscale_types::models::BlockIdShort; + use super::*; + use crate::internal_queue::session::session_state; + use crate::internal_queue::session::session_state::*; use crate::internal_queue::types::ext_types_stubs::{ EnqueuedMessage, MessageContent, MessageEnvelope, }; - use everscale_types::models::BlockIdShort; - use std::sync::Arc; fn test_shard_ident() -> ShardIdent { ShardIdent::new_full(0) @@ -108,14 +115,17 @@ mod tests { #[test] fn test_new_session_state() { let base_shard = test_shard_ident(); - let _session_state = SessionStateImpl::new(base_shard); + let _session_state = + >::new(base_shard); } #[tokio::test] async fn test_split_shard() { let base_shard = test_shard_ident(); - let session_state = SessionStateImpl::new(base_shard); - let split_shard_result = session_state.split_shard(&base_shard).await; + let session_state = + >::new(base_shard); + let split_shard_result = + 
SessionState::::split_shard(&session_state, &base_shard).await; assert!( split_shard_result.is_ok(), "Splitting the shard should succeed." @@ -125,7 +135,8 @@ mod tests { #[tokio::test] async fn test_apply_diff() { let base_shard = test_shard_ident(); - let session_state = SessionStateImpl::new(base_shard); + let session_state = + >::new(base_shard); let block_id = BlockIdShort { shard: base_shard, seqno: 0, @@ -135,7 +146,8 @@ mod tests { messages: vec![default_message()], processed_upto: Default::default(), }); - let apply_diff_result = session_state.apply_diff(diff).await; + let apply_diff_result = + SessionState::::apply_diff(&session_state, diff).await; assert_eq!( session_state .shards_flat @@ -168,12 +180,14 @@ mod tests { #[tokio::test] async fn test_remove_diff() { let base_shard = test_shard_ident(); - let session_state = SessionStateImpl::new(base_shard); + let session_state = + >::new(base_shard); let diff_id = BlockIdShort { shard: base_shard, seqno: 0, }; - let remove_diff_result = session_state.remove_diff(&diff_id).await; + let remove_diff_result = + session_state::SessionState::remove_diff(&session_state, &diff_id).await; assert_eq!( session_state .shards_flat @@ -206,7 +220,8 @@ mod tests { #[tokio::test] async fn test_snapshot() { let base_shard = test_shard_ident(); - let session_state = SessionStateImpl::new(base_shard); + let session_state = + >::new(base_shard); let block_id = BlockIdShort { shard: base_shard, seqno: 0, @@ -216,9 +231,10 @@ mod tests { messages: vec![default_message()], processed_upto: Default::default(), }); - let _apply_diff_result = session_state.apply_diff(diff).await; + let _apply_diff_result = + session_state::SessionState::apply_diff(&session_state, diff).await; - let snapshot = session_state.snapshot().await; + let snapshot = session_state::SessionState::snapshot(&session_state).await; assert_eq!(snapshot.flat_shards.len(), 1); assert_eq!( snapshot diff --git 
a/collator/src/internal_queue/session/session_state_snapshot.rs b/collator/src/internal_queue/session/session_state_snapshot.rs index ad4f1bc67..43b59d8b2 100644 --- a/collator/src/internal_queue/session/session_state_snapshot.rs +++ b/collator/src/internal_queue/session/session_state_snapshot.rs @@ -1,12 +1,15 @@ -use crate::internal_queue::error::QueueError; -use crate::internal_queue::shard::Shard; -use crate::internal_queue::snapshot::{MessageWithSource, ShardRange, StateSnapshot}; -use everscale_types::cell::HashBytes; -use everscale_types::models::ShardIdent; use std::collections::HashMap; use std::str::FromStr; use std::sync::Arc; +use everscale_types::cell::HashBytes; +use everscale_types::models::ShardIdent; +use tracing::error; + +use crate::internal_queue::error::QueueError; +use crate::internal_queue::shard::Shard; +use crate::internal_queue::snapshot::{MessageWithSource, ShardRange, StateSnapshot}; + pub struct SessionStateSnapshot { pub flat_shards: HashMap, } @@ -39,7 +42,7 @@ impl StateSnapshot for SessionStateSnapshot { } } Err(e) => { - println!("failed to convert account to hashbytes {e:?}"); + error!("failed to convert account to hashbytes {e:?}"); return false; } } diff --git a/collator/src/internal_queue/shard.rs b/collator/src/internal_queue/shard.rs index 1a23329db..727598515 100644 --- a/collator/src/internal_queue/shard.rs +++ b/collator/src/internal_queue/shard.rs @@ -1,9 +1,11 @@ -use crate::internal_queue::types::ext_types_stubs::{EnqueuedMessage, EnqueuedMessageKey}; -use crate::internal_queue::types::QueueDiff; -use everscale_types::models::{BlockIdShort, ShardIdent}; use std::collections::BTreeMap; use std::sync::Arc; +use everscale_types::models::{BlockIdShort, ShardIdent}; + +use crate::internal_queue::types::ext_types_stubs::{EnqueuedMessage, EnqueuedMessageKey}; +use crate::internal_queue::types::QueueDiff; + #[derive(Clone)] pub struct Shard { pub(crate) outgoing_messages: BTreeMap>, diff --git 
a/collator/src/internal_queue/snapshot.rs b/collator/src/internal_queue/snapshot.rs index 663029a59..353c5c62c 100644 --- a/collator/src/internal_queue/snapshot.rs +++ b/collator/src/internal_queue/snapshot.rs @@ -1,10 +1,12 @@ -use crate::internal_queue::error::QueueError; -use crate::internal_queue::types::ext_types_stubs::{EnqueuedMessage, Lt}; -use everscale_types::models::ShardIdent; use std::cmp::Ordering; use std::collections::HashMap; use std::sync::Arc; +use everscale_types::models::ShardIdent; + +use crate::internal_queue::error::QueueError; +use crate::internal_queue::types::ext_types_stubs::{EnqueuedMessage, Lt}; + #[derive(Eq)] pub struct MessageWithSource { pub shard_id: ShardIdent, @@ -40,7 +42,7 @@ pub struct ShardRange { pub to_lt: Option, } -pub trait StateSnapshot { +pub trait StateSnapshot: Send { fn get_outgoing_messages_by_shard( &self, shards: &mut HashMap, diff --git a/collator/src/internal_queue/types.rs b/collator/src/internal_queue/types.rs index db5bb6100..c25913e3d 100644 --- a/collator/src/internal_queue/types.rs +++ b/collator/src/internal_queue/types.rs @@ -1,8 +1,9 @@ +use std::collections::HashMap; +use std::sync::Arc; + use everscale_types::models::{BlockIdShort, ShardIdent}; use crate::internal_queue::types::ext_types_stubs::{EnqueuedMessage, EnqueuedMessageKey}; -use std::collections::HashMap; -use std::sync::Arc; pub struct QueueDiff { pub id: BlockIdShort, diff --git a/collator/src/lib.rs b/collator/src/lib.rs index f493c3b1f..3474ba309 100644 --- a/collator/src/lib.rs +++ b/collator/src/lib.rs @@ -4,11 +4,11 @@ pub mod manager; pub mod mempool; pub mod msg_queue; pub mod state_node; -pub mod test_utils; pub mod types; pub mod validator; +#[cfg(any(test, feature = "test"))] +pub mod test_utils; + mod tracing_targets; mod utils; - -// pub use validator::test_impl as validator_test_impl; diff --git a/collator/src/manager/mod.rs b/collator/src/manager/mod.rs index f7b3f544b..9dc9f337d 100644 --- a/collator/src/manager/mod.rs +++ 
b/collator/src/manager/mod.rs @@ -1,4 +1,5 @@ -use std::collections::{hash_map::Entry, HashMap, VecDeque}; +use std::collections::hash_map::Entry; +use std::collections::{HashMap, VecDeque}; use std::sync::Arc; use anyhow::{anyhow, bail, Result}; @@ -7,6 +8,11 @@ use everscale_types::models::{BlockId, BlockInfo, ShardIdent, ValueFlow}; use tycho_block_util::block::ValidatorSubsetInfo; use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; +use self::types::{ + BlockCacheKey, BlockCandidateContainer, BlockCandidateToSend, BlocksCache, + McBlockSubgraphToSend, SendSyncStatus, +}; +use self::utils::{build_block_stuff_for_sync, find_us_in_collators_set}; use crate::collator::{Collator, CollatorContext, CollatorEventListener, CollatorFactory}; use crate::mempool::{MempoolAdapter, MempoolAdapterFactory, MempoolAnchor, MempoolEventListener}; use crate::msg_queue::MessageQueueAdapter; @@ -18,16 +24,11 @@ use crate::types::{ use crate::utils::async_queued_dispatcher::{ AsyncQueuedDispatcher, STANDARD_DISPATCHER_QUEUE_BUFFER_SIZE, }; -use crate::utils::{schedule_async_action, shard::calc_split_merge_actions}; +use crate::utils::schedule_async_action; +use crate::utils::shard::calc_split_merge_actions; use crate::validator::{Validator, ValidatorContext, ValidatorEventListener, ValidatorFactory}; use crate::{method_to_async_task_closure, tracing_targets}; -use self::types::{ - BlockCacheKey, BlockCandidateContainer, BlockCandidateToSend, BlocksCache, - McBlockSubgraphToSend, SendSyncStatus, -}; -use self::utils::{build_block_stuff_for_sync, find_us_in_collators_set}; - mod types; mod utils; @@ -112,14 +113,14 @@ where CF: CollatorFactory, V: Validator, { - async fn on_block_accepted(&self, block_id: &BlockId) -> Result<()> { - //TODO: remove accepted block from cache - //STUB: do nothing, currently we remove block from cache when it sent to state node + async fn on_block_accepted(&self, _block_id: &BlockId) -> Result<()> { + // TODO: remove accepted block from 
cache + // STUB: do nothing, currently we remove block from cache when it sent to state node Ok(()) } async fn on_block_accepted_external(&self, state: &ShardStateStuff) -> Result<()> { - //TODO: should store block info from blockchain if it was not already collated + // TODO: should store block info from blockchain if it was not already collated // and validated by ourself. Will use this info for faster validation further: // will consider that just collated block is already validated if it have the // same root hash and file hash @@ -317,7 +318,7 @@ where &mut self, _anchor: Arc, ) -> Result<()> { - //TODO: make real implementation, currently does nothing + // TODO: make real implementation, currently does nothing Ok(()) } @@ -333,7 +334,7 @@ where } // request mc state for this master block - //TODO: should await state and schedule processing in async task + // TODO: should await state and schedule processing in async task let mc_state = self.state_node_adapter.load_state(&mc_block_id).await?; // when state received execute master block processing routines @@ -380,7 +381,7 @@ where return false; } else if !is_equal { - //STUB: skip processing master block from bc even if it is far away from own last collated + // STUB: skip processing master block from bc even if it is far away from own last collated // because the logic for updating collators in this case is not implemented yet tracing::info!( target: tracing_targets::COLLATION_MANAGER, @@ -431,7 +432,7 @@ where mc_block_id: &BlockId, other_mc_block_id_opt: Option<&BlockId>, ) -> (i32, bool) { - //TODO: consider block shard? + // TODO: consider block shard? 
let (seqno_delta, is_equal) = match other_mc_block_id_opt { None => (0, false), Some(other_mc_block_id) => ( @@ -453,8 +454,8 @@ where /// * TRUE - provided `mc_block_id` is before or equal to last processed /// * FALSE - provided `mc_block_id` is ahead of last processed - fn check_if_mc_block_not_ahead_last_processed(&self, mc_block_id: &BlockId) -> bool { - //TODO: consider block shard? + fn _check_if_mc_block_not_ahead_last_processed(&self, mc_block_id: &BlockId) -> bool { + // TODO: consider block shard? let last_processed_mc_block_id_opt = self.last_processed_mc_block_id(); let is_not_ahead = matches!(last_processed_mc_block_id_opt, Some(last_processed_mc_block_id) if mc_block_id.seqno < last_processed_mc_block_id.seqno @@ -512,7 +513,7 @@ where mc_state.block_id().as_short_id() ); - //TODO: Possibly we have already updated collation sessions for this master block, + // TODO: Possibly we have already updated collation sessions for this master block, // because we may have collated it by ourselves before receiving it from the blockchain // or because we have received it from the blockchain before we collated it // @@ -551,7 +552,7 @@ where root_hash: descr.root_hash, file_hash: descr.file_hash, }; - //TODO: consider split and merge + // TODO: consider split and merge new_shards_info.insert(shard_id, vec![top_block]); } @@ -668,7 +669,9 @@ where new_session_seqno, ))?; - //TEST: override with test subset with test keypairs defined on test run + tracing::warn!("SUBSET: {subset:?}"); + + // TEST: override with test subset with test keypairs defined on test run #[cfg(feature = "test")] let subset = if self.config.test_validators_keypairs.is_empty() { subset @@ -676,7 +679,7 @@ where let mut test_subset = vec![]; for (i, keypair) in self.config.test_validators_keypairs.iter().enumerate() { let val_descr = &subset[i]; - test_subset.push(ValidatorDescription { + test_subset.push(everscale_types::models::ValidatorDescription { public_key: 
keypair.public_key.to_bytes().into(), adnl_addr: val_descr.adnl_addr, weight: val_descr.weight, @@ -697,6 +700,7 @@ where let local_pubkey_opt = find_us_in_collators_set(&self.config, &subset); let new_session_info = Arc::new(CollationSessionInfo::new( + shard_id.workchain(), new_session_seqno, ValidatorSubsetInfo { validators: subset, @@ -745,7 +749,7 @@ where } } - //TODO: possibly do not need to store collation sessions if we do not collate in them + // TODO: possibly do not need to store collation sessions if we do not collate in them self.active_collation_sessions .insert(shard_id, new_session_info); } @@ -855,8 +859,7 @@ where candidate_id.as_short_id(), candidate_chain_time, ); - let _handle = self - .validator + self.validator .validate(candidate_id, session_info.seqno()) .await?; @@ -923,7 +926,7 @@ where mpool_adapter: Arc, mc_state: ShardStateStuff, ) -> Result<()> { - //TODO: in current implementation CollationProcessor should not notify mempool + // TODO: in current implementation CollationProcessor should not notify mempool // about one master block more than once, but better to handle repeated request here or at mempool mpool_adapter .enqueue_process_new_mc_block_state(mc_state) @@ -968,15 +971,15 @@ where _shard_id: ShardIdent, chain_time: u64, ) -> Option { - //TODO: make real implementation + // TODO: make real implementation - //TODO: idea is to store for each shard each chain time and related shard block + // TODO: idea is to store for each shard each chain time and related shard block // that expired master block interval. So we will have a list of such chain times. // Then we can collate master block if interval expired in all shards. 
// We should take the max chain time among first that expired the masterblock interval in each shard // then we take shard blocks which chain time less then determined max - //STUB: when we work with only one shard we can check for master block interval easier + // STUB: when we work with only one shard we can check for master block interval easier let elapsed = chain_time - self.last_mc_block_chain_time(); let check = elapsed > self.config.mc_block_min_interval_ms; @@ -1010,9 +1013,9 @@ where _next_mc_block_chain_time: u64, _trigger_shard_block_id: Option, ) -> Result> { - //TODO: make real implementation (see comments in `enqueue_mc_block_collation``) + // TODO: make real implementation (see comments in `enqueue_mc_block_collation``) - //STUB: when we work with only one shard we can just get the last shard block + // STUB: when we work with only one shard we can just get the last shard block // because collator manager will try run master block collation before // before processing any next candidate from the shard collator // because of dispatcher tasks queue @@ -1035,14 +1038,14 @@ where next_mc_block_chain_time: u64, trigger_shard_block_id: Option, ) -> Result<()> { - //TODO: make real implementation + // TODO: make real implementation // get masterchain collator if exists let Some(mc_collator) = self.active_collators.get(&ShardIdent::MASTERCHAIN) else { bail!("Masterchain collator is not started yet!"); }; - //TODO: How to choose top shard blocks for master block collation when they are collated async and in parallel? + // TODO: How to choose top shard blocks for master block collation when they are collated async and in parallel? // We know the last anchor (An) used in shard (ShA) block that causes master block collation, // so we search for block from other shard (ShB) that includes the same anchor (An). 
// Or the first from previouses (An-x) that includes externals for that shard (ShB) @@ -1053,7 +1056,7 @@ where trigger_shard_block_id, )?; - //TODO: We should somehow collect externals for masterchain during the shard blocks collation + // TODO: We should somehow collect externals for masterchain during the shard blocks collation // or pull them directly when collating master self.next_mc_block_chain_time = next_mc_block_chain_time; @@ -1119,7 +1122,7 @@ where // execute required actions if block invalid if !validation_result.is_valid() { - //TODO: implement more graceful reaction on invalid block + // TODO: implement more graceful reaction on invalid block panic!("Block has collected more than 1/3 invalid signatures! Unable to continue collation process!") } @@ -1143,7 +1146,7 @@ where /// Store block in a cache structure that allow to append signatures fn store_candidate(&mut self, candidate: BlockCandidate) -> Result<()> { - //TODO: in future we may store to cache a block received from blockchain before, + // TODO: in future we may store to cache a block received from blockchain before, // then it will exist in cache when we try to store collated candidate // but the `root_hash` may differ, so we have to handle such a case @@ -1237,7 +1240,7 @@ where &self, shard_block_id: &BlockId, ) -> Option<&BlockCandidateContainer> { - //TODO: handle when master block link exist but there is not block itself + // TODO: handle when master block link exist but there is not block itself if let Some(mc_block_key) = self .blocks_cache .shards @@ -1419,7 +1422,7 @@ where .await } }); - //TODO: make proper panic and error processing without waiting for spawned task + // TODO: make proper panic and error processing without waiting for spawned task join_handle.await??; } else { tracing::debug!( @@ -1472,7 +1475,7 @@ where state_node_adapter: Arc, mut blocks_to_send: Vec, ) -> Result<()> { - //TODO: it is better to send each block separately, but it will be more tricky to handle the 
correct cleanup + // TODO: it is better to send each block separately, but it will be more tricky to handle the correct cleanup let _tracing_blocks_to_send_descr = blocks_to_send .iter() @@ -1493,7 +1496,7 @@ where SendSyncStatus::Sent | SendSyncStatus::Synced => sent_blocks.push(block_to_send), _ => { let block_for_sync = build_block_stuff_for_sync(&block_to_send.entry)?; - //TODO: handle different errors types + // TODO: handle different errors types if let Err(err) = state_node_adapter.accept_block(block_for_sync).await { tracing::warn!( target: tracing_targets::COLLATION_MANAGER, @@ -1519,7 +1522,7 @@ where if !should_restore_blocks_in_cache { // commit queue diffs for each block for sent_block in sent_blocks.iter() { - //TODO: handle if diff does not exist + // TODO: handle if diff does not exist if let Err(err) = mq_adapter .commit_diff(&sent_block.entry.candidate.block_id().as_short_id()) .await @@ -1575,7 +1578,7 @@ where blocks_to_send )) .await?; - //TODO: should implement resending for restored blocks + // TODO: should implement resending for restored blocks } Ok(()) diff --git a/collator/src/manager/utils.rs b/collator/src/manager/utils.rs index 4c285c389..1c8f1c4c6 100644 --- a/collator/src/manager/utils.rs +++ b/collator/src/manager/utils.rs @@ -4,9 +4,8 @@ use everscale_types::boc::BocRepr; use everscale_types::models::ValidatorDescription; use tycho_block_util::block::{BlockStuff, BlockStuffAug}; -use crate::types::{BlockStuffForSync, CollationConfig}; - use super::types::BlockCandidateEntry; +use crate::types::{BlockStuffForSync, CollationConfig}; pub fn build_block_stuff_for_sync( block_candidate: &BlockCandidateEntry, diff --git a/collator/src/mempool/mempool_adapter.rs b/collator/src/mempool/mempool_adapter.rs index b0af0042a..1feb65048 100644 --- a/collator/src/mempool/mempool_adapter.rs +++ b/collator/src/mempool/mempool_adapter.rs @@ -1,25 +1,22 @@ -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; +use 
std::collections::BTreeMap; +use std::sync::{Arc, RwLock}; use anyhow::{anyhow, Result}; use async_trait::async_trait; -use everscale_crypto::ed25519::SecretKey; -use everscale_types::boc::Boc; -use everscale_types::cell::HashBytes; -use everscale_types::models::ExtInMsgInfo; -use everscale_types::prelude::{Cell, CellBuilder, Load}; -use futures_util::TryStreamExt; -use parking_lot::RwLock; -use tokio::sync::mpsc::{Sender, UnboundedReceiver}; +use everscale_types::cell::{CellBuilder, CellSliceRange, HashBytes}; +use everscale_types::models::{ExtInMsgInfo, IntAddr, MsgInfo, OwnedMessage, StdAddr}; +use rand::Rng; use tycho_block_util::state::ShardStateStuff; -use tycho_consensus::Point; -use tycho_network::{DhtClient, OverlayService, PeerId}; -use tycho_util::FastDashMap; -use crate::mempool::types::ExternalMessage; -use crate::mempool::{MempoolAnchor, MempoolAnchorId}; +use super::types::{ExternalMessage, MempoolAnchor, MempoolAnchorId}; use crate::tracing_targets; +#[cfg(test)] +#[path = "tests/mempool_adapter_tests.rs"] +pub(super) mod tests; + +// FACTORY + pub trait MempoolAdapterFactory { type Adapter: MempoolAdapter; @@ -74,108 +71,64 @@ pub trait MempoolAdapter: Send + Sync + 'static { async fn clear_anchors_cache(&self, before_anchor_id: MempoolAnchorId) -> Result<()>; } -pub struct MempoolAdapterImpl { - //TODO: replace with rocksdb - anchors: Arc>>>, -} - -impl MempoolAdapterImpl { - pub async fn new( - secret_key: SecretKey, - dht_client: DhtClient, - overlay_service: OverlayService, - peers: Vec, - ) -> Arc { - tracing::info!(target: tracing_targets::MEMPOOL_ADAPTER, "Creating mempool adapter..."); - let anchors = Arc::new(RwLock::new(BTreeMap::new())); +pub struct MempoolAdapterStdImpl { + listener: Arc, - let (sender, receiver) = - tokio::sync::mpsc::unbounded_channel::<(Arc, Vec>)>(); - - let engine = tycho_consensus::Engine::new( - &secret_key, - &dht_client, - &overlay_service, - &peers, - sender, - ) - .await; - - tokio::spawn(async move { 
engine.run() }); - - tracing::info!(target: tracing_targets::MEMPOOL_ADAPTER, "Mempool adapter created"); - - let mempool_adapter = Arc::new(Self { anchors }); - - //start handling mempool anchors - tokio::spawn(parse_points(mempool_adapter.clone(), receiver)); - - mempool_adapter - } - - fn add_anchor(&self, anchor: Arc) { - let mut guard = self.anchors.write(); - guard.insert(anchor.id(), anchor); - } + _stub_anchors_cache: Arc>>>, } -pub async fn parse_points( - adapter: Arc, - mut rx: UnboundedReceiver<(Arc, Vec>)>, -) { - while let Some((anchor, points)) = rx.recv().await { - let mut external_messages = HashMap::::new(); - - for point in points { - 'message: for message in &point.body.payload { - let cell = match Boc::decode(message) { - Ok(cell) => cell, - Err(e) => { - tracing::error!(target: tracing_targets::MEMPOOL_ADAPTER, "Failed to deserialize bytes into cell. Error: {e:?}"); //TODO: should handle errors properly? - continue 'message; - } - }; - - let mut slice = match cell.as_slice() { - Ok(slice) => slice, - Err(e) => { - tracing::error!(target: tracing_targets::MEMPOOL_ADAPTER, "Failed to make slice from cell. Error: {e:?}"); - continue 'message; - } - }; +impl MempoolAdapterStdImpl { + pub fn new(listener: Arc) -> Self { + tracing::info!(target: tracing_targets::MEMPOOL_ADAPTER, "Creating mempool adapter..."); - let ext_in_message = match ExtInMsgInfo::load_from(&mut slice) { - Ok(message) => message, - Err(e) => { - tracing::error!(target: tracing_targets::MEMPOOL_ADAPTER, "Bad cell. Failed to deserialize to ExtInMsgInfo. 
Err: {e:?}"); - continue 'message; + // TODO: make real implementation, currently runs stub task + // that produces the repeating set of anchors + let stub_anchors_cache = Arc::new(RwLock::new(BTreeMap::new())); + + tokio::spawn({ + let listener = listener.clone(); + let stub_anchors_cache = stub_anchors_cache.clone(); + async move { + let mut anchor_id = 0; + loop { + let rnd_round_interval = rand::thread_rng().gen_range(400..600); + tokio::time::sleep(tokio::time::Duration::from_millis(rnd_round_interval * 6)) + .await; + anchor_id += 1; + let anchor = _stub_create_random_anchor_with_stub_externals(anchor_id); + { + let mut anchor_cache_rw = stub_anchors_cache + .write() + .map_err(|e| anyhow!("Poison error on write lock: {:?}", e)) + .unwrap(); + tracing::debug!( + target: tracing_targets::MEMPOOL_ADAPTER, + "Random anchor (id: {}, chain_time: {}, externals: {}) added to cache", + anchor.id(), + anchor.chain_time(), + anchor.externals_count(), + ); + anchor_cache_rw.insert(anchor_id, anchor.clone()); } - }; - - let external_message = ExternalMessage::new(cell.clone(), ext_in_message); - external_messages.insert(*cell.repr_hash(), external_message); + listener.on_new_anchor(anchor).await.unwrap(); + } } - } + }); + tracing::info!(target: tracing_targets::MEMPOOL_ADAPTER, "Stub anchors generator started"); - let messages = external_messages - .into_iter() - .map(|m| Arc::new(m.1)) - .collect::>(); - - let anchor = Arc::new(MempoolAnchor::new( - anchor.body.location.round.0, - anchor.body.time.as_u64(), - messages, - )); + tracing::info!(target: tracing_targets::MEMPOOL_ADAPTER, "Mempool adapter created"); - adapter.add_anchor(anchor); + Self { + listener, + _stub_anchors_cache: stub_anchors_cache, + } } } #[async_trait] -impl MempoolAdapter for MempoolAdapterImpl { +impl MempoolAdapter for MempoolAdapterStdImpl { async fn enqueue_process_new_mc_block_state(&self, mc_state: ShardStateStuff) -> Result<()> { - //TODO: make real implementation, currently does nothing 
+ // TODO: make real implementation, currently does nothing tracing::info!( target: tracing_targets::MEMPOOL_ADAPTER, "STUB: New masterchain state (block_id: {}) processing enqueued to mempool", @@ -187,11 +140,13 @@ impl MempoolAdapter for MempoolAdapterImpl { async fn get_anchor_by_id( &self, anchor_id: MempoolAnchorId, - ) -> anyhow::Result>> { - //TODO: make real implementation, currently only return anchor from local cache + ) -> Result>> { + // TODO: make real implementation, currently only return anchor from local cache let res = { - let anchors_cache_r = self.anchors.read(); - + let anchors_cache_r = self + ._stub_anchors_cache + .read() + .map_err(|e| anyhow!("Poison error on read lock: {:?}", e))?; anchors_cache_r.get(&anchor_id).cloned() }; if res.is_some() { @@ -216,13 +171,16 @@ impl MempoolAdapter for MempoolAdapterImpl { } async fn get_next_anchor(&self, prev_anchor_id: MempoolAnchorId) -> Result> { - //TODO: make real implementation, currently only return anchor from local cache + // TODO: make real implementation, currently only return anchor from local cache let mut stub_first_attempt = true; let mut request_timer = std::time::Instant::now(); loop { { - let anchors_cache_r = self.anchors.read(); + let anchors_cache_r = self + ._stub_anchors_cache + .read() + .map_err(|e| anyhow!("Poison error on read lock: {:?}", e))?; let mut range = anchors_cache_r.range(( std::ops::Bound::Excluded(prev_anchor_id), @@ -266,9 +224,38 @@ impl MempoolAdapter for MempoolAdapterImpl { } async fn clear_anchors_cache(&self, before_anchor_id: MempoolAnchorId) -> Result<()> { - let mut anchors_cache_rw = self.anchors.write(); - + let mut anchors_cache_rw = self + ._stub_anchors_cache + .write() + .map_err(|e| anyhow!("Poison error on write lock: {:?}", e))?; anchors_cache_rw.retain(|anchor_id, _| anchor_id >= &before_anchor_id); Ok(()) } } + +fn _stub_create_random_anchor_with_stub_externals( + anchor_id: MempoolAnchorId, +) -> Arc { + let chain_time = anchor_id as u64 * 
471 * 6 % 1000000000; + let externals_count = chain_time as i32 % 10; + let mut externals = vec![]; + for i in 0..externals_count { + let rand_addr = (0..32).map(|_| rand::random::()).collect::>(); + let rand_addr = HashBytes::from_slice(&rand_addr); + let mut msg_cell_builder = CellBuilder::new(); + msg_cell_builder.store_u32(anchor_id).unwrap(); + msg_cell_builder.store_u64(chain_time).unwrap(); + msg_cell_builder.store_u32(i as u32).unwrap(); + let msg_cell = msg_cell_builder.build().unwrap(); + let msg = ExternalMessage::new( + msg_cell, + ExtInMsgInfo { + dst: IntAddr::Std(StdAddr::new(0, rand_addr)), + ..Default::default() + }, + ); + externals.push(Arc::new(msg)); + } + + Arc::new(MempoolAnchor::new(anchor_id, chain_time, externals)) +} diff --git a/collator/src/mempool/tests/mempool_adapter_tests.rs b/collator/src/mempool/tests/mempool_adapter_tests.rs index 185773af5..982ec0b83 100644 --- a/collator/src/mempool/tests/mempool_adapter_tests.rs +++ b/collator/src/mempool/tests/mempool_adapter_tests.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use anyhow::Result; use async_trait::async_trait; -use crate::{mempool::MempoolAnchor, test_utils::try_init_test_tracing}; - -use super::{MempoolAdapter, MempoolAdapterStubImpl, MempoolEventListener}; +use super::{MempoolAdapter, MempoolEventListener}; +use crate::mempool::{MempoolAdapterStdImpl, MempoolAnchor}; +use crate::test_utils::try_init_test_tracing; struct MempoolEventStubListener; #[async_trait] @@ -25,7 +25,7 @@ impl MempoolEventListener for MempoolEventStubListener { async fn test_stub_anchors_generator() -> Result<()> { try_init_test_tracing(tracing_subscriber::filter::LevelFilter::TRACE); - let adapter = MempoolAdapterStubImpl::new(Arc::new(MempoolEventStubListener {})); + let adapter = MempoolAdapterStdImpl::new(Arc::new(MempoolEventStubListener {})); // try get not existing anchor by id let opt_anchor = adapter.get_anchor_by_id(10).await?; diff --git a/collator/src/msg_queue/cache_persistent.rs 
b/collator/src/msg_queue/cache_persistent.rs index 6d92b8613..1cf56d9bb 100644 --- a/collator/src/msg_queue/cache_persistent.rs +++ b/collator/src/msg_queue/cache_persistent.rs @@ -1,9 +1,11 @@ -use std::{any::Any, fmt::Debug}; +use std::any::Any; +use std::fmt::Debug; use anyhow::{anyhow, Result}; use super::queue::MessageQueueImpl; -use super::{state_persistent::PersistentStateService, storage::StorageService}; +use super::state_persistent::PersistentStateService; +use super::storage::StorageService; #[cfg(test)] #[path = "tests/test_cache_persistent.rs"] @@ -19,11 +21,9 @@ pub trait PersistentCacheConfig: Debug { fn as_any(&self) -> &dyn Any; } -/* -This part of the code contains logic of working with persistent cache. - -We use partials just to separate the codebase on smaller and easier maintainable parts. - */ +// This part of the code contains logic of working with persistent cache. +// +// We use partials just to separate the codebase on smaller and easier maintainable parts. impl MessageQueueImpl where CH: PersistentCacheService, diff --git a/collator/src/msg_queue/iterator.rs b/collator/src/msg_queue/iterator.rs index 594d8fde5..0e61fdaa9 100644 --- a/collator/src/msg_queue/iterator.rs +++ b/collator/src/msg_queue/iterator.rs @@ -1,29 +1,30 @@ -/* -There are 2 options to implement iteration: - 1) implement an iterator directly for the MessageQueue trait - 2) implement separate MessageQueueIterator over items in MessageQueue -(you'll find stubs for both options down) - -The next question is what kind of iterator to implement: (a) a consuming iterator -or (b) a non-consuming iterator. Finally, we should remove processed messages from -the current queue state (commit). But also we must have an option to roll back and -process messages again if the collation attempt fails. Moreover, we don't know if -we need to return the item value from the iterator or if we can return just the refs. 
- -When implementing a non-consuming iterator we can move items to some kind of -"remove" buffer and then clear it on commit. Or we can just remember processed -items and then clear them from the queue state. We should choose the most efficient -implementation regarding the memory and CPU utilization. We also need to consider -that the iterator should have the ability to continue iteration after the commit -with minimal overhead (we shouldn't seek for the last position). - -When implementing the separate MessageQueueIterator it should take ownership of -the source MessageQueue to lazy load more items chunks. After the iteration, we can -convert the iterator into MessageQueue back. - */ +// There are 2 options to implement iteration: +// 1) implement an iterator directly for the MessageQueue trait +// 2) implement separate MessageQueueIterator over items in MessageQueue +// (you'll find stubs for both options down) +// +// The next question is what kind of iterator to implement: (a) a consuming iterator +// or (b) a non-consuming iterator. Finally, we should remove processed messages from +// the current queue state (commit). But also we must have an option to roll back and +// process messages again if the collation attempt fails. Moreover, we don't know if +// we need to return the item value from the iterator or if we can return just the refs. +// +// When implementing a non-consuming iterator we can move items to some kind of +// "remove" buffer and then clear it on commit. Or we can just remember processed +// items and then clear them from the queue state. We should choose the most efficient +// implementation regarding the memory and CPU utilization. We also need to consider +// that the iterator should have the ability to continue iteration after the commit +// with minimal overhead (we shouldn't seek for the last position). 
+// +// When implementing the separate MessageQueueIterator it should take ownership of +// the source MessageQueue to lazy load more items chunks. After the iteration, we can +// convert the iterator into MessageQueue back. +use super::cache_persistent::*; use super::queue::MessageQueue; -use super::{cache_persistent::*, state_persistent::*, storage::*, types::*}; +use super::state_persistent::*; +use super::storage::*; +use super::types::*; // Option (1) - MessageQueue implement iterator by itself diff --git a/collator/src/msg_queue/loader.rs b/collator/src/msg_queue/loader.rs index 277d49e16..ff968c0b9 100644 --- a/collator/src/msg_queue/loader.rs +++ b/collator/src/msg_queue/loader.rs @@ -1,17 +1,14 @@ use anyhow::Result; +use super::cache_persistent::PersistentCacheService; use super::queue::MessageQueueImpl; -use super::{ - cache_persistent::PersistentCacheService, state_persistent::PersistentStateService, - storage::StorageService, -}; +use super::state_persistent::PersistentStateService; +use super::storage::StorageService; -/* -This code part contains the logic of messages loading to the queue state, -including lazy loading, etc. - -We use partials just to separate the codebase on smaller and easier maintainable parts. - */ +// This code part contains the logic of messages loading to the queue state, +// including lazy loading, etc. +// +// We use partials just to separate the codebase on smaller and easier maintainable parts. 
impl MessageQueueImpl where CH: PersistentCacheService, diff --git a/collator/src/msg_queue/mod.rs b/collator/src/msg_queue/mod.rs index e6cba9a85..bd1188a0b 100644 --- a/collator/src/msg_queue/mod.rs +++ b/collator/src/msg_queue/mod.rs @@ -1,9 +1,13 @@ +#![allow(warnings)] +#![allow(clippy::all)] + use std::sync::Arc; use anyhow::Result; use async_trait::async_trait; use everscale_types::models::*; +use self::types::QueueDiff; use crate::internal_queue::iterator::QueueIterator; use crate::internal_queue::persistent::persistent_state::PersistentStateImpl; use crate::internal_queue::persistent::persistent_state_snapshot::PersistentStateSnapshot; @@ -13,8 +17,6 @@ use crate::internal_queue::session::session_state_snapshot::SessionStateSnapshot use crate::tracing_targets; use crate::utils::shard::SplitMergeAction; -use self::types::QueueDiff; - pub mod config; pub mod types; @@ -56,8 +58,8 @@ pub struct MessageQueueAdapterStdImpl { msg_queue: MsgQueueStdImpl, } -impl MessageQueueAdapterStdImpl { - pub fn new() -> Self { +impl Default for MessageQueueAdapterStdImpl { + fn default() -> Self { let base_shard = ShardIdent::new_full(0); Self { msg_queue: MsgQueueStdImpl::new(base_shard), @@ -101,8 +103,8 @@ impl MessageQueueAdapter for MessageQueueAdapterStdImpl { } async fn commit_diff(&self, _diff_id: &BlockIdShort) -> Result> { - //TODO: make real implementation - //STUB: just return oks + // TODO: make real implementation + // STUB: just return oks Ok(Some(())) } } diff --git a/collator/src/msg_queue/queue.rs b/collator/src/msg_queue/queue.rs index 5334417d4..9b71ad46e 100644 --- a/collator/src/msg_queue/queue.rs +++ b/collator/src/msg_queue/queue.rs @@ -1,7 +1,11 @@ use anyhow::Result; +use super::cache_persistent::*; +use super::config::*; +use super::state_persistent::*; +use super::storage::*; use super::types::ext_types_stubs::*; -use super::{cache_persistent::*, config::*, state_persistent::*, storage::*, types::*}; +use super::types::*; #[cfg(test)] #[path = 
"tests/test_queue.rs"] @@ -237,12 +241,10 @@ where } } -/* -This part of the code contains logic that cannot be attributed specifically -to the persistent state and cache, storage, loader, diff management. - -We use partials just to separate the codebase on smaller and easier maintainable parts. - */ +// This part of the code contains logic that cannot be attributed specifically +// to the persistent state and cache, storage, loader, diff management. +// +// We use partials just to separate the codebase on smaller and easier maintainable parts. impl MessageQueueImpl where CH: PersistentCacheService, diff --git a/collator/src/msg_queue/state_persistent.rs b/collator/src/msg_queue/state_persistent.rs index 9448c0834..563a6e284 100644 --- a/collator/src/msg_queue/state_persistent.rs +++ b/collator/src/msg_queue/state_persistent.rs @@ -2,28 +2,27 @@ use std::fmt::Debug; use anyhow::Result; +use super::cache_persistent::PersistentCacheService; use super::queue::MessageQueueImpl; -use super::{cache_persistent::PersistentCacheService, storage::StorageService}; +use super::storage::StorageService; pub trait PersistentStateService: Debug + Sized { fn new() -> Result; } -/* -This part of the code contains logic of working with persistent state. - -We use partials just to separate the codebase on smaller and easier maintainable parts. - */ +// This part of the code contains logic of working with persistent state. +// +// We use partials just to separate the codebase on smaller and easier maintainable parts. 
impl MessageQueueImpl where CH: PersistentCacheService, ST: PersistentStateService, DB: StorageService, { - fn some_internal_method_for_persistent_state(&mut self) -> Result<()> { + fn _some_internal_method_for_persistent_state(&mut self) -> Result<()> { todo!() } - pub(super) fn some_module_internal_method_for_persistent_state(&mut self) -> Result<()> { + pub(super) fn _some_module_internal_method_for_persistent_state(&mut self) -> Result<()> { todo!() } } diff --git a/collator/src/msg_queue/storage.rs b/collator/src/msg_queue/storage.rs index 82b784ccd..c028cb2a6 100644 --- a/collator/src/msg_queue/storage.rs +++ b/collator/src/msg_queue/storage.rs @@ -2,18 +2,17 @@ use std::fmt::Debug; use anyhow::Result; +use super::cache_persistent::PersistentCacheService; use super::queue::MessageQueueImpl; -use super::{cache_persistent::PersistentCacheService, state_persistent::PersistentStateService}; +use super::state_persistent::PersistentStateService; pub trait StorageService: Debug + Sized { fn new() -> Result; } -/* -This part of the code contains logic of working with storage. - -We use partials just to separate the codebase on smaller and easier maintainable parts. - */ +// This part of the code contains logic of working with storage. +// +// We use partials just to separate the codebase on smaller and easier maintainable parts. 
impl MessageQueueImpl where CH: PersistentCacheService, diff --git a/collator/src/msg_queue/tests/test_config.rs b/collator/src/msg_queue/tests/test_config.rs index f1944ae39..048a0fc2c 100644 --- a/collator/src/msg_queue/tests/test_config.rs +++ b/collator/src/msg_queue/tests/test_config.rs @@ -4,12 +4,9 @@ pub fn init_test_config() -> MessageQueueConfig { use super::super::cache_persistent::PersistentCacheConfigStubImpl; use super::MessageQueueBaseConfig; - MessageQueueConfig::new( - MessageQueueBaseConfig {}, - PersistentCacheConfigStubImpl { - cfg_value1: "test_value_1".to_owned(), - }, - ) + MessageQueueConfig::new(MessageQueueBaseConfig {}, PersistentCacheConfigStubImpl { + cfg_value1: "test_value_1".to_owned(), + }) } #[test] diff --git a/collator/src/msg_queue/tests/test_queue.rs b/collator/src/msg_queue/tests/test_queue.rs index f4b027fb8..6bd68c43b 100644 --- a/collator/src/msg_queue/tests/test_queue.rs +++ b/collator/src/msg_queue/tests/test_queue.rs @@ -1,4 +1,5 @@ -use super::{super::config::tests::init_test_config, MessageQueue, MessageQueueImplOnStubs}; +use super::super::config::tests::init_test_config; +use super::{MessageQueue, MessageQueueImplOnStubs}; #[test] fn test_queue_init() { diff --git a/collator/src/state_node.rs b/collator/src/state_node.rs index e2bfd6a42..7e347fbcf 100644 --- a/collator/src/state_node.rs +++ b/collator/src/state_node.rs @@ -1,14 +1,12 @@ use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; -use anyhow::{anyhow, Context, Result}; +use anyhow::{Context, Result}; use async_trait::async_trait; - -use everscale_types::models::{BlockId, ShardIdent}; +use everscale_types::models::{BlockId, BlockIdShort, ShardIdent}; use tokio::sync::{broadcast, Mutex}; - -use tycho_block_util::block::BlockStuffAug; -use tycho_block_util::{block::BlockStuff, state::ShardStateStuff}; +use tycho_block_util::block::{BlockStuff, BlockStuffAug}; +use tycho_block_util::state::ShardStateStuff; use tycho_storage::{BlockHandle, Storage}; use 
crate::tracing_targets; @@ -58,6 +56,8 @@ pub trait StateNodeAdapter: Send + Sync + 'static { async fn accept_block(&self, block: BlockStuffForSync) -> Result<()>; /// Waits for the specified block to be received and returns it async fn wait_for_block(&self, block_id: &BlockId) -> Option>; + /// Waits for the specified block by prev_id to be received and returns it + async fn wait_for_block_next(&self, block_id: &BlockId) -> Option>; /// Handle state after block was applied async fn handle_state(&self, state: &ShardStateStuff) -> Result<()>; } @@ -125,45 +125,96 @@ impl StateNodeAdapter for StateNodeAdapterStdImpl { async fn accept_block(&self, block: BlockStuffForSync) -> Result<()> { tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted: {:?}", block.block_id); let mut blocks = self.blocks.lock().await; - let block_id = match block.block_id.shard.is_masterchain() { - true => { - let prev_block_id = *block - .prev_blocks_ids - .last() - .ok_or(anyhow!("no prev block"))?; - - self.blocks_mapping - .lock() - .await - .insert(block.block_id, prev_block_id); - - blocks - .entry(block.block_id.shard) - .or_insert_with(BTreeMap::new) - .insert(prev_block_id.seqno, block); - - prev_block_id - } - false => { - let block_id = block.block_id; - blocks - .entry(block.block_id.shard) - .or_insert_with(BTreeMap::new) - .insert(block.block_id.seqno, block); - block_id - } - }; + let block_id = block.block_id; + blocks + .entry(block.block_id.shard) + .or_insert_with(BTreeMap::new) + .insert(block.block_id.seqno, block); + let broadcast_result = self.broadcaster.send(block_id).ok(); tracing::trace!(target: tracing_targets::STATE_NODE_ADAPTER, "Block broadcast_result: {:?}", broadcast_result); Ok(()) } async fn wait_for_block(&self, block_id: &BlockId) -> Option> { + let block_id = BlockIdToWait::Full(block_id); + self.wait_for_block_ext(block_id).await + } + + async fn wait_for_block_next(&self, prev_block_id: &BlockId) -> Option> { + let 
next_block_id_short = + BlockIdShort::from((prev_block_id.shard, prev_block_id.seqno + 1)); + let block_id = BlockIdToWait::Short(&next_block_id_short); + self.wait_for_block_ext(block_id).await + } + + async fn handle_state(&self, state: &ShardStateStuff) -> Result<()> { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Handle block: {:?}", state.block_id()); + let block_id = *state.block_id(); + + let mut to_split = Vec::new(); + + let shard = block_id.shard; + let seqno = block_id.seqno; + + { + let blocks_guard = self.blocks.lock().await; + if let Some(shard_blocks) = blocks_guard.get(&shard) { + let block = shard_blocks.get(&seqno); + + if shard.is_masterchain() { + let prev_mc_block = shard_blocks + .range(..=seqno) + .rev() + .find_map(|(&key, value)| if key < seqno { Some(value) } else { None }); + + if let Some(prev_mc_block) = prev_mc_block { + for id in &prev_mc_block.top_shard_blocks_ids { + to_split.push((id.shard, id.seqno + 1)); + } + to_split.push((shard, prev_mc_block.block_id.seqno + 1)); + } + } + + match block { + None => { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block handled external: {:?}", block_id); + self.listener.on_block_accepted_external(state).await?; + } + Some(block) => { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block handled: {:?}", block_id); + self.listener.on_block_accepted(&block.block_id).await?; + } + } + } else { + tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block handled external. 
Shard ID not found in blocks buffer: {:?}", block_id); + self.listener.on_block_accepted_external(state).await?; + } + } + + { + let mut blocks_guard = self.blocks.lock().await; + for (shard, seqno) in &to_split { + if let Some(shard_blocks) = blocks_guard.get_mut(shard) { + *shard_blocks = shard_blocks.split_off(seqno); + } + } + } + + Ok(()) + } +} + +impl StateNodeAdapterStdImpl { + async fn wait_for_block_ext( + &self, + block_id: BlockIdToWait<'_>, + ) -> Option> { let mut receiver = self.broadcaster.subscribe(); loop { let blocks = self.blocks.lock().await; - if let Some(shard_blocks) = blocks.get(&block_id.shard) { - if let Some(block) = shard_blocks.get(&block_id.seqno) { + if let Some(shard_blocks) = blocks.get(&block_id.shard()) { + if let Some(block) = shard_blocks.get(&block_id.seqno()) { return Some(Ok(block.block_stuff_aug.clone())); } } @@ -171,7 +222,7 @@ impl StateNodeAdapter for StateNodeAdapterStdImpl { loop { match receiver.recv().await { - Ok(received_block_id) if received_block_id == *block_id => { + Ok(received_block_id) if block_id == received_block_id => { break; } Ok(_) => continue, @@ -187,69 +238,34 @@ impl StateNodeAdapter for StateNodeAdapterStdImpl { } } } +} - async fn handle_state(&self, state: &ShardStateStuff) -> Result<()> { - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Handle block: {:?}", state.block_id()); - let block_id = *state.block_id(); - - let mut to_split = Vec::new(); - let mut to_remove = Vec::new(); - - let mut block_mapping_guard = self.blocks_mapping.lock().await; - let block_id = match block_mapping_guard.remove(&block_id) { - None => block_id, - Some(some) => some.clone(), - }; - - let shard = block_id.shard; - let seqno = block_id.seqno; - - let mut blocks_guard = self.blocks.lock().await; - - let result_future = if let Some(shard_blocks) = blocks_guard.get(&shard) { - if let Some(block_data) = shard_blocks.get(&seqno) { - if shard.is_masterchain() { - let prev_seqno = block_data - 
.prev_blocks_ids - .last() - .ok_or(anyhow!("no prev block"))? - .seqno; - for id in &block_data.top_shard_blocks_ids { - to_split.push((id.shard, id.seqno)); - to_remove.push((id.shard, id.seqno)); - } - to_split.push((shard, prev_seqno)); - to_remove.push((shard, prev_seqno)); - } else { - to_remove.push((shard, seqno)); - } - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted: {:?}", block_id); - self.listener.on_block_accepted(&block_id) - } else { - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted external: {:?}", block_id); - self.listener.on_block_accepted_external(state) - } - } else { - tracing::info!(target: tracing_targets::STATE_NODE_ADAPTER, "Block accepted external: {:?}", block_id); - self.listener.on_block_accepted_external(state) - }; +enum BlockIdToWait<'a> { + Short(&'a BlockIdShort), + Full(&'a BlockId), +} - for (shard, seqno) in &to_split { - if let Some(shard_blocks) = blocks_guard.get_mut(shard) { - shard_blocks.split_off(seqno); - } +impl BlockIdToWait<'_> { + fn shard(&self) -> ShardIdent { + match self { + Self::Short(id) => id.shard, + Self::Full(id) => id.shard, } + } - for (shard, seqno) in &to_remove { - if let Some(shard_blocks) = blocks_guard.get_mut(shard) { - shard_blocks.remove(seqno); - } + fn seqno(&self) -> u32 { + match self { + Self::Short(id) => id.seqno, + Self::Full(id) => id.seqno, } + } +} - drop(blocks_guard); - - result_future.await?; - - Ok(()) +impl PartialEq for BlockIdToWait<'_> { + fn eq(&self, other: &BlockId) -> bool { + match *self { + BlockIdToWait::Short(short) => &other.as_short_id() == short, + BlockIdToWait::Full(full) => full == other, + } } } diff --git a/collator/src/test_utils.rs b/collator/src/test_utils.rs index 63c4ca35b..b707dffb8 100644 --- a/collator/src/test_utils.rs +++ b/collator/src/test_utils.rs @@ -5,13 +5,10 @@ use everscale_crypto::ed25519; use everscale_types::boc::Boc; use everscale_types::cell::HashBytes; use 
everscale_types::models::{BlockId, ShardStateUnsplit}; -use futures_util::future::BoxFuture; -use futures_util::FutureExt; use sha2::Digest; use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; - use tycho_network::{DhtConfig, DhtService, Network, OverlayService, PeerId, Router}; -use tycho_storage::{BlockMetaData, Db, DbOptions, Storage}; +use tycho_storage::{BlockMetaData, Storage}; use crate::types::NodeNetwork; @@ -68,17 +65,15 @@ pub fn create_node_network() -> NodeNetwork { } pub async fn prepare_test_storage() -> anyhow::Result { - let temp = tempfile::tempdir().unwrap(); - let db = Db::open(temp.path().to_path_buf(), DbOptions::default()).unwrap(); - let storage = Storage::new(db, temp.path().join("file"), 1_000_000).unwrap(); + let (storage, _tmp_dir) = Storage::new_temp()?; let tracker = MinRefMcStateTracker::default(); // master state - let master_bytes = include_bytes!("../src/state_node/tests/data/test_state_2_master.boc"); + let master_bytes = include_bytes!("../../test/test_state_2_master.boc"); let master_file_hash: HashBytes = sha2::Sha256::digest(master_bytes).into(); let master_root = Boc::decode(master_bytes)?; let master_root_hash = *master_root.repr_hash(); - let master_state = master_root.parse::()?; + let master_state = master_root.parse::>()?; let mc_state_extra = master_state.load_custom()?; let mc_state_extra = mc_state_extra.unwrap(); @@ -96,16 +91,16 @@ pub async fn prepare_test_storage() -> anyhow::Result { file_hash: master_file_hash, }; let master_state_stuff = - ShardStateStuff::from_state_and_root(master_id, master_state, master_root, &tracker)?; + ShardStateStuff::from_state_and_root(&master_id, master_state, master_root, &tracker)?; - let (handle, _) = storage.block_handle_storage().create_or_load_handle( - &master_id, - BlockMetaData { - is_key_block: mc_state_extra.after_key_block, - gen_utime: master_state_stuff.state().gen_utime, - mc_ref_seqno: 0, - }, - ); + let (handle, _) = + storage + 
.block_handle_storage() + .create_or_load_handle(&master_id, BlockMetaData { + is_key_block: mc_state_extra.after_key_block, + gen_utime: master_state_stuff.state().gen_utime, + mc_ref_seqno: 0, + }); storage .shard_state_storage() @@ -113,9 +108,9 @@ pub async fn prepare_test_storage() -> anyhow::Result { .await?; // shard state - let shard_bytes = include_bytes!("../src/state_node/tests/data/test_state_2_0:80.boc"); + let shard_bytes = include_bytes!("../../test/test_state_2_0:80.boc"); let shard_root = Boc::decode(shard_bytes)?; - let shard_state = shard_root.parse::()?; + let shard_state = shard_root.parse::>()?; let shard_id = BlockId { shard: shard_info.0, seqno: shard_info.1.seqno, @@ -123,16 +118,16 @@ pub async fn prepare_test_storage() -> anyhow::Result { file_hash: shard_info.1.file_hash, }; let shard_state_stuff = - ShardStateStuff::from_state_and_root(shard_id, shard_state, shard_root, &tracker)?; - - let (handle, _) = storage.block_handle_storage().create_or_load_handle( - &shard_id, - BlockMetaData { - is_key_block: false, - gen_utime: shard_state_stuff.state().gen_utime, - mc_ref_seqno: 0, - }, - ); + ShardStateStuff::from_state_and_root(&shard_id, shard_state, shard_root, &tracker)?; + + let (handle, _) = + storage + .block_handle_storage() + .create_or_load_handle(&shard_id, BlockMetaData { + is_key_block: false, + gen_utime: shard_state_stuff.state().gen_utime, + mc_ref_seqno: 0, + }); storage .shard_state_storage() diff --git a/collator/src/types.rs b/collator/src/types.rs index 9e7a5ea5d..250567e10 100644 --- a/collator/src/types.rs +++ b/collator/src/types.rs @@ -1,13 +1,11 @@ use std::sync::Arc; use anyhow::Result; - use everscale_crypto::ed25519::KeyPair; use everscale_types::cell::{CellBuilder, HashBytes}; use everscale_types::models::{ Block, BlockId, OwnedMessage, ShardIdent, ShardStateUnsplit, Signature, }; - use tycho_block_util::block::{BlockStuffAug, ValidatorSubsetInfo}; use tycho_block_util::state::{MinRefMcStateTracker, 
ShardStateStuff}; use tycho_network::{DhtClient, OverlayService, PeerResolver}; @@ -89,27 +87,6 @@ impl BlockCandidate { } } -pub trait ShardStateStuffExt { - fn from_state( - block_id: BlockId, - shard_state: ShardStateUnsplit, - tracker: &MinRefMcStateTracker, - ) -> Result - where - Self: Sized; -} - -impl ShardStateStuffExt for ShardStateStuff { - fn from_state( - block_id: BlockId, - shard_state: ShardStateUnsplit, - tracker: &MinRefMcStateTracker, - ) -> Result { - let root = CellBuilder::build_from(&shard_state)?; - ShardStateStuff::from_state_and_root(block_id, shard_state, root, tracker) - } -} - #[derive(Clone)] pub enum OnValidatedBlockEvent { ValidByState, @@ -163,8 +140,8 @@ impl ValidatedBlock { } pub struct BlockStuffForSync { - //STUB: will not parse Block because candidate does not contain real block - //TODO: remove `block_id` and make `block_stuff: BlockStuff` when collator will generate real blocks + // STUB: will not parse Block because candidate does not contain real block + // TODO: remove `block_id` and make `block_stuff: BlockStuff` when collator will generate real blocks pub block_id: BlockId, pub block_stuff_aug: BlockStuffAug, pub signatures: FastHashMap, @@ -178,22 +155,28 @@ pub(crate) type CollationSessionId = (ShardIdent, u32); #[derive(Clone)] pub struct CollationSessionInfo { /// Sequence number of the collation session + workchain: i32, seqno: u32, collators: ValidatorSubsetInfo, current_collator_keypair: Option>, } impl CollationSessionInfo { pub fn new( + workchain: i32, seqno: u32, collators: ValidatorSubsetInfo, current_collator_keypair: Option>, ) -> Self { Self { + workchain, seqno, collators, current_collator_keypair, } } + pub fn workchain(&self) -> i32 { + self.workchain + } pub fn seqno(&self) -> u32 { self.seqno } diff --git a/collator/src/utils/async_queued_dispatcher.rs b/collator/src/utils/async_queued_dispatcher.rs index eb9453319..73b5afb0f 100644 --- a/collator/src/utils/async_queued_dispatcher.rs +++ 
b/collator/src/utils/async_queued_dispatcher.rs @@ -1,4 +1,6 @@ -use std::{future::Future, pin::Pin, usize}; +use std::future::Future; +use std::pin::Pin; +use std::usize; use anyhow::{anyhow, Result}; use tokio::sync::{mpsc, oneshot}; diff --git a/collator/src/utils/shard.rs b/collator/src/utils/shard.rs index 4b47ca1f6..0d61c4d38 100644 --- a/collator/src/utils/shard.rs +++ b/collator/src/utils/shard.rs @@ -1,7 +1,6 @@ use std::collections::VecDeque; use anyhow::{anyhow, Result}; - use everscale_types::models::ShardIdent; #[derive(Debug, Clone, PartialEq)] @@ -25,7 +24,7 @@ pub fn calc_split_merge_actions( from_current_shards: Vec<&ShardIdent>, to_new_shards: Vec<&ShardIdent>, ) -> Result> { - //TODO: not the best code, possibly needs refactoring + // TODO: not the best code, possibly needs refactoring let full_shard_id = ShardIdent::new_full(0); let mut planned_actions = VecDeque::new(); if from_current_shards.is_empty() { diff --git a/collator/src/utils/task_descr.rs b/collator/src/utils/task_descr.rs index f6fd0cc9d..2722cda93 100644 --- a/collator/src/utils/task_descr.rs +++ b/collator/src/utils/task_descr.rs @@ -5,14 +5,14 @@ use tokio::sync::oneshot; pub struct TaskDesc { id: u64, descr: String, - closure: Box, //closure for execution - creation_time: std::time::SystemTime, //time of task creation + closure: Box, // closure for execution + creation_time: std::time::SystemTime, // time of task creation responder: Option>, } impl TaskDesc { pub fn create(descr: &str, closure: Box) -> Self { - //TODO: better to use global atomic counter + // TODO: better to use global atomic counter let id = std::time::SystemTime::now() .duration_since(std::time::SystemTime::UNIX_EPOCH) .unwrap() @@ -94,7 +94,7 @@ where } } pub async fn try_recv(self) -> anyhow::Result { - //TODO: awaiting error and error in result are merged here, need to fix + // TODO: awaiting error and error in result are merged here, need to fix self.inner_receiver.await? 
} @@ -121,7 +121,7 @@ where Ok(res) => { if let Err(e) = process_callback(res).await { tracing::error!("Error processing task response: {e:?}"); - //TODO: may be unwind panic? + // TODO: may be unwind panic? } } Err(err) => tracing::error!("Error in task result or on receiving: {err:?}"), @@ -149,7 +149,7 @@ where } } pub async fn try_recv(self) -> anyhow::Result { - //TODO: awaiting error and error in result are merged here, need to fix + // TODO: awaiting error and error in result are merged here, need to fix self.inner_receiver.await?.and_then(|res| res.try_into()) } @@ -176,7 +176,7 @@ where Ok(res) => { if let Err(e) = process_callback(res).await { tracing::error!("Error processing task response: {e:?}"); - //TODO: may be unwind panic? + // TODO: may be unwind panic? } } Err(err) => tracing::error!("Error in task result or on receiving: {err:?}"), diff --git a/collator/src/validator/network/handlers.rs b/collator/src/validator/network/handlers.rs index d352e5a72..36d96321f 100644 --- a/collator/src/validator/network/handlers.rs +++ b/collator/src/validator/network/handlers.rs @@ -1,9 +1,13 @@ +use std::sync::Arc; + +use everscale_types::models::BlockIdShort; +use tracing::trace; +use tycho_network::Response; + +use crate::tracing_targets; use crate::validator::network::dto::SignaturesQuery; use crate::validator::state::SessionInfo; use crate::validator::{process_candidate_signature_response, ValidatorEventListener}; -use everscale_types::models::BlockIdShort; -use std::sync::Arc; -use tycho_network::Response; pub async fn handle_signatures_query( session: Option>, @@ -21,6 +25,7 @@ where signatures: vec![], }, Some(session) => { + trace!(target: tracing_targets::VALIDATOR, "Processing signatures query for block {:?} with {} signatures", block_id_short, signatures.len()); process_candidate_signature_response( session.clone(), block_id_short, @@ -29,12 +34,14 @@ where ) .await?; + trace!(target: tracing_targets::VALIDATOR, "Getting valid signatures for block 
{:?}", block_id_short); let signatures = session .get_valid_signatures(&block_id_short) .await .into_iter() .map(|(k, v)| (k.0, v.0)) .collect::>(); + SignaturesQuery { session_seqno, block_id_short, diff --git a/collator/src/validator/network/network_service.rs b/collator/src/validator/network/network_service.rs index b912d2e95..d7db9f238 100644 --- a/collator/src/validator/network/network_service.rs +++ b/collator/src/validator/network/network_service.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use futures_util::future::{self, FutureExt, Ready}; use tracing::error; - use tycho_network::__internal::tl_proto::{TlRead, TlWrite}; use tycho_network::{Response, Service, ServiceRequest}; @@ -52,7 +51,9 @@ impl Service for NetworkService { signatures, } = query; { - let session = state.get_session(session_seqno).await; + let session = state + .get_session(block_id_short.shard.workchain(), session_seqno) + .await; match handle_signatures_query( session, session_seqno, diff --git a/collator/src/validator/state.rs b/collator/src/validator/state.rs index 272a0c36a..a2b3670bf 100644 --- a/collator/src/validator/state.rs +++ b/collator/src/validator/state.rs @@ -6,14 +6,15 @@ use everscale_types::cell::HashBytes; use everscale_types::models::{BlockId, BlockIdShort, Signature}; use tokio::sync::{Mutex, RwLock}; use tracing::{debug, trace}; +use tycho_network::PrivateOverlay; +use tycho_util::{FastDashMap, FastHashMap}; +use crate::tracing_targets; use crate::types::{BlockSignatures, OnValidatedBlockEvent}; use crate::validator::types::{ BlockValidationCandidate, ValidationResult, ValidationSessionInfo, ValidatorInfo, }; use crate::validator::ValidatorEventListener; -use tycho_network::PrivateOverlay; -use tycho_util::{FastDashMap, FastHashMap}; struct SignatureMaps { valid_signatures: FastHashMap, @@ -34,12 +35,14 @@ pub trait ValidationState: Send + Sync { /// Retrieves an immutable reference to a session by its ID. 
fn get_session( &self, + workchain: i32, session_id: u32, ) -> impl std::future::Future>> + Send; } /// Holds information about a validation session. pub struct SessionInfo { + workchain: i32, seqno: u32, max_weight: u64, blocks_signatures: FastDashMap, @@ -50,6 +53,7 @@ pub struct SessionInfo { impl SessionInfo { pub fn new( + workchain: i32, seqno: u32, validation_session_info: Arc, private_overlay: PrivateOverlay, @@ -60,6 +64,7 @@ impl SessionInfo { .map(|vi| vi.weight) .sum(); Arc::new(Self { + workchain, seqno, max_weight, blocks_signatures: Default::default(), @@ -69,6 +74,10 @@ impl SessionInfo { }) } + pub fn workchain(&self) -> i32 { + self.workchain + } + pub fn get_seqno(&self) -> u32 { self.seqno } @@ -110,14 +119,11 @@ impl SessionInfo { self.blocks_signatures .entry(block_header) .or_insert_with(|| { - ( - block, - SignatureMaps { - valid_signatures: FastHashMap::default(), - invalid_signatures: FastHashMap::default(), - event_dispatched: Mutex::new(false), - }, - ) + (block, SignatureMaps { + valid_signatures: FastHashMap::default(), + invalid_signatures: FastHashMap::default(), + event_dispatched: Mutex::new(false), + }) }); Ok(()) } @@ -133,7 +139,6 @@ impl SessionInfo { &self, block_id_short: &BlockIdShort, ) -> anyhow::Result { - trace!("Getting validation status for block {:?}", block_id_short); // Bind the lock result to a variable to extend its lifetime // let block_signatures_guard = self.blocks_signatures; let signatures = self.blocks_signatures.get(block_id_short); @@ -207,44 +212,14 @@ impl SessionInfo { } } - /// Adds a signature for a block. 
- pub async fn add_signature( - &self, - block_id: &BlockId, - validator_id: HashBytes, - signature: Signature, - is_valid: bool, - ) { - let block_header = block_id.as_short_id(); - // let mut write_guard = self.blocks_signatures.write().await; // Hold onto the lock - let mut entry = self - .blocks_signatures - .entry(block_header) // Use the guard to access the map - .or_insert_with(|| { - ( - *block_id, - SignatureMaps { - valid_signatures: FastHashMap::default(), - invalid_signatures: FastHashMap::default(), - event_dispatched: Mutex::new(false), - }, - ) - }); - - if is_valid { - entry.1.valid_signatures.insert(validator_id, signature); - } else { - entry.1.invalid_signatures.insert(validator_id, signature); - } - } - pub async fn process_signatures_and_update_status( &self, block_id_short: BlockIdShort, signatures: Vec<([u8; 32], [u8; 64])>, listeners: &[Arc], - ) -> anyhow::Result<()> { - trace!( + ) -> anyhow::Result { + debug!( + target: tracing_targets::VALIDATOR, "Processing signatures for block in state {:?}", block_id_short ); @@ -252,14 +227,11 @@ impl SessionInfo { .blocks_signatures .entry(block_id_short) .or_insert_with(|| { - ( - BlockId::default(), // Default should be replaced with actual block retrieval logic if necessary - SignatureMaps { - valid_signatures: FastHashMap::default(), - invalid_signatures: FastHashMap::default(), - event_dispatched: Mutex::new(false), - }, - ) + (BlockId::default(), SignatureMaps { + valid_signatures: FastHashMap::default(), + invalid_signatures: FastHashMap::default(), + event_dispatched: Mutex::new(false), + }) }); let event_guard = entry.1.event_dispatched.lock().await; @@ -268,7 +240,7 @@ impl SessionInfo { "Validation event already dispatched for block {:?}", block_id_short ); - return Ok(()); + return Ok(true); } // Drop the guard to allow mutable access below @@ -280,14 +252,22 @@ impl SessionInfo { let signature = Signature(sig_bytes); let block_validation_candidate = 
BlockValidationCandidate::from(entry.0); - let is_valid = self + let validator = self .get_validation_session_info() .validators .get(&validator_id) .context("Validator not found")? + .clone(); + + let is_valid = validator .public_key .verify(block_validation_candidate.as_bytes(), &signature.0); + trace!( + target: tracing_targets::VALIDATOR, + "Adding signature for block {:?} from validator {:?} valid {}", + block_id_short, validator_id, is_valid); + if is_valid { entry.1.valid_signatures.insert(validator_id, signature); } else { @@ -296,6 +276,7 @@ impl SessionInfo { } let validation_status = self.validation_status(&entry.1).await; + // Check if the validation status qualifies for dispatching the event match validation_status { ValidationResult::Valid => { @@ -315,10 +296,12 @@ impl SessionInfo { Self::notify_listeners(entry.0, event, listeners); } - ValidationResult::Insufficient(_, _) => {} + ValidationResult::Insufficient(total_valid_weight, valid_weight_threshold) => { + debug!(total_valid_weight, valid_weight_threshold); + } } - Ok(()) + Ok(validation_status.is_finished()) } async fn validation_status(&self, signature_maps: &SignatureMaps) -> ValidationResult { @@ -361,6 +344,7 @@ impl SessionInfo { event: OnValidatedBlockEvent, listeners: &[Arc], ) { + trace!(target: tracing_targets::VALIDATOR, "Notifying listeners about block validation"); for listener in listeners { let cloned_event = event.clone(); let listener = listener.clone(); @@ -376,7 +360,7 @@ impl SessionInfo { /// Standard implementation of `ValidationState`. 
pub struct ValidationStateStdImpl { - sessions: RwLock>>, + sessions: RwLock>>, } impl ValidationState for ValidationStateStdImpl { @@ -387,18 +371,27 @@ impl ValidationState for ValidationStateStdImpl { } async fn try_add_session(&self, session: Arc) -> anyhow::Result<()> { + let workchain = session.workchain; let seqno = session.seqno; - let session = self.sessions.write().await.insert(seqno, session); + let session = self + .sessions + .write() + .await + .insert((workchain, seqno), session); if session.is_some() { - bail!("Session already exists with seqno: {seqno}"); + bail!("Session already exists with seqno: ({workchain}, {seqno})"); } Ok(()) } - async fn get_session(&self, session_id: u32) -> Option> { - self.sessions.read().await.get(&session_id).cloned() + async fn get_session(&self, workchain: i32, session_id: u32) -> Option> { + self.sessions + .read() + .await + .get(&(workchain, session_id)) + .cloned() } } diff --git a/collator/src/validator/types.rs b/collator/src/validator/types.rs index 01945c7d3..852aacf04 100644 --- a/collator/src/validator/types.rs +++ b/collator/src/validator/types.rs @@ -38,6 +38,7 @@ impl TryFrom<&ValidatorDescription> for ValidatorInfo { } pub struct ValidationSessionInfo { + pub workchain: i32, pub seqno: u32, pub validators: ValidatorsMap, } @@ -64,6 +65,7 @@ impl TryFrom> for ValidationSessionInfo { } let validation_session = ValidationSessionInfo { + workchain: session_info.workchain(), seqno: session_info.seqno(), validators, }; @@ -99,6 +101,7 @@ impl BlockValidationCandidate { #[derive(TlWrite, TlRead)] #[tl(boxed, id = 0x12341111)] pub(crate) struct OverlayNumber { + pub workchain: i32, pub session_seqno: u32, } diff --git a/collator/src/validator/validator.rs b/collator/src/validator/validator.rs index 7c1080890..7a8c8d17f 100644 --- a/collator/src/validator/validator.rs +++ b/collator/src/validator/validator.rs @@ -1,24 +1,25 @@ use std::sync::Arc; use std::time::Duration; -use anyhow::{bail, Context, Result}; 
+use anyhow::{Context, Result}; use async_trait::async_trait; -use everscale_crypto::ed25519::KeyPair; +use everscale_crypto::ed25519::{KeyPair, PublicKey}; use everscale_types::cell::HashBytes; use everscale_types::models::{BlockId, BlockIdShort, Signature}; use tokio::task::JoinHandle; -use tracing::{debug, trace, warn}; +use tracing::{debug, info, trace, warn}; use tycho_network::{OverlayId, PeerId, PrivateOverlay, Request}; +use crate::state_node::StateNodeAdapter; +use crate::tracing_targets; use crate::types::{OnValidatedBlockEvent, ValidatorNetwork}; use crate::validator::config::ValidatorConfig; use crate::validator::network::dto::SignaturesQuery; use crate::validator::network::network_service::NetworkService; use crate::validator::state::{SessionInfo, ValidationState, ValidationStateStdImpl}; use crate::validator::types::{ - BlockValidationCandidate, OverlayNumber, ValidationResult, ValidationSessionInfo, ValidatorInfo, + BlockValidationCandidate, OverlayNumber, ValidationSessionInfo, ValidatorInfo, }; -use crate::{state_node::StateNodeAdapter, tracing_targets}; // FACTORY @@ -132,7 +133,7 @@ impl Validator for ValidatorStdImpl { async fn validate(&self, candidate: BlockId, session_seqno: u32) -> Result<()> { let session = self .validation_state - .get_session(session_seqno) + .get_session(candidate.shard.workchain(), session_seqno) .await .ok_or_else(|| { anyhow::anyhow!("Validation session not found for seqno: {}", session_seqno) @@ -171,6 +172,7 @@ impl Validator for ValidatorStdImpl { }; let overlay_id = OverlayNumber { + workchain: validators_session_info.workchain, session_seqno: validators_session_info.seqno, }; trace!(target: tracing_targets::VALIDATOR, overlay_id = ?validators_session_info.seqno, "Creating private overlay"); @@ -189,10 +191,12 @@ impl Validator for ValidatorStdImpl { .add_private_overlay(&private_overlay.clone()); if !overlay_added { - bail!("Failed to add private overlay"); + warn!(target: tracing_targets::VALIDATOR, "Failed to 
add private overlay"); + // bail!("Failed to add private overlay"); } let session_info = SessionInfo::new( + validators_session_info.workchain, validators_session_info.seqno, validators_session_info.clone(), private_overlay.clone(), @@ -230,6 +234,7 @@ async fn start_candidate_validation( state_node_adapter: &Arc, config: &ValidatorConfig, ) -> Result<()> { + info!(target: tracing_targets::VALIDATOR, "Start candidate validation: {:?}", block_id.as_short_id()); let cancellation_token = tokio_util::sync::CancellationToken::new(); let short_id = block_id.as_short_id(); let our_signature = sign_block(current_validator_keypair, &block_id)?; @@ -241,18 +246,20 @@ async fn start_candidate_validation( let cached_signatures = session.get_cached_signatures_by_block(&block_id.as_short_id()); + trace!(target: tracing_targets::VALIDATOR, "Cached signatures len: {:?}", cached_signatures.as_ref().map(|x| x.1.len())); + if let Some(cached_signatures) = cached_signatures { initial_signatures.extend(cached_signatures.1.into_iter().map(|(k, v)| (k.0, v.0))); } + trace!(target: tracing_targets::VALIDATOR, "Initial signatures: {:?}", initial_signatures); let is_validation_finished = process_candidate_signature_response( session.clone(), short_id, - vec![(current_validator_pubkey.0, our_signature.0)], + initial_signatures, listeners, ) .await?; - trace!(target: tracing_targets::VALIDATOR, "Validation finished: {:?}", is_validation_finished); if is_validation_finished { cancellation_token.cancel(); // Cancel all tasks if validation is finished @@ -315,6 +322,7 @@ async fn start_candidate_validation( .get_validation_status(&short_id) .await? 
.is_finished(); + if validation_finished { trace!(target: tracing_targets::VALIDATOR, "Validation is finished"); token_clone.cancel(); // Signal cancellation to all tasks @@ -340,7 +348,7 @@ async fn start_candidate_validation( match response { Ok(Ok(response)) => { if let Ok(signatures) = response.parse_tl::() { - trace!(target: tracing_targets::VALIDATOR, "Received signatures from validator {:?}", validator.public_key.to_bytes()); + trace!(target: tracing_targets::VALIDATOR, "Received signatures from validator {}", validator.public_key); let is_finished = process_candidate_signature_response( cloned_session.clone(), @@ -358,18 +366,26 @@ async fn start_candidate_validation( } } Err(e) => { - warn!(target: tracing_targets::VALIDATOR, "Elapsed validator response {:?}: {:?}", validator.public_key.to_bytes(), e); - let delay = delay * 2_u32.pow(attempt); - let delay = std::cmp::min(delay, max_delay); - tokio::time::sleep(delay).await; - attempt += 1; + let error_message = format!("Elapsed validator response: {}", e); + handle_error_and_backoff( + delay, + max_delay, + &mut attempt, + &validator.public_key, + &error_message, + ) + .await; } Ok(Err(e)) => { - warn!(target: tracing_targets::VALIDATOR, "Error receiving signatures from validator {:?}: {:?}", validator.public_key.to_bytes(), e); - let delay = delay * 2_u32.pow(attempt); - let delay = std::cmp::min(delay, max_delay); - tokio::time::sleep(delay).await; - attempt += 1; + let error_message = format!("Error receiving signatures: {}", e); + handle_error_and_backoff( + delay, + max_delay, + &mut attempt, + &validator.public_key, + &error_message, + ) + .await; } } tokio::time::sleep(delay).await; @@ -387,19 +403,32 @@ async fn start_candidate_validation( Ok(()) } +async fn handle_error_and_backoff( + delay: Duration, + max_delay: Duration, + attempt: &mut u32, + validator_public_key: &PublicKey, + error_message: &str, +) { + warn!(target: tracing_targets::VALIDATOR, "Error validator response: validator: {:x?}: {} 
", validator_public_key, error_message); + let exponential_backoff = 2_u32.saturating_pow(*attempt); + let safe_delay = delay.saturating_mul(exponential_backoff); + let new_delay = std::cmp::min(safe_delay, max_delay); + tokio::time::sleep(new_delay).await; + *attempt += 1; +} + pub async fn process_candidate_signature_response( session: Arc, block_id_short: BlockIdShort, signatures: Vec<([u8; 32], [u8; 64])>, listeners: &[Arc], ) -> Result { - trace!(target: tracing_targets::VALIDATOR, block = %block_id_short, "Processing candidate signature response"); + debug!(target: tracing_targets::VALIDATOR, block = %block_id_short, "Processing candidate signature response"); let validation_status = session.get_validation_status(&block_id_short).await?; trace!(target: tracing_targets::VALIDATOR, block = %block_id_short, "Validation status: {:?}", validation_status); - if validation_status == ValidationResult::Valid - || validation_status == ValidationResult::Invalid - { - debug!( + if validation_status.is_finished() { + trace!( "Validation status is already set for block {:?}.", block_id_short ); @@ -407,11 +436,14 @@ pub async fn process_candidate_signature_response( } if session.get_block(&block_id_short).await.is_some() { + trace!(target: tracing_targets::VALIDATOR, + "Block {:?} is already in the session. 
Processing signatures.", + block_id_short); session .process_signatures_and_update_status(block_id_short, signatures, listeners) - .await?; + .await } else { - trace!(target: tracing_targets::VALIDATOR, "Caching signatures for block {:?}", block_id_short); + debug!(target: tracing_targets::VALIDATOR, "Caching signatures for block {:?}", block_id_short); if block_id_short.seqno > 0 { let previous_block = BlockIdShort::from((block_id_short.shard, block_id_short.seqno - 1)); @@ -429,6 +461,6 @@ pub async fn process_candidate_signature_response( .await; } } + Ok(false) } - Ok(false) } diff --git a/collator/tests/adapter_tests.rs b/collator/tests/adapter_tests.rs index 4bae5ec86..c96e04e80 100644 --- a/collator/tests/adapter_tests.rs +++ b/collator/tests/adapter_tests.rs @@ -3,7 +3,8 @@ use std::sync::Arc; use anyhow::Result; use async_trait::async_trait; -use everscale_types::models::{BlockId, ShardIdent}; +use everscale_types::cell::Cell; +use everscale_types::models::{BlockId, ShardIdent, ShardStateUnsplit}; use tycho_block_util::block::{BlockStuff, BlockStuffAug}; use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; use tycho_collator::state_node::{ @@ -124,8 +125,7 @@ async fn test_add_and_get_next_block() { }; adapter.accept_block(block).await.unwrap(); - // TOOD: Incorrect!!! 
Should be waiting for the next block, not the previous one - let next_block = adapter.wait_for_block(prev_block_id).await; + let next_block = adapter.wait_for_block_next(prev_block_id).await; assert!( next_block.is_some(), "Block should be retrieved after being added" @@ -135,7 +135,7 @@ async fn test_add_and_get_next_block() { #[tokio::test] async fn test_add_read_handle_1000_blocks_parallel() { try_init_test_tracing(tracing_subscriber::filter::LevelFilter::DEBUG); - tycho_util::test::init_logger("test_add_read_handle_100000_blocks_parallel"); + tycho_util::test::init_logger("test_add_read_handle_100000_blocks_parallel", "debug"); let storage = prepare_test_storage().await.unwrap(); @@ -210,22 +210,26 @@ async fn test_add_read_handle_1000_blocks_parallel() { let next_block = adapter.wait_for_block(&block_id).await; assert!( next_block.is_some(), - "Block {} should be retrieved after being added", - i + "Block {i} should be retrieved after being added", ); - let last_mc_block_id = adapter.load_last_applied_mc_block_id().await.unwrap(); - let state = storage - .shard_state_storage() - .load_state(&last_mc_block_id) - .await - .unwrap(); + let mcstate_tracker = MinRefMcStateTracker::new(); + let mut shard_state = ShardStateUnsplit::default(); + shard_state.shard_ident = block_id.shard; + shard_state.seqno = block_id.seqno; + + let state = ShardStateStuff::from_state_and_root( + &block_id, + Box::new(shard_state), + Cell::default(), + &mcstate_tracker, + ) + .unwrap(); let handle_block = adapter.handle_state(&state).await; assert!( handle_block.is_ok(), - "Block {} should be handled after being added", - i + "Block {i} should be handled after being added", ); } }) diff --git a/collator/tests/collation_tests.rs b/collator/tests/collation_tests.rs index 2c2232692..8ce0ad7dc 100644 --- a/collator/tests/collation_tests.rs +++ b/collator/tests/collation_tests.rs @@ -50,7 +50,8 @@ impl StateSubscriber for StrangeBlockProvider { /// run: `RUST_BACKTRACE=1 cargo test -p 
tycho-collator --features test --test collation_tests -- --nocapture` #[tokio::test] async fn test_collation_process_on_stubs() { - try_init_test_tracing(tracing_subscriber::filter::LevelFilter::TRACE); + try_init_test_tracing(tracing_subscriber::filter::LevelFilter::DEBUG); + tycho_util::test::init_logger("test_collation_process_on_stubs", "debug"); let storage = prepare_test_storage().await.unwrap(); @@ -75,7 +76,7 @@ async fn test_collation_process_on_stubs() { let node_1_keypair = Arc::new(everscale_crypto::ed25519::KeyPair::generate(&mut rnd)); let config = CollationConfig { - key_pair: node_1_keypair, + key_pair: node_1_keypair.clone(), mc_block_min_interval_ms: 10000, max_mc_block_delta_from_bc_to_await_own: 2, supported_block_version: 50, @@ -85,7 +86,7 @@ async fn test_collation_process_on_stubs() { #[cfg(feature = "test")] test_validators_keypairs: vec![ node_1_keypair, - everscale_crypto::ed25519::KeyPair::generate(&mut rnd), + // Arc::new(everscale_crypto::ed25519::KeyPair::generate(&mut rnd)), ], }; @@ -95,7 +96,7 @@ async fn test_collation_process_on_stubs() { let manager = CollationManager::start( config, - Arc::new(MessageQueueAdapterStdImpl::new()), + Arc::new(MessageQueueAdapterStdImpl::default()), |listener| StateNodeAdapterStdImpl::new(listener, storage.clone()), |listener| MempoolAdapterStdImpl::new(listener), ValidatorStdImplFactory { diff --git a/collator/tests/validator_tests.rs b/collator/tests/validator_tests.rs index 5c892be9d..78eca0b0e 100644 --- a/collator/tests/validator_tests.rs +++ b/collator/tests/validator_tests.rs @@ -12,7 +12,6 @@ use rand::prelude::ThreadRng; use tokio::sync::{Mutex, Notify}; use tokio::time::sleep; use tracing::debug; - use tycho_block_util::block::ValidatorSubsetInfo; use tycho_block_util::state::{MinRefMcStateTracker, ShardStateStuff}; use tycho_collator::state_node::{StateNodeAdapterStdImpl, StateNodeEventListener}; @@ -263,7 +262,8 @@ async fn test_validator_accept_block_by_state() -> anyhow::Result<()> { 
short_hash: 0, }; let keypair = Arc::new(KeyPair::generate(&mut ThreadRng::default())); - let collator_session_info = Arc::new(CollationSessionInfo::new(0, validators, Some(keypair))); + let collator_session_info = + Arc::new(CollationSessionInfo::new(-1, 0, validators, Some(keypair))); let validation_session = Arc::new(ValidationSessionInfo::try_from(collator_session_info.clone()).unwrap()); @@ -300,7 +300,7 @@ fn create_blocks(amount: u32) -> Vec { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn test_validator_accept_block_by_network() -> Result<()> { try_init_test_tracing(tracing_subscriber::filter::LevelFilter::DEBUG); - tycho_util::test::init_logger("test_validator_accept_block_by_network"); + tycho_util::test::init_logger("test_validator_accept_block_by_network", "debug"); let mut tmp_dirs = Vec::new(); @@ -402,6 +402,7 @@ async fn handle_validator( for session in 1..=sessions { let blocks = create_blocks(blocks_amount); let collator_session_info = Arc::new(CollationSessionInfo::new( + -1, session, validators_subset_info.clone(), Some(validator.get_keypair()), // Assuming you have access to node's keypair here diff --git a/consensus/src/test_utils.rs b/consensus/src/test_utils.rs index e1a5043bb..22084634a 100644 --- a/consensus/src/test_utils.rs +++ b/consensus/src/test_utils.rs @@ -7,7 +7,7 @@ use tokio::task::JoinHandle; use tycho_network::{ Address, DhtClient, DhtConfig, DhtService, Network, NetworkConfig, OverlayService, PeerId, - PeerInfo, Router, + PeerInfo, Router, ToSocket, }; use tycho_util::time::now_sec; @@ -55,7 +55,7 @@ pub fn make_peer_info(key: &SecretKey, address: Address, ttl: Option) -> Pe // TODO receive configured services from general node, // move current setup to tests as it provides acceptable timing // This dependencies should be passed from validator module to init mempool -pub fn from_validator( +pub fn from_validator( socket_addr: T, secret_key: &SecretKey, dht_config: DhtConfig, diff --git a/core/Cargo.toml 
b/core/Cargo.toml index 3dcbbdd4e..514bc3f52 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -35,10 +35,10 @@ tycho-util = { workspace = true } [dev-dependencies] bytesize = { workspace = true } everscale-crypto = { workspace = true } -tycho-util = { workspace = true, features = ["test"] } -tycho-storage = { workspace = true, features = ["test"] } tempfile = { workspace = true } tracing-test = { workspace = true } +tycho-storage = { workspace = true, features = ["test"] } +tycho-util = { workspace = true, features = ["test"] } [features] test = [] diff --git a/core/src/block_strider/mod.rs b/core/src/block_strider/mod.rs index f98103d8e..fd3f64ec5 100644 --- a/core/src/block_strider/mod.rs +++ b/core/src/block_strider/mod.rs @@ -8,22 +8,21 @@ use tycho_block_util::state::MinRefMcStateTracker; use tycho_storage::Storage; use tycho_util::FastHashMap; +#[cfg(any(test, feature = "test"))] +pub use self::provider::ArchiveBlockProvider; pub use self::provider::{ - BlockProvider, BlockchainBlockProvider, BlockchainBlockProviderConfig, EmptyBlockProvider, - OptionalBlockStuff, + BlockProvider, BlockProviderExt, BlockchainBlockProvider, BlockchainBlockProviderConfig, + ChainBlockProvider, EmptyBlockProvider, OptionalBlockStuff, StorageBlockProvider, }; pub use self::state::{BlockStriderState, PersistentBlockStriderState, TempBlockStriderState}; pub use self::state_applier::ShardStateApplier; +#[cfg(any(test, feature = "test"))] +pub use self::subscriber::test::PrintSubscriber; pub use self::subscriber::{ BlockSubscriber, BlockSubscriberContext, BlockSubscriberExt, ChainSubscriber, NoopSubscriber, StateSubscriber, StateSubscriberContext, StateSubscriberExt, }; -#[cfg(any(test, feature = "test"))] -pub use self::provider::ArchiveBlockProvider; -#[cfg(any(test, feature = "test"))] -pub use self::subscriber::test::PrintSubscriber; - mod provider; mod state; mod state_applier; diff --git a/core/src/block_strider/provider/archive_provider.rs 
b/core/src/block_strider/provider/archive_provider.rs index 0971265d4..65b45dd9c 100644 --- a/core/src/block_strider/provider/archive_provider.rs +++ b/core/src/block_strider/provider/archive_provider.rs @@ -8,7 +8,6 @@ use everscale_types::models::{Block, BlockId, BlockIdShort, BlockProof}; use futures_util::future::BoxFuture; use futures_util::FutureExt; use sha2::Digest; - use tycho_block_util::archive::{ArchiveEntryId, ArchiveReader}; use tycho_block_util::block::{BlockStuff, BlockStuffAug}; diff --git a/core/src/block_strider/provider/blockchain_provider.rs b/core/src/block_strider/provider/blockchain_provider.rs index b04581de9..3f6628381 100644 --- a/core/src/block_strider/provider/blockchain_provider.rs +++ b/core/src/block_strider/provider/blockchain_provider.rs @@ -5,6 +5,7 @@ use futures_util::future::BoxFuture; use serde::{Deserialize, Serialize}; use tycho_block_util::block::{BlockStuff, BlockStuffAug}; use tycho_storage::Storage; +use tycho_util::serde_helpers; use crate::block_strider::provider::OptionalBlockStuff; use crate::block_strider::BlockProvider; @@ -20,11 +21,13 @@ pub struct BlockchainBlockProviderConfig { /// Polling interval for `get_next_block` method. /// /// Default: 1 second. + #[serde(with = "serde_helpers::humantime")] pub get_next_block_polling_interval: Duration, /// Polling interval for `get_block` method. /// /// Default: 1 second. + #[serde(with = "serde_helpers::humantime")] pub get_block_polling_interval: Duration, } @@ -59,6 +62,7 @@ impl BlockchainBlockProvider { // TODO: Validate block with proof. 
async fn get_next_block_impl(&self, prev_block_id: &BlockId) -> OptionalBlockStuff { let mut interval = tokio::time::interval(self.config.get_next_block_polling_interval); + loop { let res = self.client.get_next_block_full(prev_block_id).await; let block = match res { diff --git a/core/src/block_strider/provider/mod.rs b/core/src/block_strider/provider/mod.rs index beffde6f3..2c1f3d3a9 100644 --- a/core/src/block_strider/provider/mod.rs +++ b/core/src/block_strider/provider/mod.rs @@ -1,15 +1,16 @@ use std::future::Future; +use std::pin::pin; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use everscale_types::models::BlockId; -use futures_util::future::BoxFuture; +use futures_util::future::{self, BoxFuture}; use tycho_block_util::block::BlockStuffAug; -pub use self::blockchain_provider::{BlockchainBlockProvider, BlockchainBlockProviderConfig}; - #[cfg(any(test, feature = "test"))] pub use self::archive_provider::ArchiveBlockProvider; +pub use self::blockchain_provider::{BlockchainBlockProvider, BlockchainBlockProviderConfig}; +pub use self::storage_provider::StorageBlockProvider; mod blockchain_provider; mod storage_provider; @@ -54,6 +55,20 @@ impl BlockProvider for Arc { } } +pub trait BlockProviderExt: Sized { + fn chain(self, other: T) -> ChainBlockProvider; +} + +impl BlockProviderExt for B { + fn chain(self, other: T) -> ChainBlockProvider { + ChainBlockProvider { + left: self, + right: other, + is_right: AtomicBool::new(false), + } + } +} + // === Provider combinators === #[derive(Debug, Clone, Copy)] pub struct EmptyBlockProvider; @@ -71,7 +86,7 @@ impl BlockProvider for EmptyBlockProvider { } } -struct ChainBlockProvider { +pub struct ChainBlockProvider { left: T1, right: T2, is_right: AtomicBool, @@ -105,13 +120,56 @@ impl BlockProvider for ChainBlockProvider< } } +impl BlockProvider for (T1, T2) { + type GetNextBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; + type GetBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; + + fn 
get_next_block<'a>(&'a self, prev_block_id: &'a BlockId) -> Self::GetNextBlockFut<'a> { + let left = self.0.get_next_block(prev_block_id); + let right = self.1.get_next_block(prev_block_id); + + Box::pin(async move { + match future::select(pin!(left), pin!(right)).await { + future::Either::Left((res, right)) => match res { + Some(res) => Some(res), + None => right.await, + }, + future::Either::Right((res, left)) => match res { + Some(res) => Some(res), + None => left.await, + }, + } + }) + } + + fn get_block<'a>(&'a self, block_id: &'a BlockId) -> Self::GetBlockFut<'a> { + let left = self.0.get_block(block_id); + let right = self.1.get_block(block_id); + + Box::pin(async move { + match future::select(pin!(left), pin!(right)).await { + future::Either::Left((res, right)) => match res { + Some(res) => Some(res), + None => right.await, + }, + future::Either::Right((res, left)) => match res { + Some(res) => Some(res), + None => left.await, + }, + } + }) + } +} + #[cfg(test)] mod test { - use super::*; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; + use tycho_block_util::block::BlockStuff; + use super::*; + struct MockBlockProvider { // let's give it some state, pretending it's useful has_block: AtomicBool, diff --git a/core/src/block_strider/provider/storage_provider.rs b/core/src/block_strider/provider/storage_provider.rs index 284deef12..29625fa52 100644 --- a/core/src/block_strider/provider/storage_provider.rs +++ b/core/src/block_strider/provider/storage_provider.rs @@ -7,13 +7,23 @@ use crate::block_strider::BlockProvider; // TODO: Add an explicit storage provider type -impl BlockProvider for Storage { +pub struct StorageBlockProvider { + storage: Storage, +} + +impl StorageBlockProvider { + pub fn new(storage: Storage) -> Self { + Self { storage } + } +} + +impl BlockProvider for StorageBlockProvider { type GetNextBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; type GetBlockFut<'a> = BoxFuture<'a, OptionalBlockStuff>; fn 
get_next_block<'a>(&'a self, prev_block_id: &'a BlockId) -> Self::GetNextBlockFut<'a> { Box::pin(async { - let block_storage = self.block_storage(); + let block_storage = self.storage.block_storage(); let get_next_block = || async { let rx = block_storage @@ -34,7 +44,7 @@ impl BlockProvider for Storage { fn get_block<'a>(&'a self, block_id: &'a BlockId) -> Self::GetBlockFut<'a> { Box::pin(async { - let block_storage = self.block_storage(); + let block_storage = self.storage.block_storage(); let get_block = || async { let rx = block_storage.subscribe_to_block(*block_id).await?; diff --git a/core/src/block_strider/state_applier.rs b/core/src/block_strider/state_applier.rs index bc99ded10..c9ee36b45 100644 --- a/core/src/block_strider/state_applier.rs +++ b/core/src/block_strider/state_applier.rs @@ -4,7 +4,6 @@ use anyhow::{Context, Result}; use everscale_types::cell::Cell; use everscale_types::models::BlockId; use futures_util::future::BoxFuture; - use tycho_block_util::archive::ArchiveData; use tycho_block_util::block::BlockStuff; use tycho_block_util::state::{MinRefMcStateTracker, RefMcStateHandle, ShardStateStuff}; @@ -147,15 +146,11 @@ where let info = block.load_info()?; let res = block_storage - .store_block_data( - block, - archive_data, - BlockMetaData { - is_key_block: info.key_block, - gen_utime: info.gen_utime, - mc_ref_seqno: mc_block_id.seqno, - }, - ) + .store_block_data(block, archive_data, BlockMetaData { + is_key_block: info.key_block, + gen_utime: info.gen_utime, + mc_ref_seqno: mc_block_id.seqno, + }) .await?; Ok(res.handle) @@ -177,7 +172,7 @@ where .await .context("Failed to join blocking task")? 
.context("Failed to apply state update")?; - let new_state = ShardStateStuff::new(*block.id(), new_state, mc_state_tracker) + let new_state = ShardStateStuff::from_root(block.id(), new_state, mc_state_tracker) .context("Failed to create new state")?; let state_storage = self.inner.storage.shard_state_storage(); @@ -215,122 +210,3 @@ struct Inner { storage: Storage, state_subscriber: S, } - -#[cfg(test)] -pub mod test { - use std::str::FromStr; - - use everscale_types::cell::HashBytes; - use everscale_types::models::*; - use tracing_test::traced_test; - use tycho_storage::{BlockMetaData, Db, DbOptions, Storage}; - - use super::*; - use crate::block_strider::subscriber::test::PrintSubscriber; - use crate::block_strider::{ArchiveBlockProvider, BlockStrider, PersistentBlockStriderState}; - - #[traced_test] - #[tokio::test] - async fn test_state_apply() -> anyhow::Result<()> { - let (provider, storage) = prepare_state_apply().await?; - - let last_mc = *provider.mc_block_ids.last_key_value().unwrap().1; - let blocks = provider.blocks.keys().copied().collect::>(); - - let block_strider = BlockStrider::builder() - .with_provider(provider) - .with_state(PersistentBlockStriderState::new(last_mc, storage.clone())) - .with_state_subscriber(Default::default(), storage.clone(), PrintSubscriber) - .build(); - - block_strider.run().await?; - - assert_eq!( - storage.node_state().load_last_mc_block_id().unwrap(), - last_mc - ); - storage - .shard_state_storage() - .load_state(&last_mc) - .await - .unwrap(); - - for block in &blocks { - let handle = storage.block_handle_storage().load_handle(block).unwrap(); - assert!(handle.meta().is_applied()); - storage - .shard_state_storage() - .load_state(block) - .await - .unwrap(); - } - - Ok(()) - } - - pub async fn prepare_state_apply() -> Result<(ArchiveBlockProvider, Storage)> { - let data = include_bytes!("../../tests/data/00001"); - let provider = ArchiveBlockProvider::new(data).unwrap(); - let temp = tempfile::tempdir().unwrap(); - let 
db = Db::open(temp.path().to_path_buf(), DbOptions::default()).unwrap(); - let storage = Storage::new(db, temp.path().join("file"), 1_000_000).unwrap(); - - let master = include_bytes!("../../tests/data/everscale_zerostate.boc"); - let shard = include_bytes!("../../tests/data/everscale_shard_zerostate.boc"); - - let master_id = BlockId { - root_hash: HashBytes::from_str( - "58ffca1a178daff705de54216e5433c9bd2e7d850070d334d38997847ab9e845", - ) - .unwrap(), - file_hash: HashBytes::from_str( - "d270b87b2952b5ba7daa70aaf0a8c361befcf4d8d2db92f9640d5443070838e4", - ) - .unwrap(), - shard: ShardIdent::MASTERCHAIN, - seqno: 0, - }; - let master = ShardStateStuff::deserialize_zerostate(master_id, master).unwrap(); - - // Parse block id - let block_id = BlockId::from_str("-1:8000000000000000:0:58ffca1a178daff705de54216e5433c9bd2e7d850070d334d38997847ab9e845:d270b87b2952b5ba7daa70aaf0a8c361befcf4d8d2db92f9640d5443070838e4")?; - - // Write zerostate to db - let (handle, _) = storage.block_handle_storage().create_or_load_handle( - &block_id, - BlockMetaData::zero_state(master.state().gen_utime), - ); - - storage - .shard_state_storage() - .store_state(&handle, &master) - .await?; - - let shard_id = BlockId { - root_hash: HashBytes::from_str( - "95f042d1bf5b99840cad3aaa698f5d7be13d9819364faf9dd43df5b5d3c2950e", - ) - .unwrap(), - file_hash: HashBytes::from_str( - "97af4602a57fc884f68bb4659bab8875dc1f5e45a9fd4fbafd0c9bc10aa5067c", - ) - .unwrap(), - shard: ShardIdent::BASECHAIN, - seqno: 0, - }; - - //store workchain zerostate - let shard = ShardStateStuff::deserialize_zerostate(shard_id, shard).unwrap(); - let (handle, _) = storage.block_handle_storage().create_or_load_handle( - &shard_id, - BlockMetaData::zero_state(shard.state().gen_utime), - ); - storage - .shard_state_storage() - .store_state(&handle, &shard) - .await?; - - storage.node_state().store_last_mc_block_id(&master_id); - Ok((provider, storage)) - } -} diff --git a/core/src/blockchain_rpc/client.rs 
b/core/src/blockchain_rpc/client.rs index 1d177f70a..acb291488 100644 --- a/core/src/blockchain_rpc/client.rs +++ b/core/src/blockchain_rpc/client.rs @@ -2,8 +2,9 @@ use std::sync::Arc; use anyhow::Result; use everscale_types::models::BlockId; +use tycho_network::PublicOverlay; -use crate::overlay_client::{PublicOverlayClient, QueryResponse}; +use crate::overlay_client::{Error, PublicOverlayClient, QueryResponse}; use crate::proto::blockchain::*; #[derive(Clone)] @@ -23,6 +24,10 @@ impl BlockchainRpcClient { } } + pub fn overlay(&self) -> &PublicOverlay { + self.inner.overlay_client.overlay() + } + pub fn overlay_client(&self) -> &PublicOverlayClient { &self.inner.overlay_client } @@ -31,7 +36,7 @@ impl BlockchainRpcClient { &self, block: &BlockId, max_size: u32, - ) -> Result> { + ) -> Result, Error> { let client = &self.inner.overlay_client; let data = client .query::<_, KeyBlockIds>(&rpc::GetNextKeyBlockIds { @@ -42,7 +47,7 @@ impl BlockchainRpcClient { Ok(data) } - pub async fn get_block_full(&self, block: &BlockId) -> Result> { + pub async fn get_block_full(&self, block: &BlockId) -> Result, Error> { let client = &self.inner.overlay_client; let data = client .query::<_, BlockFull>(&rpc::GetBlockFull { block_id: *block }) @@ -53,7 +58,7 @@ impl BlockchainRpcClient { pub async fn get_next_block_full( &self, prev_block: &BlockId, - ) -> Result> { + ) -> Result, Error> { let client = &self.inner.overlay_client; let data = client .query::<_, BlockFull>(&rpc::GetNextBlockFull { @@ -63,7 +68,10 @@ impl BlockchainRpcClient { Ok(data) } - pub async fn get_archive_info(&self, mc_seqno: u32) -> Result> { + pub async fn get_archive_info( + &self, + mc_seqno: u32, + ) -> Result, Error> { let client = &self.inner.overlay_client; let data = client .query::<_, ArchiveInfo>(&rpc::GetArchiveInfo { mc_seqno }) @@ -76,7 +84,7 @@ impl BlockchainRpcClient { archive_id: u64, offset: u64, max_size: u32, - ) -> Result> { + ) -> Result, Error> { let client = &self.inner.overlay_client; 
let data = client .query::<_, Data>(&rpc::GetArchiveSlice { @@ -94,7 +102,7 @@ impl BlockchainRpcClient { block: &BlockId, offset: u64, max_size: u64, - ) -> Result> { + ) -> Result, Error> { let client = &self.inner.overlay_client; let data = client .query::<_, PersistentStatePart>(&rpc::GetPersistentStatePart { diff --git a/core/src/blockchain_rpc/service.rs b/core/src/blockchain_rpc/service.rs index ebac72a7a..6da1b0934 100644 --- a/core/src/blockchain_rpc/service.rs +++ b/core/src/blockchain_rpc/service.rs @@ -71,6 +71,9 @@ impl Service for BlockchainRpcService { }; tycho_network::match_tl_request!(body, tag = constructor, { + overlay::Ping as _ => BoxFutureOrNoop::future(async { + Some(Response::from_tl(overlay::Pong)) + }), rpc::GetNextKeyBlockIds as req => { tracing::debug!( block_id = %req.block_id, @@ -345,8 +348,11 @@ impl Inner { .read_state_part(&req.mc_block_id, &req.block_id, req.offset, req.max_size) .await { - Some(data) => overlay::Response::Ok(PersistentStatePart::Found { data }), - None => overlay::Response::Ok(PersistentStatePart::NotFound), + Ok(data) => overlay::Response::Ok(PersistentStatePart::Found { data }), + Err(e) => { + tracing::debug!("failed to read persistent state part: {e}"); + overlay::Response::Ok(PersistentStatePart::NotFound) + } } } diff --git a/core/src/global_config.rs b/core/src/global_config.rs new file mode 100644 index 000000000..8c3f08e5d --- /dev/null +++ b/core/src/global_config.rs @@ -0,0 +1,52 @@ +use std::path::Path; + +use anyhow::Result; +use everscale_types::cell::HashBytes; +use everscale_types::models::{BlockId, ShardIdent}; +use serde::{Deserialize, Serialize}; +use tycho_network::{OverlayId, PeerInfo}; + +use crate::proto::blockchain::OverlayIdData; + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct GlobalConfig { + pub bootstrap_peers: Vec, + pub zerostate: ZerostateId, +} + +impl GlobalConfig { + pub fn from_file>(path: P) -> Result { + tycho_util::serde_helpers::load_json_from_file(path) + 
} + + pub fn validate(&self, now: u32) -> Result<()> { + for peer in &self.bootstrap_peers { + anyhow::ensure!(peer.is_valid(now), "invalid peer info for {}", peer.id); + } + Ok(()) + } +} + +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)] +pub struct ZerostateId { + pub root_hash: HashBytes, + pub file_hash: HashBytes, +} + +impl ZerostateId { + pub fn as_block_id(&self) -> BlockId { + BlockId { + shard: ShardIdent::MASTERCHAIN, + seqno: 0, + root_hash: self.root_hash, + file_hash: self.file_hash, + } + } + + pub fn compute_public_overlay_id(&self) -> OverlayId { + OverlayId(tl_proto::hash(OverlayIdData { + zerostate_root_hash: self.root_hash.0, + zerostate_file_hash: self.file_hash.0, + })) + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs index 31a27e05c..ae9166cfd 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -1,4 +1,5 @@ pub mod block_strider; pub mod blockchain_rpc; +pub mod global_config; pub mod overlay_client; pub mod proto; diff --git a/core/src/overlay_client/mod.rs b/core/src/overlay_client/mod.rs index 6f43db315..ce12c68c8 100644 --- a/core/src/overlay_client/mod.rs +++ b/core/src/overlay_client/mod.rs @@ -9,7 +9,6 @@ use tycho_network::{Network, PublicOverlay, Request}; pub use self::config::PublicOverlayClientConfig; pub use self::neighbour::{Neighbour, NeighbourStats}; pub use self::neighbours::Neighbours; - use crate::proto::overlay; mod config; @@ -136,12 +135,39 @@ impl Clone for Inner { impl Inner { async fn ping_neighbours_task(self) { + let req = Request::from_tl(overlay::Ping); + + // Start pinging neighbours let mut interval = tokio::time::interval(self.config.neighbours_ping_interval); loop { interval.tick().await; - if let Err(e) = self.query::<_, overlay::Pong>(overlay::Ping).await { - tracing::error!("failed to ping random neighbour: {e}"); + let Some(neighbour) = self.neighbours.choose().await else { + continue; + }; + + let peer_id = *neighbour.peer_id(); + match self.query_impl(neighbour.clone(), 
req.clone()).await { + Ok(res) => match tl_proto::deserialize::(&res.data) { + Ok(_) => { + res.accept(); + tracing::debug!(%peer_id, "pinged neighbour"); + } + Err(e) => { + tracing::warn!( + %peer_id, + "received an invalid ping response: {e}", + ); + res.reject(); + } + }, + Err(e) => { + tracing::warn!( + %peer_id, + "failed to ping neighbour: {e}", + ); + continue; + } } } } @@ -151,10 +177,22 @@ impl Inner { let max_neighbours = self.config.max_neighbours; let default_roundtrip = self.config.default_roundtrip; + let mut overlay_peers_added = self.overlay.entires_added().notified(); + let mut overlay_peer_count = self.overlay.read_entries().len(); + let mut interval = tokio::time::interval(self.config.neighbours_update_interval); loop { - interval.tick().await; + if overlay_peer_count < max_neighbours { + tracing::info!("not enough neighbours, waiting for more"); + + overlay_peers_added.await; + overlay_peers_added = self.overlay.entires_added().notified(); + + overlay_peer_count = self.overlay.read_entries().len(); + } else { + interval.tick().await; + } let active_neighbours = self.neighbours.get_active_neighbours().await.len(); let neighbours_to_get = max_neighbours + (max_neighbours - active_neighbours); diff --git a/core/src/overlay_client/neighbours.rs b/core/src/overlay_client/neighbours.rs index 7aa5eed55..daa1f24ea 100644 --- a/core/src/overlay_client/neighbours.rs +++ b/core/src/overlay_client/neighbours.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use rand::distributions::uniform::{UniformInt, UniformSampler}; use rand::Rng; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, Notify}; use crate::overlay_client::neighbour::Neighbour; @@ -22,10 +22,27 @@ impl Neighbours { max_neighbours, entries: Mutex::new(entries), selection_index: Mutex::new(selection_index), + changed: Notify::new(), }), } } + pub async fn wait_for_peers(&self, count: usize) { + loop { + let changed = self.inner.changed.notified(); + + if self.inner.entries.lock().await.len() >= count { 
+ break; + } + + changed.await; + } + } + + pub fn changed(&self) -> &Notify { + &self.inner.changed + } + pub async fn choose(&self) -> Option { let selection_index = self.inner.selection_index.lock().await; selection_index.get(&mut rand::thread_rng()) @@ -53,9 +70,15 @@ impl Neighbours { pub async fn update(&self, new: Vec) { let now = tycho_util::time::now_sec(); + let mut changed = false; + let mut guard = self.inner.entries.lock().await; // remove unreliable and expired neighbours - guard.retain(|x| x.is_reliable() && x.expires_at_secs() > now); + guard.retain(|x| { + let retain = x.is_reliable() && x.expires_at_secs() > now; + changed |= !retain; + retain + }); // if all neighbours are reliable and valid then remove the worst if guard.len() >= self.inner.max_neighbours { @@ -65,6 +88,7 @@ impl Neighbours { { if let Some(index) = guard.iter().position(|x| x.peer_id() == worst.peer_id()) { guard.remove(index); + changed = true; } } } @@ -75,18 +99,23 @@ impl Neighbours { } if guard.len() < self.inner.max_neighbours { guard.push(n); + changed = true; } } drop(guard); self.update_selection_index().await; + + if changed { + self.inner.changed.notify_waiters(); + } } pub async fn remove_outdated_neighbours(&self) { let now = tycho_util::time::now_sec(); let mut guard = self.inner.entries.lock().await; - //remove unreliable and expired neighbours + // remove unreliable and expired neighbours guard.retain(|x| x.expires_at_secs() > now); drop(guard); self.update_selection_index().await; @@ -97,6 +126,7 @@ struct Inner { max_neighbours: usize, entries: Mutex>, selection_index: Mutex, + changed: Notify, } struct SelectionIndex { diff --git a/core/src/proto.tl b/core/src/proto.tl index 082db0c07..7ffccf834 100644 --- a/core/src/proto.tl +++ b/core/src/proto.tl @@ -22,6 +22,14 @@ overlay.response.err code:int = overlay.Response T; ---types--- +/** +* Data for computing a public overlay id +*/ +blockchain.overlayIdData + zerostate_root_hash:int256 + 
zerostate_file_hash:int256 + = blockchain.OverlayIdData; + /** * A full block id */ diff --git a/core/src/proto/blockchain.rs b/core/src/proto/blockchain.rs index a75713638..ce9d93b7a 100644 --- a/core/src/proto/blockchain.rs +++ b/core/src/proto/blockchain.rs @@ -3,6 +3,14 @@ use tl_proto::{TlRead, TlWrite}; use crate::proto::{tl_block_id, tl_block_id_vec}; +/// Data for computing a public overlay id. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "blockchain.overlayIdData", scheme = "proto.tl")] +pub struct OverlayIdData { + pub zerostate_root_hash: [u8; 32], + pub zerostate_file_hash: [u8; 32], +} + #[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] #[tl(boxed, id = "blockchain.data", scheme = "proto.tl")] pub struct Data { diff --git a/core/tests/block_strider.rs b/core/tests/block_strider.rs index c3220a1e2..69487cafc 100644 --- a/core/tests/block_strider.rs +++ b/core/tests/block_strider.rs @@ -3,7 +3,7 @@ use std::time::Duration; use futures_util::stream::FuturesUnordered; use futures_util::StreamExt; -use tycho_core::block_strider::{BlockProvider, BlockchainBlockProvider}; +use tycho_core::block_strider::{BlockProvider, BlockchainBlockProvider, StorageBlockProvider}; use tycho_core::blockchain_rpc::BlockchainRpcClient; use tycho_core::overlay_client::{PublicOverlayClient, PublicOverlayClientConfig}; use tycho_network::PeerId; @@ -12,14 +12,16 @@ mod common; #[tokio::test] async fn storage_block_strider() -> anyhow::Result<()> { - tycho_util::test::init_logger("storage_block_strider"); + tycho_util::test::init_logger("storage_block_strider", "debug"); - let (storage, tmp_dir) = common::storage::init_storage().await?; + let (storage, _tmp_dir) = common::storage::init_storage().await?; + + let storage_provider = StorageBlockProvider::new(storage); let archive = common::storage::get_archive()?; for (block_id, data) in archive.blocks { if block_id.shard.is_masterchain() { - let block = storage.get_block(&block_id).await; + let 
block = storage_provider.get_block(&block_id).await; assert!(block.is_some()); if let Some(block) = block { @@ -31,15 +33,13 @@ async fn storage_block_strider() -> anyhow::Result<()> { } } - tmp_dir.close()?; - tracing::info!("done!"); Ok(()) } #[tokio::test] async fn overlay_block_strider() -> anyhow::Result<()> { - tycho_util::test::init_logger("overlay_block_strider"); + tycho_util::test::init_logger("overlay_block_strider", "debug"); #[derive(Debug, Default)] struct PeerState { diff --git a/core/tests/common/archive.rs b/core/tests/common/archive.rs index 598f63dba..2ece09e8d 100644 --- a/core/tests/common/archive.rs +++ b/core/tests/common/archive.rs @@ -6,7 +6,6 @@ use anyhow::Result; use everscale_types::cell::Load; use everscale_types::models::{Block, BlockId, BlockIdShort, BlockProof}; use sha2::Digest; - use tycho_block_util::archive::{ArchiveEntryId, ArchiveReader, WithArchiveData}; pub struct Archive { diff --git a/core/tests/common/node.rs b/core/tests/common/node.rs index 9199c5df1..7863138e9 100644 --- a/core/tests/common/node.rs +++ b/core/tests/common/node.rs @@ -4,7 +4,6 @@ use std::time::Duration; use everscale_crypto::ed25519; use tycho_core::blockchain_rpc::BlockchainRpcService; - use tycho_network::{ DhtClient, DhtConfig, DhtService, Network, OverlayConfig, OverlayId, OverlayService, PeerResolver, PublicOverlay, Router, diff --git a/core/tests/common/storage.rs b/core/tests/common/storage.rs index bef51964a..a2ba16506 100644 --- a/core/tests/common/storage.rs +++ b/core/tests/common/storage.rs @@ -1,34 +1,11 @@ use anyhow::{Context, Result}; -use bytesize::ByteSize; use tempfile::TempDir; use tycho_block_util::archive::ArchiveData; use tycho_block_util::block::{BlockProofStuff, BlockProofStuffAug, BlockStuff}; -use tycho_storage::{BlockMetaData, Db, DbOptions, Storage}; +use tycho_storage::{BlockMetaData, Storage}; use crate::common::*; -pub(crate) async fn init_empty_storage() -> Result<(Storage, TempDir)> { - let tmp_dir = 
tempfile::tempdir()?; - let root_path = tmp_dir.path(); - - // Init rocksdb - let db_options = DbOptions { - rocksdb_lru_capacity: ByteSize::kb(1024), - cells_cache_size: ByteSize::kb(1024), - }; - let db = Db::open(root_path.join("db_storage"), db_options)?; - - // Init storage - let storage = Storage::new( - db, - root_path.join("file_storage"), - db_options.cells_cache_size.as_u64(), - )?; - assert!(storage.node_state().load_init_mc_block_id().is_none()); - - Ok((storage, tmp_dir)) -} - pub(crate) fn get_archive() -> Result { let data = include_bytes!("../../tests/data/00001"); let archive = archive::Archive::new(data)?; @@ -37,7 +14,7 @@ pub(crate) fn get_archive() -> Result { } pub(crate) async fn init_storage() -> Result<(Storage, TempDir)> { - let (storage, tmp_dir) = init_empty_storage().await?; + let (storage, tmp_dir) = Storage::new_temp()?; let data = include_bytes!("../../tests/data/00001"); let provider = archive::Archive::new(data)?; diff --git a/core/tests/overlay_client.rs b/core/tests/overlay_client.rs index 9d3f201fa..422670ed8 100644 --- a/core/tests/overlay_client.rs +++ b/core/tests/overlay_client.rs @@ -49,9 +49,9 @@ pub async fn test() { let mut rng = thread_rng(); let slice = initial_peers.as_slice(); while i < 1000 { - //let start = Instant::now(); + // let start = Instant::now(); let n_opt = neighbours.choose().await; - //let end = Instant::now(); + // let end = Instant::now(); if let Some(n) = n_opt { let index = slice @@ -90,5 +90,5 @@ pub async fn test() { println!("peer {} score {}", i.peer_id(), i.get_stats().score); } - //assert_ne!(peers.len(), 5); + // assert_ne!(peers.len(), 5); } diff --git a/core/tests/overlay_server.rs b/core/tests/overlay_server.rs index 3e3f439fb..f12b913a0 100644 --- a/core/tests/overlay_server.rs +++ b/core/tests/overlay_server.rs @@ -9,6 +9,7 @@ use tycho_core::blockchain_rpc::BlockchainRpcClient; use tycho_core::overlay_client::PublicOverlayClient; use tycho_core::proto::blockchain::{BlockFull, 
KeyBlockIds, PersistentStatePart}; use tycho_network::PeerId; +use tycho_storage::Storage; use crate::common::archive::*; @@ -16,7 +17,7 @@ mod common; #[tokio::test] async fn overlay_server_with_empty_storage() -> Result<()> { - tycho_util::test::init_logger("overlay_server_with_empty_storage"); + tycho_util::test::init_logger("overlay_server_with_empty_storage", "debug"); #[derive(Debug, Default)] struct PeerState { @@ -24,7 +25,7 @@ async fn overlay_server_with_empty_storage() -> Result<()> { known_by: usize, } - let (storage, tmp_dir) = common::storage::init_empty_storage().await?; + let (storage, _tmp_dir) = Storage::new_temp()?; const NODE_COUNT: usize = 10; let nodes = common::node::make_network(storage, NODE_COUNT); @@ -134,15 +135,13 @@ async fn overlay_server_with_empty_storage() -> Result<()> { let result = client.get_archive_slice(0, 0, 100).await; assert!(result.is_err()); - tmp_dir.close()?; - tracing::info!("done!"); Ok(()) } #[tokio::test] async fn overlay_server_blocks() -> Result<()> { - tycho_util::test::init_logger("overlay_server_blocks"); + tycho_util::test::init_logger("overlay_server_blocks", "debug"); #[derive(Debug, Default)] struct PeerState { diff --git a/justfile b/justfile index 5179c61d9..40ba2c285 100644 --- a/justfile +++ b/justfile @@ -1,23 +1,136 @@ -# Define default recipe -default: fmt lint docs test +default: + @just --choose install_fmt: rustup component add rustfmt --toolchain nightly -# helpers + +integration_test_dir := justfile_directory() / ".scratch/integration_tests" +integration_test_base_url := "https://tycho-test.broxus.cc" + +prepare_integration_tests: + #!/usr/bin/env bash + # Create the integration test directory if it does not exist + echo "Integration test directory: {{integration_test_dir}}" + mkdir -p {{integration_test_dir}} + + # Always download the checksum file first to ensure it's the latest + echo "Downloading checksum file..." 
+ curl --request GET -sL --url {{integration_test_base_url}}/states.tar.zst.sha256 --output {{integration_test_dir}}/states.tar.zst.sha256 + + # Check if the archive file exists + if [ -f {{integration_test_dir}}/states.tar.zst ]; then + # Verify the archive against the checksum + echo "Verifying existing archive against checksum..." + cd {{integration_test_dir}} + if sha256sum -c states.tar.zst.sha256; then + echo "Checksum matches. No need to download the archive." + else + echo "Checksum does not match. Downloading the archive..." + just _download_archive + fi + else + echo "Archive file does not exist. Downloading the archive..." + just _download_archive + fi + +_download_archive: + curl --request GET -L --url {{integration_test_base_url}}/states.tar.zst --output {{integration_test_dir}}/states.tar.zst + +clean_integration_tests: + rm -rf {{integration_test_dir}} + + fmt: install_fmt cargo +nightly fmt --all # ci checks +ci: fmt lint docs test + check_format: install_fmt cargo +nightly fmt --all -- --check -lint: check_format +lint: cargo clippy --all-targets --all-features --workspace -docs: check_format +docs: export RUSTDOCFLAGS=-D warnings cargo doc --no-deps --document-private-items --all-features --workspace -test: lint - cargo test --all-targets --all-features --workspace \ No newline at end of file +test: + cargo test --all-targets --all-features --workspace + +# runs all tests including ignored. 
Will take a lot of time to run +integration_test: prepare_integration_tests + #cargo test -r --all-targets --all-features --workspace -- --ignored #uncomment this when all crates will compile ˙◠˙ + # for now add tests one by one + RUST_LIB_BACKTRACE=1 RUST_BACKTRACE=1 cargo test -r --package tycho-storage --lib store::shard_state::store_state_raw::test::insert_and_delete_of_several_shards -- --ignored --exact --nocapture + +gen_network n: build_debug + #!/usr/bin/env bash + TEMP_DIR="./.temp" + TYCHO_BIN="./target/debug/tycho" + + mkdir -p "$TEMP_DIR" + + N={{n}} + + GLOBAL_CONFIG='{}' + NODE_CONFIG=$(cat ./config.json) + + for i in $(seq $N); + do + $TYCHO_BIN tool gen-key > "$TEMP_DIR/keys${i}.json" + + PORT=$((20000 + i)) + + KEY=$(jq -r .secret < "$TEMP_DIR/keys${i}.json") + DHT_ENTRY=$($TYCHO_BIN tool gen-dht "127.0.0.1:$PORT" --key "$KEY") + + GLOBAL_CONFIG=$(echo "$GLOBAL_CONFIG" | jq ".bootstrap_peers += [$DHT_ENTRY]") + + NODE_CONFIG=$(echo "$NODE_CONFIG" | jq ".port = $PORT | .storage.root_dir = \"$TEMP_DIR/db${i}\"") + echo "$NODE_CONFIG" > "$TEMP_DIR/config${i}.json" + done + + ZEROSTATE=$(cat zerostate.json | jq '.validators = []') + for i in $(seq $N); + do + PUBKEY=$(jq .public < "$TEMP_DIR/keys${i}.json") + ZEROSTATE=$(echo "$ZEROSTATE" | jq ".validators += [$PUBKEY]") + done + + echo "$ZEROSTATE" > "$TEMP_DIR/zerostate.json" + ZEROSTATE_ID=$( + $TYCHO_BIN tool gen-zerostate "$TEMP_DIR/zerostate.json" \ + --output "$TEMP_DIR/zerostate.boc" \ + --force + ) + + GLOBAL_CONFIG=$(echo "$GLOBAL_CONFIG" | jq ".zerostate = $ZEROSTATE_ID") + echo "$GLOBAL_CONFIG" > "$TEMP_DIR/global-config.json" + +node n: build_debug + #!/usr/bin/env bash + TEMP_DIR="./.temp" + TYCHO_BIN="./target/debug/tycho" + + $TYCHO_BIN node run \ + --keys "$TEMP_DIR/keys{{n}}.json" \ + --config "$TEMP_DIR/config{{n}}.json" \ + --global-config "$TEMP_DIR/global-config.json" \ + --import-zerostate "$TEMP_DIR/zerostate.boc" \ + --logger-config ./logger.json \ + +init_node_config: 
build_debug + #!/usr/bin/env bash + TYCHO_BIN="./target/debug/tycho" + $TYCHO_BIN node run --init-config "./config.json" + +init_zerostate_config: build_debug + #!/usr/bin/env bash + TYCHO_BIN="./target/debug/tycho" + $TYCHO_BIN tool gen-zerostate --init-config "./zerostate.json" + +build_debug: + cargo build --bin tycho diff --git a/network.Dockerfile b/network.Dockerfile index 8e514d55c..47d9660eb 100644 --- a/network.Dockerfile +++ b/network.Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1.2 -FROM rust:1.76-buster as builder +FROM rust:1.77.2-buster as builder WORKDIR /build COPY . . diff --git a/network/examples/network_node.rs b/network/examples/network_node.rs index 4e4eda0af..ccd14fca3 100644 --- a/network/examples/network_node.rs +++ b/network/examples/network_node.rs @@ -51,7 +51,7 @@ impl Cli { .with_ansi(false) .compact() .with_writer(non_blocking) - .with_filter(EnvFilter::new("trace")), //todo: update with needed crates + .with_filter(EnvFilter::new("trace")), // todo: update with needed crates ); tracing::subscriber::set_global_default(collector)?; } else { diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs index 23d2163a2..259640674 100644 --- a/network/src/dht/mod.rs +++ b/network/src/dht/mod.rs @@ -8,9 +8,15 @@ use tokio::sync::broadcast; use tycho_util::realloc_box_enum; use tycho_util::time::now_sec; +pub use self::config::DhtConfig; +pub use self::peer_resolver::{ + PeerResolver, PeerResolverBuilder, PeerResolverConfig, PeerResolverHandle, +}; +pub use self::query::DhtQueryMode; use self::query::{Query, QueryCache, StoreValue}; use self::routing::HandlesRoutingTable; use self::storage::Storage; +pub use self::storage::{DhtValueMerger, DhtValueSource, StorageError}; use crate::network::Network; use crate::proto::dht::{ rpc, NodeInfoResponse, NodeResponse, PeerValue, PeerValueKey, PeerValueKeyName, @@ -19,11 +25,6 @@ use crate::proto::dht::{ use crate::types::{PeerId, PeerInfo, Request, Response, Service, ServiceRequest}; use 
crate::util::{NetworkExt, Routable}; -pub use self::config::DhtConfig; -pub use self::peer_resolver::{PeerResolver, PeerResolverBuilder, PeerResolverHandle}; -pub use self::query::DhtQueryMode; -pub use self::storage::{DhtValueMerger, DhtValueSource, StorageError}; - mod background_tasks; mod config; mod peer_resolver; diff --git a/network/src/dht/peer_resolver.rs b/network/src/dht/peer_resolver.rs index 0f9068628..2a23c868b 100644 --- a/network/src/dht/peer_resolver.rs +++ b/network/src/dht/peer_resolver.rs @@ -4,10 +4,11 @@ use std::sync::{Arc, Mutex, Weak}; use std::time::Duration; use exponential_backoff::Backoff; +use serde::{Deserialize, Serialize}; use tokio::sync::{Notify, Semaphore}; use tycho_util::futures::JoinTask; use tycho_util::time::now_sec; -use tycho_util::FastDashMap; +use tycho_util::{serde_helpers, FastDashMap}; use crate::dht::DhtService; use crate::network::{KnownPeerHandle, KnownPeersError, Network, PeerBannedError, WeakNetwork}; @@ -20,77 +21,66 @@ pub struct PeerResolverBuilder { } impl PeerResolverBuilder { + pub fn with_config(mut self, config: PeerResolverConfig) -> Self { + self.inner = config; + self + } + + pub fn build(self, network: &Network) -> PeerResolver { + let semaphore = Semaphore::new(self.inner.max_parallel_resolve_requests); + + PeerResolver { + inner: Arc::new(PeerResolverInner { + weak_network: Network::downgrade(network), + dht_service: self.dht_service, + config: Default::default(), + tasks: Default::default(), + semaphore, + }), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct PeerResolverConfig { + /// Maximum number of parallel resolve requests. + /// + /// Default: 100. + pub max_parallel_resolve_requests: usize, + /// Minimal time-to-live for the resolved peer info. /// /// Default: 600 seconds. 
- pub fn with_min_ttl_sec(mut self, ttl_sec: u32) -> Self { - self.inner.min_ttl_sec = ttl_sec; - self - } + pub min_ttl_sec: u32, /// Time before the expiration when the peer info should be updated. /// /// Default: 1200 seconds. - pub fn with_update_before_sec(mut self, update_before_sec: u32) -> Self { - self.inner.update_before_sec = update_before_sec; - self - } + pub update_before_sec: u32, /// Number of fast retries before switching to the stale retry interval. /// /// Default: 10. - pub fn with_fast_retry_count(mut self, fast_retry_count: u32) -> Self { - self.inner.fast_retry_count = fast_retry_count; - self - } + pub fast_retry_count: u32, /// Minimal interval between the fast retries. /// /// Default: 1 second. - pub fn with_min_retry_interval(mut self, min_retry_interval: Duration) -> Self { - self.inner.min_retry_interval = min_retry_interval; - self - } + #[serde(with = "serde_helpers::humantime")] + pub min_retry_interval: Duration, /// Maximal interval between the fast retries. /// /// Default: 120 seconds. - pub fn with_max_retry_interval(mut self, max_retry_interval: Duration) -> Self { - self.inner.max_retry_interval = max_retry_interval; - self - } + #[serde(with = "serde_helpers::humantime")] + pub max_retry_interval: Duration, /// Interval between the stale retries. /// /// Default: 600 seconds. 
- pub fn with_stale_retry_interval(mut self, stale_retry_interval: Duration) -> Self { - self.inner.stale_retry_interval = stale_retry_interval; - self - } - - pub fn build(self, network: &Network) -> PeerResolver { - let semaphore = Semaphore::new(self.inner.max_parallel_resolve_requests); - - PeerResolver { - inner: Arc::new(PeerResolverInner { - weak_network: Network::downgrade(network), - dht_service: self.dht_service, - config: Default::default(), - tasks: Default::default(), - semaphore, - }), - } - } -} - -struct PeerResolverConfig { - max_parallel_resolve_requests: usize, - min_ttl_sec: u32, - update_before_sec: u32, - fast_retry_count: u32, - min_retry_interval: Duration, - max_retry_interval: Duration, - stale_retry_interval: Duration, + #[serde(with = "serde_helpers::humantime")] + pub stale_retry_interval: Duration, } impl Default for PeerResolverConfig { diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs index 19fe5188e..defd4827a 100644 --- a/network/src/dht/query.rs +++ b/network/src/dht/query.rs @@ -375,13 +375,10 @@ impl Query { return (node, None); }; - let req = network.query( - &node.id, - Request { - version: Default::default(), - body: request_body.clone(), - }, - ); + let req = network.query(&node.id, Request { + version: Default::default(), + body: request_body.clone(), + }); let res = match tokio::time::timeout(REQUEST_TIMEOUT, req).await { Ok(res) => { @@ -443,13 +440,10 @@ impl StoreValue<()> { return (node, None); }; - let req = network.send( - &node.id, - Request { - version: Default::default(), - body: request_body.clone(), - }, - ); + let req = network.send(&node.id, Request { + version: Default::default(), + body: request_body.clone(), + }); let res = match tokio::time::timeout(REQUEST_TIMEOUT, req).await { Ok(res) => Some(res), diff --git a/network/src/lib.rs b/network/src/lib.rs index c276c25cf..7937089be 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,20 +1,15 @@ -pub use self::overlay::{ - 
OverlayConfig, OverlayId, OverlayService, OverlayServiceBackgroundTasks, OverlayServiceBuilder, - PrivateOverlay, PrivateOverlayBuilder, PrivateOverlayEntries, PrivateOverlayEntriesEvent, - PrivateOverlayEntriesReadGuard, PrivateOverlayEntriesWriteGuard, PublicOverlay, - PublicOverlayBuilder, PublicOverlayEntries, PublicOverlayEntriesReadGuard, -}; -pub use self::util::{check_peer_signature, NetworkExt, Routable, Router, RouterBuilder}; pub use dht::{ xor_distance, DhtClient, DhtConfig, DhtQueryBuilder, DhtQueryMode, DhtQueryWithDataBuilder, DhtService, DhtServiceBackgroundTasks, DhtServiceBuilder, DhtValueMerger, DhtValueSource, - FindValueError, PeerResolver, PeerResolverBuilder, PeerResolverHandle, StorageError, + FindValueError, PeerResolver, PeerResolverBuilder, PeerResolverConfig, PeerResolverHandle, + StorageError, }; pub use network::{ ActivePeers, Connection, KnownPeerHandle, KnownPeers, KnownPeersError, Network, NetworkBuilder, - NetworkConfig, Peer, PeerBannedError, QuicConfig, RecvStream, SendStream, WeakActivePeers, - WeakKnownPeerHandle, WeakNetwork, + NetworkConfig, Peer, PeerBannedError, QuicConfig, RecvStream, SendStream, ToSocket, + WeakActivePeers, WeakKnownPeerHandle, WeakNetwork, }; +pub use quinn; pub use types::{ service_datagram_fn, service_message_fn, service_query_fn, Address, BoxCloneService, BoxService, Direction, DisconnectReason, InboundRequestMeta, PeerAffinity, PeerEvent, PeerId, @@ -22,7 +17,13 @@ pub use types::{ ServiceMessageFn, ServiceQueryFn, ServiceRequest, Version, }; -pub use quinn; +pub use self::overlay::{ + OverlayConfig, OverlayId, OverlayService, OverlayServiceBackgroundTasks, OverlayServiceBuilder, + PrivateOverlay, PrivateOverlayBuilder, PrivateOverlayEntries, PrivateOverlayEntriesEvent, + PrivateOverlayEntriesReadGuard, PrivateOverlayEntriesWriteGuard, PublicOverlay, + PublicOverlayBuilder, PublicOverlayEntries, PublicOverlayEntriesReadGuard, +}; +pub use self::util::{check_peer_signature, NetworkExt, Routable, 
Router, RouterBuilder}; mod dht; mod network; diff --git a/network/src/network/connection.rs b/network/src/network/connection.rs index bbb829343..7d601bfb1 100644 --- a/network/src/network/connection.rs +++ b/network/src/network/connection.rs @@ -201,9 +201,10 @@ pub(crate) fn parse_peer_identity(identity: Box) -> Result { #[derive(Default)] struct BuilderFields { config: Option, + remote_addr: Option
, } impl NetworkBuilder { @@ -45,6 +45,11 @@ impl NetworkBuilder { self.optional_fields.config = Some(config); self } + + pub fn with_remote_addr>(mut self, addr: T) -> Self { + self.optional_fields.remote_addr = Some(addr.into()); + self + } } impl NetworkBuilder<((), T2)> { @@ -72,13 +77,11 @@ impl NetworkBuilder<(T1, ())> { } impl NetworkBuilder { - pub fn build(self, bind_address: T, service: S) -> Result + pub fn build(self, bind_address: T, service: S) -> Result where S: Send + Sync + Clone + 'static, S: Service, { - use socket2::{Domain, Protocol, Socket, Type}; - let config = self.optional_fields.config.unwrap_or_default(); let quic_config = config.quic.clone().unwrap_or_default(); let (service_name, private_key) = self.mandatory_fields; @@ -92,18 +95,7 @@ impl NetworkBuilder { .with_transport_config(quic_config.make_transport_config()) .build()?; - let socket = 'socket: { - let mut err = anyhow::anyhow!("no addresses to bind to"); - for addr in bind_address.to_socket_addrs()? { - let s = Socket::new(Domain::for_address(addr), Type::DGRAM, Some(Protocol::UDP))?; - if let Err(e) = s.bind(&socket2::SockAddr::from(addr)) { - err = e.into(); - } else { - break 'socket s; - } - } - return Err(err); - }; + let socket = bind_address.to_socket().map(socket2::Socket::from)?; if let Some(send_buffer_size) = quic_config.socket_send_buffer_size { if let Err(e) = socket.set_send_buffer_size(send_buffer_size) { @@ -129,6 +121,12 @@ impl NetworkBuilder { let weak_active_peers = ActivePeers::downgrade(&active_peers); let known_peers = KnownPeers::new(); + let remote_addr = self.optional_fields.remote_addr.unwrap_or_else(|| { + let addr = endpoint.local_addr(); + tracing::debug!(%addr, "using local address as remote address"); + addr.into() + }); + let inner = Arc::new_cyclic(move |_weak| { let service = service.boxed_clone(); @@ -144,6 +142,7 @@ impl NetworkBuilder { NetworkInner { config, + remote_addr, endpoint, active_peers: weak_active_peers, known_peers, @@ -181,6 
+180,10 @@ impl Network { } } + pub fn remote_addr(&self) -> &Address { + self.0.remote_addr() + } + pub fn local_addr(&self) -> SocketAddr { self.0.local_addr() } @@ -232,7 +235,7 @@ impl Network { pub fn sign_peer_info(&self, now: u32, ttl: u32) -> PeerInfo { let mut res = PeerInfo { id: *self.0.peer_id(), - address_list: vec![self.local_addr().into()].into_boxed_slice(), + address_list: vec![self.remote_addr().clone()].into_boxed_slice(), created_at: now, expires_at: now.saturating_add(ttl), signature: Box::new([0; 64]), @@ -248,6 +251,7 @@ impl Network { struct NetworkInner { config: Arc, + remote_addr: Address, endpoint: Arc, active_peers: WeakActivePeers, known_peers: KnownPeers, @@ -256,6 +260,10 @@ struct NetworkInner { } impl NetworkInner { + fn remote_addr(&self) -> &Address { + &self.remote_addr + } + fn local_addr(&self) -> SocketAddr { self.endpoint.local_addr() } @@ -317,16 +325,68 @@ impl Drop for NetworkInner { } } +pub trait ToSocket { + fn to_socket(self) -> Result; +} + +impl ToSocket for std::net::UdpSocket { + fn to_socket(self) -> Result { + Ok(self) + } +} + +macro_rules! impl_to_socket_for_addr { + ($($ty:ty),*$(,)?) => {$( + impl ToSocket for $ty { + fn to_socket(self) -> Result { + bind_socket_to_addr(self) + } + } + )*}; +} + +impl_to_socket_for_addr! { + SocketAddr, + std::net::SocketAddrV4, + std::net::SocketAddrV6, + (std::net::IpAddr, u16), + (std::net::Ipv4Addr, u16), + (std::net::Ipv6Addr, u16), + (&str, u16), + (String, u16), + &str, + String, + &[SocketAddr], + Address, +} + +fn bind_socket_to_addr(bind_address: T) -> Result { + use socket2::{Domain, Protocol, Socket, Type}; + + let mut err = anyhow::anyhow!("no addresses to bind to"); + for addr in bind_address.to_socket_addrs()? 
{ + let s = Socket::new(Domain::for_address(addr), Type::DGRAM, Some(Protocol::UDP))?; + if let Err(e) = s.bind(&socket2::SockAddr::from(addr)) { + err = e.into(); + } else { + return Ok(s.into()); + } + } + Err(err) +} + #[derive(thiserror::Error, Debug)] #[error("network has been shutdown")] struct NetworkShutdownError; #[cfg(test)] mod tests { + use futures_util::stream::FuturesUnordered; + use futures_util::StreamExt; use tracing_test::traced_test; use super::*; - use crate::types::{service_query_fn, BoxCloneService, PeerInfo, Request}; + use crate::types::{service_message_fn, service_query_fn, BoxCloneService, PeerInfo, Request}; use crate::util::NetworkExt; fn echo_service() -> BoxCloneService { @@ -355,7 +415,7 @@ mod tests { fn make_peer_info(network: &Network) -> Arc { Arc::new(PeerInfo { id: *network.peer_id(), - address_list: vec![network.local_addr().into()].into_boxed_slice(), + address_list: vec![network.remote_addr().clone()].into_boxed_slice(), created_at: 0, expires_at: u32::MAX, signature: Box::new([0; 64]), @@ -425,4 +485,59 @@ mod tests { Ok(()) } + + #[traced_test] + #[tokio::test(flavor = "multi_thread")] + async fn uni_message_handler() -> Result<()> { + std::panic::set_hook(Box::new(|info| { + use std::io::Write; + + tracing::error!("{}", info); + std::io::stderr().flush().ok(); + std::io::stdout().flush().ok(); + std::process::exit(1); + })); + + fn noop_service() -> BoxCloneService { + let handle = |request: ServiceRequest| async move { + tracing::trace!("received: {} bytes", request.body.len()); + }; + service_message_fn(handle).boxed_clone() + } + + fn make_network() -> Result { + Network::builder() + .with_config(NetworkConfig { + enable_0rtt: true, + ..Default::default() + }) + .with_random_private_key() + .with_service_name("tycho") + .build("127.0.0.1:0", noop_service()) + } + + let left = make_network()?; + let right = make_network()?; + + let _left_to_right = left.known_peers().insert(make_peer_info(&right), false)?; + let 
_right_to_left = right.known_peers().insert(make_peer_info(&left), false)?; + + let req = Request { + version: Default::default(), + body: vec![0xff; 750 * 1024].into(), + }; + + for _ in 0..10 { + let mut futures = FuturesUnordered::new(); + for _ in 0..100 { + futures.push(left.send(&right.peer_id(), req.clone())); + } + + while let Some(res) = futures.next().await { + res?; + } + } + + Ok(()) + } } diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs index 3f8c74a8a..1c460c768 100644 --- a/network/src/network/request_handler.rs +++ b/network/src/network/request_handler.rs @@ -37,6 +37,26 @@ impl InboundRequestHandler { pub async fn start(self) { tracing::debug!(peer_id = %self.connection.peer_id(), "request handler started"); + struct ClearOnDrop<'a> { + handler: &'a InboundRequestHandler, + reason: DisconnectReason, + } + + impl Drop for ClearOnDrop<'_> { + fn drop(&mut self) { + self.handler.active_peers.remove_with_stable_id( + self.handler.connection.peer_id(), + self.handler.connection.stable_id(), + self.reason, + ); + } + } + + let mut clear_on_drop = ClearOnDrop { + handler: &self, + reason: DisconnectReason::LocallyClosed, + }; + let mut inflight_requests = JoinSet::<()>::new(); let reason: quinn::ConnectionError = loop { @@ -107,12 +127,7 @@ impl InboundRequestHandler { } } }; - - self.active_peers.remove_with_stable_id( - self.connection.peer_id(), - self.connection.stable_id(), - DisconnectReason::from(reason), - ); + clear_on_drop.reason = reason.into(); inflight_requests.shutdown().await; tracing::debug!(peer_id = %self.connection.peer_id(), "request handler stopped"); diff --git a/network/src/overlay/background_tasks.rs b/network/src/overlay/background_tasks.rs index 58bdae552..333d95105 100644 --- a/network/src/overlay/background_tasks.rs +++ b/network/src/overlay/background_tasks.rs @@ -230,7 +230,7 @@ impl OverlayServiceInner { entries.extend( all_entries .choose_multiple(rng, n) - .filter(|&item| 
(item.entry.peer_id != target_peer_id)) + .filter(|&item| item.entry.peer_id != target_peer_id) .map(|item| item.entry.clone()) .take(n - 1), ); @@ -259,7 +259,7 @@ impl OverlayServiceInner { count = entries.len(), "received public entries" ); - overlay.add_untrusted_entries(&entries, now_sec()); + overlay.add_untrusted_entries(&self.local_id, &entries, now_sec()); } PublicEntriesResponse::OverlayNotFound => { tracing::debug!( @@ -311,7 +311,7 @@ impl OverlayServiceInner { } }; - overlay.add_untrusted_entries(&entries, now_sec()); + overlay.add_untrusted_entries(&self.local_id, &entries, now_sec()); tracing::debug!(count = entries.len(), "discovered public entries"); Ok(()) diff --git a/network/src/overlay/mod.rs b/network/src/overlay/mod.rs index c8abd000d..0a761eba7 100644 --- a/network/src/overlay/mod.rs +++ b/network/src/overlay/mod.rs @@ -7,14 +7,8 @@ use tycho_util::futures::BoxFutureOrNoop; use tycho_util::time::now_sec; use tycho_util::{FastDashMap, FastHashSet}; -use self::entries_merger::PublicOverlayEntriesMerger; -use crate::dht::DhtService; -use crate::network::Network; -use crate::proto::overlay::{rpc, PublicEntriesResponse, PublicEntry}; -use crate::types::{PeerId, Response, Service, ServiceRequest}; -use crate::util::Routable; - pub use self::config::OverlayConfig; +use self::entries_merger::PublicOverlayEntriesMerger; pub use self::overlay_id::OverlayId; pub use self::private_overlay::{ PrivateOverlay, PrivateOverlayBuilder, PrivateOverlayEntries, PrivateOverlayEntriesEvent, @@ -23,6 +17,11 @@ pub use self::private_overlay::{ pub use self::public_overlay::{ PublicOverlay, PublicOverlayBuilder, PublicOverlayEntries, PublicOverlayEntriesReadGuard, }; +use crate::dht::DhtService; +use crate::network::Network; +use crate::proto::overlay::{rpc, PublicEntriesResponse, PublicEntry}; +use crate::types::{PeerId, Response, Service, ServiceRequest}; +use crate::util::Routable; mod background_tasks; mod config; @@ -317,7 +316,7 @@ impl OverlayServiceInner { }; 
// Add proposed entries to the overlay - overlay.add_untrusted_entries(&req.entries, now_sec()); + overlay.add_untrusted_entries(&self.local_id, &req.entries, now_sec()); // Collect proposed entries to exclude from the response let requested_ids = req diff --git a/network/src/overlay/public_overlay.rs b/network/src/overlay/public_overlay.rs index a9dcfae32..bca628299 100644 --- a/network/src/overlay/public_overlay.rs +++ b/network/src/overlay/public_overlay.rs @@ -202,7 +202,12 @@ impl PublicOverlay { /// Adds the given entries to the overlay. /// /// NOTE: Will deadlock if called while `PublicOverlayEntriesReadGuard` is held. - pub(crate) fn add_untrusted_entries(&self, entries: &[Arc], now: u32) { + pub(crate) fn add_untrusted_entries( + &self, + local_id: &PeerId, + entries: &[Arc], + now: u32, + ) { if entries.is_empty() { return; } @@ -239,6 +244,7 @@ impl PublicOverlay { for (entry, is_valid) in std::iter::zip(entries, is_valid.iter_mut()) { if entry.is_expired(now, this.entry_ttl_sec) || self.inner.banned_peer_ids.contains(&entry.peer_id) + || entry.peer_id == local_id { // Skip expired or banned peers early continue; @@ -579,16 +585,17 @@ mod tests { #[test] fn min_capacity_works_with_single_thread() { let now = now_sec(); + let local_id: PeerId = rand::random(); // Add with small portions { let overlay = make_overlay_with_min_capacity(10); let entries = generate_public_entries(&overlay, now, 10); - overlay.add_untrusted_entries(&entries[..5], now); + overlay.add_untrusted_entries(&local_id, &entries[..5], now); assert_eq!(count_entries(&overlay), 5); - overlay.add_untrusted_entries(&entries[5..], now); + overlay.add_untrusted_entries(&local_id, &entries[5..], now); assert_eq!(count_entries(&overlay), 10); } @@ -596,7 +603,7 @@ mod tests { { let overlay = make_overlay_with_min_capacity(10); let entries = generate_public_entries(&overlay, now, 10); - overlay.add_untrusted_entries(&entries, now); + overlay.add_untrusted_entries(&local_id, &entries, now); 
assert_eq!(count_entries(&overlay), 10); } @@ -604,7 +611,7 @@ mod tests { { let overlay = make_overlay_with_min_capacity(10); let entries = generate_public_entries(&overlay, now, 20); - overlay.add_untrusted_entries(&entries, now); + overlay.add_untrusted_entries(&local_id, &entries, now); assert_eq!(count_entries(&overlay), 10); } @@ -612,7 +619,7 @@ mod tests { { let overlay = make_overlay_with_min_capacity(0); let entries = generate_public_entries(&overlay, now, 10); - overlay.add_untrusted_entries(&entries, now); + overlay.add_untrusted_entries(&local_id, &entries, now); assert_eq!(count_entries(&overlay), 0); } @@ -622,7 +629,7 @@ mod tests { let entries = (0..10) .map(|_| generate_invalid_public_entry(now)) .collect::>(); - overlay.add_untrusted_entries(&entries, now); + overlay.add_untrusted_entries(&local_id, &entries, now); assert_eq!(count_entries(&overlay), 0); } @@ -641,7 +648,7 @@ mod tests { generate_invalid_public_entry(now), generate_public_entry(&overlay, now), ]; - overlay.add_untrusted_entries(&entries, now); + overlay.add_untrusted_entries(&local_id, &entries, now); assert_eq!(count_entries(&overlay), 5); } @@ -660,7 +667,7 @@ mod tests { generate_public_entry(&overlay, now), generate_public_entry(&overlay, now), ]; - overlay.add_untrusted_entries(&entries, now); + overlay.add_untrusted_entries(&local_id, &entries, now); assert_eq!(count_entries(&overlay), 3); } } @@ -668,6 +675,7 @@ mod tests { #[test] fn min_capacity_works_with_multi_thread() { let now = now_sec(); + let local_id: PeerId = rand::random(); let overlay = make_overlay_with_min_capacity(201); let entries = generate_public_entries(&overlay, now, 7 * 3 * 10); @@ -676,7 +684,7 @@ mod tests { for entries in entries.chunks_exact(7 * 3) { s.spawn(|| { for entries in entries.chunks_exact(7) { - overlay.add_untrusted_entries(entries, now); + overlay.add_untrusted_entries(&local_id, entries, now); } }); } diff --git a/network/src/overlay/tasks_stream.rs 
b/network/src/overlay/tasks_stream.rs index e484eaf71..39e7bf895 100644 --- a/network/src/overlay/tasks_stream.rs +++ b/network/src/overlay/tasks_stream.rs @@ -105,7 +105,7 @@ impl TasksStream { let overlay_id = *overlay_id; async move { if let Err(e) = fut.await { - tracing::error!(task, %overlay_id, "task failed: {e:?}"); + tracing::error!(task, %overlay_id, "task failed: {e}"); } overlay_id } diff --git a/network/src/types/peer_info.rs b/network/src/types/peer_info.rs index 08cbbcd64..26d6412a7 100644 --- a/network/src/types/peer_info.rs +++ b/network/src/types/peer_info.rs @@ -128,9 +128,10 @@ mod serde_signature { #[cfg(test)] mod tests { - use super::*; use std::str::FromStr; + use super::*; + #[test] fn serde() { let target_peer_info = PeerInfo { diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs index 2d8b8f49d..d0eb661f6 100644 --- a/network/src/util/mod.rs +++ b/network/src/util/mod.rs @@ -1,9 +1,7 @@ pub use self::router::{Routable, Router, RouterBuilder}; -pub use self::traits::NetworkExt; - #[cfg(test)] pub use self::test::make_peer_info_stub; - +pub use self::traits::NetworkExt; use crate::types::PeerId; mod router; diff --git a/rustfmt.toml b/rustfmt.toml index 16bdde911..e850fab28 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1 +1,5 @@ format_code_in_doc_comments = true +imports_granularity = "Module" +normalize_comments = true +overflow_delimited_expr = true +group_imports = "StdExternalCrate" diff --git a/simulator/Cargo.toml b/simulator/Cargo.toml index 809d83e48..c562ad9ca 100644 --- a/simulator/Cargo.toml +++ b/simulator/Cargo.toml @@ -22,5 +22,7 @@ rand = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } +tycho-util = { workspace = true } + [lints] workspace = true diff --git a/simulator/README.md b/simulator/README.md index e1caea12f..d231dd8f2 100644 --- a/simulator/README.md +++ b/simulator/README.md @@ -18,6 +18,7 @@ cargo install --path ./simulator # Or alias via 
`alias simulator="cargo run --bin simulator --"` simulator prepare +simulator build simulator node start simulator node logs -f simulator node exec diff --git a/simulator/src/compose.rs b/simulator/src/compose.rs index 3b00d10e8..98a85bf23 100644 --- a/simulator/src/compose.rs +++ b/simulator/src/compose.rs @@ -1,12 +1,14 @@ use std::collections::HashMap; use std::ffi::OsStr; use std::path::PathBuf; -use std::process::{Command, Stdio}; +use std::process::{Command, Output, Stdio}; +use std::str; use anyhow::{Context, Result}; use serde::{Deserialize, Serialize}; use crate::config::ServiceConfig; +use crate::node::NodeOptions; pub struct ComposeRunner { compose_path: PathBuf, @@ -42,8 +44,40 @@ impl ComposeRunner { }) } - pub fn add_service(&mut self, name: String, service: Service) { - self.compose.services.insert(name, service); + pub fn add_prometheus(&mut self) -> Result<()> { + let prom_data = serde_json::json!( + { + "image": "prom/prometheus", + "ports": ["9090:9090"], + "restart": "unless-stopped", + "volumes": ["./prometheus:/etc/prometheus"], + "command": ["--config.file=/etc/prometheus/prometheus.yml"] + }); + self.compose + .services + .insert("prometheus".to_string(), prom_data); + Ok(()) + } + + pub fn add_grafana(&mut self) -> Result<()> { + let prom_data = serde_json::json!( + { + "image": "grafana/grafana", + "ports": ["3000:3000"], + "restart": "unless-stopped", + "volumes": ["./grafana:/etc/grafana/provisioning/datasources"], + "environment": ["GF_SECURITY_ADMIN_USER=admin", "GF_SECURITY_ADMIN_PASSWORD=grafana"] + }); + self.compose + .services + .insert("grafana".to_string(), prom_data); + Ok(()) + } + + pub fn add_service(&mut self, name: String, service: Service) -> Result<()> { + let value = serde_json::to_value(service)?; + self.compose.services.insert(name, value); + Ok(()) } pub fn finalize(&self) -> Result<()> { @@ -55,7 +89,7 @@ impl ComposeRunner { } /// Executes a Docker Compose command with the given arguments. 
- pub fn execute_compose_command(&self, args: &[T]) -> Result<()> + pub fn execute_compose_command(&self, args: &[T]) -> Result where T: AsRef, { @@ -66,16 +100,18 @@ impl ComposeRunner { command = command.arg(arg); } - command - .stdout(Stdio::inherit()) + println!("EXECUTING COMMAND {:?}", command); + + let result = command + .stdout(Stdio::piped()) .stderr(Stdio::inherit()) .stdin(Stdio::inherit()) .spawn() .context("Failed to spawn Docker Compose command")? - .wait() + .wait_with_output() .context("Failed to wait on Docker Compose command")?; - Ok(()) + Ok(result) } pub fn logs(&self, follow: bool, node: Option) -> Result<()> { @@ -89,7 +125,8 @@ impl ComposeRunner { args.push(format!("node-{}", node_index)); } - self.execute_compose_command(&args) + self.execute_compose_command(&args)?; + Ok(()) } pub fn stop_node(&self, node_index: Option) -> Result<()> { @@ -97,7 +134,8 @@ impl ComposeRunner { if let Some(node_index) = node_index { args.push(format!("node-{}", node_index)); } - self.execute_compose_command(&args) + self.execute_compose_command(&args)?; + Ok(()) } pub fn start_node(&self, node_index: Option) -> Result<()> { @@ -107,16 +145,78 @@ impl ComposeRunner { } args.push("-d".to_string()); - self.execute_compose_command(&args) + self.execute_compose_command(&args)?; + + { + for i in self.get_running_nodes_list()? { + println!("node {i}"); + let index = i + .split('-') + .collect::>() + .last() + .unwrap() + .parse::()?; + let info = self.node_info(index)?; + if info.delay > 0 { + self.set_delay(index, info.delay)?; + } + if info.packet_loss > 0 { + self.set_packet_loss(index, info.packet_loss)?; + } + } + } + + Ok(()) + } + + pub fn get_running_nodes_list(&self) -> Result> { + let docker_compose_command = vec!["config".to_string(), "--services".to_string()]; + let output = self.execute_compose_command(&docker_compose_command)?; + let x = String::from_utf8(output.stdout)? 
+ .trim() + .lines() + .map(|x| x.to_string()) + .collect(); + Ok(x) } - pub fn exec_command(&self, node_index: usize, cmd: &str, args: Vec) -> Result<()> { + pub fn node_info(&self, node_index: usize) -> Result { + let command = "cat"; + let output = self.exec_command(node_index, command, vec![ + "/options/options.json".to_string() + ])?; + let node_options = serde_json::from_slice(output.stdout.as_slice())?; + Ok(node_options) + } + + pub fn set_delay(&self, node_index: usize, delay: u16) -> Result<()> { + println!("Setting delay {delay}ms for node {node_index}"); + let command = "sh"; + let args = format!("tc qdisc add dev eth0 root netem delay {delay}ms"); + self.exec_command(node_index, command, vec![ + "-c".to_string(), + format!("{args}"), + ])?; + Ok(()) + } + + pub fn set_packet_loss(&self, node_index: usize, loss: u16) -> Result<()> { + println!("Setting packet loss {loss}% for node {node_index}"); + let command = "sh"; + let args = format!("tc qdisc change dev eth0 root netem loss {loss}%"); + self.exec_command(node_index, command, vec![ + "-c".to_string(), + format!("{args}"), + ])?; + Ok(()) + } + + pub fn exec_command(&self, node_index: usize, cmd: &str, args: Vec) -> Result { let service_name = format!("node-{}", node_index); let mut docker_compose_command = vec!["exec".to_string(), service_name, cmd.to_string()]; docker_compose_command.extend(args); - self.execute_compose_command(&docker_compose_command)?; - - Ok(()) + let output = self.execute_compose_command(&docker_compose_command)?; + Ok(output) } pub fn down(&self) -> Result<()> { @@ -128,7 +228,7 @@ impl ComposeRunner { #[derive(Serialize, Deserialize, Debug)] struct DockerCompose { version: String, - services: HashMap, + services: HashMap, networks: HashMap, } diff --git a/simulator/src/config.rs b/simulator/src/config.rs index d6a26e47e..9d51d0fb1 100644 --- a/simulator/src/config.rs +++ b/simulator/src/config.rs @@ -1,6 +1,7 @@ -use anyhow::{Context, Result}; use std::path::PathBuf; -use 
std::process::Command; + +use anyhow::Result; +use tycho_util::project_root; pub struct ServiceConfig { pub project_root: PathBuf, @@ -11,17 +12,7 @@ pub struct ServiceConfig { impl ServiceConfig { pub fn new(network_subnet: String) -> Result { - let project_root = Command::new("git") - .arg("rev-parse") - .arg("--show-toplevel") - .output()? - .stdout; - // won't work on windows but we don't care - let project_root = PathBuf::from( - String::from_utf8(project_root) - .context("Invalid project root")? - .trim(), - ); + let project_root = project_root()?; let scratch_dir = project_root.join(".scratch"); Ok(Self { project_root, @@ -46,4 +37,14 @@ impl ServiceConfig { pub fn entrypoints(&self) -> PathBuf { self.scratch_dir.join("entrypoints") } + pub fn grafana(&self) -> PathBuf { + self.scratch_dir.join("grafana") + } + pub fn prometheus(&self) -> PathBuf { + self.scratch_dir.join("prometheus") + } + + pub fn options(&self) -> PathBuf { + self.scratch_dir.join("options") + } } diff --git a/simulator/src/main.rs b/simulator/src/main.rs index d13f27c94..0d9969972 100644 --- a/simulator/src/main.rs +++ b/simulator/src/main.rs @@ -1,4 +1,4 @@ -#![allow(clippy::unused_self)] +#![allow(clippy::unused_self, clippy::print_stdout, clippy::print_stderr)] use std::process::Command; @@ -113,7 +113,8 @@ impl StatusCommand { let config = config::ServiceConfig::new(DEFAULT_SUBNET.to_string())?; let compose = ComposeRunner::load_from_fs(&config)?; - compose.execute_compose_command(&["ps"]) + compose.execute_compose_command(&["ps"])?; + Ok(()) } } @@ -147,6 +148,7 @@ enum NodeCommand { Add(AddCommand), Start(NodeStartCommand), Stop(NodeStopCommand), + Info(NodeInfoCommand), Logs(NodeLogsCommand), Exec(NodeExecCommand), Status(StatusCommand), @@ -164,19 +166,25 @@ impl NodeCommand { NodeCommand::Logs(a) => a.run(compose), NodeCommand::Exec(a) => a.run(compose), NodeCommand::Status(a) => a.run(), + NodeCommand::Info(a) => a.run(compose), } } } #[derive(Parser)] -struct AddCommand; 
+struct AddCommand { + #[clap(short, long)] + pub delay: Option, + #[clap(short, long)] + pub loss: Option, +} impl AddCommand { fn run(self) -> Result<()> { let config = config::ServiceConfig::new(DEFAULT_SUBNET.to_string())?; let mut sim = Simulator::new(config)?; let next_node_index = sim.next_node_index(); - sim.add_node(next_node_index)?; + sim.add_node(next_node_index, self.delay, self.loss)?; sim.finalize()?; println!("Added node-{}", next_node_index); @@ -232,6 +240,24 @@ struct NodeExecCommand { impl NodeExecCommand { fn run(self, compose: ComposeRunner) -> Result<()> { - compose.exec_command(self.node_index, &self.cmd, self.args) + compose.exec_command(self.node_index, &self.cmd, self.args)?; + Ok(()) + } +} + +#[derive(Parser)] +struct NodeInfoCommand { + #[clap(short, long)] + node_index: usize, +} + +impl NodeInfoCommand { + fn run(self, compose: ComposeRunner) -> Result<()> { + let output = compose.node_info(self.node_index)?; + println!( + "Node {} artificial delay: {} ms and packet loss: {}% ", + self.node_index, output.delay, output.packet_loss + ); + Ok(()) } } diff --git a/simulator/src/node.rs b/simulator/src/node.rs index 2c970dea9..d757b7a8d 100644 --- a/simulator/src/node.rs +++ b/simulator/src/node.rs @@ -4,6 +4,7 @@ use std::path::PathBuf; use std::process::Command; use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; use crate::compose::{Service, ServiceNetwork}; use crate::config::ServiceConfig; @@ -14,10 +15,22 @@ pub struct Node { pub port: u16, pub dht_value: serde_json::Value, pub key: String, + pub options: Option, +} + +#[derive(Serialize, Deserialize, Default, Debug)] +pub struct NodeOptions { + pub delay: u16, + pub packet_loss: u16, } impl Node { - pub fn init_from_cli(ip: Ipv4Addr, port: u16, index: usize) -> Result { + pub fn init_from_cli( + ip: Ipv4Addr, + port: u16, + index: usize, + options: Option, + ) -> Result { let private_key = hex::encode(rand::random::<[u8; 32]>()); let output = Command::new("cargo") 
.arg("run") @@ -43,6 +56,7 @@ impl Node { dht_value, port, key: private_key, + options, }) } @@ -56,6 +70,10 @@ impl Node { "{}:/app/global-config.json", service_config.global_config_path().to_string_lossy() ), + format!( + "./options/node-{}_options.json:/options/options.json", + self.index + ), format!( "{}:/app/logs:rw", self.logs_dir(service_config).to_string_lossy() @@ -67,12 +85,9 @@ impl Node { Ok(Service { entrypoint, image: "tycho-network".to_string(), - networks: HashMap::from([( - "default".to_string(), - ServiceNetwork { - ipv4_address: Some(self.ip.to_string()), - }, - )]), + networks: HashMap::from([("default".to_string(), ServiceNetwork { + ipv4_address: Some(self.ip.to_string()), + })]), stop_grace_period: "1s".to_string(), stop_signal: "KILL".to_string(), volumes, @@ -92,6 +107,12 @@ impl Node { .join(format!("node-{}_entrypoint.sh", self.index)) } + pub fn options_path(&self, service_config: &ServiceConfig) -> PathBuf { + service_config + .options() + .join(format!("node-{}_options.json", self.index)) + } + pub fn run_command(&self) -> String { format!( "run {ip}:{node_port} --key {key} --global-config /app/global-config.json", diff --git a/simulator/src/simulator.rs b/simulator/src/simulator.rs index 27c5898bf..c1503e033 100644 --- a/simulator/src/simulator.rs +++ b/simulator/src/simulator.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::compose::ComposeRunner; use crate::config::ServiceConfig; -use crate::node::Node; +use crate::node::{Node, NodeOptions}; pub(crate) struct Simulator { config: ServiceConfig, @@ -37,10 +37,15 @@ impl Simulator { } pub fn prepare(&mut self, nodes: usize) -> Result<()> { + let mut ips = Vec::new(); for node_index in 0..nodes { - self.add_node(node_index)?; + let ip = self.add_node(node_index, None, None)?; + ips.push(ip); } + // self.add_grafana()?; + // self.add_prometheus(ips)?; + self.finalize()?; Ok(()) } @@ -56,43 +61,145 @@ impl Simulator { } // updates the next_node_ip and adds a new node to 
the network - pub fn add_node(&mut self, node_index: usize) -> Result<()> { + pub fn add_node( + &mut self, + node_index: usize, + delay: Option, + loss: Option, + ) -> Result { let node_ip = increment_ip(self.next_node_ip, 1); - let mut node = Node::init_from_cli(node_ip, self.config.node_port, node_index) + + let options = match (delay, loss) { + (Some(delay), Some(loss)) => Some(NodeOptions { + delay, + packet_loss: loss, + }), + (Some(delay), None) => Some(NodeOptions { + delay, + packet_loss: 0, + }), + (None, Some(loss)) => Some(NodeOptions { + delay: 0, + packet_loss: loss, + }), + (None, None) => None, + }; + + let mut node = Node::init_from_cli(node_ip, self.config.node_port, node_index, options) .with_context(|| format!("failed to init node-{node_index}"))?; + let ip = node.ip.to_string(); let service = node.as_service(&self.config)?; self.global_config .bootstrap_peers .push(node.dht_value.take()); self.compose - .add_service(format!("node-{}", node_index), service); + .add_service(format!("node-{}", node_index), service)?; let logs_dir = node.logs_dir(&self.config); println!("Creating {:?}", logs_dir); std::fs::create_dir_all(&logs_dir)?; - self.write_entrypoint(node)?; + self.write_run_data(node)?; self.next_node_ip = node_ip; - Ok(()) + Ok(ip) + } + + pub fn add_grafana(&mut self) -> Result<()> { + self.write_grafana_data()?; + self.compose.add_grafana() + } + + pub fn add_prometheus(&mut self, node_addresses: Vec) -> Result<()> { + self.write_prometheus_data(node_addresses)?; + self.compose.add_prometheus() } pub fn next_node_index(&self) -> usize { self.global_config.bootstrap_peers.len() } - fn write_entrypoint(&self, node: Node) -> Result<()> { + fn write_grafana_data(&self) -> Result<()> { + std::fs::create_dir_all(self.config.grafana())?; + let grafana_data = r#"apiVersion: 1 + +datasources: +- name: Prometheus + type: prometheus + url: http://prometheus:9090 + isDefault: true + access: proxy + editable: true + "#; + let 
grafana_datasource_config = self.config.grafana().join("datasource.yml"); + std::fs::write(grafana_datasource_config, grafana_data) + .context("Failed to write grafana data")?; + + Ok(()) + } + + fn write_prometheus_data(&self, node_addresses: Vec) -> Result<()> { + let nodes = node_addresses + .iter() + .map(|x| format!("- {x}:9081")) + .reduce(|left, right| format!("{}\n {}", left, right)) + .unwrap_or_default(); + std::fs::create_dir_all(self.config.prometheus())?; + let prometheus_data = format!( + r#"global: + scrape_interval: 15s + scrape_timeout: 10s + evaluation_interval: 15s +alerting: + alertmanagers: + - static_configs: + - targets: [] + scheme: http + timeout: 10s + api_version: v1 +scrape_configs: +- job_name: prometheus + honor_timestamps: true + scrape_interval: 15s + scrape_timeout: 10s + metrics_path: /metrics + scheme: http + static_configs: + - targets: + {} + "#, + nodes + ); + let prometheus_datasource_config = self.config.prometheus().join("prometheus.yml"); + std::fs::write(prometheus_datasource_config, prometheus_data) + .context("Failed to write prometheus data")?; + Ok(()) + } + fn write_run_data(&self, node: Node) -> Result<()> { let entrypoint_data = generate_entrypoint(node.run_command()); let entrypoint_path = node.entrypoint_path(&self.config); + let options_path = node.options_path(&self.config); println!("Writing entrypoint to {:?}", entrypoint_path); std::fs::create_dir_all(self.config.entrypoints())?; + std::fs::create_dir_all(self.config.options())?; + std::fs::write(&entrypoint_path, entrypoint_data) .context("Failed to write entrypoint data")?; std::fs::set_permissions(entrypoint_path, std::fs::Permissions::from_mode(0o755)) .context("Failed to set entrypoint permissions")?; + println!("Writing persistent options json file"); + let data = match node.options { + Some(options) => serde_json::to_string(&options)?, + None => serde_json::to_string(&NodeOptions::default())?, + }; + + std::fs::write(&options_path, 
data).context("Failed to write node options")?; + std::fs::set_permissions(options_path, std::fs::Permissions::from_mode(0o755)) + .context("Failed to set node options permissions")?; + Ok(()) } } diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 16e02c017..acaccbac4 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -16,11 +16,12 @@ bytes = { workspace = true } bytesize = { workspace = true } crc = { workspace = true } dashmap = { workspace = true } -everscale-types = { workspace = true } +everscale-types = { workspace = true, features = ["tycho"] } fdlimit = { workspace = true } hex = { workspace = true } humantime = { workspace = true } libc = { workspace = true } +metrics = { workspace = true } num-traits = { workspace = true } parking_lot = { workspace = true } parking_lot_core = { workspace = true } @@ -42,14 +43,18 @@ tycho-block-util = { workspace = true } tycho-util = { workspace = true } [dev-dependencies] +anyhow = { workspace = true, features = ["backtrace"] } base64 = { workspace = true } bytesize = { workspace = true } serde_json = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true, features = ["full"] } tracing-appender = { workspace = true } tracing-subscriber = { workspace = true } tracing-test = { workspace = true } -tempfile = { workspace = true } -tokio = { version = "1", features = ["full"] } + +tycho-util = { workspace = true, features = ["test"] } +tycho-storage = { workspace = true, features = ["test"] } [features] test = ["dep:tempfile"] diff --git a/storage/src/config.rs b/storage/src/config.rs new file mode 100644 index 000000000..f22b87373 --- /dev/null +++ b/storage/src/config.rs @@ -0,0 +1,85 @@ +use std::path::{Path, PathBuf}; + +use bytesize::ByteSize; +use serde::{Deserialize, Serialize}; + +use crate::db::DbConfig; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields, default)] +pub struct StorageConfig { + /// Path to the root directory of the storage. 
+ /// + /// Default: `./db`. + pub root_dir: PathBuf, + + /// Runtime cells cache size. + /// + /// Default: calculated based on the available memory. + pub cells_cache_size: ByteSize, + + /// RocksDB configuration. + pub db_config: DbConfig, +} + +impl StorageConfig { + /// Creates a new storage config with very low cache sizes. + pub fn new_potato(path: &Path) -> Self { + Self { + root_dir: path.to_owned(), + cells_cache_size: ByteSize::kb(1024), + db_config: DbConfig { + rocksdb_lru_capacity: ByteSize::kb(1024), + }, + } + } +} + +impl Default for StorageConfig { + fn default() -> Self { + // Fetch the currently available memory in bytes + let available = { + let mut sys = sysinfo::System::new(); + sys.refresh_memory(); + sys.available_memory() + }; + + // Estimated memory usage of components other than cache: + // - 2 GiBs for write buffers(4 if we are out of luck and all memtables are being flushed at the same time) + // - 2 GiBs for indexer logic + // - 10 bits per cell for bloom filter. Realistic case is 100M cells, so 0.25 GiBs + // - 1/3 of all available memory is reserved for kernel buffers + const WRITE_BUFFERS: ByteSize = ByteSize::gib(2); + const INDEXER_LOGIC: ByteSize = ByteSize::gib(2); + const BLOOM_FILTER: ByteSize = ByteSize::mib(256); + let estimated_memory_usage = WRITE_BUFFERS + INDEXER_LOGIC + BLOOM_FILTER + available / 3; + + // Reduce the available memory by the fixed offset + let available = available + .checked_sub(estimated_memory_usage.as_u64()) + .unwrap_or_else(|| { + tracing::error!( + "Not enough memory for cache, using 1/4 of all available memory. \ + Tweak `db_options` in config to improve performance." + ); + available / 4 + }); + + // We will use 3/4 of available memory for the cells cache (at most 4 GB). 
+ let cells_cache_size = std::cmp::min(ByteSize(available * 4 / 3), ByteSize::gib(4)); + + // The reset of the memory is used for LRU cache (at least 128 MB) + let rocksdb_lru_capacity = std::cmp::max( + ByteSize(available.saturating_sub(cells_cache_size.as_u64())), + ByteSize::mib(128), + ); + + Self { + root_dir: PathBuf::from("./db"), + cells_cache_size, + db_config: DbConfig { + rocksdb_lru_capacity, + }, + } + } +} diff --git a/storage/src/db/file_db/mapped_file.rs b/storage/src/db/file_db/mapped_file.rs index 4594397aa..437bf632e 100644 --- a/storage/src/db/file_db/mapped_file.rs +++ b/storage/src/db/file_db/mapped_file.rs @@ -1,3 +1,4 @@ +#![allow(clippy::disallowed_types)] use std::fs::File; use std::os::fd::AsRawFd; use std::path::Path; diff --git a/storage/src/db/file_db/mod.rs b/storage/src/db/file_db/mod.rs index be5c69323..54372372f 100644 --- a/storage/src/db/file_db/mod.rs +++ b/storage/src/db/file_db/mod.rs @@ -1,8 +1,13 @@ +#![allow(clippy::disallowed_methods)] +#![allow(clippy::disallowed_types)] // it's wrapper around Files so + use std::fs::{File, OpenOptions}; use std::os::fd::AsRawFd; use std::path::{Path, PathBuf}; use std::sync::Arc; +use anyhow::{Context, Result}; + pub use self::mapped_file::MappedFile; pub use self::temp_file::TempFile; @@ -13,7 +18,21 @@ mod temp_file; pub struct FileDb(Arc); impl FileDb { - pub fn new
<P>
(root: P) -> Self + /// Creates a new `FileDb` instance. + /// If the `root` directory does not exist, it will be created. + pub fn new
<P>
(root: P) -> Result + where + P: AsRef, + { + std::fs::create_dir_all(root.as_ref()) + .with_context(|| format!("failed to create {}", root.as_ref().display()))?; + Ok(Self(Arc::new(FileDbInner { + base_dir: root.as_ref().to_path_buf(), + }))) + } + + /// Creates a new `FileDb` without creating the root directory tree + pub fn new_readonly
<P>
(root: P) -> Self where P: AsRef, { @@ -26,7 +45,7 @@ impl FileDb { &self.0.base_dir } - pub fn ensure_exists(&self) -> std::io::Result<()> { + pub fn create_if_not_exists(&self) -> std::io::Result<()> { std::fs::create_dir_all(&self.0.base_dir) } @@ -46,12 +65,21 @@ impl FileDb { } } - pub fn subdir>(&self, rel_path: P) -> Self { + /// Creates `FileDb` instance for a subdirectory of the current one. + /// **Note**: The subdirectory will not be created if it does not exist. + /// Use `create_subdir` to create it. + pub fn subdir_readonly>(&self, rel_path: P) -> Self { Self(Arc::new(FileDbInner { base_dir: self.0.base_dir.join(rel_path), })) } + /// Creates `FileDb` instance for a subdirectory of the current one. + /// The subdirectory will be created if it does not exist. + pub fn create_subdir>(&self, rel_path: P) -> Result { + Self::new(self.0.base_dir.join(rel_path)) + } + pub fn file_exists>(&self, rel_path: P) -> bool { self.path().join(rel_path).is_file() } @@ -72,24 +100,27 @@ pub struct FileBuilder { } impl FileBuilder { - pub fn open(&self) -> std::io::Result { - let file = self.options.open(&self.path)?; + pub fn open(&self) -> Result { + let file = self + .options + .open(&self.path) + .with_context(|| format!("failed to open {}", self.path.display()))?; if let Some(prealloc) = self.prealloc { alloc_file(&file, prealloc)?; } Ok(file) } - pub fn open_as_temp(&self) -> std::io::Result { + pub fn open_as_temp(&self) -> Result { let file = self.open()?; Ok(TempFile::new(self.path.clone(), file)) } - pub fn open_as_mapped(&self) -> std::io::Result { - match self.prealloc { - Some(length) => MappedFile::new(&self.path, length), - None => MappedFile::from_existing_file(self.open()?), - } + pub fn open_as_mapped(&self) -> Result { + Ok(match self.prealloc { + Some(length) => MappedFile::new(&self.path, length)?, + None => MappedFile::from_existing_file(self.open()?)?, + }) } pub fn append(&mut self, append: bool) -> &mut Self { @@ -126,6 +157,10 @@ impl 
FileBuilder { self.prealloc = Some(prealloc); self } + + pub fn path(&self) -> &Path { + &self.path + } } #[cfg(not(target_os = "macos"))] diff --git a/storage/src/db/kv_db/config.rs b/storage/src/db/kv_db/config.rs index bd2d53b2d..93839d9e4 100644 --- a/storage/src/db/kv_db/config.rs +++ b/storage/src/db/kv_db/config.rs @@ -3,53 +3,7 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Copy, Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields, default)] -pub struct DbOptions { +pub struct DbConfig { pub rocksdb_lru_capacity: ByteSize, pub cells_cache_size: ByteSize, } - -impl Default for DbOptions { - fn default() -> Self { - // Fetch the currently available memory in bytes - let available = { - let mut sys = sysinfo::System::new(); - sys.refresh_memory(); - sys.available_memory() - }; - - // Estimated memory usage of components other than cache: - // - 2 GiBs for write buffers(4 if we are out of luck and all memtables are being flushed at the same time) - // - 2 GiBs for indexer logic - // - 10 bits per cell for bloom filter. Realistic case is 100M cells, so 0.25 GiBs - // - 1/3 of all available memory is reserved for kernel buffers - const WRITE_BUFFERS: ByteSize = ByteSize::gib(2); - const INDEXER_LOGIC: ByteSize = ByteSize::gib(2); - const BLOOM_FILTER: ByteSize = ByteSize::mib(256); - let estimated_memory_usage = WRITE_BUFFERS + INDEXER_LOGIC + BLOOM_FILTER + available / 3; - - // Reduce the available memory by the fixed offset - let available = available - .checked_sub(estimated_memory_usage.as_u64()) - .unwrap_or_else(|| { - tracing::error!( - "Not enough memory for cache, using 1/4 of all available memory. \ - Tweak `db_options` in config to improve performance." - ); - available / 4 - }); - - // We will use 3/4 of available memory for the cells cache (at most 4 GB). 
- let cells_cache_size = std::cmp::min(ByteSize(available * 4 / 3), ByteSize::gib(4)); - - // The reset of the memory is used for LRU cache (at least 128 MB) - let rocksdb_lru_capacity = std::cmp::max( - ByteSize(available.saturating_sub(cells_cache_size.as_u64())), - ByteSize::mib(128), - ); - - Self { - rocksdb_lru_capacity, - cells_cache_size, - } - } -} diff --git a/storage/src/db/kv_db/mod.rs b/storage/src/db/kv_db/mod.rs index aeed753fd..5e9ff3112 100644 --- a/storage/src/db/kv_db/mod.rs +++ b/storage/src/db/kv_db/mod.rs @@ -4,19 +4,20 @@ use std::thread::available_parallelism; use anyhow::{Context, Result}; use bytesize::ByteSize; +use serde::{Deserialize, Serialize}; +pub use weedb::{rocksdb, BoundedCfHandle, ColumnFamily, Stats as RocksdbStats, Table}; use weedb::{Caches, WeeDb}; -pub use weedb::Stats as RocksdbStats; -pub use weedb::{rocksdb, BoundedCfHandle, ColumnFamily, Table}; - -pub use self::config::DbOptions; - pub mod refcount; pub mod tables; -mod config; mod migrations; +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DbConfig { + pub rocksdb_lru_capacity: ByteSize, +} + pub struct Db { pub archives: Table, pub block_handles: Table, @@ -24,6 +25,7 @@ pub struct Db { pub package_entries: Table, pub shard_states: Table, pub cells: Table, + pub temp_cells: Table, pub node_states: Table, pub prev1: Table, pub prev2: Table, @@ -35,10 +37,9 @@ pub struct Db { } impl Db { - pub fn open(path: PathBuf, options: DbOptions) -> Result> { + pub fn open(path: PathBuf, options: DbConfig) -> Result> { tracing::info!( rocksdb_lru_capacity = %options.rocksdb_lru_capacity, - cells_cache_size = %options.cells_cache_size, "opening DB" ); @@ -99,6 +100,7 @@ impl Db { .with_table::() .with_table::() .with_table::() + .with_table::() .with_table::() .with_table::() .with_table::() @@ -117,6 +119,7 @@ impl Db { package_entries: inner.instantiate_table(), shard_states: inner.instantiate_table(), cells: inner.instantiate_table(), + temp_cells: 
inner.instantiate_table(), node_states: inner.instantiate_table(), prev1: inner.instantiate_table(), prev2: inner.instantiate_table(), @@ -151,15 +154,22 @@ impl Db { (self.archives.cf(), "archives"), (self.shard_states.cf(), "shard states"), (self.cells.cf(), "cells"), + (self.temp_cells.cf(), "temp cells"), ]; + let mut compaction_options = rocksdb::CompactOptions::default(); + compaction_options.set_exclusive_manual_compaction(true); + compaction_options + .set_bottommost_level_compaction(rocksdb::BottommostLevelCompaction::ForceOptimized); + for (cf, title) in tables { tracing::info!("{title} compaction started"); let instant = Instant::now(); - let bound = Option::<[u8; 0]>::None; - self.raw().compact_range_cf(&cf, bound, bound); + + self.raw() + .compact_range_cf_opt(&cf, bound, bound, &compaction_options); tracing::info!( elapsed = %humantime::format_duration(instant.elapsed()), @@ -224,6 +234,7 @@ impl Db { package_entries => tables::PackageEntries, shard_states => tables::ShardStates, cells => tables::Cells, + temp_cells => tables::TempCells, node_states => tables::NodeStates, prev1 => tables::Prev1, prev2 => tables::Prev2, diff --git a/storage/src/db/kv_db/refcount.rs b/storage/src/db/kv_db/refcount.rs index a9fbb800d..eb42c0be7 100644 --- a/storage/src/db/kv_db/refcount.rs +++ b/storage/src/db/kv_db/refcount.rs @@ -32,17 +32,19 @@ pub fn merge_operator( pub fn compaction_filter(_level: u32, _key: &[u8], value: &[u8]) -> Decision { if value.is_empty() { + metrics::counter!("tycho_compaction_removes").increment(1); Decision::Remove } else { + metrics::counter!("tycho_compaction_keeps").increment(1); Decision::Keep } } pub fn decode_value_with_rc(bytes: &[u8]) -> (RcType, Option<&[u8]>) { let without_payload = match bytes.len().cmp(&RC_BYTES) { - std::cmp::Ordering::Greater => false, - std::cmp::Ordering::Equal => true, - std::cmp::Ordering::Less => return (0, None), + Ordering::Greater => false, + Ordering::Equal => true, + Ordering::Less => return (0, 
None), }; let rc = RcType::from_le_bytes(bytes[..RC_BYTES].try_into().unwrap()); diff --git a/storage/src/db/kv_db/tables.rs b/storage/src/db/kv_db/tables.rs index a9bbe6065..6360215ab 100644 --- a/storage/src/db/kv_db/tables.rs +++ b/storage/src/db/kv_db/tables.rs @@ -134,6 +134,29 @@ impl ColumnFamily for Cells { } } +/// Stores temp cells data +/// - Key: `ton_types::UInt256` (cell repr hash) +/// - Value: `StorageCell` +pub struct TempCells; + +impl ColumnFamily for TempCells { + const NAME: &'static str = "temp_cells"; + + fn options(opts: &mut rocksdb::Options, caches: &Caches) { + let mut block_factory = BlockBasedOptions::default(); + block_factory.set_block_cache(&caches.block_cache); + block_factory.set_data_block_index_type(DataBlockIndexType::BinaryAndHash); + block_factory.set_whole_key_filtering(true); + block_factory.set_checksum_type(rocksdb::ChecksumType::NoChecksum); + + block_factory.set_bloom_filter(10.0, false); + block_factory.set_block_size(16 * 1024); + block_factory.set_format_version(5); + + opts.set_optimize_filters_for_hits(true); + } +} + /// Stores generic node parameters /// - Key: `...` /// - Value: `...` diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 974d0361a..020a0ea33 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,12 +1,13 @@ -use std::path::PathBuf; use std::sync::Arc; -use anyhow::Result; +use anyhow::{Context, Result}; +pub use self::config::*; pub use self::db::*; pub use self::models::*; pub use self::store::*; +mod config; mod db; mod models; mod store; @@ -17,6 +18,9 @@ mod util { mod stored_value; } +const DB_SUBDIR: &str = "rocksdb"; +const FILES_SUBDIR: &str = "files"; + #[derive(Clone)] #[repr(transparent)] pub struct Storage { @@ -24,30 +28,35 @@ pub struct Storage { } impl Storage { - pub fn new(db: Arc, file_db_path: PathBuf, max_cell_cache_size_bytes: u64) -> Result { - let files_dir = FileDb::new(file_db_path); + pub fn new(config: StorageConfig) -> Result { + let root = 
FileDb::new(&config.root_dir)?; + + let files_db = root.create_subdir(FILES_SUBDIR)?; + let kv_db = Db::open(config.root_dir.join(DB_SUBDIR), config.db_config) + .context("failed to open a rocksdb")?; - let block_handle_storage = Arc::new(BlockHandleStorage::new(db.clone())); - let block_connection_storage = Arc::new(BlockConnectionStorage::new(db.clone())); + let block_handle_storage = Arc::new(BlockHandleStorage::new(kv_db.clone())); + let block_connection_storage = Arc::new(BlockConnectionStorage::new(kv_db.clone())); let runtime_storage = Arc::new(RuntimeStorage::new(block_handle_storage.clone())); let block_storage = Arc::new(BlockStorage::new( - db.clone(), + kv_db.clone(), block_handle_storage.clone(), block_connection_storage.clone(), )?); let shard_state_storage = ShardStateStorage::new( - db.clone(), - &files_dir, + kv_db.clone(), + &files_db, block_handle_storage.clone(), block_storage.clone(), - max_cell_cache_size_bytes, + config.cells_cache_size.as_u64(), )?; let persistent_state_storage = - PersistentStateStorage::new(db.clone(), &files_dir, block_handle_storage.clone())?; - let node_state_storage = NodeStateStorage::new(db); + PersistentStateStorage::new(kv_db.clone(), &files_db, block_handle_storage.clone())?; + let node_state_storage = NodeStateStorage::new(kv_db); Ok(Self { inner: Arc::new(Inner { + root, block_handle_storage, block_storage, shard_state_storage, @@ -65,28 +74,15 @@ impl Storage { /// otherwise compaction filter will not work. 
#[cfg(any(test, feature = "test"))] pub fn new_temp() -> Result<(Self, tempfile::TempDir)> { - use bytesize::ByteSize; - let tmp_dir = tempfile::tempdir()?; - let root_path = tmp_dir.path(); - - // Init rocksdb - let db_options = DbOptions { - rocksdb_lru_capacity: ByteSize::kb(1024), - cells_cache_size: ByteSize::kb(1024), - }; - let db = Db::open(root_path.join("db_storage"), db_options)?; - - // Init storage - let storage = Storage::new( - db, - root_path.join("file_storage"), - db_options.cells_cache_size.as_u64(), - )?; - + let storage = Storage::new(StorageConfig::new_potato(tmp_dir.path()))?; Ok((storage, tmp_dir)) } + pub fn root(&self) -> &FileDb { + &self.inner.root + } + pub fn runtime_storage(&self) -> &RuntimeStorage { &self.inner.runtime_storage } @@ -117,6 +113,7 @@ impl Storage { } struct Inner { + root: FileDb, runtime_storage: Arc, block_handle_storage: Arc, block_connection_storage: Arc, diff --git a/storage/src/models/block_handle.rs b/storage/src/models/block_handle.rs index 86c598d39..03085348d 100644 --- a/storage/src/models/block_handle.rs +++ b/storage/src/models/block_handle.rs @@ -2,9 +2,9 @@ use std::sync::{Arc, Weak}; use everscale_types::models::*; use tokio::sync::RwLock; +use tycho_util::FastDashMap; use super::BlockMeta; -use tycho_util::FastDashMap; #[derive(Clone)] #[repr(transparent)] diff --git a/storage/src/store/block/mod.rs b/storage/src/store/block/mod.rs index 92be364bf..8ff129c92 100644 --- a/storage/src/store/block/mod.rs +++ b/storage/src/store/block/mod.rs @@ -19,10 +19,9 @@ use tycho_block_util::block::{ use tycho_util::FastHashMap; use crate::db::*; +use crate::models::*; use crate::util::*; -use crate::{ - models::*, BlockConnection, BlockConnectionStorage, BlockHandleStorage, HandleCreationStatus, -}; +use crate::{BlockConnection, BlockConnectionStorage, BlockHandleStorage, HandleCreationStatus}; pub struct BlockStorage { db: Arc, diff --git a/storage/src/store/block_connection/mod.rs 
b/storage/src/store/block_connection/mod.rs index d5e199dd4..0a2bbf1e4 100644 --- a/storage/src/store/block_connection/mod.rs +++ b/storage/src/store/block_connection/mod.rs @@ -103,7 +103,7 @@ where handle.id().root_hash.as_slice(), write_block_id_le(block_id), ) - .unwrap() + .unwrap(); } fn load_block_connection_impl(db: &Table, block_id: &BlockId) -> Option diff --git a/storage/src/store/node_state/mod.rs b/storage/src/store/node_state/mod.rs index ceba6675a..27c03e4ee 100644 --- a/storage/src/store/node_state/mod.rs +++ b/storage/src/store/node_state/mod.rs @@ -22,7 +22,7 @@ impl NodeStateStorage { } pub fn store_last_mc_block_id(&self, id: &BlockId) { - self.store_block_id(&self.last_mc_block_id, id) + self.store_block_id(&self.last_mc_block_id, id); } pub fn load_last_mc_block_id(&self) -> Option { @@ -30,7 +30,7 @@ impl NodeStateStorage { } pub fn store_init_mc_block_id(&self, id: &BlockId) { - self.store_block_id(&self.init_mc_block_id, id) + self.store_block_id(&self.init_mc_block_id, id); } pub fn load_init_mc_block_id(&self) -> Option { diff --git a/storage/src/store/persistent_state/mod.rs b/storage/src/store/persistent_state/mod.rs index 81f8aa7da..bbf3b4dae 100644 --- a/storage/src/store/persistent_state/mod.rs +++ b/storage/src/store/persistent_state/mod.rs @@ -31,9 +31,7 @@ impl PersistentStateStorage { files_dir: &FileDb, block_handle_storage: Arc, ) -> Result { - let storage_dir = files_dir.subdir(BASE_DIR); - storage_dir.ensure_exists()?; - + let storage_dir = files_dir.create_subdir(BASE_DIR)?; let is_cancelled = Arc::new(AtomicBool::new(false)); Ok(Self { @@ -88,19 +86,23 @@ impl PersistentStateStorage { block_id: &BlockId, offset: u64, size: u64, - ) -> Option { - let path = self + ) -> Result { + // todo: add validation for offset and size + // so it won't eat all the memory + let mut builder = self .mc_states_dir(mc_block_id) - .join(block_id.root_hash.to_string()); - + .file(block_id.root_hash.to_string()); + let file_path = 
builder.path().to_path_buf(); tokio::task::spawn_blocking(move || { // TODO: cache file handles - let mut file = std::fs::OpenOptions::new().read(true).open(path).ok()?; + let mut file = builder.read(true).open()?; - if let Err(e) = file.seek(SeekFrom::Start(offset)) { - tracing::error!("failed to seek state file offset: {e:?}"); - return None; - } + file.seek(SeekFrom::Start(offset)).with_context(|| { + format!( + "failed to seek state file offset, path: {}", + file_path.display() + ) + })?; let mut buf_reader = BufReader::new(file); @@ -111,48 +113,48 @@ impl PersistentStateStorage { loop { match buf_reader.read(&mut result[result_cursor..]) { Ok(bytes_read) => { - tracing::info!("Reading state file. Bytes read: {}", bytes_read); + tracing::debug!(bytes_read, "reading state file"); if bytes_read == 0 || bytes_read == size as usize { break; } result_cursor += bytes_read; } Err(e) => { - tracing::error!("Failed to read state file. Err: {e:?}"); - return None; + return Err(anyhow::Error::new(e).context(format!( + "failed to read state file. 
Path: {}", + file_path.display() + ))) } } } - tracing::info!( - "Finished reading buffer after: {} ms", + tracing::debug!( + "finished reading buffer after: {} ms", now.elapsed().as_millis() ); - Some(result.freeze()) + Ok(result.freeze()) }) .await - .ok() - .flatten() + .unwrap() } pub fn state_exists(&self, mc_block_id: &BlockId, block_id: &BlockId) -> bool { // TODO: cache file handles self.mc_states_dir(mc_block_id) - .join(block_id.root_hash.to_string()) - .is_file() + .file_exists(block_id.root_hash.to_string()) } pub fn prepare_persistent_states_dir(&self, mc_block: &BlockId) -> Result { - let states_dir = self.storage_dir.subdir(mc_block.seqno.to_string()); + let states_dir = self.storage_dir.subdir_readonly(mc_block.seqno.to_string()); if !states_dir.path().is_dir() { tracing::info!(mc_block = %mc_block, "creating persistent state directory"); - states_dir.ensure_exists()?; + states_dir.create_if_not_exists()?; } Ok(states_dir) } - fn mc_states_dir(&self, mc_block_id: &BlockId) -> PathBuf { - self.storage_dir.path().join(mc_block_id.seqno.to_string()) + fn mc_states_dir(&self, mc_block_id: &BlockId) -> FileDb { + FileDb::new_readonly(self.storage_dir.path().join(mc_block_id.seqno.to_string())) } pub fn cancel(&self) { diff --git a/storage/src/store/runtime/mod.rs b/storage/src/store/runtime/mod.rs index 54d9b89ad..6cf671e3c 100644 --- a/storage/src/store/runtime/mod.rs +++ b/storage/src/store/runtime/mod.rs @@ -1,7 +1,6 @@ use std::sync::Arc; pub use self::persistent_state_keeper::PersistentStateKeeper; - use super::BlockHandleStorage; mod persistent_state_keeper; diff --git a/storage/src/store/runtime/persistent_state_keeper.rs b/storage/src/store/runtime/persistent_state_keeper.rs index da820106a..98968bb6d 100644 --- a/storage/src/store/runtime/persistent_state_keeper.rs +++ b/storage/src/store/runtime/persistent_state_keeper.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use anyhow::Result; use arc_swap::ArcSwapAny; use tokio::sync::Notify; - use 
tycho_block_util::state::*; use crate::models::{BlockHandle, BriefBlockMeta}; diff --git a/storage/src/store/shard_state/cell_storage.rs b/storage/src/store/shard_state/cell_storage.rs index 6d56d4bec..25e78fb48 100644 --- a/storage/src/store/shard_state/cell_storage.rs +++ b/storage/src/store/shard_state/cell_storage.rs @@ -2,21 +2,22 @@ use std::cell::UnsafeCell; use std::collections::hash_map; use std::mem::{ManuallyDrop, MaybeUninit}; use std::sync::atomic::{AtomicI64, AtomicU8, Ordering}; -use std::sync::{Arc, Weak}; +use std::sync::{Arc, Mutex, Weak}; use anyhow::{Context, Result}; use bumpalo::Bump; use everscale_types::cell::*; use quick_cache::sync::{Cache, DefaultLifecycle}; use triomphe::ThinArc; +use tycho_util::{FastDashMap, FastHashMap, FastHasherState}; use crate::db::*; -use tycho_util::{FastDashMap, FastHashMap, FastHasherState}; pub struct CellStorage { db: Arc, cells_cache: Arc>>, raw_cells_cache: RawCellsCache, + pending: PendingOperations, } impl CellStorage { @@ -28,14 +29,204 @@ impl CellStorage { db, cells_cache, raw_cells_cache, + pending: PendingOperations::default(), }) } + pub fn apply_temp_cell(&self, root: &HashBytes) -> Result<()> { + const MAX_NEW_CELLS_BATCH_SIZE: usize = 10000; + + struct CellHashesIter<'a> { + data: rocksdb::DBPinnableSlice<'a>, + offset: usize, + remaining_refs: u8, + } + + impl<'a> Iterator for CellHashesIter<'a> { + type Item = HashBytes; + + fn next(&mut self) -> Option { + if self.remaining_refs == 0 { + return None; + } + + // NOTE: Unwrap is safe here because we have already checked + // that data can contain all references. 
+ let item = HashBytes(self.data[self.offset..self.offset + 32].try_into().unwrap()); + + self.remaining_refs -= 1; + self.offset += 32; + + Some(item) + } + + fn size_hint(&self) -> (usize, Option) { + let r = self.remaining_refs as usize; + (r, Some(r)) + } + } + + enum InsertedCell<'a> { + New(CellHashesIter<'a>), + Existing, + } + + struct Context<'a> { + cells_cf: BoundedCfHandle<'a>, + db: &'a Db, + buffer: Vec, + transaction: FastHashMap, + new_cells_batch: rocksdb::WriteBatch, + new_cell_count: usize, + } + + impl<'a> Context<'a> { + fn new(db: &'a Db) -> Self { + Self { + cells_cf: db.cells.cf(), + db, + buffer: Vec::with_capacity(512), + transaction: Default::default(), + new_cells_batch: rocksdb::WriteBatch::default(), + new_cell_count: 0, + } + } + + fn load_temp(&self, key: &HashBytes) -> Result, CellStorageError> { + let data = match self.db.temp_cells.get(key) { + Ok(Some(data)) => data, + Ok(None) => return Err(CellStorageError::CellNotFound), + Err(e) => return Err(CellStorageError::Internal(e)), + }; + + let (offset, remaining_refs) = { + let data = &mut data.as_ref(); + + let descriptor = CellDescriptor::new([data[0], data[1]]); + let byte_len = descriptor.byte_len() as usize; + let hash_count = descriptor.hash_count() as usize; + let ref_count = descriptor.reference_count(); + + let offset = 4usize + byte_len + (32 + 2) * hash_count; + if data.len() < offset + (ref_count as usize) * 32 { + return Err(CellStorageError::InvalidCell); + } + + (offset, ref_count) + }; + + Ok(CellHashesIter { + data, + offset, + remaining_refs, + }) + } + + fn insert_cell( + &mut self, + key: &HashBytes, + ) -> Result, CellStorageError> { + Ok(match self.transaction.entry(*key) { + hash_map::Entry::Occupied(mut entry) => { + *entry.get_mut() += 1; // 1 new reference + InsertedCell::Existing + } + hash_map::Entry::Vacant(entry) => { + if let Some(value) = self.db.cells.get(key)? 
{ + let (rc, value) = refcount::decode_value_with_rc(value.as_ref()); + debug_assert!(rc > 0 && value.is_some() || rc == 0 && value.is_none()); + if value.is_some() { + entry.insert(1); // 1 new reference + return Ok(InsertedCell::Existing); + } + } + + entry.insert(0); // 0 new references (the first one is included in the merge below) + let iter = self.load_temp(key)?; + + self.buffer.clear(); + refcount::add_positive_refount( + 1, + Some(iter.data.as_ref()), + &mut self.buffer, + ); + + self.new_cells_batch + .put_cf(&self.cells_cf, key, self.buffer.as_slice()); + + self.new_cell_count += 1; + if self.new_cell_count >= MAX_NEW_CELLS_BATCH_SIZE { + self.flush_new_cells()?; + } + + InsertedCell::New(iter) + } + }) + } + + fn flush_new_cells(&mut self) -> Result<(), rocksdb::Error> { + if self.new_cell_count > 0 { + self.db + .raw() + .write(std::mem::take(&mut self.new_cells_batch))?; + self.new_cell_count = 0; + } + Ok(()) + } + + fn flush_existing_cells(&mut self) -> Result<(), rocksdb::Error> { + let mut batch = rocksdb::WriteBatch::default(); + + for (key, &refs) in &self.transaction { + if refs == 0 { + continue; + } + + self.buffer.clear(); + refcount::add_positive_refount(refs, None, &mut self.buffer); + batch.merge_cf(&self.cells_cf, key, self.buffer.as_slice()); + } + + self.db.raw().write(batch) + } + } + + let mut ctx = Context::new(&self.db); + + let mut stack = Vec::with_capacity(16); + if let InsertedCell::New(iter) = ctx.insert_cell(root)? { + stack.push(iter); + } + + 'outer: loop { + let Some(iter) = stack.last_mut() else { + break; + }; + + for ref child in iter { + if let InsertedCell::New(iter) = ctx.insert_cell(child)? 
{ + stack.push(iter); + continue 'outer; + } + } + + stack.pop(); + } + + // Clear big chunks of data before finalization + drop(stack); + + ctx.flush_new_cells()?; + ctx.flush_existing_cells()?; + + Ok(()) + } + pub fn store_cell( &self, batch: &mut rocksdb::WriteBatch, root: Cell, - ) -> Result { + ) -> Result<(PendingOperation<'_>, usize), CellStorageError> { struct CellWithRefs<'a> { rc: u32, data: Option<&'a [u8]>, @@ -79,12 +270,7 @@ impl CellStorage { } } - match self - .db - .cells - .get(key.as_slice()) - .map_err(CellStorageError::Internal)? - { + match self.db.cells.get(key).map_err(CellStorageError::Internal)? { Some(value) => { let (rc, value) = refcount::decode_value_with_rc(value.as_ref()); @@ -131,6 +317,7 @@ impl CellStorage { // Prepare context and handles let alloc = Bump::new(); + let pending_op = self.pending.begin(); let mut ctx = Context { db: &self.db, @@ -145,7 +332,7 @@ impl CellStorage { let key = root.repr_hash(); if !ctx.insert_cell(key, root.as_ref(), 0)? { - return Ok(0); + return Ok((pending_op, 0)); } } @@ -175,7 +362,7 @@ impl CellStorage { drop(stack); // Write transaction to the `WriteBatch` - Ok(ctx.finalize(batch)) + Ok((pending_op, ctx.finalize(batch))) } pub fn load_cell( @@ -188,7 +375,10 @@ impl CellStorage { } } - let cell = match self.raw_cells_cache.get_raw(self.db.as_ref(), &hash) { + let cell = match self + .raw_cells_cache + .get_raw(self.db.as_ref(), &hash, &self.pending) + { Ok(value) => 'cell: { if let Some(value) = value { let rc = &value.header.header; @@ -210,10 +400,10 @@ impl CellStorage { pub fn remove_cell( &self, - batch: &mut weedb::rocksdb::WriteBatch, + batch: &mut rocksdb::WriteBatch, alloc: &Bump, hash: &HashBytes, - ) -> Result { + ) -> Result<(PendingOperation<'_>, usize), CellStorageError> { #[derive(Clone, Copy)] struct CellState<'a> { rc: i64, @@ -240,6 +430,8 @@ impl CellStorage { } } + let pending_op = self.pending.begin(); + let cells = &self.db.cells; let cells_cf = &cells.cf(); @@ -255,22 
+447,10 @@ impl CellStorage { let refs = match transaction.entry(cell_id) { hash_map::Entry::Occupied(mut v) => v.get_mut().remove()?, hash_map::Entry::Vacant(v) => { - let rc = match self.db.cells.get(cell_id.as_array()) { - Ok(value) => 'rc: { - if let Some(value) = value { - buffer.clear(); - if let (rc, Some(value)) = refcount::decode_value_with_rc(&value) { - if StorageCell::deserialize_references(value, &mut buffer) { - break 'rc rc; - } else { - return Err(CellStorageError::InvalidCell); - } - } - } - return Err(CellStorageError::CellNotFound); - } - Err(e) => return Err(CellStorageError::Internal(e)), - }; + let rc = + self.raw_cells_cache + .get_raw_for_delete(&self.db, cell_id, &mut buffer)?; + debug_assert!(rc > 0); v.insert(CellState { rc, @@ -296,13 +476,14 @@ impl CellStorage { // Write transaction to the `WriteBatch` let total = transaction.len(); for (key, CellState { removes, .. }) in transaction { + self.raw_cells_cache.remove_refs(key, removes); batch.merge_cf( cells_cf, key.as_slice(), refcount::encode_negative_refcount(removes), ); } - Ok(total) + Ok((pending_op, total)) } pub fn drop_cell(&self, hash: &HashBytes) { @@ -319,7 +500,7 @@ pub enum CellStorageError { #[error("Cell counter mismatch")] CounterMismatch, #[error("Internal rocksdb error")] - Internal(#[source] weedb::rocksdb::Error), + Internal(#[from] rocksdb::Error), } pub struct StorageCell { @@ -658,6 +839,7 @@ impl StorageCellReferenceData { struct RawCellsCache(Cache); impl RawCellsCache { + #[allow(unused)] pub(crate) fn hit_ratio(&self) -> f64 { (if self.0.hits() > 0 { self.0.hits() as f64 / (self.0.hits() + self.0.misses()) as f64 @@ -729,6 +911,7 @@ impl RawCellsCache { &self, db: &Db, key: &HashBytes, + pending: &PendingOperations, ) -> Result, rocksdb::Error> { use quick_cache::GuardResult; @@ -738,7 +921,11 @@ impl RawCellsCache { let (rc, data) = refcount::decode_value_with_rc(value.as_ref()); data.map(|value| { let value = 
RawCellsCacheItem::from_header_and_slice(AtomicI64::new(rc), value); - _ = g.insert(value.clone()); + pending.run_if_none(|| { + // Insert value to the cache only if there are no pending operations + _ = g.insert(value.clone()); + }); + value }) } else { @@ -804,3 +991,37 @@ impl RawCellsCache { } } } + +#[derive(Default)] +struct PendingOperations { + // TODO: Replace with two atomic counters for inserts and pending operations + pending_count: Mutex, +} + +impl PendingOperations { + fn begin(&self) -> PendingOperation<'_> { + *self.pending_count.lock().unwrap() += 1; + PendingOperation { operations: self } + } + + #[inline] + fn run_if_none(&self, f: F) { + let guard = self.pending_count.lock().unwrap(); + if *guard == 0 { + f(); + } + + // NOTE: Make sure to drop the lock only after the operation is executed + drop(guard); + } +} + +pub struct PendingOperation<'a> { + operations: &'a PendingOperations, +} + +impl Drop for PendingOperation<'_> { + fn drop(&mut self) { + *self.operations.pending_count.lock().unwrap() -= 1; + } +} diff --git a/storage/src/store/shard_state/mod.rs b/storage/src/store/shard_state/mod.rs index f1ce93d23..99c1fdc26 100644 --- a/storage/src/store/shard_state/mod.rs +++ b/storage/src/store/shard_state/mod.rs @@ -9,16 +9,16 @@ use tycho_block_util::block::*; use tycho_block_util::state::*; use self::cell_storage::*; -use self::replace_transaction::ShardStateReplaceTransaction; - +use self::store_state_raw::StoreStateRaw; use crate::db::*; +use crate::models::BlockHandle; use crate::util::*; -use crate::{models::BlockHandle, BlockHandleStorage, BlockStorage}; +use crate::{BlockHandleStorage, BlockStorage}; mod cell_storage; mod entries_buffer; -mod replace_transaction; mod shard_state_reader; +mod store_state_raw; const DOWNLOADS_DIR: &str = "downloads"; @@ -44,12 +44,11 @@ impl ShardStateStorage { block_storage: Arc, cache_size_bytes: u64, ) -> Result { - let downloads_dir = files_dir.subdir(DOWNLOADS_DIR); - 
downloads_dir.ensure_exists()?; + let downloads_dir = files_dir.create_subdir(DOWNLOADS_DIR)?; let cell_storage = CellStorage::new(db.clone(), cache_size_bytes); - let res = Self { + Ok(Self { db, block_handle_storage, block_storage, @@ -59,10 +58,7 @@ impl ShardStateStorage { min_ref_mc_state: Default::default(), max_new_mc_cell_count: AtomicUsize::new(0), max_new_sc_cell_count: AtomicUsize::new(0), - }; - - // Done - Ok(res) + }) } pub fn metrics(&self) -> ShardStateStorageMetrics { @@ -80,9 +76,9 @@ impl ShardStateStorage { } // TODO: implement metrics - /*pub fn cache_metrics(&self) -> CacheStats { - self.cell_storage.cache_stats() - }*/ + // pub fn cache_metrics(&self) -> CacheStats { + // self.cell_storage.cache_stats() + // } pub fn min_ref_mc_state(&self) -> &MinRefMcStateTracker { &self.min_ref_mc_state @@ -97,70 +93,80 @@ impl ShardStateStorage { return Ok(false); } - let block_id = handle.id(); - let cell_id = state.root_cell().repr_hash(); - - let mut batch = weedb::rocksdb::WriteBatch::default(); + let block_id = *handle.id(); + let root_cell = state.root_cell().clone(); let _gc_lock = self.gc_lock.lock().await; - // todo: spawn_blocking - let len = self - .cell_storage - .store_cell(&mut batch, state.root_cell().clone())?; + let raw_db = self.db.raw().clone(); + let cf = self.db.shard_states.get_unbounded_cf(); + let cell_storage = self.cell_storage.clone(); + let block_handle_storage = self.block_handle_storage.clone(); + let handle = handle.clone(); - if block_id.shard.is_masterchain() { - self.max_new_mc_cell_count.fetch_max(len, Ordering::Release); - } else { - self.max_new_sc_cell_count.fetch_max(len, Ordering::Release); - } + let (new_cell_count, updated) = tokio::task::spawn_blocking(move || { + let root_hash = *root_cell.repr_hash(); + + let mut batch = rocksdb::WriteBatch::default(); - let mut value = [0; 32 * 3]; - value[..32].copy_from_slice(cell_id.as_slice()); - value[32..64].copy_from_slice(block_id.root_hash.as_slice()); - 
value[64..96].copy_from_slice(block_id.file_hash.as_slice()); + let (pending_op, new_cell_count) = cell_storage.store_cell(&mut batch, root_cell)?; - batch.put_cf( - &self.db.shard_states.cf(), - BlockIdShort { - shard: block_id.shard, - seqno: block_id.seqno, + let mut value = [0; 32 * 3]; + value[..32].copy_from_slice(root_hash.as_slice()); + value[32..64].copy_from_slice(block_id.root_hash.as_slice()); + value[64..96].copy_from_slice(block_id.file_hash.as_slice()); + + batch.put_cf( + &cf.bound(), + BlockIdShort { + shard: block_id.shard, + seqno: block_id.seqno, + } + .to_vec(), + value, + ); + + raw_db.write(batch)?; + + let updated = handle.meta().set_has_state(); + if updated { + block_handle_storage.store_handle(&handle); } - .to_vec(), - value, - ); - self.db.raw().write(batch)?; + // Ensure that pending operation guard is dropped after the batch is written + drop(pending_op); + Ok::<_, anyhow::Error>((new_cell_count, updated)) + }) + .await??; - Ok(if handle.meta().set_has_state() { - self.block_handle_storage.store_handle(handle); - true + let count = if block_id.shard.is_masterchain() { + &self.max_new_mc_cell_count } else { - false - }) - } + &self.max_new_sc_cell_count + }; - pub async fn load_state(&self, block_id: &BlockId) -> Result { - let cell_id = self.load_state_root(block_id.as_short_id())?; - let cell = self.cell_storage.load_cell(cell_id)?; + count.fetch_max(new_cell_count, Ordering::Release); - ShardStateStuff::new( - *block_id, - Cell::from(cell as Arc<_>), - &self.min_ref_mc_state, - ) + Ok(updated) } - pub fn begin_replace(&'_ self, block_id: &BlockId) -> Result> { - ShardStateReplaceTransaction::new( + pub fn begin_store_state_raw(&'_ self, block_id: &BlockId) -> Result> { + StoreStateRaw::new( + block_id, &self.db, &self.downloads_dir, &self.cell_storage, &self.min_ref_mc_state, - block_id, ) } + pub async fn load_state(&self, block_id: &BlockId) -> Result { + let cell_id = self.load_state_root(block_id.as_short_id())?; + let cell = 
self.cell_storage.load_cell(cell_id)?; + + ShardStateStuff::from_root(block_id, Cell::from(cell as Arc<_>), &self.min_ref_mc_state) + } + pub async fn remove_outdated_states(&self, mc_seqno: u32) -> Result { let _compaction_guard = self.db.delay_compaction().await; @@ -219,12 +225,15 @@ impl ShardStateStorage { let mut batch = weedb::rocksdb::WriteBatch::default(); { let _guard = self.gc_lock.lock().await; - let total = self + let (pending_op, total) = self .cell_storage .remove_cell(&mut batch, &alloc, root_hash)?; batch.delete_cf(&shard_states_cf.bound(), key); raw.write_opt(batch, cells_write_options)?; + // Ensure that pending operation guard is dropped after the batch is written + drop(pending_op); + removed_cells += total; tracing::debug!( removed_cells = total, diff --git a/storage/src/store/shard_state/replace_transaction.rs b/storage/src/store/shard_state/store_state_raw.rs similarity index 63% rename from storage/src/store/shard_state/replace_transaction.rs rename to storage/src/store/shard_state/store_state_raw.rs index 3e6aa290f..78cedb954 100644 --- a/storage/src/store/shard_state/replace_transaction.rs +++ b/storage/src/store/shard_state/store_state_raw.rs @@ -6,18 +6,20 @@ use std::sync::Arc; use anyhow::{Context, Result}; use everscale_types::cell::*; use everscale_types::models::BlockId; +use tycho_block_util::state::*; +use tycho_util::progress_bar::*; +use tycho_util::FastHashMap; use super::cell_storage::*; use super::entries_buffer::*; use super::shard_state_reader::*; use crate::db::*; -use crate::util::*; +use crate::store::shard_state::StoredValue; -use tycho_block_util::state::*; -use tycho_util::progress_bar::*; -use tycho_util::FastHashMap; +pub const MAX_DEPTH: u16 = u16::MAX - 1; -pub struct ShardStateReplaceTransaction<'a> { +pub struct StoreStateRaw<'a> { + block_id: BlockId, db: &'a Db, cell_storage: &'a Arc, min_ref_mc_state: &'a MinRefMcStateTracker, @@ -27,17 +29,19 @@ pub struct ShardStateReplaceTransaction<'a> { file_ctx: 
FilesContext, } -impl<'a> ShardStateReplaceTransaction<'a> { - pub fn new( +impl<'a> StoreStateRaw<'a> { + pub(crate) fn new( + block_id: &BlockId, db: &'a Db, downloads_dir: &FileDb, cell_storage: &'a Arc, min_ref_mc_state: &'a MinRefMcStateTracker, - block_id: &BlockId, ) -> Result { - let file_ctx = FilesContext::new(downloads_dir, block_id)?; + let file_ctx = + FilesContext::new(downloads_dir, block_id).context("failed to create files context")?; Ok(Self { + block_id: *block_id, db, file_ctx, cell_storage, @@ -52,14 +56,10 @@ impl<'a> ShardStateReplaceTransaction<'a> { &self.header } - pub fn process_packet( - &mut self, - packet: Vec, - progress_bar: &mut ProgressBar, - ) -> Result { + pub fn process_part(&mut self, part: Vec, progress_bar: &mut ProgressBar) -> Result { let cells_file = self.file_ctx.cells_file()?; - self.reader.set_next_packet(packet); + self.reader.set_next_packet(part); let header = loop { if let Some(header) = &self.header { @@ -115,11 +115,7 @@ impl<'a> ShardStateReplaceTransaction<'a> { Ok(true) } - pub fn finalize( - mut self, - block_id: BlockId, - progress_bar: &mut ProgressBar, - ) -> Result { + pub fn finalize(mut self, progress_bar: &mut ProgressBar) -> Result { // 2^7 bits + 1 bytes const MAX_DATA_SIZE: usize = 128; const CELLS_PER_BATCH: u64 = 1_000_000; @@ -143,6 +139,7 @@ impl<'a> ShardStateReplaceTransaction<'a> { let mut tail = [0; 4]; let mut ctx = FinalizationContext::new(self.db); + ctx.clear_temp_cells(self.db)?; // Allocate on heap to prevent big future size let mut chunk_buffer = Vec::with_capacity(1 << 20); @@ -189,7 +186,7 @@ impl<'a> ShardStateReplaceTransaction<'a> { unsafe { hashes_file.read_exact_at(index as usize * HashesEntry::LEN, buffer) } } - ShardStateReplaceTransaction::finalize_cell(&mut ctx, cell_index as u32, cell)?; + StoreStateRaw::finalize_cell(&mut ctx, cell_index as u32, cell)?; // SAFETY: `entries_buffer` is guaranteed to be in separate memory area unsafe { @@ -220,7 +217,10 @@ impl<'a> 
ShardStateReplaceTransaction<'a> { let root_hash = ctx.entries_buffer.repr_hash(); ctx.final_check(root_hash)?; - let shard_state_key = block_id.as_short_id().to_vec(); + self.cell_storage.apply_temp_cell(&HashBytes(*root_hash))?; + ctx.clear_temp_cells(self.db)?; + + let shard_state_key = self.block_id.as_short_id().to_vec(); self.db.shard_states.insert(&shard_state_key, root_hash)?; progress_bar.complete(); @@ -231,8 +231,8 @@ impl<'a> ShardStateReplaceTransaction<'a> { let cell_id = HashBytes::from_slice(&root[..32]); let cell = self.cell_storage.load_cell(cell_id)?; - Ok(ShardStateStuff::new( - block_id, + Ok(ShardStateStuff::from_root( + &self.block_id, Cell::from(cell as Arc<_>), self.min_ref_mc_state, )?) @@ -256,12 +256,12 @@ impl<'a> ShardStateReplaceTransaction<'a> { // Prepare mask and counters let mut children_mask = LevelMask::new(0); let mut tree_bits_count = cell.bit_len as u64; - let mut tree_cell_count = 1; + let mut tree_cell_count = 1u64; for (_, child) in children.iter() { children_mask |= child.level_mask(); - tree_bits_count += child.tree_bits_count(); - tree_cell_count += child.tree_cell_count(); + tree_bits_count = tree_bits_count.saturating_add(child.tree_bits_count()); + tree_cell_count = tree_cell_count.saturating_add(child.tree_cell_count()); } let mut is_merkle_cell = false; @@ -296,27 +296,32 @@ impl<'a> ShardStateReplaceTransaction<'a> { level_mask.level() + 1 }; - let mut max_depths = [0u16; 4]; let mut temp_descriptor = cell.descriptor; - for i in 0..hash_count { + + let mut hash_idx = 0; + for level in 0..4 { + if level != 0 && (is_pruned_cell || !level_mask.contains(level)) { + continue; + } let mut hasher = Sha256::new(); let level_mask = if is_pruned_cell { level_mask } else { - LevelMask::from_level(i) + LevelMask::from_level(level) }; temp_descriptor.d1 &= !(CellDescriptor::LEVEL_MASK | CellDescriptor::STORE_HASHES_MASK); temp_descriptor.d1 |= u8::from(level_mask) << 5; hasher.update([temp_descriptor.d1, temp_descriptor.d2]); 
- if i == 0 { + if level == 0 { hasher.update(cell.data); } else { - hasher.update(current_entry.get_hash_slice(i - 1)); + hasher.update(current_entry.get_hash_slice(hash_idx - 1)); } + let mut depth = 0; for (index, child) in children.iter() { let child_depth = if child.cell_type().is_pruned_branch() { let child_data = ctx @@ -324,18 +329,22 @@ impl<'a> ShardStateReplaceTransaction<'a> { .get(index) .ok_or(ReplaceTransactionError::InvalidCell) .context("Pruned branch data not found")?; - child.pruned_branch_depth(i, child_data) + child.pruned_branch_depth(hash_idx + is_merkle_cell as u8, child_data) } else { - child.depth(if is_merkle_cell { i + 1 } else { i }) + child.depth(hash_idx + is_merkle_cell as u8) }; hasher.update(child_depth.to_be_bytes()); - let depth = &mut max_depths[i as usize]; - *depth = std::cmp::max(*depth, child_depth + 1); - - current_entry.set_depth(i, *depth); + depth = child_depth + .checked_add(1) + .map(|next_depth| next_depth.max(depth)) + .filter(|&depth| depth <= MAX_DEPTH) + .ok_or(ReplaceTransactionError::InvalidCell) + .context("Max tree depth exceeded")?; } + current_entry.set_depth(hash_idx, depth); + for (index, child) in children.iter() { let child_hash = if child.cell_type().is_pruned_branch() { let child_data = ctx @@ -344,17 +353,20 @@ impl<'a> ShardStateReplaceTransaction<'a> { .ok_or(ReplaceTransactionError::InvalidCell) .context("Pruned branch data not found")?; child - .pruned_branch_hash(i, child_data) + .pruned_branch_hash(hash_idx + is_merkle_cell as u8, child_data) .context("Invalid pruned branch")? 
} else { - child.hash(if is_merkle_cell { i + 1 } else { i }) + child.hash(hash_idx + is_merkle_cell as u8) }; hasher.update(child_hash); } - current_entry.set_hash(i, hasher.finalize().as_slice()); + current_entry.set_hash(hash_idx, hasher.finalize().as_slice()); + hash_idx += 1; } + anyhow::ensure!(hash_count == hash_idx, "invalid hash count"); + // Update pruned branches if is_pruned_cell { ctx.pruned_branches.insert(cell_index, cell.data.to_vec()); @@ -364,22 +376,10 @@ impl<'a> ShardStateReplaceTransaction<'a> { let output_buffer = &mut ctx.output_buffer; output_buffer.clear(); - output_buffer.extend_from_slice(&[ - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - cell.descriptor.d1, - cell.descriptor.d2, - ]); + output_buffer.extend_from_slice(&[cell.descriptor.d1, cell.descriptor.d2]); output_buffer.extend_from_slice(&cell.bit_len.to_le_bytes()); output_buffer.extend_from_slice(cell.data); - let hash_count = cell.descriptor.hash_count(); for i in 0..hash_count { output_buffer.extend_from_slice(current_entry.get_hash_slice(i)); output_buffer.extend_from_slice(current_entry.get_depth_slice(i)); @@ -394,31 +394,31 @@ impl<'a> ShardStateReplaceTransaction<'a> { .ok_or(ReplaceTransactionError::InvalidCell) .context("Pruned branch data not found")?; child - .pruned_branch_hash(MAX_LEVEL, child_data) + .pruned_branch_hash(LevelMask::MAX_LEVEL, child_data) .context("Invalid pruned branch")? } else { - child.hash(MAX_LEVEL) + child.hash(LevelMask::MAX_LEVEL) }; *ctx.cell_usages.entry(*child_hash).or_default() += 1; output_buffer.extend_from_slice(child_hash); } - // // Write counters - // output_buffer.extend_from_slice(current_entry.get_tree_counters()); + // Write counters + output_buffer.extend_from_slice(current_entry.get_tree_counters()); // Save serialized data let repr_hash = if is_pruned_cell { current_entry .as_reader() - .pruned_branch_hash(3, cell.data) + .pruned_branch_hash(LevelMask::MAX_LEVEL, cell.data) .context("Invalid pruned branch")? 
} else { - current_entry.as_reader().hash(MAX_LEVEL) + current_entry.as_reader().hash(LevelMask::MAX_LEVEL) }; ctx.write_batch - .merge_cf(&ctx.cells_cf, repr_hash, output_buffer.as_slice()); + .put_cf(&ctx.temp_cells_cf, repr_hash, output_buffer.as_slice()); ctx.cell_usages.insert(*repr_hash, -1); // Done @@ -431,7 +431,7 @@ struct FinalizationContext<'a> { cell_usages: FastHashMap<[u8; 32], i32>, entries_buffer: EntriesBuffer, output_buffer: Vec, - cells_cf: BoundedCfHandle<'a>, + temp_cells_cf: BoundedCfHandle<'a>, write_batch: rocksdb::WriteBatch, } @@ -442,26 +442,25 @@ impl<'a> FinalizationContext<'a> { cell_usages: FastHashMap::with_capacity_and_hasher(128, Default::default()), entries_buffer: EntriesBuffer::new(), output_buffer: Vec::with_capacity(1 << 10), - cells_cf: db.cells.cf(), + temp_cells_cf: db.temp_cells.cf(), write_batch: rocksdb::WriteBatch::default(), } } - fn finalize_cell_usages(&mut self) { - self.cell_usages.retain(|key, &mut rc| { - if rc > 0 { - self.write_batch.merge_cf( - &self.cells_cf, - key, - refcount::encode_positive_refcount(rc as u32), - ); - } + fn clear_temp_cells(&self, db: &Db) -> std::result::Result<(), rocksdb::Error> { + let from = &[0x00; 32]; + let to = &[0xff; 32]; + db.raw().delete_range_cf(&self.temp_cells_cf, from, to) + } - rc < 0 - }); + fn finalize_cell_usages(&mut self) { + self.cell_usages.retain(|_, &mut rc| rc < 0); } fn final_check(&self, root_hash: &[u8; 32]) -> Result<()> { + tracing::info!(root_hash = %HashBytes::wrap(root_hash), "Final check"); + tracing::info!(len=?self.cell_usages.len(), "Cell usages"); + anyhow::ensure!( self.cell_usages.len() == 1 && self.cell_usages.contains_key(root_hash), "Invalid shard state cell" @@ -557,4 +556,147 @@ enum FilesContextError { AlreadyFinalized, } -const MAX_LEVEL: u8 = 3; +#[cfg(test)] +mod test { + + use std::io::{BufReader, Read}; + + use bytesize::ByteSize; + use everscale_types::models::ShardIdent; + use tycho_util::project_root; + use 
weedb::rocksdb::{IteratorMode, WriteBatch}; + + use super::*; + + #[tokio::test] + #[ignore] + async fn insert_and_delete_of_several_shards() -> Result<()> { + tycho_util::test::init_logger("insert_and_delete_of_several_shards", "debug"); + let project_root = project_root()?.join(".scratch"); + let integration_test_path = project_root.join("integration_tests"); + let current_test_path = integration_test_path.join("insert_and_delete_of_several_shards"); + std::fs::remove_dir_all(¤t_test_path).ok(); + std::fs::create_dir_all(¤t_test_path)?; + // decompressing the archive + let archive_path = integration_test_path.join("states.tar.zst"); + let res = std::process::Command::new("tar") + .arg("-I") + .arg("zstd") + .arg("-xf") + .arg(&archive_path) + .arg("-C") + .arg(¤t_test_path) + .status()?; + if !res.success() { + return Err(anyhow::anyhow!("Failed to decompress the archive")); + } + tracing::info!("Decompressed the archive"); + + let db = Db::open( + current_test_path.join("rocksdb"), + DbConfig { + rocksdb_lru_capacity: ByteSize::mb(256), + }, + )?; + let file_db = FileDb::new(current_test_path.join("file_db"))?; + + let cells_storage = CellStorage::new(db.clone(), 100_000_000); + + let tracker = MinRefMcStateTracker::new(); + let download_dir = file_db.create_subdir("downloads")?; + + for file in std::fs::read_dir(current_test_path.join("states"))? 
{ + let file = file?; + let filename = file.file_name().to_string_lossy().to_string(); + + let block_id = parse_filename(filename.as_ref()); + + let mut store_state = + StoreStateRaw::new(&block_id, &db, &download_dir, &cells_storage, &tracker) + .context("Failed to create ShardStateReplaceTransaction")?; + + let file = File::open(file.path())?; + let mut file = BufReader::new(file); + let chunk_size = 10_000_000; // size of each chunk in bytes + let mut buffer = vec![0u8; chunk_size]; + let mut pg = ProgressBar::builder("downloading state") + .exact_unit("cells") + .build(); + + loop { + let bytes_read = file.read(&mut buffer)?; + if bytes_read == 0 { + break; // End of file + } + + let packet = buffer[..bytes_read].to_vec(); + store_state.process_part(packet, &mut pg)?; + } + + let mut pg = ProgressBar::builder("processing state") + .with_mapper(|x| bytesize::to_string(x, false)) + .build(); + store_state.finalize(&mut pg)?; + } + tracing::info!("Finished processing all states"); + tracing::info!("Starting gc"); + states_gc(&cells_storage, &db).await?; + + drop(db); + drop(cells_storage); + rocksdb::DB::destroy( + &rocksdb::Options::default(), + current_test_path.join("rocksdb"), + )?; + + Ok(()) + } + + async fn states_gc(cell_storage: &Arc, db: &Db) -> Result<()> { + let states_iterator = db.shard_states.iterator(IteratorMode::Start); + let bump = bumpalo::Bump::new(); + + let total_states = db.shard_states.iterator(IteratorMode::Start).count(); + + for (deleted, state) in states_iterator.enumerate() { + let (_, value) = state?; + + // check that state actually exists + let cell = cell_storage.load_cell(HashBytes::from_slice(value.as_ref()))?; + + let mut batch = WriteBatch::default(); + cell_storage.remove_cell(&mut batch, &bump, cell.hash(LevelMask::MAX_LEVEL))?; + + // execute batch + db.raw().write_opt(batch, db.cells.write_config())?; + tracing::info!("State deleted. Progress: {}/{total_states}", deleted + 1); + } + + // two compactions in row. 
First one run merge operators, second one will remove all tombstones + db.trigger_compaction().await; + db.trigger_compaction().await; + + let cells_left = db.cells.iterator(IteratorMode::Start).count(); + tracing::info!("States GC finished. Cells left: {cells_left}"); + assert_eq!(cells_left, 0, "Gc is broken. Press F to pay respect"); + + Ok(()) + } + + fn parse_filename(name: &str) -> BlockId { + // Split the remaining string by commas into components + let parts: Vec<&str> = name.split(',').collect(); + + // Parse each part + let workchain: i32 = parts[0].parse().unwrap(); + let prefix = u64::from_str_radix(parts[1], 16).unwrap(); + let seqno: u32 = parts[2].parse().unwrap(); + + BlockId { + shard: ShardIdent::new(workchain, prefix).unwrap(), + seqno, + root_hash: Default::default(), + file_hash: Default::default(), + } + } +} diff --git a/storage/src/util/stored_value.rs b/storage/src/util/stored_value.rs index a4da922f2..1cfaadee0 100644 --- a/storage/src/util/stored_value.rs +++ b/storage/src/util/stored_value.rs @@ -1,8 +1,7 @@ use bytes::Buf; -use smallvec::SmallVec; - use everscale_types::cell::HashBytes; use everscale_types::models::{BlockId, BlockIdShort, ShardIdent}; +use smallvec::SmallVec; /// A trait for writing or reading data from a stack-allocated buffer pub trait StoredValue { diff --git a/storage/tests/mod.rs b/storage/tests/mod.rs index b945e216a..29091c704 100644 --- a/storage/tests/mod.rs +++ b/storage/tests/mod.rs @@ -1,12 +1,11 @@ use std::str::FromStr; use anyhow::Result; -use bytesize::ByteSize; use everscale_types::boc::Boc; use everscale_types::cell::{Cell, DynCell}; use everscale_types::models::{BlockId, ShardState}; use tycho_block_util::state::ShardStateStuff; -use tycho_storage::{BlockMetaData, Db, DbOptions, Storage}; +use tycho_storage::{BlockMetaData, Storage}; #[derive(Clone)] struct ShardStateCombined { @@ -15,8 +14,7 @@ struct ShardStateCombined { } impl ShardStateCombined { - fn from_file(path: impl AsRef) -> Result { - let 
bytes = std::fs::read(path.as_ref())?; + fn from_bytes(bytes: &[u8]) -> Result { let cell = Boc::decode(&bytes)?; let state = cell.parse()?; Ok(Self { cell, state }) @@ -57,29 +55,15 @@ fn compare_cells(orig_cell: &DynCell, stored_cell: &DynCell) { async fn persistent_storage_everscale() -> Result<()> { tracing_subscriber::fmt::try_init().ok(); - let tmp_dir = tempfile::tempdir()?; - let root_path = tmp_dir.path(); - - // Init rocksdb - let db_options = DbOptions { - rocksdb_lru_capacity: ByteSize::kb(1024), - cells_cache_size: ByteSize::kb(1024), - }; - let db = Db::open(root_path.join("db_storage"), db_options)?; - - // Init storage - let storage = Storage::new( - db, - root_path.join("file_storage"), - db_options.cells_cache_size.as_u64(), - )?; + let (storage, _tmp_dir) = Storage::new_temp()?; assert!(storage.node_state().load_init_mc_block_id().is_none()); // Read zerostate - let zero_state_raw = ShardStateCombined::from_file("tests/everscale_zerostate.boc")?; + let zero_state_raw = + ShardStateCombined::from_bytes(include_bytes!("../../test/test_state_2_master.boc"))?; // Parse block id - let block_id = BlockId::from_str("-1:8000000000000000:0:58ffca1a178daff705de54216e5433c9bd2e7d850070d334d38997847ab9e845:d270b87b2952b5ba7daa70aaf0a8c361befcf4d8d2db92f9640d5443070838e4")?; + let block_id = BlockId::from_str("-1:8000000000000000:2:4557702252a8fcec88387ab78407e5116e83222b213653911f86e6504cb7aa78:e2bc83d6be6975b9c68f56c5f6d4997d2a33226bfac6a431b47874e3ba18db75")?; // Write zerostate to db let (handle, _) = storage.block_handle_storage().create_or_load_handle( @@ -87,8 +71,8 @@ async fn persistent_storage_everscale() -> Result<()> { BlockMetaData::zero_state(zero_state_raw.gen_utime().unwrap()), ); - let zerostate = ShardStateStuff::new( - block_id, + let zerostate = ShardStateStuff::from_root( + &block_id, zero_state_raw.cell.clone(), storage.shard_state_storage().min_ref_mc_state(), )?; @@ -123,13 +107,13 @@ async fn persistent_storage_everscale() -> 
Result<()> { storage .persistent_state_storage() - .prepare_persistent_states_dir(&zerostate.block_id())?; + .prepare_persistent_states_dir(zerostate.block_id())?; storage .persistent_state_storage() .save_state( - &zerostate.block_id(), - &zerostate.block_id(), + zerostate.block_id(), + zerostate.block_id(), zero_state_raw.cell.repr_hash(), ) .await?; @@ -137,7 +121,7 @@ async fn persistent_storage_everscale() -> Result<()> { // Check if state exists let exist = storage .persistent_state_storage() - .state_exists(&zerostate.block_id(), &zerostate.block_id()); + .state_exists(zerostate.block_id(), zerostate.block_id()); assert_eq!(exist, true); // Read persistent state @@ -146,12 +130,7 @@ async fn persistent_storage_everscale() -> Result<()> { let persistent_state_storage = storage.persistent_state_storage(); let persistent_state_data = persistent_state_storage - .read_state_part( - &zerostate.block_id(), - &zerostate.block_id(), - offset, - max_size, - ) + .read_state_part(zerostate.block_id(), zerostate.block_id(), offset, max_size) .await .unwrap(); @@ -159,8 +138,5 @@ async fn persistent_storage_everscale() -> Result<()> { let cell = Boc::decode(&persistent_state_data)?; assert_eq!(&cell, zerostate.root_cell()); - // Clear files for test - tmp_dir.close()?; - Ok(()) } diff --git a/collator/src/state_node/tests/data/test_state_2_0:80.boc b/test/test_state_2_0:80.boc similarity index 100% rename from collator/src/state_node/tests/data/test_state_2_0:80.boc rename to test/test_state_2_0:80.boc diff --git a/collator/src/state_node/tests/data/test_state_2_master.boc b/test/test_state_2_master.boc similarity index 100% rename from collator/src/state_node/tests/data/test_state_2_master.boc rename to test/test_state_2_master.boc diff --git a/util/Cargo.toml b/util/Cargo.toml index 5511f9023..1ea918b08 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -11,6 +11,7 @@ license.workspace = true [dependencies] # crates.io deps ahash = { workspace = true } +anyhow = { 
workspace = true } castaway = { workspace = true } dashmap = { workspace = true } everscale-crypto = { workspace = true } @@ -20,6 +21,8 @@ humantime = { workspace = true } libc = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +serde_path_to_error = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["time", "sync", "rt"] } tracing = { workspace = true } diff --git a/util/src/lib.rs b/util/src/lib.rs index 773b0a3e1..c2426210e 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -1,5 +1,6 @@ -use std::collections::HashMap; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::process::Command; pub mod progress_bar; pub mod serde_helpers; @@ -115,6 +116,23 @@ pub mod __internal { } } +pub fn project_root() -> Result { + use anyhow::Context; + + let project_root = Command::new("git") + .arg("rev-parse") + .arg("--show-toplevel") + .output()? + .stdout; + // won't work on windows but we don't care + let project_root = PathBuf::from( + String::from_utf8(project_root) + .context("invalid project root")? 
+ .trim(), + ); + Ok(project_root) +} + #[cfg(test)] mod tests { use super::*; diff --git a/util/src/serde_helpers.rs b/util/src/serde_helpers.rs index 0d2ddd9ce..d47af4752 100644 --- a/util/src/serde_helpers.rs +++ b/util/src/serde_helpers.rs @@ -1,10 +1,22 @@ use std::borrow::Cow; use std::marker::PhantomData; +use std::path::Path; use std::str::FromStr; +use anyhow::Result; use serde::de::{Error, Expected, Visitor}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; +pub fn load_json_from_file(path: P) -> Result +where + for<'de> T: Deserialize<'de>, + P: AsRef, +{ + let data = std::fs::read_to_string(path)?; + let de = &mut serde_json::Deserializer::from_str(&data); + serde_path_to_error::deserialize(de).map_err(Into::into) +} + pub mod socket_addr { use std::net::SocketAddr; @@ -321,7 +333,7 @@ impl<'de> Visitor<'de> for HexVisitor { } fn visit_str(self, value: &str) -> Result { - hex::decode(value).map_err(|_| E::invalid_type(serde::de::Unexpected::Str(value), &self)) + hex::decode(value).map_err(|_e| E::invalid_type(serde::de::Unexpected::Str(value), &self)) } fn visit_bytes(self, value: &[u8]) -> Result { diff --git a/util/src/test/logger.rs b/util/src/test/logger.rs index 1499d3dcf..9894cfa5f 100644 --- a/util/src/test/logger.rs +++ b/util/src/test/logger.rs @@ -1,3 +1,5 @@ +#![allow(clippy::exit)] + use tracing_subscriber::EnvFilter; pub fn init_logger(test_name: &str, filter: &str) {