diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..e039b6afe --- /dev/null +++ b/.dockerignore @@ -0,0 +1,12 @@ +debug/ +target/ +**/*.rs.bk +*.pdb + +.idea/ +.vscode/ +.fleet/ + +perf.data* + +.scratch \ No newline at end of file diff --git a/.gitignore b/.gitignore index 004ef028a..ca739ab20 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ target/ .vscode/ .fleet/ -perf.data* \ No newline at end of file +perf.data* +.scratch \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index a0eb48760..4dcd6c60f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "getrandom", @@ -41,9 +41,40 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" + +[[package]] +name = "argh" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7af5ba06967ff7214ce4c7419c7d185be7ecd6cc4965a8f6e1d8ce0398aad219" +dependencies = [ + "argh_derive", + "argh_shared", +] + +[[package]] +name = "argh_derive" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56df0aeedf6b7a2fc67d06db35b09684c3e8da0c95f8f27685cb17e08413d87a" +dependencies = [ + "argh_shared", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "argh_shared" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5693f39141bda5760ecc4111ab08da40565d1771038c4a0250f03457ec707531" +dependencies = [ + "serde", +] [[package]] name = "asn1-rs" @@ -107,9 +138,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -117,12 +148,48 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.65.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +dependencies = [ + "bitflags 1.3.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.48", +] + [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = 
"bitflags" +version = "2.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" + [[package]] name = "block-buffer" version = "0.10.4" @@ -138,11 +205,70 @@ version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +[[package]] +name = "bytecount" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" + [[package]] name = "bytes" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +dependencies = [ + "serde", +] + +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "camino" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + +[[package]] +name = "castaway" +version = "0.2.3" +source = "git+https://github.com/sagebind/castaway.git#564b11fb3394802b895f44fe42a7bba7b17df69b" +dependencies = [ + "rustversion", +] [[package]] name = "cc" @@ -150,42 +276,47 @@ version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ + "jobserver", "libc", ] [[package]] -name = "cfg-if" -version = "1.0.0" +name = "cexpr" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] [[package]] -name = "const-oid" -version = "0.9.5" +name = "cfg-if" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "core-foundation" -version = "0.9.3" +name = "clang-sys" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ - "core-foundation-sys", + "glob", "libc", + "libloading", ] [[package]] -name = "core-foundation-sys" -version = "0.8.4" +name = "const-oid" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -205,6 +336,30 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crossbeam-channel" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + [[package]] name = "crypto-common" version = "0.1.6" @@ -215,6 +370,33 @@ dependencies = [ "typenum", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "fiat-crypto", + "platforms", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "dashmap" version = "5.5.3" @@ -260,9 +442,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -285,7 +467,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", ] [[package]] @@ -298,40 +480,85 @@ dependencies = [ "signature", ] +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + +[[package]] +name = "everscale-crypto" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3b3e4fc7882223c86a7cfd8ccdb58e017b89a9f91d90114beafa0e8d35b45fb" +dependencies = [ + "curve25519-dalek", + "generic-array", + "hex", + "rand", + "sha2", + "tl-proto", +] + 
+[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "fiat-crypto" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" + [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-core", "futures-macro", @@ -354,9 +581,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", @@ -365,21 +592,27 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "glob" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "d0c62115964e08cb8039170eb33c1d0e2388a256930279edca206fff675f82c3" [[package]] name = "hex" @@ -387,17 +620,32 @@ version = "0.4.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + +[[package]] +name = "jobserver" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +dependencies = [ + "libc", +] [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ "wasm-bindgen", ] @@ -408,11 +656,61 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" -version = "0.2.150" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "libloading" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "librocksdb-sys" +version = "0.11.0+8.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "lz4-sys", + "tikv-jemalloc-sys", + "zstd-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -430,6 +728,16 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "lz4-sys" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "matchers" version = "0.1.0" @@ -441,9 +749,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "minimal-lexical" @@ -453,22 +761,43 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", - "windows-sys", + "windows-sys 0.48.0", +] + +[[package]] +name = "moka" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1911e88d5831f748a4097a43862d129e3c6fca831eecac9b8db6d01d93c9de2" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version", + "skeptic", + "smallvec", + "tagptr", + "thiserror", + "triomphe", + "uuid", ] [[package]] @@ -502,21 +831,26 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -533,9 +867,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -551,15 +885,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "openssl-probe" -version = "0.1.5" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "overload" @@ -567,6 +895,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking_lot" +version = "0.12.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + [[package]] name = "parking_lot_core" version = "0.9.9" @@ -577,14 +915,20 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "pem" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ "base64", "serde", @@ -592,9 +936,9 @@ dependencies = [ [[package]] name = "pest" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" +checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" dependencies = [ "memchr", "thiserror", @@ -603,9 +947,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2" +checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" dependencies = [ "pest", "pest_generator", @@ -613,22 +957,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227" +checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", ] [[package]] name = "pest_meta" -version = "2.7.5" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6" +checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" dependencies = [ "once_cell", "pest", @@ -657,6 +1001,18 @@ dependencies = [ "spki", ] +[[package]] +name = "pkg-config" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" + +[[package]] +name = "platforms" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" + [[package]] name = "powerfmt" version = "0.2.0" @@ -669,15 +1025,51 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" +dependencies = [ + "proc-macro2", + "syn 2.0.48", +] + [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] +[[package]] +name = "pulldown-cmark" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" +dependencies = [ + "bitflags 2.4.2", + "memchr", + "unicase", +] + +[[package]] +name = "quanta" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + [[package]] name = "quinn" version = "0.10.2" @@ -706,7 +1098,6 @@ dependencies = [ "ring 0.16.20", "rustc-hash", "rustls", - "rustls-native-certs", "slab", "thiserror", "tinyvec", @@ -723,14 +1114,14 @@ dependencies = [ "libc", "socket2", "tracing", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -765,6 +1156,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "raw-cpuid" +version = "11.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" +dependencies = [ + "bitflags 2.4.2", +] + [[package]] name = "rcgen" version = "0.11.3" @@ -783,18 +1183,18 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -809,9 +1209,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -847,16 +1247,26 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.5" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys", + "windows-sys 0.48.0", +] + +[[package]] +name = "rocksdb" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +dependencies = [ + "libc", + "librocksdb-sys", ] [[package]] @@ -872,45 +1282,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] -name = "rusticata-macros" -version = "4.1.0" +name = "rustc_version" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "nom", + "semver", ] [[package]] -name = "rustls" -version = "0.21.9" +name = "rusticata-macros" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "log", - "ring 0.17.5", - "rustls-webpki", - "sct", + "nom", ] [[package]] -name = "rustls-native-certs" -version = "0.6.3" +name = "rustix" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", + "bitflags 2.4.2", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", ] [[package]] -name = "rustls-pemfile" -version = "1.0.4" +name = "rustls" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ - "base64", + "log", + "ring 0.17.7", + "rustls-webpki", + "sct", ] [[package]] @@ -919,17 +1330,29 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.5", + "ring 0.17.7", "untrusted 0.9.0", ] [[package]] -name = "schannel" -version = "0.1.22" +name = "rustversion" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "ryu" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "windows-sys", + "winapi-util", ] [[package]] @@ -944,51 +1367,48 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.5", + "ring 0.17.7", "untrusted 0.9.0", ] [[package]] -name = "security-framework" -version = "2.9.2" +name = "semver" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" -dependencies = [ - "core-foundation-sys", - "libc", + "serde", ] [[package]] name = "serde" -version = "1.0.193" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", +] + +[[package]] +name = "serde_json" +version = "1.0.113" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +dependencies = [ + "itoa", + "ryu", + "serde", ] [[package]] @@ -1011,6 +1431,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signature" version = "2.2.0" @@ -1020,6 +1446,21 @@ dependencies = [ "rand_core", ] +[[package]] +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata", + "error-chain", + "glob", + "pulldown-cmark", + "tempfile", + "walkdir", +] + [[package]] name = "slab" version = "0.4.9" @@ -1031,9 +1472,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" @@ -1042,7 +1483,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1059,14 +1500,20 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", ] +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + [[package]] name = "syn" version = "1.0.109" @@ -1080,9 +1527,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ 
-1101,24 +1548,42 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + +[[package]] +name = "tempfile" +version = "3.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +dependencies = [ + "cfg-if", + "fastrand", + "rustix", + "windows-sys 0.52.0", +] + [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", ] [[package]] @@ -1131,14 +1596,25 @@ dependencies = [ "once_cell", ] +[[package]] +name = "tikv-jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "time" -version = "0.3.30" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -1153,10 +1629,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + "num-conv", "time-core", ] @@ -1177,9 +1654,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tl-proto" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b51063c4076ddf3c068738d65dca0946937894ac8139508b6693dd3414c3f3" +checksum = "3418163db528cc2324ed7bc9d52aa3ca7a8f73d685f8b21b319d2a08ee4b36d3" dependencies = [ "bytes", "digest", @@ -1198,7 +1675,7 @@ dependencies = [ "proc-macro2", "quote", "rustc-hash", - "syn 2.0.39", + "syn 2.0.48", "tl-scheme", ] @@ -1217,9 +1694,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.34.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -1229,7 +1706,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1240,7 +1717,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", ] [[package]] @@ -1263,7 +1740,6 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -1277,7 +1753,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", ] [[package]] @@ -1342,6 +1818,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "triomphe" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" + [[package]] name = "tycho-collator" version = "0.0.1" @@ -1356,9 +1838,18 @@ dependencies = [ name = "tycho-consensus" version = "0.0.1" dependencies = [ + "anyhow", + "bincode", + "bytes", + "futures-util", + "serde", + "tokio", + "tracing", + "tracing-test", "tycho-network", "tycho-storage", "tycho-util", + "weedb", ] [[package]] @@ -1376,11 +1867,16 @@ version = "0.0.1" dependencies = [ "ahash", "anyhow", + "argh", + "base64", "bytes", + "castaway", "dashmap", "ed25519", + "everscale-crypto", "futures-util", "hex", + "moka", "pin-project-lite", "pkcs8", "quinn", @@ -1390,12 +1886,14 @@ dependencies = [ "rustls", "rustls-webpki", "serde", + "serde_json", "socket2", "thiserror", "tl-proto", "tokio", "tokio-util", "tracing", + "tracing-subscriber", "tracing-test", "tycho-util", "x509-parser", @@ -1411,6 +1909,17 @@ dependencies = [ [[package]] name = "tycho-util" version = "0.0.1" +dependencies = [ + "ahash", + "castaway", + "dashmap", + "futures-util", + "hex", + "humantime", + "rand", + "serde", + "tokio", +] [[package]] name = "typenum" @@ -1424,6 +1933,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-ident" version = "1.0.12" @@ -1448,18 +1966,43 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "uuid" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +dependencies = [ + "getrandom", +] + [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "walkdir" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +dependencies = [ + "same-file", 
+ "winapi-util", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1468,9 +2011,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -1478,24 +2021,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1503,33 +2046,45 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" -version = "0.3.65" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" +checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "weedb" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de8f9c5dfe31e92c6374e25086363a0dccf15cf9b0923ea8a4a2a105d662428e" +dependencies = [ + "librocksdb-sys", + "rocksdb", + "thiserror", + "tracing", +] + [[package]] name = "winapi" version = "0.3.9" @@ -1546,6 +2101,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -1558,7 +2122,16 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + 
+[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] @@ -1567,13 +2140,28 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] @@ -1582,42 +2170,84 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "x509-parser" version = "0.15.1" @@ -1646,22 +2276,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.26" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.26" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.48", ] [[package]] @@ -1669,3 +2299,13 @@ name = "zeroize" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml index 311a39381..9debc935d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,11 @@ members = ["consensus", "core", "network", "storage", "util", "collator"] [profile.release] debug = true +[patch.crates-io] +# NOTE: use crates.io dependency when it is released +# https://github.com/sagebind/castaway/issues/18 +castaway = { git = "https://github.com/sagebind/castaway.git" } + [workspace.lints.rust] future_incompatible = "warn" nonstandard_style = "warn" diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 6b2337326..ba9255877 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -6,8 +6,22 @@ description = "DAG-based consensus for external messages queue." 
[dependencies] # crates.io deps +anyhow = "1.0" +bincode = "1.3" +bytes = { version = "1.0", features = ["serde"] } +futures-util = { version = "0.3" } +serde = { version = "1.0", features = ["derive"] } +tracing = "0.1" +weedb = "0.1" # local deps tycho-network = { path = "../network", version = "=0.0.1" } tycho-storage = { path = "../storage", version = "=0.0.1" } tycho-util = { path = "../util", version = "=0.0.1" } + +[dev-dependencies] +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +tracing-test = "0.2" + +[lints] +workspace = true diff --git a/consensus/src/engine/dag.rs b/consensus/src/engine/dag.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/engine/dag.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/engine/mod.rs b/consensus/src/engine/mod.rs new file mode 100644 index 000000000..16aee4a36 --- /dev/null +++ b/consensus/src/engine/mod.rs @@ -0,0 +1,2 @@ +mod dag; +mod threshold_clock; diff --git a/consensus/src/engine/threshold_clock.rs b/consensus/src/engine/threshold_clock.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/engine/threshold_clock.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/intercom/dispatcher.rs b/consensus/src/intercom/dispatcher.rs new file mode 100644 index 000000000..cfb39c5a7 --- /dev/null +++ b/consensus/src/intercom/dispatcher.rs @@ -0,0 +1,278 @@ +use std::net::{Ipv4Addr, SocketAddr}; +use std::sync::Arc; + +use anyhow::{anyhow, Result}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; + +use tycho_network::{ + service_query_fn, Network, NetworkConfig, NetworkExt, Response, ServiceRequest, Version, +}; + +use crate::intercom::responses::*; +use crate::models::{Location, Point, PointId, RoundId, Signature}; + +#[derive(Serialize, Deserialize, Debug)] +enum MPRequest { + // by author + Broadcast { point: Point }, + Point { id: PointId }, + // any point from the last author's round; + // 1/3+1 evidenced vertices determine current consensus round + // PointLast, + // unique point with known evidence + Vertex { id: Location }, + // the next point by the same author + // that contains >=2F signatures for requested vertex + Evidence { vertex_id: Location }, + Vertices { round: RoundId }, +} + +#[derive(Serialize, Deserialize, Debug)] +enum MPResponse { + Broadcast(BroadcastResponse), + Point(PointResponse), + //PointLast(Option<Point>), + Vertex(VertexResponse), + Evidence(EvidenceResponse), + Vertices(VerticesResponse), +} + +#[derive(Serialize, Deserialize, Debug)] +enum MPRemoteResult { + Ok(MPResponse), + Err(String), +} + +pub struct Dispatcher { + inner: Arc<DispatcherInner>, + network: Network, +} + +impl Dispatcher { + pub fn new() -> Result<Self> { + let inner = Arc::new(DispatcherInner {}); + let service_fn = service_query_fn({ + let inner = inner.clone(); + move |req| inner.clone().handle(req) + }); + + let network = Network::builder() + .with_config(NetworkConfig::default()) + .with_random_private_key() + .with_service_name("tycho-mempool-router") + .build((Ipv4Addr::LOCALHOST, 0), service_fn)?; + + Ok(Self { inner, network }) + } + + pub async fn broadcast(&self, point: Point, from: SocketAddr) -> Result<BroadcastResponse> { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Broadcast { point })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)?
{ + MPResponse::Broadcast(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn point(&self, id: PointId, from: SocketAddr) -> Result<PointResponse> { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Point { id })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Point(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn vertex(&self, id: Location, from: SocketAddr) -> Result<VertexResponse> { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Vertex { id })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Vertex(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn evidence( + &self, + vertex_id: Location, + from: SocketAddr, + ) -> Result<EvidenceResponse> { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Evidence { vertex_id })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Evidence(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } + + pub async fn vertices(&self, round: RoundId, from: SocketAddr) -> Result<VerticesResponse> { + let request = tycho_network::Request { + version: Version::V1, + body: Bytes::from(bincode::serialize(&MPRequest::Vertices { round })?), + }; + + let remote_peer = self.network.connect(from).await?; + + let response = self.network.query(&remote_peer, request).await?; + + match parse_response(&response.body)? { + MPResponse::Vertices(r) => Ok(r), + x => Err(anyhow!("wrong response")), + } + } +} + +struct DispatcherInner { + // state and storage components go here +} + +impl DispatcherInner { + async fn handle(self: Arc<Self>, req: ServiceRequest) -> Option<Response> { + let body = match bincode::deserialize::<MPRequest>(&req.body) { + Ok(body) => body, + Err(e) => { + tracing::error!("unexpected request from {:?}: {e:?}", req.metadata); + // NOTE: malformed request is a reason to ignore it + return None; + } + }; + + let response = match body { + MPRequest::Broadcast { point } => { + // 1.1 sigs for my block + 1.2 my next includes + // ??
+ 3.1 ask last + MPResponse::Broadcast(BroadcastResponse { + current_round: RoundId(0), + signature: Signature(Bytes::new()), + signer_point: None, + }) + } + MPRequest::Point { id } => { + // 1.2 my next includes (merged with Broadcast flow) + MPResponse::Point(PointResponse { + current_round: RoundId(0), + point: None, + }) + } + MPRequest::Vertex { id } => { + // verification flow: downloader + MPResponse::Vertex(VertexResponse { + current_round: RoundId(0), + vertex: None, + }) + } + MPRequest::Evidence { vertex_id } => { + // verification flow: downloader + MPResponse::Evidence(EvidenceResponse { + current_round: RoundId(0), + point: None, + }) + } + MPRequest::Vertices { round } => { + // cold sync flow: downloader + MPResponse::Vertices(VerticesResponse { + vertices: Vec::new(), + }) + } + }; + + Some(Response { + version: Version::default(), + body: Bytes::from(match bincode::serialize(&response) { + Ok(data) => data, + Err(e) => { + tracing::error!("failed to serialize response to {:?}: {e:?}", req.metadata); + bincode::serialize(&MPRemoteResult::Err(format!("internal error"))) + .expect("must not fail") + } + }), + }) + } +} + +fn parse_response(body: &Bytes) -> anyhow::Result { + if body.is_empty() { + return Err(anyhow::Error::msg( + "remote response serialization exception is hidden by exception during serialization", + )); + } + match bincode::deserialize::(body) { + Ok(MPRemoteResult::Ok(response)) => Ok(response), + Ok(MPRemoteResult::Err(e)) => Err(anyhow::Error::msg(e)), + Err(e) => Err(anyhow!("failed to deserialize response: {e:?}")), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + #[tracing_test::traced_test] + async fn underlying_network_works() -> Result<()> { + let node1 = Dispatcher::new()?.network; + let node2 = Dispatcher::new()?.network; + + let peer2 = node1.connect(node2.local_addr()).await?; + let response = node1 + .query( + &peer2, + tycho_network::Request { + version: Version::V1, + body: Bytes::from("bites"), + }, + ) + .await?; + let response = parse_response(&response.body); + + tracing::info!("response '{response:?}'"); + + assert!(response.is_err()); + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn dispatcher_works() -> Result<()> { + let node1 = Dispatcher::new()?; + let node2 = Dispatcher::new()?; + + let data = node1 + .vertices(RoundId(0), node2.network.local_addr()) + .await?; + + tracing::info!("response: '{data:?}'"); + + assert!(data.vertices.is_empty()); + Ok(()) + } +} diff --git a/consensus/src/intercom/mod.rs b/consensus/src/intercom/mod.rs new file mode 100644 index 000000000..736e764d3 --- /dev/null +++ b/consensus/src/intercom/mod.rs @@ -0,0 +1,4 @@ +mod dispatcher; +mod receiver; +mod responses; +mod uploader; diff --git a/consensus/src/intercom/receiver.rs b/consensus/src/intercom/receiver.rs new file mode 100644 index 000000000..85b0baca4 --- /dev/null +++ b/consensus/src/intercom/receiver.rs @@ -0,0 +1 @@ +pub struct Receiver {} diff --git a/consensus/src/intercom/responses.rs b/consensus/src/intercom/responses.rs new file mode 100644 index 000000000..1eb2c18c7 --- /dev/null +++ b/consensus/src/intercom/responses.rs @@ -0,0 +1,32 @@ +use serde::{Deserialize, Serialize}; + +use crate::models::{Point, RoundId, Signature}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct BroadcastResponse { + pub current_round: RoundId, + // for requested point + pub signature: Signature, + // at the same round, if it was not skipped + pub signer_point: Option, +} +#[derive(Serialize, 
Deserialize, Debug)] +pub struct PointResponse { + pub current_round: RoundId, + pub point: Option, +} +//PointLast(Option), +#[derive(Serialize, Deserialize, Debug)] +pub struct VertexResponse { + pub current_round: RoundId, + pub vertex: Option, +} +#[derive(Serialize, Deserialize, Debug)] +pub struct EvidenceResponse { + pub current_round: RoundId, + pub point: Option, +} +#[derive(Serialize, Deserialize, Debug)] +pub struct VerticesResponse { + pub vertices: Vec, +} diff --git a/consensus/src/intercom/uploader.rs b/consensus/src/intercom/uploader.rs new file mode 100644 index 000000000..1d4b54f75 --- /dev/null +++ b/consensus/src/intercom/uploader.rs @@ -0,0 +1 @@ +pub struct Uploader {} diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 8b1378917..a43c4473e 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -1 +1,4 @@ - +mod engine; +mod intercom; +mod models; +mod tasks; diff --git a/consensus/src/models.rs b/consensus/src/models.rs new file mode 100644 index 000000000..b8ab3bff5 --- /dev/null +++ b/consensus/src/models.rs @@ -0,0 +1,61 @@ +use bytes::Bytes; +use serde::{Deserialize, Serialize}; +use tycho_util::FastHashMap; + +pub const POINT_DIGEST_SIZE: usize = 32; +pub const SIGNATURE_SIZE: usize = 64; + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct Digest(pub Bytes); +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct Signature(pub Bytes); +#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Debug)] +pub struct NodeId(pub u8); +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct RoundId(pub u32); + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct Location { + round: RoundId, + author: NodeId, +} + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +pub struct PointId { + location: Location, + digest: Digest, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct PrevPoint { + round: RoundId, + digest: Digest, + // >= 2F witnesses, point author excluded + evidence: FastHashMap, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct PointData { + location: Location, + local_time: u64, + payload: Vec, + // >= 2F+1 vertices from the round before last, + // optionally including author's own vertex + includes: FastHashMap, + anchor: PointId, + proposed_leader: Option, + // any vertices the leader adds to its diff-graph + // beyond its direct inclusions + leader_deep_includes: Vec, + // of the same author + prev_point: Option, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct Point { + data: PointData, + // author's + signature: Signature, + // of both data and author's signature + digest: Digest, +} diff --git a/consensus/src/tasks/broadcaster.rs b/consensus/src/tasks/broadcaster.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/broadcaster.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/tasks/downloader.rs b/consensus/src/tasks/downloader.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/downloader.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/tasks/mod.rs b/consensus/src/tasks/mod.rs new file mode 100644 index 000000000..0926bcb19 --- /dev/null +++ b/consensus/src/tasks/mod.rs @@ -0,0 +1,4 @@ +mod broadcaster; +mod downloader; +mod syncer; +mod uploader; diff --git a/consensus/src/tasks/syncer.rs b/consensus/src/tasks/syncer.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/syncer.rs @@ -0,0 +1 @@ + diff --git a/consensus/src/tasks/uploader.rs 
b/consensus/src/tasks/uploader.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/consensus/src/tasks/uploader.rs @@ -0,0 +1 @@ + diff --git a/network.Dockerfile b/network.Dockerfile new file mode 100644 index 000000000..df5d53bc9 --- /dev/null +++ b/network.Dockerfile @@ -0,0 +1,7 @@ +FROM rust:1.76-buster as builder +COPY . . +RUN cargo build --release --example network-node + +FROM debian:buster-slim +RUN mkdir /app +COPY --from=builder /target/release/examples/network-node /app/network-node \ No newline at end of file diff --git a/network/Cargo.toml b/network/Cargo.toml index 112d558b2..a9f25cfc4 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -5,18 +5,26 @@ edition = "2021" description = "A peer-to-peer networking library." include = ["src/**/*.rs", "src/**/*.tl"] +[[example]] +name = "network-node" +path = "examples/network_node.rs" + [dependencies] # crates.io deps ahash = "0.8" anyhow = "1.0" -bytes = "1.0" +base64 = "0.21" +bytes = { version = "1.0", features = ["serde"] } +castaway = "0.2" dashmap = "5.4" ed25519 = { version = "2.0", features = ["alloc", "pkcs8"] } +everscale-crypto = { version = "0.2", features = ["tl-proto"] } futures-util = { version = "0.3", features = ["sink"] } hex = "0.4" +moka = { version = "0.12", features = ["sync"] } pin-project-lite = "0.2" pkcs8 = "0.10" -quinn = { version = "0.10", features = ["runtime-tokio", "tls-rustls"] } +quinn = { version = "0.10", default-features = false, features = ["runtime-tokio", "tls-rustls"] } rand = "0.8" rcgen = "0.11" ring = "0.16" @@ -35,7 +43,10 @@ x509-parser = "0.15" tycho-util = { path = "../util", version = "=0.0.1" } [dev-dependencies] +argh = "0.1" +serde_json = "1.0" tokio = { version = "1", features = ["rt-multi-thread"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-test = "0.2" [lints] diff --git a/network/examples/network_node.rs b/network/examples/network_node.rs new file mode 100644 index 000000000..683aa7df1 --- /dev/null +++ b/network/examples/network_node.rs @@ -0,0 +1,247 @@ +//! Run tests with this env: +//! ```text +//! RUST_LOG=info,tycho_network=trace +//! ``` + +use std::io::IsTerminal; +use std::net::SocketAddr; +use std::sync::Arc; + +use anyhow::Result; +use argh::FromArgs; +use everscale_crypto::ed25519; +use serde::{Deserialize, Serialize}; +use tycho_network::{ + Address, DhtClient, DhtConfig, DhtService, Network, NetworkConfig, PeerId, PeerInfo, Router, +}; +use tycho_util::time::now_sec; + +#[tokio::main] +async fn main() -> Result<()> { + let app: App = argh::from_env(); + app.run().await +} + +/// Tycho network node. 
+#[derive(FromArgs)] +struct App { + #[argh(subcommand)] + cmd: Cmd, +} + +impl App { + async fn run(self) -> Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::builder() + .with_default_directive(tracing::Level::INFO.into()) + .from_env_lossy(), + ) + .with_ansi(std::io::stdout().is_terminal()) + .init(); + + match self.cmd { + Cmd::Run(cmd) => cmd.run().await, + Cmd::GenKey(cmd) => cmd.run(), + Cmd::GenDht(cmd) => cmd.run(), + } + } +} + +#[derive(FromArgs)] +#[argh(subcommand)] +enum Cmd { + Run(CmdRun), + GenKey(CmdGenKey), + GenDht(CmdGenDht), +} + +/// run a node +#[derive(FromArgs)] +#[argh(subcommand, name = "run")] +struct CmdRun { + /// local node address + #[argh(positional)] + addr: SocketAddr, + + /// node secret key + #[argh(option)] + key: String, + + /// path to the node config + #[argh(option)] + config: Option, + + /// path to the global config + #[argh(option)] + global_config: String, +} + +impl CmdRun { + async fn run(self) -> Result<()> { + let node_config = self + .config + .map(NodeConfig::from_file) + .transpose()? + .unwrap_or_default(); + let global_config = GlobalConfig::from_file(self.global_config)?; + + let node = Node::new(parse_key(&self.key)?, self.addr.into(), node_config)?; + + let mut initial_peer_count = 0usize; + for peer in global_config.bootstrap_peers { + let is_new = node.dht.add_peer(Arc::new(peer))?; + initial_peer_count += is_new as usize; + } + + tracing::info!( + local_id = %node.network.peer_id(), + addr = %self.addr, + initial_peer_count, + "node started" + ); + + futures_util::future::pending().await + } +} + +/// generate a key +#[derive(FromArgs)] +#[argh(subcommand, name = "genkey")] +struct CmdGenKey {} + +impl CmdGenKey { + fn run(self) -> Result<()> { + let secret_key = ed25519::SecretKey::generate(&mut rand::thread_rng()); + let public_key = ed25519::PublicKey::from(&secret_key); + let peer_id = PeerId::from(public_key); + + let data = serde_json::json!({ + "key": hex::encode(secret_key.as_bytes()), + "peer_id": peer_id.to_string(), + }); + let output = if std::io::stdin().is_terminal() { + serde_json::to_string_pretty(&data) + } else { + serde_json::to_string(&data) + }?; + println!("{output}"); + Ok(()) + } +} + +/// generate a dht node info +#[derive(FromArgs)] +#[argh(subcommand, name = "gendht")] +struct CmdGenDht { + /// local node address + #[argh(positional)] + addr: SocketAddr, + + /// node secret key + #[argh(option)] + key: String, + + /// time to live in seconds (default: unlimited) + #[argh(option)] + ttl: Option, +} + +impl CmdGenDht { + fn run(self) -> Result<()> { + let entry = Node::make_peer_info(parse_key(&self.key)?, self.addr.into(), self.ttl); + let output = if std::io::stdin().is_terminal() { + serde_json::to_string_pretty(&entry) + } else { + serde_json::to_string(&entry) + }?; + println!("{output}"); + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +struct GlobalConfig { + bootstrap_peers: Vec, +} + +impl GlobalConfig { + fn from_file(path: impl AsRef) -> Result { + let config: Self = { + let data = std::fs::read_to_string(path.as_ref())?; + serde_json::from_str(&data)? 
+ }; + + let now = now_sec(); + for peer in &config.bootstrap_peers { + anyhow::ensure!(peer.is_valid(now), "invalid peer info for {}", peer.id); + } + + Ok(config) + } +} + +#[derive(Default, Serialize, Deserialize)] +#[serde(default)] +struct NodeConfig { + network: NetworkConfig, + dht: DhtConfig, +} + +impl NodeConfig { + fn from_file(path: impl AsRef) -> Result { + let data = std::fs::read_to_string(path.as_ref())?; + let config = serde_json::from_str(&data)?; + Ok(config) + } +} + +struct Node { + network: Network, + dht: DhtClient, +} + +impl Node { + fn new(key: ed25519::SecretKey, address: Address, config: NodeConfig) -> Result { + let keypair = everscale_crypto::ed25519::KeyPair::from(&key); + + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()) + .with_config(config.dht) + .build(); + + let router = Router::builder().route(dht).build(); + + let network = Network::builder() + .with_config(config.network) + .with_private_key(key.to_bytes()) + .with_service_name("test-service") + .build(address, router)?; + + let dht = dht_client.build(network.clone()); + + Ok(Self { network, dht }) + } + + fn make_peer_info(key: ed25519::SecretKey, address: Address, ttl: Option) -> PeerInfo { + let keypair = ed25519::KeyPair::from(&key); + let peer_id = PeerId::from(keypair.public_key); + + let now = now_sec(); + let mut node_info = PeerInfo { + id: peer_id, + address_list: vec![address].into_boxed_slice(), + created_at: now, + expires_at: ttl.unwrap_or(u32::MAX), + signature: Box::new([0; 64]), + }; + *node_info.signature = keypair.sign(&node_info); + node_info + } +} + +fn parse_key(key: &str) -> Result { + match hex::decode(key)?.try_into() { + Ok(bytes) => Ok(ed25519::SecretKey::from_bytes(bytes)), + Err(_) => anyhow::bail!("invalid secret key"), + } +} diff --git a/network/src/dht/config.rs b/network/src/dht/config.rs new file mode 100644 index 000000000..0552bb5ee --- /dev/null +++ b/network/src/dht/config.rs @@ -0,0 +1,90 @@ +use std::time::Duration; + +use serde::{Deserialize, Serialize}; +use tycho_util::serde_helpers; + +// TODO: add max storage item size +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct DhtConfig { + /// DHT K parameter. + /// + /// Default: 6. + pub max_k: usize, + + /// Maximum time to live for peer info. + /// + /// Default: 1 hour. + #[serde(with = "serde_helpers::humantime")] + pub max_peer_info_ttl: Duration, + + /// Maximum time to live for stored values. + /// + /// Default: 1 hour. + #[serde(with = "serde_helpers::humantime")] + pub max_stored_value_ttl: Duration, + + /// Maximum storage capacity (number of entries). + /// + /// Default: 10000. + pub max_storage_capacity: u64, + + /// Time until a stored item is considered idle and can be removed. + /// + /// Default: unlimited. + #[serde(with = "serde_helpers::humantime")] + pub storage_item_time_to_idle: Option, + + /// A period of refreshing the local peer info. + /// + /// Default: 1 minute. + #[serde(with = "serde_helpers::humantime")] + pub local_info_refresh_period: Duration, + + /// A period of storing the local peer info into the DHT. + /// + /// Default: 10 minutes. + #[serde(with = "serde_helpers::humantime")] + pub local_info_announce_period: Duration, + + /// A maximum value of a random jitter for the peer announce period. + /// + /// Default: 1 minute. + #[serde(with = "serde_helpers::humantime")] + pub max_local_info_announce_period_jitter: Duration, + + /// A period of updating and populating the routing table. + /// + /// Default: 10 minutes. 
+    #[serde(with = "serde_helpers::humantime")]
+    pub routing_table_refresh_period: Duration,
+
+    /// A maximum value of a random jitter for the routing table refresh period.
+    ///
+    /// Default: 1 minute.
+    #[serde(with = "serde_helpers::humantime")]
+    pub max_routing_table_refresh_period_jitter: Duration,
+
+    /// The capacity of the announced peers channel.
+    ///
+    /// Default: 10.
+    pub announced_peers_channel_capacity: usize,
+}
+
+impl Default for DhtConfig {
+    fn default() -> Self {
+        Self {
+            max_k: 6,
+            max_peer_info_ttl: Duration::from_secs(3600),
+            max_stored_value_ttl: Duration::from_secs(3600),
+            max_storage_capacity: 10000,
+            storage_item_time_to_idle: None,
+            local_info_refresh_period: Duration::from_secs(60),
+            local_info_announce_period: Duration::from_secs(600),
+            max_local_info_announce_period_jitter: Duration::from_secs(60),
+            routing_table_refresh_period: Duration::from_secs(600),
+            max_routing_table_refresh_period_jitter: Duration::from_secs(60),
+            announced_peers_channel_capacity: 10,
+        }
+    }
+}
diff --git a/network/src/dht/mod.rs b/network/src/dht/mod.rs
new file mode 100644
index 000000000..bcafb37c3
--- /dev/null
+++ b/network/src/dht/mod.rs
@@ -0,0 +1,808 @@
+use std::collections::hash_map;
+use std::sync::{Arc, Mutex};
+
+use anyhow::Result;
+use bytes::{Buf, Bytes};
+use futures_util::stream::FuturesUnordered;
+use futures_util::StreamExt;
+use rand::RngCore;
+use tl_proto::TlRead;
+use tokio::sync::{broadcast, Semaphore};
+use tokio::task::JoinHandle;
+use tycho_util::realloc_box_enum;
+use tycho_util::time::{now_sec, shifted_interval};
+
+use self::query::{Query, StoreValue};
+use self::routing::{RoutingTable, RoutingTableSource};
+use self::storage::Storage;
+use crate::network::{Network, WeakNetwork};
+use crate::proto::dht::{
+    rpc, NodeInfoResponse, NodeResponse, PeerValue, PeerValueKey, PeerValueKeyName,
+    PeerValueKeyRef, PeerValueRef, Value, ValueRef, ValueResponseRaw,
+};
+use crate::types::{
+    Address, PeerAffinity, PeerId, PeerInfo, Request, Response, Service, ServiceRequest,
+};
+use crate::util::{NetworkExt, Routable};
+
+pub use self::config::DhtConfig;
+pub use self::storage::{OverlayValueMerger, StorageError};
+
+mod config;
+mod query;
+mod routing;
+mod storage;
+
+pub struct DhtClientBuilder {
+    inner: Arc<DhtInner>,
+    disable_background_tasks: bool,
+}
+
+impl DhtClientBuilder {
+    pub fn disable_background_tasks(mut self) -> Self {
+        self.disable_background_tasks = true;
+        self
+    }
+
+    pub fn build(self, network: Network) -> DhtClient {
+        if !self.disable_background_tasks {
+            self.inner
+                .start_background_tasks(Network::downgrade(&network));
+        }
+
+        DhtClient {
+            inner: self.inner,
+            network,
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct DhtClient {
+    inner: Arc<DhtInner>,
+    network: Network,
+}
+
+impl DhtClient {
+    pub fn network(&self) -> &Network {
+        &self.network
+    }
+
+    pub fn add_peer(&self, peer: Arc<PeerInfo>) -> Result<bool> {
+        self.inner
+            .add_peer_info(&self.network, peer, RoutingTableSource::Trusted)
+    }
+
+    pub async fn get_node_info(&self, peer_id: &PeerId) -> Result<PeerInfo> {
+        let res = self
+            .network
+            .query(peer_id, Request::from_tl(rpc::GetNodeInfo))
+            .await?;
+        let NodeInfoResponse { info } = res.parse_tl()?;
+        Ok(info)
+    }
+
+    pub fn entry(&self, name: PeerValueKeyName) -> DhtQueryBuilder<'_> {
+        DhtQueryBuilder {
+            inner: &self.inner,
+            network: &self.network,
+            name,
+            idx: 0,
+        }
+    }
+}
+
+#[derive(Clone, Copy)]
+pub struct DhtQueryBuilder<'a> {
+    inner: &'a DhtInner,
+    network: &'a Network,
+    name: PeerValueKeyName,
+    idx: u32,
+}
+
+impl<'a>
DhtQueryBuilder<'a> { + #[inline] + pub fn with_idx(&mut self, idx: u32) -> &mut Self { + self.idx = idx; + self + } + + pub async fn find_value(&self, peer_id: &PeerId) -> Result + where + for<'tl> T: tl_proto::TlRead<'tl>, + { + let key_hash = tl_proto::hash(PeerValueKeyRef { + name: self.name, + peer_id, + }); + + match self.inner.find_value(self.network, &key_hash).await { + Some(value) => match value.as_ref() { + Value::Peer(value) => { + tl_proto::deserialize(&value.data).map_err(FindValueError::InvalidData) + } + Value::Overlay(_) => Err(FindValueError::InvalidData( + tl_proto::TlError::UnknownConstructor, + )), + }, + None => Err(FindValueError::NotFound), + } + } + + pub async fn find_peer_value_raw( + &self, + peer_id: &PeerId, + ) -> Result, FindValueError> { + let key_hash = tl_proto::hash(PeerValueKeyRef { + name: self.name, + peer_id, + }); + + match self.inner.find_value(self.network, &key_hash).await { + Some(value) => { + realloc_box_enum!(value, { + Value::Peer(value) => Box::new(value) => Ok(value), + Value::Overlay(_) => Err(FindValueError::InvalidData( + tl_proto::TlError::UnknownConstructor, + )), + }) + } + None => Err(FindValueError::NotFound), + } + } + + pub fn with_data(&self, data: T) -> DhtQueryWithDataBuilder<'a> + where + T: tl_proto::TlWrite, + { + DhtQueryWithDataBuilder { + inner: *self, + data: tl_proto::serialize(&data), + at: None, + ttl: self.inner.config.max_stored_value_ttl.as_secs() as _, + with_peer_info: false, + } + } +} + +pub struct DhtQueryWithDataBuilder<'a> { + inner: DhtQueryBuilder<'a>, + data: Vec, + at: Option, + ttl: u32, + with_peer_info: bool, +} + +impl DhtQueryWithDataBuilder<'_> { + pub fn with_time(&mut self, at: u32) -> &mut Self { + self.at = Some(at); + self + } + + pub fn with_ttl(&mut self, ttl: u32) -> &mut Self { + self.ttl = ttl; + self + } + + pub fn with_peer_info(&mut self, with_peer_info: bool) -> &mut Self { + self.with_peer_info = with_peer_info; + self + } + + pub async fn store(&self) -> Result<()> { + let dht = self.inner.inner; + let network = self.inner.network; + + let mut value = PeerValueRef { + key: PeerValueKeyRef { + name: self.inner.name, + peer_id: &dht.local_id, + }, + data: &self.data, + expires_at: self.at.unwrap_or_else(now_sec) + self.ttl, + signature: &[0; 64], + }; + let signature = network.sign_tl(&value); + value.signature = &signature; + + dht.store_value(network, ValueRef::Peer(value), self.with_peer_info) + .await + } + + pub fn into_signed_value(self) -> PeerValue { + let dht = self.inner.inner; + let network = self.inner.network; + + let mut value = PeerValue { + key: PeerValueKey { + name: self.name, + peer_id: dht.local_id, + }, + data: self.data.into_boxed_slice(), + expires_at: self.at.unwrap_or_else(now_sec) + self.ttl, + signature: Box::new([0; 64]), + }; + *value.signature = network.sign_tl(&value); + value + } +} + +impl<'a> std::ops::Deref for DhtQueryWithDataBuilder<'a> { + type Target = DhtQueryBuilder<'a>; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl<'a> std::ops::DerefMut for DhtQueryWithDataBuilder<'a> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +pub struct DhtServiceBuilder { + local_id: PeerId, + config: Option, + overlay_merger: Option>, +} + +impl DhtServiceBuilder { + pub fn with_config(mut self, config: DhtConfig) -> Self { + self.config = Some(config); + self + } + + pub fn with_overlay_value_merger(mut self, merger: Arc) -> Self { + self.overlay_merger = Some(merger); + self + } + + pub fn 
build(self) -> (DhtClientBuilder, DhtService) { + let config = self.config.unwrap_or_default(); + + let storage = { + let mut builder = Storage::builder() + .with_max_capacity(config.max_storage_capacity) + .with_max_ttl(config.max_stored_value_ttl); + + if let Some(time_to_idle) = config.storage_item_time_to_idle { + builder = builder.with_max_idle(time_to_idle); + } + + if let Some(ref merger) = self.overlay_merger { + builder = builder.with_overlay_value_merger(merger); + } + + builder.build() + }; + + let (announced_peers, _) = broadcast::channel(config.announced_peers_channel_capacity); + + let inner = Arc::new(DhtInner { + local_id: self.local_id, + routing_table: Mutex::new(RoutingTable::new(self.local_id)), + storage, + local_peer_info: Mutex::new(None), + config, + announced_peers, + }); + + let client_builder = DhtClientBuilder { + inner: inner.clone(), + disable_background_tasks: false, + }; + + (client_builder, DhtService(inner)) + } +} + +#[derive(Clone)] +pub struct DhtService(Arc); + +impl DhtService { + pub fn builder(local_id: PeerId) -> DhtServiceBuilder { + DhtServiceBuilder { + local_id, + config: None, + overlay_merger: None, + } + } +} + +impl Service for DhtService { + type QueryResponse = Response; + type OnQueryFuture = futures_util::future::Ready>; + type OnMessageFuture = futures_util::future::Ready<()>; + type OnDatagramFuture = futures_util::future::Ready<()>; + + #[tracing::instrument( + level = "debug", + name = "on_dht_query", + skip_all, + fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) + )] + fn on_query(&self, req: ServiceRequest) -> Self::OnQueryFuture { + let (constructor, body) = match self.0.try_handle_prefix(&req) { + Ok(rest) => rest, + Err(e) => { + tracing::debug!("failed to deserialize query: {e:?}"); + return futures_util::future::ready(None); + } + }; + + let response = crate::match_tl_request!(body, tag = constructor, { + rpc::FindNode as ref r => { + tracing::debug!(key = %PeerId::wrap(&r.key), k = r.k, "find_node"); + + let res = self.0.handle_find_node(r); + Some(tl_proto::serialize(res)) + }, + rpc::FindValue as ref r => { + tracing::debug!(key = %PeerId::wrap(&r.key), k = r.k, "find_value"); + + let res = self.0.handle_find_value(r); + Some(tl_proto::serialize(res)) + }, + rpc::GetNodeInfo as _ => { + tracing::debug!("get_node_info"); + + self.0.handle_get_node_info().map(tl_proto::serialize) + }, + }, e => { + tracing::debug!("failed to deserialize query: {e:?}"); + None + }); + + futures_util::future::ready(response.map(|body| Response { + version: Default::default(), + body: Bytes::from(body), + })) + } + + #[tracing::instrument( + level = "debug", + name = "on_dht_message", + skip_all, + fields(peer_id = %req.metadata.peer_id, addr = %req.metadata.remote_address) + )] + fn on_message(&self, req: ServiceRequest) -> Self::OnMessageFuture { + let (constructor, body) = match self.0.try_handle_prefix(&req) { + Ok(rest) => rest, + Err(e) => { + tracing::debug!("failed to deserialize message: {e:?}"); + return futures_util::future::ready(()); + } + }; + + crate::match_tl_request!(body, tag = constructor, { + rpc::StoreRef<'_> as ref r => { + tracing::debug!("store"); + + if let Err(e) = self.0.handle_store(r) { + tracing::debug!("failed to store value: {e:?}"); + } + } + }, e => { + tracing::debug!("failed to deserialize message: {e:?}"); + }); + + futures_util::future::ready(()) + } + + #[inline] + fn on_datagram(&self, _req: ServiceRequest) -> Self::OnDatagramFuture { + futures_util::future::ready(()) + } +} + 
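The `Service` impl above is the server half of the DHT; the client half is the `DhtClient` / `DhtQueryBuilder` pair defined earlier in this file. A minimal usage sketch, not part of this diff; whether `proto::dht::PeerValueKeyName` is publicly re-exported from `tycho_network` is an assumption:

```rust
use anyhow::Result;
use tycho_network::{DhtClient, PeerId};
// Assumed re-export path; inside this crate the type is `crate::proto::dht::PeerValueKeyName`.
use tycho_network::proto::dht::PeerValueKeyName;

/// Hypothetical helper: query a remote peer through an already-built `DhtClient`.
async fn query_peer(dht: &DhtClient, peer_id: &PeerId) -> Result<()> {
    // Ask the remote node for its signed peer info (served by `rpc::GetNodeInfo`).
    let info = dht.get_node_info(peer_id).await?;
    tracing::info!(peer_id = %info.id, "got node info");

    // Fetch the raw signed value that the peer announced under the `NodeInfo` key.
    let value = dht
        .entry(PeerValueKeyName::NodeInfo)
        .find_peer_value_raw(peer_id)
        .await?;
    tracing::info!(len = value.data.len(), "found stored value");
    Ok(())
}
```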
+impl Routable for DhtService { + fn query_ids(&self) -> impl IntoIterator { + [ + rpc::WithPeerInfo::TL_ID, + rpc::FindNode::TL_ID, + rpc::FindValue::TL_ID, + rpc::GetNodeInfo::TL_ID, + ] + } + + fn message_ids(&self) -> impl IntoIterator { + [rpc::WithPeerInfo::TL_ID, rpc::Store::TL_ID] + } +} + +struct DhtInner { + local_id: PeerId, + routing_table: Mutex, + storage: Storage, + local_peer_info: Mutex>, + config: DhtConfig, + announced_peers: broadcast::Sender>, +} + +impl DhtInner { + fn start_background_tasks(self: &Arc, network: WeakNetwork) { + enum Action { + RefreshLocalPeerInfo, + AnnounceLocalPeerInfo, + RefreshRoutingTable, + AddPeer(Arc), + } + + let mut refresh_peer_info_interval = + tokio::time::interval(self.config.local_info_refresh_period); + let mut announce_peer_info_interval = shifted_interval( + self.config.local_info_announce_period, + self.config.max_local_info_announce_period_jitter, + ); + let mut refresh_routing_table_interval = shifted_interval( + self.config.routing_table_refresh_period, + self.config.max_routing_table_refresh_period_jitter, + ); + + let mut announced_peers = self.announced_peers.subscribe(); + + let this = Arc::downgrade(self); + tokio::spawn(async move { + tracing::debug!("background DHT loop started"); + + let mut prev_refresh_routing_table_fut = None::>; + loop { + let action = tokio::select! { + _ = refresh_peer_info_interval.tick() => Action::RefreshLocalPeerInfo, + _ = announce_peer_info_interval.tick() => Action::AnnounceLocalPeerInfo, + _ = refresh_routing_table_interval.tick() => Action::RefreshRoutingTable, + peer = announced_peers.recv() => match peer { + Ok(peer) => Action::AddPeer(peer), + Err(_) => continue, + } + }; + + let (Some(this), Some(network)) = (this.upgrade(), network.upgrade()) else { + break; + }; + + match action { + Action::RefreshLocalPeerInfo => { + this.refresh_local_peer_info(&network); + } + Action::AnnounceLocalPeerInfo => { + // Always refresh peer info before announcing + this.refresh_local_peer_info(&network); + refresh_peer_info_interval.reset(); + + if let Err(e) = this.announce_local_peer_info(&network).await { + tracing::error!("failed to announce local DHT node info: {e:?}"); + } + } + Action::RefreshRoutingTable => { + if let Some(fut) = prev_refresh_routing_table_fut.take() { + if let Err(e) = fut.await { + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } + } + } + + prev_refresh_routing_table_fut = Some(tokio::spawn(async move { + this.refresh_routing_table(&network).await; + })); + } + Action::AddPeer(peer_info) => { + tracing::info!(peer_id = %peer_info.id, "received peer info"); + if let Err(e) = + this.add_peer_info(&network, peer_info, RoutingTableSource::Untrusted) + { + tracing::error!("failed to add peer to the routing table: {e:?}"); + } + } + } + } + tracing::debug!("background DHT loop finished"); + }); + } + + fn refresh_local_peer_info(&self, network: &Network) { + let peer_info = self.make_local_peer_info(network, now_sec()); + *self.local_peer_info.lock().unwrap() = Some(peer_info); + } + + #[tracing::instrument(level = "debug", skip_all, fields(local_id = % self.local_id))] + async fn announce_local_peer_info(&self, network: &Network) -> Result<()> { + let data = tl_proto::serialize(&[network.local_addr().into()] as &[Address]); + + let mut value = self.make_unsigned_peer_value( + PeerValueKeyName::NodeInfo, + &data, + now_sec() + self.config.max_peer_info_ttl.as_secs() as u32, + ); + let signature = network.sign_tl(&value); + value.signature = &signature; + + 
self.store_value(network, ValueRef::Peer(value), true).await + } + + #[tracing::instrument(level = "debug", skip_all, fields(local_id = % self.local_id))] + async fn refresh_routing_table(&self, network: &Network) { + const PARALLEL_QUERIES: usize = 3; + const MAX_DISTANCE: usize = 15; + const QUERY_DEPTH: usize = 3; + + // Prepare futures for each bucket + let semaphore = Semaphore::new(PARALLEL_QUERIES); + let mut futures = FuturesUnordered::new(); + { + let rng = &mut rand::thread_rng(); + + let mut routing_table = self.routing_table.lock().unwrap(); + + // Filter out expired nodes + let now = now_sec(); + for (_, bucket) in routing_table.buckets.range_mut(..=MAX_DISTANCE) { + bucket.retain_nodes(|node| !node.is_expired(now, &self.config.max_peer_info_ttl)); + } + + // Iterate over the first buckets up until some distance (`MAX_DISTANCE`) + // or up to the last non-empty bucket (?). + for (&distance, bucket) in routing_table.buckets.range(..=MAX_DISTANCE).rev() { + // TODO: Should we skip empty buckets? + if bucket.is_empty() { + continue; + } + + // Query the K closest nodes for a random ID at the specified distance from the local ID. + let random_id = random_key_at_distance(&routing_table.local_id, distance, rng); + let query = Query::new( + network.clone(), + &routing_table, + random_id.as_bytes(), + self.config.max_k, + ); + + futures.push(async { + let _permit = semaphore.acquire().await.unwrap(); + query.find_peers(Some(QUERY_DEPTH)).await + }); + } + } + + // Receive initial set of peers + let Some(mut peers) = futures.next().await else { + tracing::debug!("no new peers found"); + return; + }; + + // Merge new peers into the result set + while let Some(new_peers) = futures.next().await { + for (peer_id, peer) in new_peers { + match peers.entry(peer_id) { + // Just insert the peer if it's new + hash_map::Entry::Vacant(entry) => { + entry.insert(peer); + } + // Replace the peer if it's newer (by creation time) + hash_map::Entry::Occupied(mut entry) => { + if entry.get().created_at < peer.created_at { + entry.insert(peer); + } + } + } + } + } + + let mut routing_table = self.routing_table.lock().unwrap(); + let mut count = 0usize; + for peer in peers.into_values() { + if peer.id == self.local_id { + continue; + } + + let is_new = routing_table.add( + peer.clone(), + self.config.max_k, + &self.config.max_peer_info_ttl, + RoutingTableSource::Trusted, + ); + if is_new { + network.known_peers().insert(peer, PeerAffinity::Allowed); + count += 1; + } + } + + tracing::debug!(count, "found new peers"); + } + + async fn find_value(&self, network: &Network, key_hash: &[u8; 32]) -> Option> { + // TODO: deduplicate shared futures + let query = Query::new( + network.clone(), + &self.routing_table.lock().unwrap(), + key_hash, + self.config.max_k, + ); + + // NOTE: expression is intentionally split to drop the routing table guard + query.find_value().await + } + + async fn store_value( + &self, + network: &Network, + value: ValueRef<'_>, + with_peer_info: bool, + ) -> Result<()> { + self.storage.insert(&value)?; + + let local_peer_info = if with_peer_info { + let mut node_info = self.local_peer_info.lock().unwrap(); + Some( + node_info + .get_or_insert_with(|| self.make_local_peer_info(network, now_sec())) + .clone(), + ) + } else { + None + }; + + let query = StoreValue::new( + network.clone(), + &self.routing_table.lock().unwrap(), + value, + self.config.max_k, + local_peer_info.as_ref(), + ); + + // NOTE: expression is intentionally split to drop the routing table guard + query.run().await; + 
Ok(()) + } + + fn add_peer_info( + &self, + network: &Network, + peer_info: Arc, + source: RoutingTableSource, + ) -> Result { + anyhow::ensure!(peer_info.is_valid(now_sec()), "invalid peer info"); + + if peer_info.id == self.local_id { + return Ok(false); + } + + let mut routing_table = self.routing_table.lock().unwrap(); + let is_new = routing_table.add( + peer_info.clone(), + self.config.max_k, + &self.config.max_peer_info_ttl, + source, + ); + if is_new { + network + .known_peers() + .insert(peer_info, PeerAffinity::Allowed); + } + Ok(is_new) + } + + fn make_unsigned_peer_value<'a>( + &'a self, + name: PeerValueKeyName, + data: &'a [u8], + expires_at: u32, + ) -> PeerValueRef<'a> { + PeerValueRef { + key: PeerValueKeyRef { + name, + peer_id: &self.local_id, + }, + data, + expires_at, + signature: &[0; 64], + } + } + + fn make_local_peer_info(&self, network: &Network, now: u32) -> PeerInfo { + let mut peer_info = PeerInfo { + id: self.local_id, + address_list: vec![network.local_addr().into()].into_boxed_slice(), + created_at: now, + expires_at: now + self.config.max_peer_info_ttl.as_secs() as u32, + signature: Box::new([0; 64]), + }; + *peer_info.signature = network.sign_tl(&peer_info); + peer_info + } + + fn try_handle_prefix<'a>(&self, req: &'a ServiceRequest) -> Result<(u32, &'a [u8])> { + let mut body = req.as_ref(); + anyhow::ensure!(body.len() >= 4, tl_proto::TlError::UnexpectedEof); + + // NOTE: read constructor without advancing the body + let mut constructor = std::convert::identity(body).get_u32_le(); + let mut offset = 0; + + if constructor == rpc::WithPeerInfo::TL_ID { + let peer_info = rpc::WithPeerInfo::read_from(body, &mut offset)?.peer_info; + anyhow::ensure!( + peer_info.id == req.metadata.peer_id, + "suggested peer ID does not belong to the sender" + ); + self.announced_peers.send(peer_info).ok(); + + body = &body[offset..]; + anyhow::ensure!(body.len() >= 4, tl_proto::TlError::UnexpectedEof); + + // NOTE: read constructor without advancing the body + constructor = std::convert::identity(body).get_u32_le(); + } + + Ok((constructor, body)) + } + + fn handle_store(&self, req: &rpc::StoreRef<'_>) -> Result { + self.storage.insert(&req.value) + } + + fn handle_find_node(&self, req: &rpc::FindNode) -> NodeResponse { + let nodes = self + .routing_table + .lock() + .unwrap() + .closest(&req.key, (req.k as usize).min(self.config.max_k)); + + NodeResponse { nodes } + } + + fn handle_find_value(&self, req: &rpc::FindValue) -> ValueResponseRaw { + if let Some(value) = self.storage.get(&req.key) { + ValueResponseRaw::Found(value) + } else { + let nodes = self + .routing_table + .lock() + .unwrap() + .closest(&req.key, (req.k as usize).min(self.config.max_k)); + + ValueResponseRaw::NotFound(nodes) + } + } + + fn handle_get_node_info(&self) -> Option { + self.local_peer_info + .lock() + .unwrap() + .clone() + .map(|info| NodeInfoResponse { info }) + } +} + +fn random_key_at_distance(from: &PeerId, distance: usize, rng: &mut impl RngCore) -> PeerId { + let mut result = *from; + rng.fill_bytes(&mut result.0[distance..]); + result +} + +pub fn xor_distance(left: &PeerId, right: &PeerId) -> usize { + for (i, (left, right)) in std::iter::zip(left.0.chunks(8), right.0.chunks(8)).enumerate() { + let left = u64::from_be_bytes(left.try_into().unwrap()); + let right = u64::from_be_bytes(right.try_into().unwrap()); + let diff = left ^ right; + if diff != 0 { + return MAX_XOR_DISTANCE - (i * 64 + diff.leading_zeros() as usize); + } + } + + 0 +} + +const MAX_XOR_DISTANCE: usize = 256; + 
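`xor_distance` above is 256 minus the length of the shared big-endian bit prefix of the two ids: ids differing only in the lowest bit land in bucket 1, while ids whose highest bit differs land in bucket 256. A small worked example, sketch only and not part of the diff:

```rust
// Sketch: ids that differ only in the lowest bit of the last byte share a
// 255-bit prefix, so their distance is 256 - 255 = 1; flipping the most
// significant bit of the first byte yields the maximum distance.
#[cfg(test)]
mod xor_distance_sketch {
    use super::*;

    #[test]
    fn prefix_length_maps_to_distance() {
        let a = [0u8; 32];

        let mut b = [0u8; 32];
        b[31] = 0b0000_0001;
        assert_eq!(xor_distance(PeerId::wrap(&a), PeerId::wrap(&b)), 1);

        b = [0u8; 32];
        b[0] = 0b1000_0000;
        assert_eq!(xor_distance(PeerId::wrap(&a), PeerId::wrap(&b)), MAX_XOR_DISTANCE);
    }
}
```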
+#[derive(Debug, thiserror::Error)] +pub enum FindValueError { + #[error("failed to deserialize value: {0}")] + InvalidData(#[from] tl_proto::TlError), + #[error("value not found")] + NotFound, +} diff --git a/network/src/dht/query.rs b/network/src/dht/query.rs new file mode 100644 index 000000000..5355ba6d1 --- /dev/null +++ b/network/src/dht/query.rs @@ -0,0 +1,381 @@ +use std::collections::hash_map; +use std::sync::Arc; +use std::time::Duration; + +use ahash::{HashMapExt, HashSetExt}; +use anyhow::Result; +use bytes::Bytes; +use futures_util::stream::FuturesUnordered; +use futures_util::{Future, StreamExt}; +use tokio::sync::Semaphore; +use tycho_util::time::now_sec; +use tycho_util::{FastHashMap, FastHashSet}; + +use crate::dht::routing::{RoutingTable, RoutingTableSource}; +use crate::network::Network; +use crate::proto::dht::{rpc, NodeResponse, Value, ValueRef, ValueResponse}; +use crate::types::{PeerId, PeerInfo, Request}; +use crate::util::NetworkExt; + +pub struct Query { + network: Network, + candidates: RoutingTable, + max_k: usize, +} + +impl Query { + pub fn new( + network: Network, + routing_table: &RoutingTable, + target_id: &[u8; 32], + max_k: usize, + ) -> Self { + let mut candidates = RoutingTable::new(PeerId(*target_id)); + routing_table.visit_closest(target_id, max_k, |node| { + candidates.add( + node.clone(), + max_k, + &Duration::MAX, + RoutingTableSource::Trusted, + ); + }); + + Self { + network, + candidates, + max_k, + } + } + + fn local_id(&self) -> &[u8; 32] { + self.candidates.local_id.as_bytes() + } + + #[tracing::instrument(level = "debug", skip_all)] + pub async fn find_value(mut self) -> Option> { + // Prepare shared request + let request_body = Bytes::from(tl_proto::serialize(rpc::FindValue { + key: *self.local_id(), + k: self.max_k as u32, + })); + + // Prepare request to initial candidates + let semaphore = Semaphore::new(MAX_PARALLEL_REQUESTS); + let mut futures = FuturesUnordered::new(); + self.candidates + .visit_closest(self.local_id(), self.max_k, |node| { + futures.push(Self::visit::( + self.network.clone(), + node.clone(), + request_body.clone(), + &semaphore, + )); + }); + + // Process responses and refill futures until the value is found or all peers are traversed + let mut visited = FastHashSet::new(); + while let Some((node, res)) = futures.next().await { + match res { + // Return the value if found + Some(Ok(ValueResponse::Found(value))) => { + let is_valid = value.is_valid(now_sec(), self.local_id()); + tracing::debug!(peer_id = %node.id, is_valid, "found value"); + + if !is_valid { + // Ignore invalid values + continue; + } + + return Some(value); + } + // Refill futures from the nodes response + Some(Ok(ValueResponse::NotFound(nodes))) => { + let node_count = nodes.len(); + let has_new = + self.update_candidates(now_sec(), self.max_k, nodes, &mut visited); + tracing::debug!(peer_id = %node.id, count = node_count, has_new, "received nodes"); + + if !has_new { + // Do nothing if candidates were not changed + continue; + } + + // Add new nodes from the closest range + self.candidates + .visit_closest(self.local_id(), self.max_k, |node| { + if visited.contains(&node.id) { + // Skip already visited nodes + return; + } + futures.push(Self::visit::( + self.network.clone(), + node.clone(), + request_body.clone(), + &semaphore, + )); + }); + } + // Do nothing on error + Some(Err(e)) => { + tracing::warn!(peer_id = %node.id, "failed to query nodes: {e:?}"); + } + // Do nothing on timeout + None => { + tracing::warn!(peer_id = %node.id, "failed to 
query nodes: timeout"); + } + } + } + + // Done + None + } + + #[tracing::instrument(level = "debug", skip_all)] + pub async fn find_peers(mut self, depth: Option) -> FastHashMap> { + // Prepare shared request + let request_body = Bytes::from(tl_proto::serialize(rpc::FindNode { + key: *self.local_id(), + k: self.max_k as u32, + })); + + // Prepare request to initial candidates + let semaphore = Semaphore::new(MAX_PARALLEL_REQUESTS); + let mut futures = FuturesUnordered::new(); + self.candidates + .visit_closest(self.local_id(), self.max_k, |node| { + futures.push(Self::visit::( + self.network.clone(), + node.clone(), + request_body.clone(), + &semaphore, + )); + }); + + // Process responses and refill futures until all peers are traversed + let mut current_depth = 0; + let max_depth = depth.unwrap_or(usize::MAX); + let mut result = FastHashMap::>::new(); + while let Some((node, res)) = futures.next().await { + match res { + // Refill futures from the nodes response + Some(Ok(NodeResponse { nodes })) => { + tracing::debug!(peer_id = %node.id, count = nodes.len(), "received nodes"); + if !self.update_candidates_full(now_sec(), self.max_k, nodes, &mut result) { + // Do nothing if candidates were not changed + continue; + } + + current_depth += 1; + if current_depth >= max_depth { + // Stop on max depth + break; + } + + // Add new nodes from the closest range + self.candidates + .visit_closest(self.local_id(), self.max_k, |node| { + if result.contains_key(&node.id) { + // Skip already visited nodes + return; + } + futures.push(Self::visit::( + self.network.clone(), + node.clone(), + request_body.clone(), + &semaphore, + )); + }); + } + // Do nothing on error + Some(Err(e)) => { + tracing::warn!(peer_id = %node.id, "failed to query nodes: {e:?}"); + } + // Do nothing on timeout + None => { + tracing::warn!(peer_id = %node.id, "failed to query nodes: timeout"); + } + } + } + + // Done + result + } + + fn update_candidates( + &mut self, + now: u32, + max_k: usize, + nodes: Vec>, + visited: &mut FastHashSet, + ) -> bool { + let mut has_new = false; + for node in nodes { + // Skip invalid entries + if !node.is_valid(now) { + continue; + } + + // Insert a new entry + if visited.insert(node.id) { + self.candidates + .add(node, max_k, &Duration::MAX, RoutingTableSource::Trusted); + has_new = true; + } + } + + has_new + } + + fn update_candidates_full( + &mut self, + now: u32, + max_k: usize, + nodes: Vec>, + visited: &mut FastHashMap>, + ) -> bool { + let mut has_new = false; + for node in nodes { + // Skip invalid entries + if !node.is_valid(now) { + continue; + } + + match visited.entry(node.id) { + // Insert a new entry + hash_map::Entry::Vacant(entry) => { + let node = entry.insert(node).clone(); + self.candidates + .add(node, max_k, &Duration::MAX, RoutingTableSource::Trusted); + has_new = true; + } + // Try to replace an old entry + hash_map::Entry::Occupied(mut entry) => { + if entry.get().created_at < node.created_at { + *entry.get_mut() = node; + } + } + } + } + + has_new + } + + async fn visit( + network: Network, + node: Arc, + request_body: Bytes, + semaphore: &Semaphore, + ) -> (Arc, Option>) + where + for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, + { + let Ok(_permit) = semaphore.acquire().await else { + return (node, None); + }; + + let req = network.query( + &node.id, + Request { + version: Default::default(), + body: request_body.clone(), + }, + ); + + let res = match tokio::time::timeout(REQUEST_TIMEOUT, req).await { + Ok(res) => { + Some(res.and_then(|res| 
tl_proto::deserialize::(&res.body).map_err(Into::into))) + } + Err(_) => None, + }; + + (node, res) + } +} + +pub struct StoreValue { + futures: FuturesUnordered, +} + +impl StoreValue<()> { + pub fn new( + network: Network, + routing_table: &RoutingTable, + value: ValueRef<'_>, + max_k: usize, + local_peer_info: Option<&PeerInfo>, + ) -> StoreValue, Option>)> + Send> { + let key_hash = match &value { + ValueRef::Peer(value) => tl_proto::hash(&value.key), + ValueRef::Overlay(value) => tl_proto::hash(&value.key), + }; + + let request_body = Bytes::from(match local_peer_info { + Some(peer_info) => { + tl_proto::serialize((rpc::WithPeerInfoRef { peer_info }, rpc::StoreRef { value })) + } + None => tl_proto::serialize(rpc::StoreRef { value }), + }); + + let semaphore = Arc::new(Semaphore::new(10)); + let futures = futures_util::stream::FuturesUnordered::new(); + routing_table.visit_closest(&key_hash, max_k, |node| { + futures.push(Self::visit( + network.clone(), + node.clone(), + request_body.clone(), + semaphore.clone(), + )); + }); + + StoreValue { futures } + } + + async fn visit( + network: Network, + node: Arc, + request_body: Bytes, + semaphore: Arc, + ) -> (Arc, Option>) { + let Ok(_permit) = semaphore.acquire().await else { + return (node, None); + }; + + let req = network.send( + &node.id, + Request { + version: Default::default(), + body: request_body.clone(), + }, + ); + + let res = match tokio::time::timeout(REQUEST_TIMEOUT, req).await { + Ok(res) => Some(res), + Err(_) => None, + }; + + (node, res) + } +} + +impl, Option>)> + Send> StoreValue { + #[tracing::instrument(level = "debug", skip_all, name = "store_value")] + pub async fn run(mut self) { + while let Some((node, res)) = self.futures.next().await { + match res { + Some(Ok(())) => { + tracing::debug!(peer_id = %node.id, "value stored"); + } + Some(Err(e)) => { + tracing::warn!(peer_id = %node.id, "failed to store value: {e:?}"); + } + // Do nothing on timeout + None => { + tracing::warn!(peer_id = %node.id, "failed to store value: timeout"); + } + } + } + } +} + +const REQUEST_TIMEOUT: Duration = Duration::from_millis(500); +const MAX_PARALLEL_REQUESTS: usize = 10; diff --git a/network/src/dht/routing.rs b/network/src/dht/routing.rs new file mode 100644 index 000000000..3a054011e --- /dev/null +++ b/network/src/dht/routing.rs @@ -0,0 +1,465 @@ +use std::collections::{BTreeMap, VecDeque}; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use tycho_util::time::now_sec; + +use crate::dht::{xor_distance, MAX_XOR_DISTANCE}; +use crate::types::{PeerId, PeerInfo}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum RoutingTableSource { + Untrusted, + Trusted, +} + +pub(crate) struct RoutingTable { + pub local_id: PeerId, + pub buckets: BTreeMap, +} + +impl RoutingTable { + pub fn new(local_id: PeerId) -> Self { + Self { + local_id, + buckets: Default::default(), + } + } + + #[allow(unused)] + pub fn is_empty(&self) -> bool { + self.buckets.values().all(Bucket::is_empty) + } + + #[allow(unused)] + pub fn len(&self) -> usize { + self.buckets.values().map(|bucket| bucket.nodes.len()).sum() + } + + pub fn add( + &mut self, + peer: Arc, + max_k: usize, + node_ttl: &Duration, + source: RoutingTableSource, + ) -> bool { + let distance = xor_distance(&self.local_id, &peer.id); + if distance == 0 { + return false; + } + + self.buckets + .entry(distance) + .or_insert_with(|| Bucket::with_capacity(max_k)) + .insert(peer, max_k, node_ttl, source) + } + + pub fn closest(&self, key: &[u8; 32], count: usize) -> Vec> { + 
+        if count == 0 {
+            return Vec::new();
+        }
+
+        // TODO: fill secure and insecure buckets in parallel
+        let mut result = Vec::with_capacity(count);
+        let distance = xor_distance(&self.local_id, PeerId::wrap(key));
+
+        // Search for closest nodes first
+        for i in (distance..=MAX_XOR_DISTANCE).chain((0..distance).rev()) {
+            let remaining = match count.checked_sub(result.len()) {
+                None | Some(0) => break,
+                Some(n) => n,
+            };
+
+            if let Some(bucket) = self.buckets.get(&i) {
+                for node in bucket.nodes.iter().take(remaining) {
+                    result.push(node.data.clone());
+                }
+            }
+        }
+
+        result
+    }
+
+    pub fn visit_closest<F>(&self, key: &[u8; 32], count: usize, mut f: F)
+    where
+        F: FnMut(&Arc<PeerInfo>),
+    {
+        if count == 0 {
+            return;
+        }
+
+        let distance = xor_distance(&self.local_id, PeerId::wrap(key));
+
+        let mut processed = 0;
+
+        // Search for closest nodes first
+        for i in (distance..=MAX_XOR_DISTANCE).chain((0..distance).rev()) {
+            let remaining = match count.checked_sub(processed) {
+                None | Some(0) => break,
+                Some(n) => n,
+            };
+
+            if let Some(bucket) = self.buckets.get(&i) {
+                for node in bucket.nodes.iter().take(remaining) {
+                    f(&node.data);
+                    processed += 1;
+                }
+            }
+        }
+    }
+}
+
+pub(crate) struct Bucket {
+    nodes: VecDeque<Node>,
+}
+
+impl Bucket {
+    fn with_capacity(capacity: usize) -> Self {
+        Self {
+            nodes: VecDeque::with_capacity(capacity),
+        }
+    }
+
+    fn insert(
+        &mut self,
+        node: Arc<PeerInfo>,
+        max_k: usize,
+        timeout: &Duration,
+        source: RoutingTableSource,
+    ) -> bool {
+        if let Some(index) = self
+            .nodes
+            .iter_mut()
+            .position(|item| item.data.id == node.id)
+        {
+            if source == RoutingTableSource::Untrusted {
+                let slot = &mut self.nodes[index];
+                // Do nothing if node info was not updated (by created_at field)
+                if node.created_at <= slot.data.created_at {
+                    return false;
+                }
+            }
+
+            self.nodes.remove(index);
+        } else if self.nodes.len() >= max_k {
+            if matches!(self.nodes.front(), Some(node) if node.is_expired(now_sec(), timeout)) {
+                self.nodes.pop_front();
+            } else {
+                return false;
+            }
+        }
+
+        self.nodes.push_back(Node::new(node));
+        true
+    }
+
+    pub fn retain_nodes<F>(&mut self, f: F)
+    where
+        F: FnMut(&Node) -> bool,
+    {
+        self.nodes.retain(f);
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.nodes.is_empty()
+    }
+}
+
+pub(crate) struct Node {
+    pub data: Arc<PeerInfo>,
+    pub last_updated_at: Instant,
+}
+
+impl Node {
+    fn new(data: Arc<PeerInfo>) -> Self {
+        Self {
+            data,
+            last_updated_at: Instant::now(),
+        }
+    }
+
+    pub fn is_expired(&self, at: u32, timeout: &Duration) -> bool {
+        self.data.is_expired(at) || &self.last_updated_at.elapsed() >= timeout
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::str::FromStr;
+
+    use super::*;
+
+    const MAX_K: usize = 20;
+
+    fn make_node(id: PeerId) -> Arc<PeerInfo> {
+        Arc::new(PeerInfo {
+            id,
+            address_list: Default::default(),
+            created_at: 0,
+            expires_at: u32::MAX,
+            signature: Box::new([0; 64]),
+        })
+    }
+
+    #[test]
+    fn buckets_are_sets() {
+        let mut table = RoutingTable::new(PeerId::random());
+
+        let peer = PeerId::random();
+        assert!(table.add(
+            make_node(peer),
+            MAX_K,
+            &Duration::MAX,
+            RoutingTableSource::Trusted
+        ));
+        assert!(table.add(
+            make_node(peer),
+            MAX_K,
+            &Duration::MAX,
+            RoutingTableSource::Trusted
+        )); // returns true because the node was updated
+        assert_eq!(table.len(), 1);
+    }
+
+    #[test]
+    fn should_not_add_self() {
+        let local_id = PeerId::random();
+        let mut table = RoutingTable::new(local_id);
+
+        assert!(!table.add(
+            make_node(local_id),
+            MAX_K,
+            &Duration::MAX,
+            RoutingTableSource::Trusted
+        ));
+        assert!(table.is_empty());
+    }
+
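The tests above cover bucket deduplication and the local-id guard; another property worth pinning down is that `closest` honours its `count` argument and never returns the local id. A sketch of such a test, not part of the diff:

```rust
// Sketch: `closest` returns at most `count` entries, and the local id is never
// among them because `add` rejects it up front.
#[test]
fn closest_respects_count() {
    let local_id = PeerId::random();
    let mut table = RoutingTable::new(local_id);

    for _ in 0..10 {
        table.add(
            make_node(PeerId::random()),
            MAX_K,
            &Duration::MAX,
            RoutingTableSource::Trusted,
        );
    }

    let closest = table.closest(local_id.as_bytes(), 5);
    assert_eq!(closest.len(), 5);
    assert!(closest.iter().all(|node| node.id != local_id));
}
```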
#[test] + fn max_k_per_bucket() { + let k = 20; + let timeout = Duration::MAX; + let mut bucket = Bucket::with_capacity(k); + + for _ in 0..k { + assert!(bucket.insert( + make_node(PeerId::random()), + k, + &timeout, + RoutingTableSource::Trusted + )); + } + assert!(!bucket.insert( + make_node(PeerId::random()), + k, + &timeout, + RoutingTableSource::Trusted + )); + } + + #[test] + fn find_closest_nodes() { + let ids = [ + "4a76f9bc07ca82a9a60a198de13721283649b0d1e3eada12e717e922a02e5bb3", + "0f542142194b68e262a715791380574fe1ba59a440372bb48a6021cadcbe0c80", + "95e594066d545fe55f3a7da54065f12bfade3205480f2f0c48ea4ab23af955c9", + "ceec84c6726f140200dfe4b206d46eee82ee94f4539ad5579070ba59d4748065", + "ef02b1fda8ca4082168a925f8e4f1382764fc8650f5945c64c57a54741fd45b1", + "d2778cf6161b43fbd552902be3ab56d2059e8e4ab2563b7b54e2f3dc37735686", + "bd1ab6dcb76bdef6da7f7fb3fcc1d187638e67acf19654157074c0052c267fe1", + "2709f88a1cda61b92f3036e69a7bcee273721a89e1bcbfa5bf705efcfd66ea5e", + "cb6eeb5680c581bfab2da1d9c2dbeae43ce27d8c59179f6a2e75c9c63a044db6", + "75a8edc3ac6fd40dcb3ec28ef27886225dfe267745c5ca237c036f6d60a06c7f", + "1e7de617e4fd4cd5a5c6c4258dbf24561099e8cb233577a1e2a875f77260a7ab", + "138f06d98756b78695d115e4cacdb56069f3564ac48f49241d9d61b177948b37", + "e0e608b434424cfbe6b7995f8b1dec9d8d08cf9e93aa59b8e36fd6f9f2239538", + "236286b8f8c388ea0877201fd1301e701b474c46f7b5df329fbd3289602074e9", + "6660dc422459c1e1003a8cdcbd5e3fd722df33e67e26072b582ee8c46c5ad5e9", + "19b32fcbf5b45bd3679ce5c9e22b11b57a5fcf56a746ff5426857644ccbc272a", + "fb8c40aaa92e4910a7a47d547c290c598b5aa357a0b96fc3815d7710f682b69c", + "6cf33e51fa4e0cef076c79bd90e1b50eb4be2cb70a1d0a275bd7aa2760a73e4e", + "1c72b8583ac3947718238a23863283a2fe8aedc2581d5538f9830441ad3bf84c", + "c52600bc1018e595739d45433ea4c59ce36fea5242c3c46270c3e61af3f24c26", + "1127d91d128f383f528e5e8b24bfc683368fd22d3e7e185ac50816f1d51726f4", + "1d16bbaf7d432418ad0f799d71cdfea0a865f50a3b02543dc846e4797bdcf30d", + "74ce456b7e956c407625c24522ef5851ae3e2d5630603ff60427fe682e3419ea", + "12dcaae7276b99335b3b75ae8fd04ce378c5e5c7b0143efd04c3d800e4345061", + "6f9bde29ef1eae90896e1fb50fe19df9f342af97018e67dae7c8f721280f4243", + "a8caf1325b29fc49c465b6b8bd6cfc2cbb2d4809153e307956c336b0d4bbd816", + "b4b5d8eb4c39345dd8bea701a541e8fb4df827964aa3796bad122652ddd5be1e", + "9f812affedd280a6c13d8916e3f933a7d52d5814bc3c6218a31bfe37cce3befa", + "beec74f32c5c9b83df70aa0df4e3f5abea71726b2edc123a8bb453ddf3d2de90", + "d2f0f2e684c6578e60794fee51d3bcb484627bb384027bd07a442216f1a38893", + "956b9e26da6a429e70828b777d6b843662f1172258129f20273bea63d4c8a467", + "88361b564dc7d147e411087ac891e8565aadd5b27a2e92a7a7bd7107d02b5fdc", + "52593e3c6739d16f22723840225a804e0b9a90a86b29cb470345cc5559b6ac49", + "7289912a703a94fc0df53a80d6e23a8195f4dd04d67e45d31a0426dcc3d5d1b1", + "ae7c0ca443cf94dd0ee54a0bb7678aa2a82712da2da87813363ff8232ca3a363", + "db2328dc4fee7c9a888cf3c4099a9e1eb0d5c43d52702898c27ff442307c1934", + "965d913e0de7251d12985467e9adc9fb4ba87988307cc9e2b940712a818caacd", + "ba714e28cf5f87e84a6ff8e3187db1ffe0d5788c74067cb8d90bcea184600afa", + "beb3c47ee72dc88438d694947a619200dfc000dccc5239f719096e42600524ab", + "882a587dc9f47a0c40074f4232ff67a61865365c626999aff9526d34955757a2", + "f2ad154d811d2e019d63f8e4f824fba7b72ff13a7e97da12cf717a76ea91273e", + "45e5e550116a9f49bd25a86120ae1f40f85b2611bd9a45dd8a98a9f9dd647dc6", + "d5813e9a7a9445b68839db5e7a95e7125090e4ac763cbe32812ae0c5002d1a58", + "4214d98c9bf2166cc41ef9cf0c37fac68b685358f638e36f2297f426c04f91b4", + 
"7cc1af0803f8fea2015577e1a5510310cad5b136d5924919b9e533e66c648e2d", + "f62ca6a41fa5ed4be443d5b326166a3dafc2c0b1d7dbfcbc709ed22bfddf28a2", + "c91581e33de7a3e1404c58f559e5d6b4438f27d8bedfbb357b8f064ed86df1f8", + "d1ac225a8bfaba82776b6da70010d66b29a876385bacc4a4b365d6ffbeeacc86", + "8c8b75aeeff02c3b88394fe18e7a65534da1b00b36f9446061f7d995484e6177", + "f7d172ff80f4e451f04ba73e279286f2a4707e290ec4268bc16fe94277c7f733", + "3205396db7242347cfa75c796839cf5afb7961a9acb01f650c163fb86b332097", + "7ba5e8fee0239cc2c499161aaecf89d4fa3ebc76b7c8c2d1b305d3309121ca4d", + "c3c05d9e1d51c2d87d6eb4144a726eec697999ba21552951e9c4eefc07f35df4", + "771594e90ff55c810a697d901027ca73e286a8977ea19432e95e28761be19319", + "efcf3927f3456a8eb5e87a2f1d5c582c2bb97336455edad53ce10dcfdbe79420", + "e96dc8c885a3fc866597c8f3b243b011eb928b81bd4accd4fe08d9277a468b75", + "b2797ca70e15d10f8079c527ad13da29af6e261b75f6ccbb5908b6e4e7c7dc87", + "757fe465b20ac4614df7cca23fb3038848fd7fbd0d59afb8800f5e9f212acf40", + "d2bf6ff26de798c1e8944c6c8a39c22b2299e3192fd3a83347a72d7ec4f80071", + "30dbca20ebf6c7f4cdcd8ccf0ce78ae858fd3b296b033ff305559896cb22f54f", + "0a99ceb98807d4f3d217e4b71a7b0cbeb3f79f954088c4f143e1cf046e2132f6", + "227c54051f6872cae600a006eb0e6840ba3903e61a52a18f4a31e4248c7a68eb", + "79799ee7e4e0c5d90d900d9e6a1a4b07ec9f0a565a64e109bca4768a26d095b3", + "2f548b927815ada03b49befad9fc5019d1607f8e3219dd5fa1510b8ae493f064", + "f146a459753a2fb80f3ff5278f9d1bd33734442fa5048e6e0c97d2ae060f1798", + "272dc41968edb8784e34ad71a9b7b06a5a5a200b8df1d14c6b68e6451e27c922", + "5db66920b3d006733c1eb10666b28d83929eede48a7b1fc8f690da2660464c62", + "99019fa36fe000eeafca8efd5fa5c0e77a3a4ed77a4d7ae526cbc71e57026d06", + "c2a0c8b2132ef0db36420eef9f5f0f87da43b01cd78a734bb82e55515f8ffd1d", + "f0c4dac4e62b132b3c3f6086d691c2bf710f1b47e1914eed3fc0a3d4176338a3", + "4f57644cf2f94cb9f547ad1043f8cc439bd7d47cb31748d68ca79b9bc411f99b", + "4ed89565bcd28fa1637fd30b474184c289dc8d326dc4fa052be131b8900b338d", + "b1eb827b1e0b7ca81df1590a5f29818e53a8156634653ae0c02cf3c2a4bb2bde", + "4fa40df71e0237d39d8cc769c2e7252bf741abc755995bbadd6a7e8f95ab1694", + "92398a19157e20036d1e9baffb360096524ae045316e988bf5365e0514183e9c", + "7ca701bffa4a52902298fbe7a7cd383360049cf5fc8201efe17470fd8bbdc7ee", + "e823a52f49062a18c7f2622ced876ca17985d84e20d278935c230847e5560ed1", + "712a228b32fb45b91c9691e73daa96fa0136c85796d0cd802905de7b36da5c99", + "9475a23f0eb50d1573bc6032db822dfda0885bea1eb096cd65eee3bb292c7567", + "6da8d09bc9115d799efdc7e77b7e488dfd87e566e440fb9f591a1257b7914c9d", + "f1ca9e1623356604a00f1982837fe10d634b3f758c5b72d5f543548e616d95e5", + "4e97df7376a778ef083de064d09d9ddd60c42d382bd7d53a721fecdb1e6fba2a", + "dd429467062dcb9e51832f6c6ab55a361615f56e8be7aed600292241684a8133", + "0fc4aed5ebbd23755b4e250bcbc44a5accd3a64b3cf9078da1c02cb53dc8c196", + "8d70a1319a085c4d1c22eac63335085d2c0ddf1a4ffb5b7d93c8a796679e2463", + "f873f50e465c834e2819d104d9dc904f8a32b3f09eb6a880b8669cf08247913e", + "69870545d1b886222d4b968aa14c70c0bfa436893e5a6894749e964cd760069c", + "d5b590ec2b93d9b78e225b254121630ccfaec13be57a1dbf7c915cd922e08d75", + "2abfc539a31361ee6e830b82149c33c898d4bcd3dea6127b930c05ce354dd474", + "1a34e99b9561406f55c9eb5c28965ae1458a6573abb6143f2ca793ccd3bcb7c6", + "5bfe3ac277824dd2d093eeb7241fa8011bbd4dc90ebbad7cce3d055b15524c0b", + "304884f6ea7d01bfa294edc27562c2ebe660e810087f7b962c168b1b967a8d74", + "272b32b839b80f4e7c980577ebc41d8d729d8bed66db9522d69f3851a186fbeb", + "77f06ed2f83251c47af7afef00e9d96729a7d30388fdbe067987a333ea085ede", + 
"a942f1858af47d7347696427504b9eafa94af58452fa4a26fcc1a99ed70e78b6", + "500de9b4be309b5fa9074e856a473090419c2a131e3823636abe0d538e18c712", + "c30e59f93b5c3a801a31016d2e17e5c7fb5bd525165827466925e8a8cc4dbcd9", + "ffce42b385ed2abdc6eef6b88fd963522b57bfea2f9c7f6b230eb1c518912edf", + "750b037a6a8b833ee976ce27120e31807b644626e526a5e4fff3bfcfeed374dd", + "93a756cd44f530a9a072b6573421ba1ade3a7fe35494a2fc308da2ed58c1a7f7", + ]; + let ids = ids + .into_iter() + .map(PeerId::from_str) + .collect::, _>>() + .unwrap(); + + let local_id = + PeerId::from_str("bdbc554024c65b463b0f0a01037b55985190f4fc01c47dc81c19aab4b4b2d9ab") + .unwrap(); + + let mut table = RoutingTable::new(local_id); + for id in ids { + table.add( + make_node(id), + MAX_K, + &Duration::MAX, + RoutingTableSource::Trusted, + ); + } + + { + let expected_closest_ids = [ + "882a587dc9f47a0c40074f4232ff67a61865365c626999aff9526d34955757a2", + "88361b564dc7d147e411087ac891e8565aadd5b27a2e92a7a7bd7107d02b5fdc", + "8c8b75aeeff02c3b88394fe18e7a65534da1b00b36f9446061f7d995484e6177", + "92398a19157e20036d1e9baffb360096524ae045316e988bf5365e0514183e9c", + "9475a23f0eb50d1573bc6032db822dfda0885bea1eb096cd65eee3bb292c7567", + "956b9e26da6a429e70828b777d6b843662f1172258129f20273bea63d4c8a467", + "95e594066d545fe55f3a7da54065f12bfade3205480f2f0c48ea4ab23af955c9", + "965d913e0de7251d12985467e9adc9fb4ba87988307cc9e2b940712a818caacd", + "99019fa36fe000eeafca8efd5fa5c0e77a3a4ed77a4d7ae526cbc71e57026d06", + "9f812affedd280a6c13d8916e3f933a7d52d5814bc3c6218a31bfe37cce3befa", + "a8caf1325b29fc49c465b6b8bd6cfc2cbb2d4809153e307956c336b0d4bbd816", + "a942f1858af47d7347696427504b9eafa94af58452fa4a26fcc1a99ed70e78b6", + "ae7c0ca443cf94dd0ee54a0bb7678aa2a82712da2da87813363ff8232ca3a363", + "b1eb827b1e0b7ca81df1590a5f29818e53a8156634653ae0c02cf3c2a4bb2bde", + "b2797ca70e15d10f8079c527ad13da29af6e261b75f6ccbb5908b6e4e7c7dc87", + "b4b5d8eb4c39345dd8bea701a541e8fb4df827964aa3796bad122652ddd5be1e", + "ba714e28cf5f87e84a6ff8e3187db1ffe0d5788c74067cb8d90bcea184600afa", + "bd1ab6dcb76bdef6da7f7fb3fcc1d187638e67acf19654157074c0052c267fe1", + "beb3c47ee72dc88438d694947a619200dfc000dccc5239f719096e42600524ab", + "beec74f32c5c9b83df70aa0df4e3f5abea71726b2edc123a8bb453ddf3d2de90", + ]; + let expected_closest_ids = expected_closest_ids + .into_iter() + .map(PeerId::from_str) + .collect::, _>>() + .unwrap(); + + let mut closest = table + .closest(local_id.as_bytes(), 20) + .into_iter() + .map(|item| item.id) + .collect::>(); + closest.sort(); + assert_eq!(closest, expected_closest_ids); + } + + { + let expected_closest_ids = [ + "c3c05d9e1d51c2d87d6eb4144a726eec697999ba21552951e9c4eefc07f35df4", + "c52600bc1018e595739d45433ea4c59ce36fea5242c3c46270c3e61af3f24c26", + "c91581e33de7a3e1404c58f559e5d6b4438f27d8bedfbb357b8f064ed86df1f8", + "cb6eeb5680c581bfab2da1d9c2dbeae43ce27d8c59179f6a2e75c9c63a044db6", + "ceec84c6726f140200dfe4b206d46eee82ee94f4539ad5579070ba59d4748065", + "d1ac225a8bfaba82776b6da70010d66b29a876385bacc4a4b365d6ffbeeacc86", + "d2778cf6161b43fbd552902be3ab56d2059e8e4ab2563b7b54e2f3dc37735686", + "d2bf6ff26de798c1e8944c6c8a39c22b2299e3192fd3a83347a72d7ec4f80071", + "d2f0f2e684c6578e60794fee51d3bcb484627bb384027bd07a442216f1a38893", + "d5813e9a7a9445b68839db5e7a95e7125090e4ac763cbe32812ae0c5002d1a58", + "db2328dc4fee7c9a888cf3c4099a9e1eb0d5c43d52702898c27ff442307c1934", + "e0e608b434424cfbe6b7995f8b1dec9d8d08cf9e93aa59b8e36fd6f9f2239538", + "e96dc8c885a3fc866597c8f3b243b011eb928b81bd4accd4fe08d9277a468b75", + 
"ef02b1fda8ca4082168a925f8e4f1382764fc8650f5945c64c57a54741fd45b1", + "efcf3927f3456a8eb5e87a2f1d5c582c2bb97336455edad53ce10dcfdbe79420", + "f146a459753a2fb80f3ff5278f9d1bd33734442fa5048e6e0c97d2ae060f1798", + "f2ad154d811d2e019d63f8e4f824fba7b72ff13a7e97da12cf717a76ea91273e", + "f62ca6a41fa5ed4be443d5b326166a3dafc2c0b1d7dbfcbc709ed22bfddf28a2", + "f7d172ff80f4e451f04ba73e279286f2a4707e290ec4268bc16fe94277c7f733", + "fb8c40aaa92e4910a7a47d547c290c598b5aa357a0b96fc3815d7710f682b69c", + ]; + let expected_closest_ids = expected_closest_ids + .into_iter() + .map(PeerId::from_str) + .collect::, _>>() + .unwrap(); + + let target = PeerId::from_str( + "d41f603e6bd24f1c3e2eb4d97d81fd155dd307f5b5c9be443a1a229bd1392b72", + ) + .unwrap(); + + let mut closest = table + .closest(target.as_bytes(), 20) + .into_iter() + .map(|item| item.id) + .collect::>(); + closest.sort(); + assert_eq!(closest, expected_closest_ids); + } + } +} diff --git a/network/src/dht/storage.rs b/network/src/dht/storage.rs new file mode 100644 index 000000000..76041b389 --- /dev/null +++ b/network/src/dht/storage.rs @@ -0,0 +1,253 @@ +use std::cell::RefCell; +use std::sync::{Arc, Weak}; +use std::time::Duration; + +use anyhow::Result; +use bytes::{Bytes, BytesMut}; +use moka::sync::{Cache, CacheBuilder}; +use moka::Expiry; +use tl_proto::TlWrite; +use tycho_util::time::now_sec; + +use crate::proto::dht::{OverlayValue, OverlayValueRef, PeerValueRef, ValueRef}; + +type DhtCache = Cache; +type DhtCacheBuilder = CacheBuilder>; + +pub trait OverlayValueMerger: Send + Sync + 'static { + fn check_value(&self, new: &OverlayValueRef<'_>) -> Result<(), StorageError>; + fn merge_value(&self, new: &OverlayValueRef<'_>, stored: &mut OverlayValue) -> bool; +} + +impl OverlayValueMerger for () { + fn check_value(&self, _new: &OverlayValueRef<'_>) -> Result<(), StorageError> { + Err(StorageError::InvalidKey) + } + fn merge_value(&self, _new: &OverlayValueRef<'_>, _stored: &mut OverlayValue) -> bool { + false + } +} + +pub(crate) struct StorageBuilder { + cache_builder: DhtCacheBuilder, + overlay_value_merger: Weak, + max_ttl: Duration, +} + +impl Default for StorageBuilder { + fn default() -> Self { + Self { + cache_builder: Default::default(), + overlay_value_merger: Weak::<()>::new(), + max_ttl: Duration::from_secs(3600), + } + } +} + +impl StorageBuilder { + pub fn build(self) -> Storage { + fn weigher(_key: &StorageKeyId, value: &StoredValue) -> u32 { + std::mem::size_of::() as u32 + + std::mem::size_of::() as u32 + + value.data.len() as u32 + } + + Storage { + cache: self + .cache_builder + .time_to_live(self.max_ttl) + .weigher(weigher) + .expire_after(ValueExpiry) + .build_with_hasher(ahash::RandomState::default()), + overlay_value_merger: self.overlay_value_merger, + max_ttl_sec: self.max_ttl.as_secs().try_into().unwrap_or(u32::MAX), + } + } + + pub fn with_overlay_value_merger(mut self, merger: &Arc) -> Self { + self.overlay_value_merger = Arc::downgrade(merger); + self + } + + pub fn with_max_capacity(mut self, max_capacity: u64) -> Self { + self.cache_builder = self.cache_builder.max_capacity(max_capacity); + self + } + + pub fn with_max_ttl(mut self, ttl: Duration) -> Self { + self.max_ttl = ttl; + self + } + + pub fn with_max_idle(mut self, duration: Duration) -> Self { + self.cache_builder = self.cache_builder.time_to_idle(duration); + self + } +} + +pub(crate) struct Storage { + cache: DhtCache, + overlay_value_merger: Weak, + max_ttl_sec: u32, +} + +impl Storage { + pub fn builder() -> StorageBuilder { + StorageBuilder::default() 
+ } + + pub fn get(&self, key: &[u8; 32]) -> Option { + let stored_value = self.cache.get(key)?; + (stored_value.expires_at > now_sec()).then_some(stored_value.data) + } + + pub fn insert(&self, value: &ValueRef<'_>) -> Result { + match value.expires_at().checked_sub(now_sec()) { + Some(0) | None => return Err(StorageError::ValueExpired), + Some(remaining_ttl) if remaining_ttl > self.max_ttl_sec => { + return Err(StorageError::UnsupportedTtl) + } + _ => {} + } + + match value { + ValueRef::Peer(value) => self.insert_signed_value(value), + ValueRef::Overlay(value) => self.insert_overlay_value(value), + } + } + + fn insert_signed_value(&self, value: &PeerValueRef<'_>) -> Result { + let Some(public_key) = value.key.peer_id.as_public_key() else { + return Err(StorageError::InvalidSignature); + }; + + if !matches!( + <&[u8; 64]>::try_from(value.signature.as_ref()), + Ok(signature) if public_key.verify(value, signature) + ) { + return Err(StorageError::InvalidSignature); + } + + Ok(self + .cache + .entry(tl_proto::hash(&value.key)) + .or_insert_with_if( + || StoredValue::new(value, value.expires_at), + |prev| prev.expires_at < value.expires_at, + ) + .is_fresh()) + } + + fn insert_overlay_value(&self, value: &OverlayValueRef<'_>) -> Result { + let Some(merger) = self.overlay_value_merger.upgrade() else { + return Ok(false); + }; + + merger.check_value(value)?; + + enum OverlayValueCow<'a, 'b> { + Borrowed(&'a OverlayValueRef<'b>), + Owned(OverlayValue), + } + + impl OverlayValueCow<'_, '_> { + fn make_stored_value(&self) -> StoredValue { + match self { + Self::Borrowed(value) => StoredValue::new(*value, value.expires_at), + Self::Owned(value) => StoredValue::new(value, value.expires_at), + } + } + } + + let new_value = RefCell::new(OverlayValueCow::Borrowed(value)); + + Ok(self + .cache + .entry(tl_proto::hash(&value.key)) + .or_insert_with_if( + || { + let value = new_value.borrow(); + value.make_stored_value() + }, + |prev| { + let Ok(mut prev) = tl_proto::deserialize::(&prev.data) else { + // Invalid values are always replaced with new values + return true; + }; + + if merger.merge_value(value, &mut prev) { + *new_value.borrow_mut() = OverlayValueCow::Owned(prev); + true + } else { + false + } + }, + ) + .is_fresh()) + } +} + +#[derive(Clone)] +struct StoredValue { + expires_at: u32, + data: Bytes, +} + +impl StoredValue { + fn new>(value: &T, expires_at: u32) -> Self { + let mut data = BytesMut::with_capacity(value.max_size_hint()); + value.write_to(&mut data); + + StoredValue { + expires_at, + data: data.freeze(), + } + } +} + +struct ValueExpiry; + +impl Expiry for ValueExpiry { + fn expire_after_create( + &self, + _key: &StorageKeyId, + value: &StoredValue, + _created_at: std::time::Instant, + ) -> Option { + Some(ttl_since_now(value.expires_at)) + } + + fn expire_after_update( + &self, + _key: &StorageKeyId, + value: &StoredValue, + _updated_at: std::time::Instant, + _duration_until_expiry: Option, + ) -> Option { + Some(ttl_since_now(value.expires_at)) + } +} + +fn ttl_since_now(expires_at: u32) -> Duration { + let now = std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .unwrap(); + + Duration::from_secs(expires_at as u64).saturating_sub(now) +} + +pub type StorageKeyId = [u8; 32]; + +#[derive(Debug, thiserror::Error)] +pub enum StorageError { + #[error("value expired")] + ValueExpired, + #[error("unsupported ttl")] + UnsupportedTtl, + #[error("invalid key")] + InvalidKey, + #[error("invalid signature")] + InvalidSignature, + #[error("value too big")] + 
ValueTooBig, +} diff --git a/network/src/lib.rs b/network/src/lib.rs index 1e604f0a3..fe4689e40 100644 --- a/network/src/lib.rs +++ b/network/src/lib.rs @@ -1,7 +1,55 @@ -pub mod config; -pub mod connection; -pub mod crypto; -pub mod endpoint; -pub mod network; -pub mod proto; -pub mod types; +pub use self::util::{check_peer_signature, NetworkExt, Routable, Router, RouterBuilder}; +pub use dht::{ + xor_distance, DhtClient, DhtClientBuilder, DhtConfig, DhtQueryBuilder, DhtQueryWithDataBuilder, + DhtService, DhtServiceBuilder, FindValueError, OverlayValueMerger, StorageError, +}; +pub use network::{ + ActivePeers, Connection, KnownPeer, KnownPeers, Network, NetworkBuilder, NetworkConfig, Peer, + QuicConfig, RecvStream, SendStream, WeakActivePeers, WeakNetwork, +}; +pub use types::{ + service_datagram_fn, service_message_fn, service_query_fn, Address, BoxCloneService, + BoxService, Direction, DisconnectReason, InboundRequestMeta, PeerAffinity, PeerEvent, PeerId, + PeerInfo, Request, Response, RpcQuery, Service, ServiceDatagramFn, ServiceExt, + ServiceMessageFn, ServiceQueryFn, ServiceRequest, Version, +}; + +pub use quinn; + +mod dht; +mod network; +mod types; +mod util; + +pub mod proto { + pub mod dht; +} + +#[doc(hidden)] +pub mod __internal { + pub use tl_proto; +} + +#[cfg(test)] +mod tests { + use std::net::Ipv4Addr; + + use super::*; + + #[tokio::test] + async fn init_works() { + let keypair = everscale_crypto::ed25519::KeyPair::generate(&mut rand::thread_rng()); + + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()).build(); + + let router = Router::builder().route(dht).build(); + + let network = Network::builder() + .with_random_private_key() + .with_service_name("test-service") + .build((Ipv4Addr::LOCALHOST, 0), router) + .unwrap(); + + let _dht_client = dht_client.build(network); + } +} diff --git a/network/src/config.rs b/network/src/network/config.rs similarity index 86% rename from network/src/config.rs rename to network/src/network/config.rs index 10a50f7ad..1ef9dbdc4 100644 --- a/network/src/config.rs +++ b/network/src/network/config.rs @@ -2,27 +2,57 @@ use std::sync::Arc; use std::time::Duration; use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use tycho_util::serde_helpers; -use crate::crypto::{CertVerifier, CertVerifierWithPeerId}; +use crate::network::crypto::{ + generate_cert, peer_id_from_certificate, CertVerifier, CertVerifierWithPeerId, +}; use crate::types::PeerId; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] #[non_exhaustive] -pub struct Config { +pub struct NetworkConfig { pub quic: Option, + + /// Default: 128. pub connection_manager_channel_capacity: usize, + + /// Default: 5 seconds. + #[serde(with = "serde_helpers::humantime")] pub connectivity_check_interval: Duration, + + /// Default: yes. pub max_frame_size: Option, + + /// Default: 10 seconds. + #[serde(with = "serde_helpers::humantime")] pub connect_timeout: Duration, + + /// Default: 10 seconds. + #[serde(with = "serde_helpers::humantime")] pub connection_backoff: Duration, + + /// Default: 1 minute. + #[serde(with = "serde_helpers::humantime")] pub max_connection_backoff: Duration, + + /// Default: 100. pub max_concurrent_outstanding_connections: usize, + + /// Default: unlimited. pub max_concurrent_connections: Option, + + /// Default: 128. pub active_peers_event_channel_capacity: usize, + + /// Default: 1 minute. 
+ #[serde(with = "serde_helpers::humantime")] pub shutdown_idle_timeout: Duration, } -impl Default for Config { +impl Default for NetworkConfig { fn default() -> Self { Self { quic: None, @@ -40,15 +70,24 @@ impl Default for Config { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] pub struct QuicConfig { + /// Default: 100. pub max_concurrent_bidi_streams: u64, + /// Default: 100. pub max_concurrent_uni_streams: u64, + /// Default: auto. pub stream_receive_window: Option, + /// Default: auto. pub receive_window: Option, + /// Default: auto. pub send_window: Option, + // TODO: add all other fields from quin::TransportConfig + /// Default: auto. pub socket_send_buffer_size: Option, + /// Default: auto. pub socket_recv_buffer_size: Option, } @@ -90,7 +129,7 @@ impl QuicConfig { } } -pub struct EndpointConfig { +pub(crate) struct EndpointConfig { pub peer_id: PeerId, pub service_name: String, pub client_cert: rustls::Certificate, @@ -124,7 +163,7 @@ impl EndpointConfig { } } -pub struct EndpointConfigBuilder { +pub(crate) struct EndpointConfigBuilder { mandatory_fields: MandatoryFields, optional_fields: EndpointConfigBuilderFields, } @@ -178,8 +217,8 @@ impl EndpointConfigBuilder { let reset_key = compute_reset_key(&keypair.secret_key); let quinn_endpoint_config = quinn::EndpointConfig::new(Arc::new(reset_key)); - let (cert, pkcs8_der) = crate::crypto::generate_cert(&keypair, &service_name) - .context("Failed to generate a certificate")?; + let (cert, pkcs8_der) = + generate_cert(&keypair, &service_name).context("Failed to generate a certificate")?; let cert_verifier = Arc::new(CertVerifier::from(service_name.clone())); let quinn_client_config = make_client_config( @@ -197,7 +236,7 @@ impl EndpointConfigBuilder { transport_config.clone(), )?; - let peer_id = crate::crypto::peer_id_from_certificate(&cert)?; + let peer_id = peer_id_from_certificate(&cert)?; Ok(EndpointConfig { peer_id, diff --git a/network/src/connection.rs b/network/src/network/connection.rs similarity index 62% rename from network/src/connection.rs rename to network/src/network/connection.rs index 91912dcf0..d5dd1e34b 100644 --- a/network/src/connection.rs +++ b/network/src/network/connection.rs @@ -1,32 +1,40 @@ use std::net::SocketAddr; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use anyhow::{Context as _, Result}; use bytes::Bytes; -use quinn::{ConnectionError, RecvStream}; +use quinn::{ConnectionError, SendDatagramError}; -use crate::types::{Direction, PeerId}; +use crate::network::crypto::peer_id_from_certificate; +use crate::types::{Direction, InboundRequestMeta, PeerId}; #[derive(Clone)] pub struct Connection { inner: quinn::Connection, - peer_id: PeerId, - origin: Direction, + request_meta: Arc, } impl Connection { pub fn new(inner: quinn::Connection, origin: Direction) -> Result { let peer_id = extract_peer_id(&inner)?; Ok(Self { + request_meta: Arc::new(InboundRequestMeta { + peer_id, + origin, + remote_address: inner.remote_address(), + }), inner, - peer_id, - origin, }) } + pub fn request_meta(&self) -> &Arc { + &self.request_meta + } + pub fn peer_id(&self) -> &PeerId { - &self.peer_id + &self.request_meta.peer_id } pub fn stable_id(&self) -> usize { @@ -34,37 +42,41 @@ impl Connection { } pub fn origin(&self) -> Direction { - self.origin + self.request_meta.origin } pub fn remote_address(&self) -> SocketAddr { - self.inner.remote_address() + self.request_meta.remote_address } pub fn close(&self) { - self.inner.close(0u8.into(), 
b"connection closed") - } - - pub async fn open_uni(&self) -> Result { - self.inner.open_uni().await.map(SendStream) + self.inner.close(0u8.into(), b"connection closed"); } pub async fn open_bi(&self) -> Result<(SendStream, RecvStream), ConnectionError> { self.inner .open_bi() .await - .map(|(send, recv)| (SendStream(send), recv)) - } - - pub async fn accept_uni(&self) -> Result { - self.inner.accept_uni().await + .map(|(send, recv)| (SendStream(send), RecvStream(recv))) } pub async fn accept_bi(&self) -> Result<(SendStream, RecvStream), ConnectionError> { self.inner .accept_bi() .await - .map(|(send, recv)| (SendStream(send), recv)) + .map(|(send, recv)| (SendStream(send), RecvStream(recv))) + } + + pub async fn open_uni(&self) -> Result { + self.inner.open_uni().await.map(SendStream) + } + + pub async fn accept_uni(&self) -> Result { + self.inner.accept_uni().await.map(RecvStream) + } + + pub fn send_datagram(&self, data: Bytes) -> Result<(), SendDatagramError> { + self.inner.send_datagram(data) } pub async fn read_datagram(&self) -> Result { @@ -75,14 +87,15 @@ impl Connection { impl std::fmt::Debug for Connection { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Connection") - .field("origin", &self.origin) + .field("origin", &self.request_meta.origin) .field("id", &self.stable_id()) .field("remote_address", &self.remote_address()) - .field("peer_id", &self.peer_id) + .field("peer_id", &self.request_meta.peer_id) .finish_non_exhaustive() } } +#[repr(transparent)] pub struct SendStream(quinn::SendStream); impl Drop for SendStream { @@ -134,6 +147,36 @@ impl tokio::io::AsyncWrite for SendStream { } } +#[repr(transparent)] +pub struct RecvStream(quinn::RecvStream); + +impl std::ops::Deref for RecvStream { + type Target = quinn::RecvStream; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for RecvStream { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl tokio::io::AsyncRead for RecvStream { + #[inline] + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + Pin::new(&mut self.0).poll_read(cx, buf) + } +} + fn extract_peer_id(connection: &quinn::Connection) -> Result { let certificate = connection .peer_identity() @@ -141,5 +184,5 @@ fn extract_peer_id(connection: &quinn::Connection) -> Result { .and_then(|certificates| certificates.into_iter().next()) .context("No certificate found in the connection")?; - crate::crypto::peer_id_from_certificate(&certificate).map_err(Into::into) + peer_id_from_certificate(&certificate).map_err(Into::into) } diff --git a/network/src/network/connection_manager.rs b/network/src/network/connection_manager.rs index b7f9845a1..cfbe2cd75 100644 --- a/network/src/network/connection_manager.rs +++ b/network/src/network/connection_manager.rs @@ -1,4 +1,3 @@ -use std::net::SocketAddr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; @@ -7,23 +6,26 @@ use ahash::HashMap; use anyhow::Result; use tokio::sync::{broadcast, mpsc, oneshot}; use tokio::task::JoinSet; +use tycho_util::{FastDashMap, FastHashMap}; -use crate::config::Config; -use crate::connection::Connection; -use crate::endpoint::{Connecting, Endpoint}; +use crate::network::config::NetworkConfig; +use crate::network::connection::Connection; +use crate::network::endpoint::{Connecting, Endpoint}; +use crate::network::request_handler::InboundRequestHandler; +use 
crate::network::wire::handshake; use crate::types::{ - Direction, DisconnectReason, FastDashMap, FastHashMap, PeerAffinity, PeerEvent, PeerId, - PeerInfo, + Address, BoxCloneService, Direction, DisconnectReason, PeerAffinity, PeerEvent, PeerId, + PeerInfo, Response, ServiceRequest, }; #[derive(Debug)] -pub enum ConnectionManagerRequest { - Connect(SocketAddr, Option, oneshot::Sender>), +pub(crate) enum ConnectionManagerRequest { + Connect(Address, Option, oneshot::Sender>), Shutdown(oneshot::Sender<()>), } -pub struct ConnectionManager { - config: Arc, +pub(crate) struct ConnectionManager { + config: Arc, endpoint: Arc, mailbox: mpsc::Receiver, @@ -36,20 +38,23 @@ pub struct ConnectionManager { active_peers: ActivePeers, known_peers: KnownPeers, + + service: BoxCloneService, } impl Drop for ConnectionManager { fn drop(&mut self) { - self.endpoint.close() + self.endpoint.close(); } } impl ConnectionManager { pub fn new( - config: Arc, + config: Arc, endpoint: Arc, active_peers: ActivePeers, known_peers: KnownPeers, + service: BoxCloneService, ) -> (Self, mpsc::Sender) { let (mailbox_tx, mailbox) = mpsc::channel(config.connection_manager_channel_capacity); let connection_manager = Self { @@ -62,11 +67,11 @@ impl ConnectionManager { dial_backoff_states: Default::default(), active_peers, known_peers, + service, }; (connection_manager, mailbox_tx) } - #[tracing::instrument(skip_all, fields(local_id = %self.endpoint.peer_id()))] pub async fn start(mut self) { tracing::info!("connection manager started"); @@ -175,56 +180,60 @@ impl ConnectionManager { .0 .iter() .filter(|item| { - let peer_info = item.value(); - peer_info.affinity == PeerAffinity::High - && &peer_info.peer_id != self.endpoint.peer_id() - && !self.active_peers.contains(&peer_info.peer_id) - && !self.pending_dials.contains_key(&peer_info.peer_id) + let KnownPeer { + peer_info, + affinity, + } = item.value(); + + *affinity == PeerAffinity::High + && &peer_info.id != self.endpoint.peer_id() + && !self.active_peers.contains(&peer_info.id) + && !self.pending_dials.contains_key(&peer_info.id) && self .dial_backoff_states - .get(&peer_info.peer_id) - .map(|state| now > state.next_attempt_at) - .unwrap_or(true) + .get(&peer_info.id) + .map_or(true, |state| now > state.next_attempt_at) }) .take(outstanding_connections_limit) - .map(|item| item.value().clone()) + .map(|item| item.value().peer_info.clone()) .collect::>(); for peer_info in outstanding_connections { + // TODO: handle multiple addresses + let address = peer_info + .iter_addresses() + .next() + .cloned() + .expect("address list must have at least one item"); + let (tx, rx) = oneshot::channel(); - self.dial_peer(peer_info.address, Some(peer_info.peer_id), tx); - self.pending_dials.insert(peer_info.peer_id, rx); + self.dial_peer(address, Some(peer_info.id), tx); + self.pending_dials.insert(peer_info.id, rx); } } fn handle_connect_request( &mut self, - address: SocketAddr, + address: Address, peer_id: Option, callback: oneshot::Sender>, ) { - self.dial_peer(address, peer_id, callback) + self.dial_peer(address, peer_id, callback); } fn handle_incoming(&mut self, connecting: Connecting) { async fn handle_incoming_task( connecting: Connecting, - config: Arc, + config: Arc, active_peers: ActivePeers, known_peers: KnownPeers, ) -> ConnectingOutput { let fut = async { let connection = connecting.await?; - match known_peers.get(connection.peer_id()) { - Some(PeerInfo { - affinity: PeerAffinity::High | PeerAffinity::Allowed, - .. 
- }) => {} - Some(PeerInfo { - affinity: PeerAffinity::Never, - .. - }) => { + match known_peers.get_affinity(connection.peer_id()) { + Some(PeerAffinity::High | PeerAffinity::Allowed) => {} + Some(PeerAffinity::Never) => { anyhow::bail!( "rejecting connection from peer {} due to PeerAffinity::Never", connection.peer_id(), @@ -241,7 +250,7 @@ impl ConnectionManager { } } - crate::proto::handshake(connection).await + handshake(connection).await }; let connecting_result = tokio::time::timeout(config.connect_timeout, fut) @@ -292,27 +301,33 @@ impl ConnectionManager { fn add_peer(&mut self, connection: Connection) { if let Some(connection) = self.active_peers.add(self.endpoint.peer_id(), connection) { - // TODO: spawn request handler + let handler = InboundRequestHandler::new( + self.config.clone(), + connection, + self.service.clone(), + self.active_peers.clone(), + ); + self.connection_handlers.spawn(handler.start()); } } #[tracing::instrument(level = "trace", skip_all, fields(peer_id = ?peer_id, address = %address))] fn dial_peer( &mut self, - address: SocketAddr, + address: Address, peer_id: Option, callback: oneshot::Sender>, ) { async fn dial_peer_task( connecting: Result, - address: SocketAddr, + address: Address, peer_id: Option, callback: oneshot::Sender>, - config: Arc, + config: Arc, ) -> ConnectingOutput { let fut = async { let connection = connecting?.await?; - crate::proto::handshake(connection).await + handshake(connection).await }; let connecting_result = tokio::time::timeout(config.connect_timeout, fut) @@ -328,13 +343,14 @@ impl ConnectionManager { } } + let target_address = address.clone(); let connecting = match peer_id { None => self.endpoint.connect(address), Some(peer_id) => self.endpoint.connect_with_expected_id(address, peer_id), }; self.pending_connections.spawn(dial_peer_task( connecting, - address, + target_address, peer_id, callback, self.config.clone(), @@ -345,7 +361,7 @@ impl ConnectionManager { struct ConnectingOutput { connecting_result: Result, callback: Option>>, - target_address: Option, + target_address: Option
, target_peer_id: Option, } @@ -396,7 +412,7 @@ impl ActivePeers { } pub fn remove(&self, peer_id: &PeerId, reason: DisconnectReason) { - self.0.remove(peer_id, reason) + self.0.remove(peer_id, reason); } pub fn remove_with_stable_id( @@ -405,7 +421,11 @@ impl ActivePeers { stable_id: usize, reason: DisconnectReason, ) { - self.0.remove_with_stable_id(peer_id, stable_id, reason) + self.0.remove_with_stable_id(peer_id, stable_id, reason); + } + + pub fn subscribe(&self) -> broadcast::Receiver { + self.0.subscribe() } pub fn is_empty(&self) -> bool { @@ -508,6 +528,10 @@ impl ActivePeersInner { } } + fn subscribe(&self) -> broadcast::Receiver { + self.events_tx.subscribe() + } + fn send_event(&self, event: PeerEvent) { _ = self.events_tx.send(event); } @@ -537,22 +561,60 @@ fn simultaneous_dial_tie_breaking( } #[derive(Default, Clone)] -pub struct KnownPeers(Arc>); +pub struct KnownPeers(Arc>); impl KnownPeers { pub fn new() -> Self { Self::default() } - pub fn get(&self, peer_id: &PeerId) -> Option { + pub fn contains(&self, peer_id: &PeerId) -> bool { + self.0.contains_key(peer_id) + } + + pub fn get(&self, peer_id: &PeerId) -> Option { self.0.get(peer_id).map(|item| item.value().clone()) } - pub fn insert(&self, peer_info: PeerInfo) -> Option { - self.0.insert(peer_info.peer_id, peer_info) + pub fn get_affinity(&self, peer_id: &PeerId) -> Option { + self.0.get(peer_id).map(|item| item.value().affinity) } - pub fn remove(&self, peer_id: &PeerId) -> Option { + pub fn insert(&self, peer_info: Arc, affinity: PeerAffinity) -> Option { + match self.0.entry(peer_info.id) { + dashmap::mapref::entry::Entry::Vacant(entry) => { + entry.insert(KnownPeer { + peer_info, + affinity, + }); + None + } + dashmap::mapref::entry::Entry::Occupied(entry) => { + if entry.get().peer_info.created_at >= peer_info.created_at { + return None; + } + + let affinity = match affinity { + PeerAffinity::High | PeerAffinity::Never => affinity, + PeerAffinity::Allowed => entry.get().affinity, + }; + + let (_, old) = entry.replace_entry(KnownPeer { + peer_info, + affinity, + }); + Some(old) + } + } + } + + pub fn remove(&self, peer_id: &PeerId) -> Option { self.0.remove(peer_id).map(|(_, value)| value) } } + +#[derive(Debug, Clone)] +pub struct KnownPeer { + pub peer_info: Arc, + pub affinity: PeerAffinity, +} diff --git a/network/src/crypto.rs b/network/src/network/crypto.rs similarity index 92% rename from network/src/crypto.rs rename to network/src/network/crypto.rs index 3122f2b53..602d1d62b 100644 --- a/network/src/crypto.rs +++ b/network/src/network/crypto.rs @@ -5,7 +5,7 @@ use pkcs8::EncodePrivateKey; use crate::types::PeerId; -pub fn generate_cert( +pub(crate) fn generate_cert( keypair: &ed25519::KeypairBytes, subject_name: &str, ) -> Result<(rustls::Certificate, rustls::PrivateKey)> { @@ -25,14 +25,14 @@ pub fn generate_cert( Ok((rustls::Certificate(cert), key_der)) } -pub fn peer_id_from_certificate( +pub(crate) fn peer_id_from_certificate( certificate: &rustls::Certificate, ) -> Result { use pkcs8::DecodePublicKey; use x509_parser::prelude::{FromDer, X509Certificate}; let (_, cert) = X509Certificate::from_der(certificate.0.as_ref()) - .map_err(|_| rustls::Error::InvalidCertificate(rustls::CertificateError::BadEncoding))?; + .map_err(|_e| rustls::Error::InvalidCertificate(rustls::CertificateError::BadEncoding))?; let spki = cert.public_key(); let public_key = ed25519::pkcs8::PublicKeyBytes::from_public_key_der(spki.raw).map_err(|e| { @@ -44,7 +44,7 @@ pub fn peer_id_from_certificate( 
Ok(PeerId(public_key.to_bytes())) } -pub struct CertVerifierWithPeerId { +pub(crate) struct CertVerifierWithPeerId { inner: CertVerifier, peer_id: PeerId, } @@ -87,7 +87,7 @@ impl rustls::client::ServerCertVerifier for CertVerifierWithPeerId { } /// Verifies self-signed certificates for the specified SNI. -pub struct CertVerifier { +pub(crate) struct CertVerifier { service_name: String, } @@ -118,13 +118,14 @@ impl rustls::server::ClientCertVerifier for CertVerifier { ) -> Result { // Parse the certificate let prepared = prepare_for_self_signed(end_entity, intermediates)?; - let now = webpki::Time::try_from(now).map_err(|_| rustls::Error::FailedToGetCurrentTime)?; + let now = + webpki::Time::try_from(now).map_err(|_e| rustls::Error::FailedToGetCurrentTime)?; // Verify the certificate prepared .parsed .verify_for_usage( - SIGNATURE_ALGHORITHMS, + SIGNATURE_ALGORITHMS, std::slice::from_ref(&prepared.root), &prepared.intermediates, now, @@ -173,13 +174,14 @@ impl rustls::client::ServerCertVerifier for CertVerifier { // Parse the certificate let prepared = prepare_for_self_signed(end_entity, intermediates)?; - let now = webpki::Time::try_from(now).map_err(|_| rustls::Error::FailedToGetCurrentTime)?; + let now = + webpki::Time::try_from(now).map_err(|_e| rustls::Error::FailedToGetCurrentTime)?; // Verify the certificate prepared .parsed .verify_for_usage( - SIGNATURE_ALGHORITHMS, + SIGNATURE_ALGORITHMS, std::slice::from_ref(&prepared.root), &prepared.intermediates, now, @@ -255,4 +257,4 @@ struct InvalidCertificatePublicKey(pkcs8::spki::Error); #[error("certificate peer id mismatch")] struct CertificatePeerIdMismatch; -static SIGNATURE_ALGHORITHMS: &[&webpki::SignatureAlgorithm] = &[&webpki::ED25519]; +static SIGNATURE_ALGORITHMS: &[&webpki::SignatureAlgorithm] = &[&webpki::ED25519]; diff --git a/network/src/endpoint.rs b/network/src/network/endpoint.rs similarity index 92% rename from network/src/endpoint.rs rename to network/src/network/endpoint.rs index 556b2949e..64ef1f4cc 100644 --- a/network/src/endpoint.rs +++ b/network/src/network/endpoint.rs @@ -7,11 +7,11 @@ use std::time::Duration; use anyhow::Result; -use crate::config::EndpointConfig; -use crate::connection::Connection; -use crate::types::{Direction, PeerId}; +use crate::network::config::EndpointConfig; +use crate::network::connection::Connection; +use crate::types::{Address, Direction, PeerId}; -pub struct Endpoint { +pub(crate) struct Endpoint { inner: quinn::Endpoint, local_addr: RwLock, config: EndpointConfig, @@ -73,14 +73,14 @@ impl Endpoint { } /// Connect to a remote endpoint using the endpoint configuration. - pub fn connect(&self, address: SocketAddr) -> Result { + pub fn connect(&self, address: Address) -> Result { self.connect_with_client_config(self.config.quinn_client_config.clone(), address) } /// Connect to a remote endpoint expecting it to have the provided peer id. pub fn connect_with_expected_id( &self, - address: SocketAddr, + address: Address, peer_id: PeerId, ) -> Result { let config = self.config.make_client_config_for_peer_id(peer_id)?; @@ -91,8 +91,10 @@ impl Endpoint { fn connect_with_client_config( &self, config: quinn::ClientConfig, - address: SocketAddr, + address: Address, ) -> Result { + let address = address.resolve()?; + self.inner .connect_with(config, address, &self.config.service_name) .map_err(Into::into) @@ -112,7 +114,7 @@ impl Endpoint { pin_project_lite::pin_project! 
{ #[must_use = "futures do nothing unless you `.await` or poll them"] - pub struct Accept<'a> { + pub(crate) struct Accept<'a> { #[pin] inner: quinn::Accept<'a>, } @@ -131,7 +133,7 @@ impl<'a> Future for Accept<'a> { #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] -pub struct Connecting { +pub(crate) struct Connecting { inner: quinn::Connecting, origin: Direction, } diff --git a/network/src/network/mod.rs b/network/src/network/mod.rs index d2c2f87c1..f0274cfee 100644 --- a/network/src/network/mod.rs +++ b/network/src/network/mod.rs @@ -1,71 +1,87 @@ use std::net::{SocketAddr, ToSocketAddrs}; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use anyhow::Result; +use everscale_crypto::ed25519; use rand::Rng; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{broadcast, mpsc, oneshot}; -use crate::config::{Config, EndpointConfig}; -use crate::endpoint::Endpoint; -use crate::types::{DisconnectReason, PeerId}; - -use self::connection_manager::{ - ActivePeers, ConnectionManager, ConnectionManagerRequest, KnownPeers, WeakActivePeers, +use self::config::EndpointConfig; +use self::connection_manager::{ConnectionManager, ConnectionManagerRequest}; +use self::endpoint::Endpoint; +use crate::types::{ + Address, DisconnectReason, PeerEvent, PeerId, Response, Service, ServiceExt, ServiceRequest, }; -use self::peer::Peer; - -pub mod connection_manager; -pub mod peer; -pub struct Builder { +pub use self::config::{NetworkConfig, QuicConfig}; +pub use self::connection::{Connection, RecvStream, SendStream}; +pub use self::connection_manager::{ActivePeers, KnownPeer, KnownPeers, WeakActivePeers}; +pub use self::peer::Peer; + +mod config; +mod connection; +mod connection_manager; +mod crypto; +mod endpoint; +mod peer; +mod request_handler; +mod wire; + +pub struct NetworkBuilder { mandatory_fields: MandatoryFields, optional_fields: BuilderFields, } #[derive(Default)] struct BuilderFields { - config: Option, + config: Option, } -impl Builder { - pub fn with_config(mut self, config: Config) -> Self { +impl NetworkBuilder { + pub fn with_config(mut self, config: NetworkConfig) -> Self { self.optional_fields.config = Some(config); self } } -impl Builder<((), T2)> { - pub fn with_service_name>(self, name: T) -> Builder<(String, T2)> { +impl NetworkBuilder<((), T2)> { + pub fn with_service_name>(self, name: T) -> NetworkBuilder<(String, T2)> { let (_, private_key) = self.mandatory_fields; - Builder { + NetworkBuilder { mandatory_fields: (name.into(), private_key), optional_fields: self.optional_fields, } } } -impl Builder<(T1, ())> { - pub fn with_private_key(self, private_key: [u8; 32]) -> Builder<(T1, [u8; 32])> { +impl NetworkBuilder<(T1, ())> { + pub fn with_private_key(self, private_key: [u8; 32]) -> NetworkBuilder<(T1, [u8; 32])> { let (service_name, _) = self.mandatory_fields; - Builder { + NetworkBuilder { mandatory_fields: (service_name, private_key), optional_fields: self.optional_fields, } } - pub fn with_random_private_key(self) -> Builder<(T1, [u8; 32])> { + pub fn with_random_private_key(self) -> NetworkBuilder<(T1, [u8; 32])> { self.with_private_key(rand::thread_rng().gen()) } } -impl Builder { - pub fn build(self, bind_address: T) -> Result { +impl NetworkBuilder { + pub fn build(self, bind_address: T, service: S) -> Result + where + S: Send + Sync + Clone + 'static, + S: Service, + { use socket2::{Domain, Protocol, Socket, Type}; let config = self.optional_fields.config.unwrap_or_default(); let quic_config = config.quic.clone().unwrap_or_default(); let 
(service_name, private_key) = self.mandatory_fields; + let keypair = ed25519::KeyPair::from(&ed25519::SecretKey::from_bytes(private_key)); + let endpoint_config = EndpointConfig::builder() .with_service_name(service_name) .with_private_key(private_key) @@ -109,30 +125,53 @@ impl Builder { let weak_active_peers = ActivePeers::downgrade(&active_peers); let known_peers = KnownPeers::new(); - let (connection_manager, connection_manager_handle) = ConnectionManager::new( - config.clone(), - endpoint.clone(), - active_peers, - known_peers.clone(), - ); + let inner = Arc::new_cyclic(move |_weak| { + let service = service.boxed_clone(); + + let (connection_manager, connection_manager_handle) = ConnectionManager::new( + config.clone(), + endpoint.clone(), + active_peers, + known_peers.clone(), + service, + ); + + tokio::spawn(connection_manager.start()); + + NetworkInner { + config, + endpoint, + active_peers: weak_active_peers, + known_peers, + connection_manager_handle, + keypair, + } + }); + + Ok(Network(inner)) + } +} - tokio::spawn(connection_manager.start()); +#[derive(Clone)] +#[repr(transparent)] +pub struct WeakNetwork(Weak); - Ok(Network(Arc::new(NetworkInner { - config, - endpoint, - active_peers: weak_active_peers, - known_peers, - connection_manager_handle, - }))) +impl WeakNetwork { + pub fn upgrade(&self) -> Option { + self.0 + .upgrade() + .map(Network) + .and_then(|network| (!network.is_closed()).then_some(network)) } } +#[derive(Clone)] +#[repr(transparent)] pub struct Network(Arc); impl Network { - pub fn builder() -> Builder<((), ())> { - Builder { + pub fn builder() -> NetworkBuilder<((), ())> { + NetworkBuilder { mandatory_fields: ((), ()), optional_fields: Default::default(), } @@ -154,25 +193,57 @@ impl Network { self.0.known_peers() } - pub async fn connect(&self, addr: SocketAddr) -> Result { - self.0.connect(addr, None).await + pub fn subscribe(&self) -> Result> { + let active_peers = self.0.active_peers.upgrade().ok_or(NetworkShutdownError)?; + Ok(active_peers.subscribe()) } - pub async fn connect_with_peer_id(&self, addr: SocketAddr, peer_id: &PeerId) -> Result { - self.0.connect(addr, Some(peer_id)).await + pub async fn connect(&self, addr: T) -> Result + where + T: Into
<Address>, + { + self.0.connect(addr.into(), None).await + } + + pub async fn connect_with_peer_id<T>(&self, addr: T, peer_id: &PeerId) -> Result + where + T: Into<Address>
, + { + self.0.connect(addr.into(), Some(peer_id)).await } pub fn disconnect(&self, peer_id: &PeerId) -> Result<()> { self.0.disconnect(peer_id) } + + pub async fn shutdown(&self) -> Result<()> { + self.0.shutdown().await + } + + pub fn is_closed(&self) -> bool { + self.0.is_closed() + } + + pub fn sign_tl(&self, data: T) -> [u8; 64] { + self.0.keypair.sign(data) + } + + pub fn sign_raw(&self, data: &[u8]) -> [u8; 64] { + self.0.keypair.sign_raw(data) + } + + pub fn downgrade(this: &Self) -> WeakNetwork { + WeakNetwork(Arc::downgrade(&this.0)) + } } -pub struct NetworkInner { - config: Arc, +struct NetworkInner { + config: Arc, endpoint: Arc, active_peers: WeakActivePeers, known_peers: KnownPeers, connection_manager_handle: mpsc::Sender, + keypair: ed25519::KeyPair, } impl NetworkInner { @@ -188,7 +259,7 @@ impl NetworkInner { &self.known_peers } - async fn connect(&self, addr: SocketAddr, peer_id: Option<&PeerId>) -> Result { + async fn connect(&self, addr: Address, peer_id: Option<&PeerId>) -> Result { let (tx, rx) = oneshot::channel(); self.connection_manager_handle .send(ConnectionManagerRequest::Connect( @@ -197,7 +268,7 @@ impl NetworkInner { tx, )) .await - .map_err(|_| anyhow::anyhow!("network has been shutdown"))?; + .map_err(|_e| NetworkShutdownError)?; rx.await? } @@ -214,13 +285,43 @@ impl NetworkInner { let connection = active_peers.get(peer_id)?; Some(Peer::new(connection, self.config.clone())) } + + async fn shutdown(&self) -> Result<()> { + let (sender, receiver) = oneshot::channel(); + self.connection_manager_handle + .send(ConnectionManagerRequest::Shutdown(sender)) + .await + .map_err(|_e| NetworkShutdownError)?; + receiver.await.map_err(Into::into) + } + + fn is_closed(&self) -> bool { + self.connection_manager_handle.is_closed() + } } +#[derive(thiserror::Error, Debug)] +#[error("network has been shutdown")] +struct NetworkShutdownError; + #[cfg(test)] mod tests { use tracing_test::traced_test; use super::*; + use crate::types::{service_query_fn, BoxCloneService}; + + fn echo_service() -> BoxCloneService { + let handle = |request: ServiceRequest| async move { + tracing::trace!("received: {}", request.body.escape_ascii()); + let response = Response { + version: Default::default(), + body: request.body, + }; + Some(response) + }; + service_query_fn(handle).boxed_clone() + } #[tokio::test] #[traced_test] @@ -228,17 +329,17 @@ mod tests { let peer1 = Network::builder() .with_random_private_key() .with_service_name("tycho") - .build("127.0.0.1:0")?; + .build("127.0.0.1:0", echo_service())?; let peer2 = Network::builder() .with_random_private_key() .with_service_name("tycho") - .build("127.0.0.1:0")?; + .build("127.0.0.1:0", echo_service())?; let peer3 = Network::builder() .with_random_private_key() .with_service_name("not-tycho") - .build("127.0.0.1:0")?; + .build("127.0.0.1:0", echo_service())?; assert!(peer1.connect(peer2.local_addr()).await.is_ok()); assert!(peer2.connect(peer1.local_addr()).await.is_ok()); diff --git a/network/src/network/peer.rs b/network/src/network/peer.rs index aa69c4efe..37a26d754 100644 --- a/network/src/network/peer.rs +++ b/network/src/network/peer.rs @@ -4,19 +4,19 @@ use anyhow::Result; use bytes::Bytes; use tokio_util::codec::{FramedRead, FramedWrite}; -use crate::config::Config; -use crate::connection::Connection; -use crate::proto::{make_codec, recv_response, send_request}; +use crate::network::config::NetworkConfig; +use crate::network::connection::Connection; +use crate::network::wire::{make_codec, recv_response, send_request}; use 
crate::types::{PeerId, Request, Response}; #[derive(Clone)] pub struct Peer { connection: Connection, - config: Arc, + config: Arc, } impl Peer { - pub fn new(connection: Connection, config: Arc) -> Self { + pub(crate) fn new(connection: Connection, config: Arc) -> Self { Self { connection, config } } @@ -24,7 +24,7 @@ impl Peer { self.connection.peer_id() } - pub async fn rpc(&self, request: Request) -> Result> { + pub async fn rpc(&self, request: Request) -> Result { let (send_stream, recv_stream) = self.connection.open_bi().await?; let mut send_stream = FramedWrite::new(send_stream, make_codec(&self.config)); let mut recv_stream = FramedRead::new(recv_stream, make_codec(&self.config)); @@ -34,4 +34,19 @@ impl Peer { recv_response(&mut recv_stream).await } + + pub async fn send_message(&self, request: Request) -> Result<()> { + let send_stream = self.connection.open_uni().await?; + let mut send_stream = FramedWrite::new(send_stream, make_codec(&self.config)); + + send_request(&mut send_stream, request).await?; + send_stream.get_mut().finish().await?; + + Ok(()) + } + + pub fn send_datagram(&self, request: Bytes) -> Result<()> { + self.connection.send_datagram(request)?; + Ok(()) + } } diff --git a/network/src/network/request_handler.rs b/network/src/network/request_handler.rs new file mode 100644 index 000000000..6db6cd031 --- /dev/null +++ b/network/src/network/request_handler.rs @@ -0,0 +1,208 @@ +use std::sync::Arc; + +use anyhow::Result; +use tokio::task::JoinSet; +use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec}; + +use crate::network::config::NetworkConfig; +use crate::network::connection::{Connection, RecvStream, SendStream}; +use crate::network::connection_manager::ActivePeers; +use crate::network::wire::{make_codec, recv_request, send_response}; +use crate::types::{ + BoxCloneService, DisconnectReason, InboundRequestMeta, Response, Service, ServiceRequest, +}; + +pub(crate) struct InboundRequestHandler { + config: Arc, + connection: Connection, + service: BoxCloneService, + active_peers: ActivePeers, +} + +impl InboundRequestHandler { + pub fn new( + config: Arc, + connection: Connection, + service: BoxCloneService, + active_peers: ActivePeers, + ) -> Self { + Self { + config, + connection, + service, + active_peers, + } + } + + pub async fn start(self) { + tracing::debug!(peer_id = %self.connection.peer_id(), "request handler started"); + + let mut inflight_requests = JoinSet::<()>::new(); + + let reason: quinn::ConnectionError = loop { + tokio::select! 
{ + uni = self.connection.accept_uni() => match uni { + Ok(stream) => { + tracing::trace!(id = %stream.id(), "incoming uni stream"); + let handler = UniStreamRequestHandler::new( + &self.config, + self.connection.request_meta().clone(), + self.service.clone(), + stream, + ); + inflight_requests.spawn(handler.handle()); + }, + Err(e) => { + tracing::trace!("failed to accept an incoming uni stream: {e:?}"); + break e; + } + }, + bi = self.connection.accept_bi() => match bi { + Ok((tx, rx)) => { + tracing::trace!(id = %tx.id(), "incoming bi stream"); + let handler = BiStreamRequestHandler::new( + &self.config, + self.connection.request_meta().clone(), + self.service.clone(), + tx, + rx, + ); + inflight_requests.spawn(handler.handle()); + } + Err(e) => { + tracing::trace!("failed to accept an incoming bi stream: {e:?}"); + break e; + } + }, + datagram = self.connection.read_datagram() => match datagram { + Ok(datagram) => { + tracing::trace!(byte_len = datagram.len(), "incoming datagram"); + + inflight_requests.spawn({ + let metadata = self.connection.request_meta().clone(); + let service = self.service.clone(); + async move { + service + .on_datagram(ServiceRequest { + metadata, + body: datagram, + }) + .await; + } + }); + }, + Err(e) => { + tracing::trace!("failed to read datagram: {e:?}"); + break e; + } + }, + Some(req) = inflight_requests.join_next() => match req { + Ok(()) => tracing::trace!("requrest handler task completed"), + Err(e) => { + if e.is_panic() { + std::panic::resume_unwind(e.into_panic()); + } + tracing::trace!("request handler task cancelled"); + } + } + } + }; + + self.active_peers.remove_with_stable_id( + self.connection.peer_id(), + self.connection.stable_id(), + DisconnectReason::from(reason), + ); + + inflight_requests.shutdown().await; + tracing::debug!(peer_id = %self.connection.peer_id(), "request handler stopped"); + } +} + +struct UniStreamRequestHandler { + meta: Arc, + service: BoxCloneService, + recv_stream: FramedRead, +} + +impl UniStreamRequestHandler { + fn new( + config: &NetworkConfig, + meta: Arc, + service: BoxCloneService, + recv_stream: RecvStream, + ) -> Self { + Self { + meta, + service, + recv_stream: FramedRead::new(recv_stream, make_codec(config)), + } + } + + async fn handle(self) { + if let Err(e) = self.do_handle().await { + tracing::trace!("request handler task failed: {e:?}"); + } + } + + async fn do_handle(mut self) -> Result<()> { + let req = recv_request(&mut self.recv_stream).await?; + self.service + .on_message(ServiceRequest { + metadata: self.meta, + body: req.body, + }) + .await; + Ok(()) + } +} + +struct BiStreamRequestHandler { + meta: Arc, + service: BoxCloneService, + send_stream: FramedWrite, + recv_stream: FramedRead, +} + +impl BiStreamRequestHandler { + fn new( + config: &NetworkConfig, + meta: Arc, + service: BoxCloneService, + send_stream: SendStream, + recv_stream: RecvStream, + ) -> Self { + Self { + meta, + service, + send_stream: FramedWrite::new(send_stream, make_codec(config)), + recv_stream: FramedRead::new(recv_stream, make_codec(config)), + } + } + + async fn handle(self) { + if let Err(e) = self.do_handle().await { + tracing::trace!("request handler task failed: {e:?}"); + } + } + + async fn do_handle(mut self) -> Result<()> { + let req = recv_request(&mut self.recv_stream).await?; + let handler = self.service.on_query(ServiceRequest { + metadata: self.meta, + body: req.body, + }); + + let stopped = self.send_stream.get_mut().stopped(); + tokio::select! 
{ + res = handler => { + if let Some(res) = res { + send_response(&mut self.send_stream, res).await?; + } + self.send_stream.get_mut().finish().await?; + Ok(()) + }, + _ = stopped => anyhow::bail!("send_stream closed by remote"), + } + } +} diff --git a/network/src/proto.rs b/network/src/network/wire.rs similarity index 82% rename from network/src/proto.rs rename to network/src/network/wire.rs index 6861d413a..3b08eb789 100644 --- a/network/src/proto.rs +++ b/network/src/network/wire.rs @@ -1,14 +1,14 @@ use anyhow::Result; -use bytes::Bytes; use futures_util::sink::SinkExt; use futures_util::StreamExt; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec}; -use crate::config::Config; +use crate::network::config::NetworkConfig; +use crate::network::connection::Connection; use crate::types::{Direction, Request, Response, Version}; -pub fn make_codec(config: &Config) -> LengthDelimitedCodec { +pub(crate) fn make_codec(config: &NetworkConfig) -> LengthDelimitedCodec { let mut builder = LengthDelimitedCodec::builder(); if let Some(max_frame_size) = config.max_frame_size { @@ -18,9 +18,7 @@ pub fn make_codec(config: &Config) -> LengthDelimitedCodec { builder.length_field_length(4).big_endian().new_codec() } -pub async fn handshake( - connection: crate::connection::Connection, -) -> Result { +pub(crate) async fn handshake(connection: Connection) -> Result { match connection.origin() { Direction::Inbound => { let mut send_stream = connection.open_uni().await?; @@ -35,18 +33,18 @@ pub async fn handshake( Ok(connection) } -pub async fn send_request( +pub(crate) async fn send_request( send_stream: &mut FramedWrite, - request: Request, + request: Request, ) -> Result<()> { send_version(send_stream.get_mut(), request.version).await?; send_stream.send(request.body).await?; Ok(()) } -pub async fn recv_request( +pub(crate) async fn recv_request( recv_stream: &mut FramedRead, -) -> Result> { +) -> Result { let version = recv_version(recv_stream.get_mut()).await?; let body = match recv_stream.next().await { Some(body) => body?.freeze(), @@ -55,18 +53,18 @@ pub async fn recv_request( Ok(Request { version, body }) } -pub async fn send_response( +pub(crate) async fn send_response( send_stream: &mut FramedWrite, - response: Response, + response: Response, ) -> Result<()> { send_version(send_stream.get_mut(), response.version).await?; send_stream.send(response.body).await?; Ok(()) } -pub async fn recv_response( +pub(crate) async fn recv_response( recv_stream: &mut FramedRead, -) -> Result> { +) -> Result { let version = recv_version(recv_stream.get_mut()).await?; let body = match recv_stream.next().await { Some(body) => body?.freeze(), diff --git a/network/src/proto.tl b/network/src/proto.tl index 2e6b5205e..21be43d69 100644 --- a/network/src/proto.tl +++ b/network/src/proto.tl @@ -6,32 +6,148 @@ int256 8*[ int ] = Int256; ---types--- -/** -* @param id hash of the TL repr of the full key (@see transport.id.Full) -*/ -transport.id.short id:int256 = transport.id.Short; - /** * @param key compressed ed25519 verifying key */ -transport.id.full.ed25519 key:int256 = transport.id.Full; +transport.peerId key:int256 = transport.PeerId; transport.address.ipv4 ip:int port:int = transport.Address; transport.address.ipv6 ip:int128 port:int = transport.Address; +// DHT +//////////////////////////////////////////////////////////////////////////////// + +---types--- + /** -* @param items multiple possible addresses for the same peer -* @param 
version unix timestamp when the list was generated -* @param created_at unix timestamp when the peer was started -* @param expire_at unix timestamp up to which this list is valid +* @param id node public key +* @param addr_list multiple possible addresses for the same peer +* @param created_at unix timestamp when the info was generated +* @param expires_at unix timestamp up to which the info is valid +* @param signature a ed25519 signature of the info */ -transport.addressList - items:(vector transport.Address) - version:int +dht.node + id:transport.PeerId + addr_list:(vector transport.Address) created_at:int - expire_at:int - = transport.AddressList; + expires_at:int + signature:bytes + = dht.Node; + +/** +* @param nodes list of DHT nodes +*/ +dht.nodes nodes:(vector dht.node) = dht.Nodes; + + +/** +* Key for the value that can only be updated by an owner +* +* @param name key name enum +* @param peer_id owner id +*/ +dht.peerValueKey + name:dht.PeerValueKeyName + peer_id:transport.PeerId + = dht.Key; + +/** +* Key for the overlay-managed value +* +* @param name key name enum +* @param overlay_id overlay id +*/ +dht.overlayValueKey + name:dht.OverlayValueKeyName + overlay_id:int256 + = dht.Key; + +// Peer value key names { +dht.peerValueKeyName.nodeInfo = dht.PeerValueKeyName; +// } + +// Overlay value key names { +dht.overlayValueKeyName.peersList = dht.OverlayValueKeyName; +// } + +/** +* A value with an exact owner +* +* @param key peer value key +* @param value any data +* @param expires_at unix timestamp up to which this value is valid +*/ +dht.peerValue key:dht.peerValueKey data:bytes expires_at:int signature:bytes = dht.Value; + +/** +* An overlay-managed value +* +* @param key overlay key +* @param value any data +* @param expires_at unix timestamp up to which this value is valid +*/ +dht.overlayValue key:dht.overlayValueKey data:bytes expires_at:int = dht.Value; -transport.message.notify data:bytes = transport.Message; -transport.message.query data:bytes = transport.Message; -transport.message.answer data:bytes = transport.Message; + +/** +* A response for the `dht.findNode` query +* +* @param value a list of nodes with the shortest distances +*/ +dht.nodesFound nodes:dht.nodes = dht.NodeResponse; + + +/** +* A successful response for the `dht.findValue` query +* +* @param value an existing value +*/ +dht.valueFound value:dht.Value = dht.ValueResponse; +/** +* An unsuccessul response for the `dht.findValue` query +* +* @param value a list of nodes with the shortest distances +*/ +dht.valueNotFound nodes:dht.nodes = dht.ValueResponse; + + +/* +* A response for the `dht.getNodeInfo` query +* +* @param info a signed node info +*/ +dht.nodeInfoFound info:dht.node = dht.NodeInfoResponse; + +---functions--- + +/** +* Query wrapper with an announced peer info. 
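To illustrate the owner-signed path in this schema: a node builds a dht.peerValue for the nodeInfo key, signs its TL representation with its ed25519 key, and may then push it to other nodes via dht.store (and later look it up with dht.findValue). The Rust sketch below uses the mirror types from network/src/proto/dht.rs added later in this patch; `sign` stands in for however the node signs TL data (the `Network::sign_tl` helper added in this change is one option), the one-hour TTL is a placeholder, and it assumes the `#[tl(signature)]` attribute keeps the placeholder signature bytes out of the signed representation.

use tycho_util::time::now_sec;

use crate::proto::dht::{PeerValue, PeerValueKey, PeerValueKeyName};
use crate::types::PeerId;

// Sketch only: build an owner-signed `dht.peerValue` for the nodeInfo key name.
fn make_node_info_value(
    peer_id: PeerId,
    data: Box<[u8]>,
    sign: impl Fn(&PeerValue) -> [u8; 64],
) -> PeerValue {
    let mut value = PeerValue {
        key: PeerValueKey {
            name: PeerValueKeyName::NodeInfo,
            peer_id,
        },
        data,
        expires_at: now_sec() + 3600,
        signature: Box::new([0; 64]),
    };
    // Fill in the ed25519 signature over the TL representation of the value.
    let signature = sign(&value);
    value.signature = Box::new(signature);
    value
}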
+* +* @param peer_info a signed info of the sender +*/ +dht.withPeerInfo peer_info:dht.node = True; + +/** +* Suggest a node to store that value +* +* @param value value to store +*/ +dht.store value:dht.Value = True; +/** +* Searches for k closest nodes +* +* @param key key hash +* @param k max length of the result list +*/ +dht.findNode key:int256 k:int = dht.NodeResponse; +/** +* Searches for a value if stored or k closest nodes +* +* @param key key hash +* @param k max length of the nodes list if it is not found +*/ +dht.findValue key:int256 k:int = dht.ValueResponse; +/** +* Requests a signed node info +*/ +dht.getNodeInfo = dht.NodeInfoResponse; diff --git a/network/src/proto/dht.rs b/network/src/proto/dht.rs new file mode 100644 index 000000000..53c9dfbff --- /dev/null +++ b/network/src/proto/dht.rs @@ -0,0 +1,439 @@ +use std::sync::Arc; + +use bytes::Bytes; +use tl_proto::{TlRead, TlWrite}; + +use crate::types::{PeerId, PeerInfo}; +use crate::util::{check_peer_signature, tl}; + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, scheme = "proto.tl")] +pub enum PeerValueKeyName { + #[tl(id = "dht.peerValueKeyName.nodeInfo")] + NodeInfo, +} + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, scheme = "proto.tl")] +pub enum OverlayValueKeyName { + #[tl(id = "dht.overlayValueKeyName.peersList")] + PeersList, +} + +/// Key for values that can only be updated by the owner. +/// +/// See [`SignedValueKeyRef`] for the non-owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.peerValueKey", scheme = "proto.tl")] +pub struct PeerValueKey { + /// Key name. + pub name: PeerValueKeyName, + /// Public key of the owner. + pub peer_id: PeerId, +} + +/// Key for values that can only be updated by the owner. +/// +/// See [`SignedValueKey`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.peerValueKey", scheme = "proto.tl")] +pub struct PeerValueKeyRef<'tl> { + /// Key name. + pub name: PeerValueKeyName, + /// Public key of the owner. + pub peer_id: &'tl PeerId, +} + +impl PeerValueKeyRef<'_> { + pub fn as_owned(&self) -> PeerValueKey { + PeerValueKey { + name: self.name, + peer_id: *self.peer_id, + } + } +} + +/// Key for overlay-managed values. +/// +/// See [`OverlayValueKeyRef`] for the non-owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayValueKey", scheme = "proto.tl")] +pub struct OverlayValueKey { + /// Key name. + pub name: OverlayValueKeyName, + /// Overlay id. + pub overlay_id: [u8; 32], +} + +/// Key for overlay-managed values. +/// +/// See [`OverlayValueKey`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayValueKey", scheme = "proto.tl")] +pub struct OverlayValueKeyRef<'tl> { + /// Key name. + pub name: OverlayValueKeyName, + /// Overlay id. + pub overlay_id: &'tl [u8; 32], +} + +impl OverlayValueKeyRef<'_> { + pub fn as_owned(&self) -> OverlayValueKey { + OverlayValueKey { + name: self.name, + overlay_id: *self.overlay_id, + } + } +} + +/// Value with a known owner. +/// +/// See [`PeerValueRef`] for the non-owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.peerValue", scheme = "proto.tl")] +pub struct PeerValue { + /// Peer value key. + pub key: PeerValueKey, + /// Any data. 
+ pub data: Box<[u8]>, + /// Unix timestamp up to which this value is valid. + pub expires_at: u32, + /// A `ed25519` signature of this entry. + #[tl(signature, with = "tl::signature_owned")] + pub signature: Box<[u8; 64]>, +} + +/// Value with a known owner. +/// +/// See [`PeerValue`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.peerValue", scheme = "proto.tl")] +pub struct PeerValueRef<'tl> { + /// Peer value key. + pub key: PeerValueKeyRef<'tl>, + /// Any data. + pub data: &'tl [u8], + /// Unix timestamp up to which this value is valid. + pub expires_at: u32, + /// A `ed25519` signature of this entry. + #[tl(signature, with = "tl::signature_ref")] + pub signature: &'tl [u8; 64], +} + +impl PeerValueRef<'_> { + pub fn as_owned(&self) -> PeerValue { + PeerValue { + key: self.key.as_owned(), + data: Box::from(self.data), + expires_at: self.expires_at, + signature: Box::new(*self.signature), + } + } +} + +/// Overlay-managed value. +/// +/// See [`OverlayValueRef`] for the non-owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayValue", scheme = "proto.tl")] +pub struct OverlayValue { + /// Overlay key. + pub key: OverlayValueKey, + /// Any data. + pub data: Box<[u8]>, + /// Unix timestamp up to which this value is valid. + pub expires_at: u32, +} + +/// Overlay-managed value. +/// +/// See [`OverlayValue`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq, TlRead, TlWrite)] +#[tl(boxed, id = "dht.overlayValue", scheme = "proto.tl")] +pub struct OverlayValueRef<'tl> { + /// Overlay key. + pub key: OverlayValueKeyRef<'tl>, + /// Any data. + pub data: &'tl [u8], + /// Unix timestamp up to which this value is valid. + pub expires_at: u32, +} + +impl OverlayValueRef<'_> { + pub fn as_owned(&self) -> OverlayValue { + OverlayValue { + key: self.key.as_owned(), + data: Box::from(self.data), + expires_at: self.expires_at, + } + } +} + +/// Stored value. +/// +/// See [`ValueRef`] for the non-owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Value { + /// Value with a known owner. + Peer(PeerValue), + /// Overlay-managed value. + Overlay(OverlayValue), +} + +impl Value { + pub fn is_valid(&self, at: u32, key_hash: &[u8; 32]) -> bool { + match self { + Self::Peer(value) => { + value.expires_at >= at + && key_hash == &tl_proto::hash(&value.key) + && check_peer_signature(&value.key.peer_id, &value.signature, value) + } + Self::Overlay(value) => { + value.expires_at >= at && key_hash == &tl_proto::hash(&value.key) + } + } + } + + pub const fn expires_at(&self) -> u32 { + match self { + Self::Peer(value) => value.expires_at, + Self::Overlay(value) => value.expires_at, + } + } +} + +impl TlWrite for Value { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + match self { + Self::Peer(value) => value.max_size_hint(), + Self::Overlay(value) => value.max_size_hint(), + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + match self { + Self::Peer(value) => value.write_to(packet), + Self::Overlay(value) => value.write_to(packet), + } + } +} + +impl<'a> TlRead<'a> for Value { + type Repr = tl_proto::Boxed; + + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + let id = u32::read_from(packet, offset)?; + *offset -= 4; + match id { + PeerValue::TL_ID => PeerValue::read_from(packet, offset).map(Self::Peer), + OverlayValue::TL_ID => OverlayValue::read_from(packet, offset).map(Self::Overlay), + _ => Err(tl_proto::TlError::UnknownConstructor), + } + } +} + +/// Stored value. +/// +/// See [`Value`] for the owned version of the struct. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ValueRef<'tl> { + /// Value with a known owner. + Peer(PeerValueRef<'tl>), + /// Overlay-managed value. + Overlay(OverlayValueRef<'tl>), +} + +impl ValueRef<'_> { + pub fn is_valid(&self, at: u32, key_hash: &[u8; 32]) -> bool { + match self { + Self::Peer(value) => { + value.expires_at >= at + && key_hash == &tl_proto::hash(&value.key) + && check_peer_signature(value.key.peer_id, value.signature, value) + } + Self::Overlay(value) => { + value.expires_at >= at && key_hash == &tl_proto::hash(&value.key) + } + } + } + + pub const fn expires_at(&self) -> u32 { + match self { + Self::Peer(value) => value.expires_at, + Self::Overlay(value) => value.expires_at, + } + } +} + +impl TlWrite for ValueRef<'_> { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + match self { + Self::Peer(value) => value.max_size_hint(), + Self::Overlay(value) => value.max_size_hint(), + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + match self { + Self::Peer(value) => value.write_to(packet), + Self::Overlay(value) => value.write_to(packet), + } + } +} + +impl<'a> TlRead<'a> for ValueRef<'a> { + type Repr = tl_proto::Boxed; + + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + let id = u32::read_from(packet, offset)?; + *offset -= 4; + match id { + PeerValue::TL_ID => PeerValueRef::read_from(packet, offset).map(Self::Peer), + OverlayValue::TL_ID => OverlayValueRef::read_from(packet, offset).map(Self::Overlay), + _ => Err(tl_proto::TlError::UnknownConstructor), + } + } +} + +/// A response for the [`rpc::FindNode`] query. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.nodesFound", scheme = "proto.tl")] +pub struct NodeResponse { + /// List of nodes closest to the key. + pub nodes: Vec>, +} + +/// A response for the [`rpc::FindValue`] query. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, scheme = "proto.tl")] +pub enum ValueResponse { + /// An existing value for the specified key. + #[tl(id = "dht.valueFound")] + Found(Box), + /// List of nodes closest to the key. + #[tl(id = "dht.valueNotFound")] + NotFound(Vec>), +} + +/// A response for the [`rpc::FindValue`] query. +#[derive(Debug, Clone)] +pub enum ValueResponseRaw { + Found(Bytes), + NotFound(Vec>), +} + +impl TlWrite for ValueResponseRaw { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + 4 + match self { + Self::Found(value) => value.max_size_hint(), + Self::NotFound(nodes) => nodes.max_size_hint(), + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + const FOUND_TL_ID: u32 = tl_proto::id!("dht.valueFound", scheme = "proto.tl"); + const NOT_FOUND_TL_ID: u32 = tl_proto::id!("dht.valueNotFound", scheme = "proto.tl"); + + match self { + Self::Found(value) => { + packet.write_u32(FOUND_TL_ID); + packet.write_raw_slice(value); + } + Self::NotFound(nodes) => { + packet.write_u32(NOT_FOUND_TL_ID); + nodes.write_to(packet); + } + } + } +} + +/// A response for the [`rpc::GetNodeInfo`] query. +#[derive(Debug, Clone, TlRead, TlWrite)] +#[tl(boxed, id = "dht.nodeInfoFound", scheme = "proto.tl")] +pub struct NodeInfoResponse { + /// Signed node info. + pub info: PeerInfo, +} + +/// DHT RPC models. +pub mod rpc { + use super::*; + + /// Query wrapper with an announced peer info. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.withPeerInfo", scheme = "proto.tl")] + pub struct WithPeerInfo { + /// A signed info of the sender. + pub peer_info: Arc, + } + + /// Query wrapper with an announced peer info. + #[derive(Debug, Clone, TlWrite)] + #[tl(boxed, id = "dht.withPeerInfo", scheme = "proto.tl")] + pub struct WithPeerInfoRef<'tl> { + /// A signed info of the sender. + pub peer_info: &'tl PeerInfo, + } + + /// Suggest a node to store that value. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.store", scheme = "proto.tl")] + pub struct Store { + /// A value to store. + pub value: Value, + } + + /// Suggest a node to store that value. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.store", scheme = "proto.tl")] + pub struct StoreRef<'tl> { + /// A value to store. + pub value: ValueRef<'tl>, + } + + /// Search for `k` closest nodes. + /// + /// See [`NodeResponse`]. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.findNode", scheme = "proto.tl")] + pub struct FindNode { + /// Key hash. + pub key: [u8; 32], + /// Maximum number of nodes to return. + pub k: u32, + } + + /// Search for a value if stored or `k` closest nodes. + /// + /// See [`ValueResponse`]. + #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.findValue", scheme = "proto.tl")] + pub struct FindValue { + /// Key hash. + pub key: [u8; 32], + /// Maximum number of nodes to return. + pub k: u32, + } + + /// Requests a signed address list from the node. + /// + /// See [`NodeInfoResponse`]. 
+ #[derive(Debug, Clone, TlRead, TlWrite)] + #[tl(boxed, id = "dht.getNodeInfo", scheme = "proto.tl")] + pub struct GetNodeInfo; +} diff --git a/network/src/types/address.rs b/network/src/types/address.rs new file mode 100644 index 000000000..f772054c0 --- /dev/null +++ b/network/src/types/address.rs @@ -0,0 +1,156 @@ +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; +use tl_proto::{TlRead, TlWrite}; + +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +#[serde(transparent)] +pub struct Address(#[serde(with = "tycho_util::serde_helpers::socket_addr")] SocketAddr); + +impl Address { + pub fn resolve(&self) -> std::io::Result { + std::net::ToSocketAddrs::to_socket_addrs(&self).and_then(|mut iter| { + iter.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::NotFound, "unable to resolve host") + }) + }) + } +} + +impl std::fmt::Display for Address { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.0, f) + } +} + +impl std::net::ToSocketAddrs for Address { + type Iter = ::Iter; + + fn to_socket_addrs(&self) -> std::io::Result { + self.0.to_socket_addrs() + } +} + +impl TlWrite for Address { + type Repr = tl_proto::Boxed; + + fn max_size_hint(&self) -> usize { + 4 + match &self.0 { + SocketAddr::V4(_) => 4 + 4, + SocketAddr::V6(_) => 16 + 4, + } + } + + fn write_to
<P>
(&self, packet: &mut P) + where + P: tl_proto::TlPacket, + { + match &self.0 { + SocketAddr::V4(addr) => { + packet.write_u32(ADDRESS_V4_TL_ID); + packet.write_u32(u32::from(*addr.ip())); + packet.write_u32(addr.port() as u32); + } + SocketAddr::V6(addr) => { + packet.write_u32(ADDRESS_V6_TL_ID); + packet.write_raw_slice(&addr.ip().octets()); + packet.write_u32(addr.port() as u32); + } + }; + } +} + +impl<'a> TlRead<'a> for Address { + type Repr = tl_proto::Boxed; + + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + use tl_proto::TlError; + + Ok(Address(match u32::read_from(packet, offset)? { + ADDRESS_V4_TL_ID => { + let ip = u32::read_from(packet, offset)?; + let Ok(port) = u32::read_from(packet, offset)?.try_into() else { + return Err(TlError::InvalidData); + }; + SocketAddr::V4(SocketAddrV4::new(ip.into(), port)) + } + ADDRESS_V6_TL_ID => { + let octets = <[u8; 16]>::read_from(packet, offset)?; + let Ok(port) = u32::read_from(packet, offset)?.try_into() else { + return Err(TlError::InvalidData); + }; + SocketAddr::V6(SocketAddrV6::new(octets.into(), port, 0, 0)) + } + _ => return Err(TlError::UnknownConstructor), + })) + } +} + +impl From for Address { + #[inline] + fn from(value: SocketAddr) -> Self { + Self(value) + } +} + +impl From for Address { + #[inline] + fn from(value: SocketAddrV4) -> Self { + Self(SocketAddr::V4(value)) + } +} + +impl From for Address { + #[inline] + fn from(value: SocketAddrV6) -> Self { + Self(SocketAddr::V6(value)) + } +} + +impl From<(std::net::Ipv4Addr, u16)> for Address { + #[inline] + fn from((ip, port): (std::net::Ipv4Addr, u16)) -> Self { + Self(SocketAddr::V4(SocketAddrV4::new(ip, port))) + } +} + +impl From<(std::net::Ipv6Addr, u16)> for Address { + #[inline] + fn from((ip, port): (std::net::Ipv6Addr, u16)) -> Self { + Self(SocketAddr::V6(SocketAddrV6::new(ip, port, 0, 0))) + } +} + +impl FromStr for Address { + type Err = std::net::AddrParseError; + + #[inline] + fn from_str(s: &str) -> Result { + SocketAddr::from_str(s).map(Self) + } +} + +const ADDRESS_V4_TL_ID: u32 = tl_proto::id!("transport.address.ipv4", scheme = "proto.tl"); +const ADDRESS_V6_TL_ID: u32 = tl_proto::id!("transport.address.ipv6", scheme = "proto.tl"); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn serde() { + const SOME_ADDR_V4: &str = "101.102.103.104:12345"; + const SOME_ADDR_V6: &str = "[2345:0425:2CA1:0:0:0567:5673:23b5]:12345"; + + for addr in [SOME_ADDR_V4, SOME_ADDR_V6] { + let from_json: Address = serde_json::from_str(&format!("\"{addr}\"")).unwrap(); + let from_str = Address::from_str(addr).unwrap(); + assert_eq!(from_json, from_str); + + let to_json = serde_json::to_string(&from_json).unwrap(); + let from_json: Address = serde_json::from_str(&to_json).unwrap(); + assert_eq!(from_json, from_str); + } + } +} diff --git a/network/src/types/mod.rs b/network/src/types/mod.rs index 222a09887..f40e152fe 100644 --- a/network/src/types/mod.rs +++ b/network/src/types/mod.rs @@ -1,89 +1,20 @@ -use std::collections::HashMap; -use std::net::SocketAddr; - -pub use self::peer_id::*; - +pub use self::address::Address; +pub use self::peer_event::{DisconnectReason, PeerEvent}; +pub use self::peer_id::PeerId; +pub use self::peer_info::{PeerAffinity, PeerInfo}; +pub use self::request::{ + Direction, InboundRequestMeta, Request, Response, ServiceRequest, Version, +}; +pub use self::rpc::RpcQuery; +pub use self::service::{ + service_datagram_fn, service_message_fn, service_query_fn, BoxCloneService, BoxService, + Service, ServiceDatagramFn, 
ServiceExt, ServiceMessageFn, ServiceQueryFn, +}; + +mod address; +mod peer_event; mod peer_id; - -pub type FastDashMap = dashmap::DashMap; -pub type FastHashMap = HashMap; - -#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -#[repr(u16)] -pub enum Version { - #[default] - V1 = 1, -} - -impl TryFrom for Version { - type Error = anyhow::Error; - - fn try_from(value: u16) -> Result { - match value { - 1 => Ok(Self::V1), - _ => Err(anyhow::anyhow!("invalid version: {value}")), - } - } -} - -impl Version { - pub fn to_u16(self) -> u16 { - self as u16 - } -} - -pub struct Request { - pub version: Version, - pub body: T, -} - -pub struct Response { - pub version: Version, - pub body: T, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub enum PeerAffinity { - High, - Allowed, - Never, -} - -#[derive(Debug, Clone)] -pub struct PeerInfo { - pub peer_id: PeerId, - pub affinity: PeerAffinity, - pub address: SocketAddr, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum PeerEvent { - NewPeer(PeerId), - LostPeer(PeerId, DisconnectReason), -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum DisconnectReason { - Requested, - VersionMismatch, - TransportError, - ConnectionClosed, - ApplicationClosed, - Reset, - TimedOut, - LocallyClosed, -} - -impl From for DisconnectReason { - fn from(value: quinn::ConnectionError) -> Self { - match value { - quinn::ConnectionError::VersionMismatch => Self::VersionMismatch, - quinn::ConnectionError::TransportError(_) => Self::TransportError, - quinn::ConnectionError::ConnectionClosed(_) => Self::ConnectionClosed, - quinn::ConnectionError::ApplicationClosed(_) => Self::ApplicationClosed, - quinn::ConnectionError::Reset => Self::Reset, - quinn::ConnectionError::TimedOut => Self::TimedOut, - quinn::ConnectionError::LocallyClosed => Self::LocallyClosed, - } - } -} +mod peer_info; +mod request; +mod rpc; +mod service; diff --git a/network/src/types/peer_event.rs b/network/src/types/peer_event.rs new file mode 100644 index 000000000..33834af2f --- /dev/null +++ b/network/src/types/peer_event.rs @@ -0,0 +1,42 @@ +use serde::{Deserialize, Serialize}; + +use crate::types::PeerId; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum PeerEvent { + NewPeer(PeerId), + LostPeer(PeerId, DisconnectReason), +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum DisconnectReason { + Requested, + VersionMismatch, + TransportError, + ConnectionClosed, + ApplicationClosed, + Reset, + TimedOut, + LocallyClosed, +} + +impl From for DisconnectReason { + #[inline] + fn from(value: quinn::ConnectionError) -> Self { + Self::from(&value) + } +} + +impl From<&quinn::ConnectionError> for DisconnectReason { + fn from(value: &quinn::ConnectionError) -> Self { + match value { + quinn::ConnectionError::VersionMismatch => Self::VersionMismatch, + quinn::ConnectionError::TransportError(_) => Self::TransportError, + quinn::ConnectionError::ConnectionClosed(_) => Self::ConnectionClosed, + quinn::ConnectionError::ApplicationClosed(_) => Self::ApplicationClosed, + quinn::ConnectionError::Reset => Self::Reset, + quinn::ConnectionError::TimedOut => Self::TimedOut, + quinn::ConnectionError::LocallyClosed => Self::LocallyClosed, + } + } +} diff --git a/network/src/types/peer_id.rs b/network/src/types/peer_id.rs index dac90a8bf..031e45289 100644 --- a/network/src/types/peer_id.rs +++ b/network/src/types/peer_id.rs @@ -1,11 +1,47 @@ -#[derive(Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)] +use 
std::str::FromStr; + +use everscale_crypto::ed25519; +use tl_proto::{TlRead, TlWrite}; + +#[derive(Clone, Copy, TlRead, TlWrite, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[tl(boxed, id = "transport.peerId", scheme = "proto.tl")] #[repr(transparent)] pub struct PeerId(pub [u8; 32]); impl PeerId { pub fn wrap(bytes: &[u8; 32]) -> &Self { // SAFETY: `[u8; 32]` has the same layout as `PeerId`. - unsafe { &*(bytes as *const [u8; 32] as *const Self) } + unsafe { &*(bytes as *const [u8; 32]).cast::() } + } + + #[inline] + pub fn as_bytes(&self) -> &[u8; 32] { + &self.0 + } + + #[inline] + pub fn to_bytes(self) -> [u8; 32] { + self.0 + } + + pub fn as_public_key(&self) -> Option { + ed25519::PublicKey::from_bytes(self.0) + } + + pub fn random() -> Self { + Self(rand::random()) + } +} + +impl<'a> TlRead<'a> for &'a PeerId { + type Repr = tl_proto::Boxed; + + #[inline] + fn read_from(packet: &'a [u8], offset: &mut usize) -> tl_proto::TlResult { + if u32::read_from(packet, offset)? != PeerId::TL_ID { + return Err(tl_proto::TlError::UnknownConstructor); + } + <_>::read_from(packet, offset).map(PeerId::wrap) } } @@ -25,17 +61,107 @@ impl std::fmt::Debug for PeerId { } } -#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] -pub enum Direction { - Inbound, - Outbound, +impl FromStr for PeerId { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + let mut peer_id = PeerId([0; 32]); + hex::decode_to_slice(s, &mut peer_id.0).map(|_| peer_id) + } } -impl std::fmt::Display for Direction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(match self { - Self::Inbound => "inbound", - Self::Outbound => "outbound", - }) +impl serde::Serialize for PeerId { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + if serializer.is_human_readable() { + serializer.collect_str(self) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> serde::Deserialize<'de> for PeerId { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + if deserializer.is_human_readable() { + deserializer.deserialize_str(tycho_util::serde_helpers::StrVisitor::new()) + } else { + <[u8; 32]>::deserialize(deserializer).map(Self) + } + } +} + +impl From for PeerId { + #[inline] + fn from(public_key: ed25519::PublicKey) -> Self { + Self(public_key.to_bytes()) + } +} + +impl std::ops::BitXor for PeerId { + type Output = PeerId; + + #[inline] + fn bitxor(mut self, rhs: PeerId) -> Self::Output { + self ^= rhs; + self + } +} + +impl std::ops::BitXor<&PeerId> for PeerId { + type Output = PeerId; + + #[inline] + fn bitxor(mut self, rhs: &PeerId) -> Self::Output { + self ^= rhs; + self + } +} + +impl std::ops::BitXor<&PeerId> for &PeerId { + type Output = PeerId; + + #[inline] + fn bitxor(self, rhs: &PeerId) -> Self::Output { + *self ^ rhs + } +} + +impl std::ops::BitXorAssign for PeerId { + #[inline] + fn bitxor_assign(&mut self, rhs: PeerId) { + std::ops::BitXorAssign::bitxor_assign(self, &rhs); + } +} + +impl std::ops::BitXorAssign<&PeerId> for PeerId { + #[inline] + fn bitxor_assign(&mut self, rhs: &PeerId) { + for (left, right) in self.0.iter_mut().zip(&rhs.0) { + *left ^= right; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn serde() { + const SOME_ID: &str = "5d09fe251943525a30f471791d5b4fea1298613f52ad2ad6d985fed05eb00533"; + + let from_json: PeerId = serde_json::from_str(&format!("\"{SOME_ID}\"")).unwrap(); + let from_str = PeerId::from_str(SOME_ID).unwrap(); + assert_eq!(from_json, from_str); + + let 
to_json = serde_json::to_string(&from_json).unwrap(); + let from_json: PeerId = serde_json::from_str(&to_json).unwrap(); + assert_eq!(from_json, from_str); } } diff --git a/network/src/types/peer_info.rs b/network/src/types/peer_info.rs new file mode 100644 index 000000000..08cbbcd64 --- /dev/null +++ b/network/src/types/peer_info.rs @@ -0,0 +1,170 @@ +use serde::{Deserialize, Serialize}; +use tl_proto::{TlRead, TlWrite}; + +use crate::types::{Address, PeerId}; +use crate::util::{check_peer_signature, tl}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +pub enum PeerAffinity { + High, + Allowed, + Never, +} + +/// A signed node info. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TlRead, TlWrite)] +pub struct PeerInfo { + /// Node public key. + pub id: PeerId, + /// Multiple possible addresses for the same peer. + #[tl(with = "tl_address_list")] + pub address_list: Box<[Address]>, + /// Unix timestamp when the info was generated. + pub created_at: u32, + /// Unix timestamp up to which the info is valid. + pub expires_at: u32, + /// A `ed25519` signature of the info. + #[serde(with = "serde_signature")] + #[tl(signature, with = "tl::signature_owned")] + pub signature: Box<[u8; 64]>, +} + +impl PeerInfo { + pub const MAX_ADDRESSES: usize = 4; + + pub fn is_valid(&self, at: u32) -> bool { + const CLOCK_THRESHOLD: u32 = 1; + + self.created_at <= at + CLOCK_THRESHOLD + && self.expires_at >= at + && !self.address_list.is_empty() + && check_peer_signature(&self.id, &self.signature, self) + } + + pub fn is_expired(&self, at: u32) -> bool { + self.expires_at < at + } + + pub fn iter_addresses(&self) -> std::slice::Iter<'_, Address> { + self.address_list.iter() + } +} + +mod tl_address_list { + use super::*; + + pub fn size_hint(address_list: &[Address]) -> usize { + 4 + address_list + .iter() + .map(Address::max_size_hint) + .sum::() + } + + pub fn write(address_list: &[Address], packet: &mut P) { + address_list.write_to(packet); + } + + pub fn read(packet: &[u8], offset: &mut usize) -> tl_proto::TlResult> { + use tl_proto::TlError; + + let len = u32::read_from(packet, offset)? 
as usize; + if len == 0 || len > PeerInfo::MAX_ADDRESSES { + return Err(TlError::InvalidData); + } + + let mut items = Vec::with_capacity(len); + for _ in 0..len { + items.push(Address::read_from(packet, offset)?); + } + + Ok(items.into_boxed_slice()) + } +} + +mod serde_signature { + use base64::engine::Engine as _; + use base64::prelude::BASE64_STANDARD; + use tycho_util::serde_helpers::{BorrowedStr, BytesVisitor}; + + use super::*; + + pub fn serialize(data: &[u8; 64], serializer: S) -> Result + where + S: serde::Serializer, + { + if serializer.is_human_readable() { + serializer.serialize_str(&BASE64_STANDARD.encode(data)) + } else { + data.serialize(serializer) + } + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + if deserializer.is_human_readable() { + as Deserialize>::deserialize(deserializer).and_then( + |BorrowedStr(s)| { + let mut buffer = [0u8; 66]; + match BASE64_STANDARD.decode_slice(s.as_ref(), &mut buffer) { + Ok(64) => { + let [data @ .., _, _] = buffer; + Ok(Box::new(data)) + } + _ => Err(Error::custom("Invalid signature")), + } + }, + ) + } else { + deserializer + .deserialize_bytes(BytesVisitor::<64>) + .map(Box::new) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + #[test] + fn serde() { + let target_peer_info = PeerInfo { + id: PeerId::from_str( + "40ed1f0e3730d9086156e706b0706b21805db8a30a2b7c73a837403e553124ee", + ) + .unwrap(), + address_list: Box::new([Address::from_str("101.102.103.104:12345").unwrap()]), + created_at: 1700000000, + expires_at: 1710000000, + signature: Box::new([ + 0xe4, 0x3b, 0xc4, 0x50, 0x73, 0xe6, 0xe2, 0x5e, 0xfa, 0xb0, 0x74, 0xc8, 0xef, 0x33, + 0xdb, 0x61, 0xf3, 0x4c, 0x68, 0xec, 0x56, 0xae, 0x38, 0x88, 0xfb, 0xc0, 0x2b, 0x1b, + 0x44, 0x6b, 0xe1, 0xc3, 0xb1, 0xdb, 0x4d, 0x34, 0xeb, 0x37, 0x03, 0x96, 0xc2, 0x9d, + 0xb2, 0xd8, 0xc0, 0x41, 0x2b, 0x9f, 0x70, 0x9a, 0x8f, 0x3c, 0x1d, 0xe6, 0x8e, 0x28, + 0x44, 0x1d, 0x7a, 0x4f, 0x39, 0xc5, 0xe1, 0x3d, + ]), + }; + + let target_peer_info_str = r#"{ + "id": "40ed1f0e3730d9086156e706b0706b21805db8a30a2b7c73a837403e553124ee", + "address_list": [ + "101.102.103.104:12345" + ], + "created_at": 1700000000, + "expires_at": 1710000000, + "signature": "5DvEUHPm4l76sHTI7zPbYfNMaOxWrjiI+8ArG0Rr4cOx20006zcDlsKdstjAQSufcJqPPB3mjihEHXpPOcXhPQ==" +}"#; + assert_eq!( + serde_json::to_string_pretty(&target_peer_info).unwrap(), + target_peer_info_str + ); + + let from_json: PeerInfo = serde_json::from_str(target_peer_info_str).unwrap(); + assert_eq!(from_json, target_peer_info); + } +} diff --git a/network/src/types/request.rs b/network/src/types/request.rs new file mode 100644 index 000000000..a641e5d7e --- /dev/null +++ b/network/src/types/request.rs @@ -0,0 +1,185 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use bytes::Bytes; +use serde::{Deserialize, Serialize}; + +use crate::types::PeerId; + +#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[repr(u16)] +pub enum Version { + #[default] + V1 = 1, +} + +impl Version { + pub fn to_u16(self) -> u16 { + self as u16 + } +} + +impl TryFrom for Version { + type Error = anyhow::Error; + + fn try_from(value: u16) -> Result { + match value { + 1 => Ok(Self::V1), + _ => Err(anyhow::anyhow!("invalid version: {value}")), + } + } +} + +impl Serialize for Version { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_u16(self.to_u16()) + } +} + +impl<'de> 
Deserialize<'de> for Version { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + u16::deserialize(deserializer).and_then(|v| Self::try_from(v).map_err(Error::custom)) + } +} + +#[derive(Serialize, Deserialize)] +pub struct Request { + pub version: Version, + #[serde(with = "serde_body")] + pub body: Bytes, +} + +impl Request { + pub fn from_tl(body: T) -> Self + where + T: tl_proto::TlWrite, + { + Self { + version: Default::default(), + body: tl_proto::serialize(body).into(), + } + } +} + +impl AsRef<[u8]> for Request { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + +#[derive(Serialize, Deserialize)] +pub struct Response { + pub version: Version, + #[serde(with = "serde_body")] + pub body: Bytes, +} + +impl Response { + pub fn from_tl(body: T) -> Self + where + T: tl_proto::TlWrite, + { + Self { + version: Default::default(), + body: tl_proto::serialize(body).into(), + } + } + + pub fn parse_tl(self) -> tl_proto::TlResult + where + for<'a> T: tl_proto::TlRead<'a, Repr = tl_proto::Boxed>, + { + tl_proto::deserialize(self.body.as_ref()) + } +} + +impl AsRef<[u8]> for Response { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + +pub struct ServiceRequest { + pub metadata: Arc, + pub body: Bytes, +} + +impl AsRef<[u8]> for ServiceRequest { + #[inline] + fn as_ref(&self) -> &[u8] { + self.body.as_ref() + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InboundRequestMeta { + pub peer_id: PeerId, + pub origin: Direction, + #[serde(with = "tycho_util::serde_helpers::socket_addr")] + pub remote_address: SocketAddr, +} + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)] +pub enum Direction { + Inbound, + Outbound, +} + +impl std::fmt::Display for Direction { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Self::Inbound => "inbound", + Self::Outbound => "outbound", + }) + } +} + +mod serde_body { + use base64::engine::Engine as _; + use base64::prelude::BASE64_STANDARD; + use tycho_util::serde_helpers::BorrowedStr; + + use super::*; + + pub fn serialize(data: &[u8], serializer: S) -> Result + where + S: serde::Serializer, + { + if serializer.is_human_readable() { + serializer.serialize_str(&BASE64_STANDARD.encode(data)) + } else { + data.serialize(serializer) + } + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + if deserializer.is_human_readable() { + as Deserialize>::deserialize(deserializer).and_then( + |BorrowedStr(s)| { + BASE64_STANDARD + .decode(s.as_ref()) + .map(Bytes::from) + .map_err(Error::custom) + }, + ) + } else { + Bytes::deserialize(deserializer) + } + } +} diff --git a/network/src/types/rpc.rs b/network/src/types/rpc.rs new file mode 100644 index 000000000..5dc2b062a --- /dev/null +++ b/network/src/types/rpc.rs @@ -0,0 +1,5 @@ +use tl_proto::{Boxed, TlRead}; + +pub trait RpcQuery { + type Response: for<'a> TlRead<'a, Repr = Boxed>; +} diff --git a/network/src/types/service.rs b/network/src/types/service.rs new file mode 100644 index 000000000..4a7dcb402 --- /dev/null +++ b/network/src/types/service.rs @@ -0,0 +1,459 @@ +use std::future::Future; +use std::marker::PhantomData; +use std::sync::Arc; + +use futures_util::future::BoxFuture; + +pub trait Service { + type QueryResponse: Send + 'static; + type OnQueryFuture: Future> + Send + 'static; + type OnMessageFuture: Future + Send + 'static; + type 
OnDatagramFuture: Future + Send + 'static; + + /// Called when a query is received. + /// + /// Returns a future that resolves to the either response to the query if `Some`, + /// or cancellation of the query if `None`. + fn on_query(&self, req: Request) -> Self::OnQueryFuture; + + /// Called when a message is received. + fn on_message(&self, req: Request) -> Self::OnMessageFuture; + + /// Called when a datagram is received. + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture; +} + +pub trait ServiceExt: Service { + #[inline] + fn boxed(self) -> BoxService + where + Self: Sized + Send + Sync + 'static, + Self::OnQueryFuture: Send + 'static, + Self::OnMessageFuture: Send + 'static, + Self::OnDatagramFuture: Send + 'static, + { + BoxService::new(self) + } + + #[inline] + fn boxed_clone(self) -> BoxCloneService + where + Self: Clone + Sized + Send + Sync + 'static, + Self::OnQueryFuture: Send + 'static, + Self::OnMessageFuture: Send + 'static, + Self::OnDatagramFuture: Send + 'static, + { + BoxCloneService::new(self) + } +} + +impl ServiceExt for T where T: Service + ?Sized {} + +impl<'a, S, Request> Service for &'a S +where + S: Service + Sync + 'a, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = S::OnQueryFuture; + type OnMessageFuture = S::OnMessageFuture; + type OnDatagramFuture = S::OnDatagramFuture; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + >::on_query(*self, req) + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + >::on_message(*self, req) + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + >::on_datagram(*self, req) + } +} + +impl Service for Arc +where + S: Service + Sync + ?Sized, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = S::OnQueryFuture; + type OnMessageFuture = S::OnMessageFuture; + type OnDatagramFuture = S::OnDatagramFuture; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + >::on_query(self.as_ref(), req) + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + >::on_message(self.as_ref(), req) + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + >::on_datagram(self.as_ref(), req) + } +} + +impl Service for Box +where + S: Service + ?Sized, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = S::OnQueryFuture; + type OnMessageFuture = S::OnMessageFuture; + type OnDatagramFuture = S::OnDatagramFuture; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + >::on_query(self.as_ref(), req) + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + >::on_message(self.as_ref(), req) + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + >::on_datagram(self.as_ref(), req) + } +} + +#[repr(transparent)] +pub struct BoxService { + inner: Box>, +} + +type DynBoxService = dyn Service< + Request, + QueryResponse = Q, + OnQueryFuture = BoxFuture<'static, Option>, + OnMessageFuture = BoxFuture<'static, ()>, + OnDatagramFuture = BoxFuture<'static, ()>, + > + Send + + Sync; + +impl BoxService { + pub fn new(inner: S) -> Self + where + S: Service + Send + Sync + 'static, + S::OnQueryFuture: Send + 'static, + S::OnMessageFuture: Send + 'static, + S::OnDatagramFuture: Send + 'static, + { + BoxService { + inner: Box::new(BoxPinFutures(inner)), + } + } +} + +impl Service for BoxService +where + Request: Send + 'static, + Q: Send + 'static, +{ + type QueryResponse = Q; + 
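+    // --- Illustrative sketch (not part of the original diff) -----------------
+    // `BoxService` erases the concrete service type by boxing every returned
+    // future, so heterogeneous services can be stored behind a single type.
+    // A minimal usage sketch, assuming `bytes::Bytes` as both the request and
+    // the query-response type; it relies only on the `service_query_fn` helper
+    // and `ServiceExt::boxed` defined in this module:
+    //
+    //     use bytes::Bytes;
+    //
+    //     fn make_echo_service() -> BoxService<Bytes, Bytes> {
+    //         // Echo the query body back; messages and datagrams are ignored
+    //         // by the `ServiceQueryFn` adapter.
+    //         service_query_fn(|req: Bytes| async move { Some(req) }).boxed()
+    //     }
+    // --------------------------------------------------------------------------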
type OnQueryFuture = BoxFuture<'static, Option>; + type OnMessageFuture = BoxFuture<'static, ()>; + type OnDatagramFuture = BoxFuture<'static, ()>; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + self.inner.on_query(req) + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + self.inner.on_message(req) + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + self.inner.on_datagram(req) + } +} + +#[repr(transparent)] +pub struct BoxCloneService { + inner: Box>, +} + +type DynBoxCloneService = dyn CloneService< + Request, + QueryResponse = Q, + OnQueryFuture = BoxFuture<'static, Option>, + OnMessageFuture = BoxFuture<'static, ()>, + OnDatagramFuture = BoxFuture<'static, ()>, + > + Send + + Sync; + +impl BoxCloneService +where + Q: Send + 'static, +{ + pub fn new(inner: S) -> Self + where + S: Service + Clone + Send + Sync + 'static, + S::OnQueryFuture: Send + 'static, + S::OnMessageFuture: Send + 'static, + S::OnDatagramFuture: Send + 'static, + { + BoxCloneService { + inner: Box::new(BoxPinFutures(inner)), + } + } +} + +impl Service for BoxCloneService +where + Request: Send + 'static, + Q: Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = BoxFuture<'static, Option>; + type OnMessageFuture = BoxFuture<'static, ()>; + type OnDatagramFuture = BoxFuture<'static, ()>; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + self.inner.on_query(req) + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + self.inner.on_message(req) + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + self.inner.on_datagram(req) + } +} + +impl Clone for BoxCloneService +where + Q: Send + Sync + 'static, +{ + fn clone(&self) -> Self { + BoxCloneService { + inner: self.inner.clone_box(), + } + } +} + +trait CloneService: Service { + fn clone_box(&self) -> Box>; +} + +impl CloneService for S +where + S: Service + Clone + Send + Sync + 'static, + S::OnQueryFuture: Send + 'static, + S::OnMessageFuture: Send + 'static, + S::OnDatagramFuture: Send + 'static, +{ + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } +} + +type DynCloneService = dyn CloneService< + Request, + QueryResponse = >::QueryResponse, + OnQueryFuture = >::OnQueryFuture, + OnMessageFuture = >::OnMessageFuture, + OnDatagramFuture = >::OnDatagramFuture, + > + Send + + Sync; + +#[repr(transparent)] +struct BoxPinFutures(S); + +impl Clone for BoxPinFutures { + #[inline] + fn clone(&self) -> Self { + BoxPinFutures(self.0.clone()) + } +} + +impl Service for BoxPinFutures +where + S: Service, +{ + type QueryResponse = S::QueryResponse; + type OnQueryFuture = BoxFuture<'static, Option>; + type OnMessageFuture = BoxFuture<'static, ()>; + type OnDatagramFuture = BoxFuture<'static, ()>; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + let f = self.0.on_query(req); + match castaway::cast!(f, Self::OnQueryFuture) { + Ok(f) => f, + Err(f) => Box::pin(f), + } + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + let f = self.0.on_message(req); + match castaway::cast!(f, Self::OnMessageFuture) { + Ok(f) => f, + Err(f) => Box::pin(f), + } + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + let f = self.0.on_datagram(req); + match castaway::cast!(f, Self::OnDatagramFuture) { + Ok(f) => f, + Err(f) => Box::pin(f), + } + } +} + +pub fn service_query_fn(f: T) -> ServiceQueryFn { + ServiceQueryFn 
{ f } +} + +pub struct ServiceQueryFn { + f: T, +} + +impl Clone for ServiceQueryFn { + #[inline] + fn clone(&self) -> Self { + ServiceQueryFn { f: self.f.clone() } + } +} + +impl Service for ServiceQueryFn +where + Q: Send + 'static, + T: Fn(Request) -> F + Send + 'static, + F: Future> + Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = F; + type OnMessageFuture = futures_util::future::Ready<()>; + type OnDatagramFuture = futures_util::future::Ready<()>; + + #[inline] + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + (self.f)(req) + } + + #[inline] + fn on_message(&self, _req: Request) -> Self::OnMessageFuture { + futures_util::future::ready(()) + } + + #[inline] + fn on_datagram(&self, _req: Request) -> Self::OnDatagramFuture { + futures_util::future::ready(()) + } +} + +pub fn service_message_fn(f: T) -> ServiceMessageFn { + ServiceMessageFn { + f, + _response: PhantomData, + } +} + +impl Clone for ServiceMessageFn { + #[inline] + fn clone(&self) -> Self { + ServiceMessageFn { + f: self.f.clone(), + _response: PhantomData, + } + } +} + +pub struct ServiceMessageFn { + f: T, + _response: PhantomData, +} + +impl Service for ServiceMessageFn +where + Q: Send + 'static, + T: Fn(Request) -> F + Send + 'static, + F: Future + Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = futures_util::future::Ready>; + type OnMessageFuture = F; + type OnDatagramFuture = futures_util::future::Ready<()>; + + #[inline] + fn on_query(&self, _req: Request) -> Self::OnQueryFuture { + futures_util::future::ready(None) + } + + #[inline] + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + (self.f)(req) + } + + #[inline] + fn on_datagram(&self, _req: Request) -> Self::OnDatagramFuture { + futures_util::future::ready(()) + } +} + +pub fn service_datagram_fn(f: T) -> ServiceDatagramFn { + ServiceDatagramFn { + f, + _response: PhantomData, + } +} + +pub struct ServiceDatagramFn { + f: T, + _response: PhantomData, +} + +impl Clone for ServiceDatagramFn { + #[inline] + fn clone(&self) -> Self { + ServiceDatagramFn { + f: self.f.clone(), + _response: PhantomData, + } + } +} + +impl Service for ServiceDatagramFn +where + Q: Send + 'static, + T: Fn(Request) -> F + Send + 'static, + F: Future + Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = futures_util::future::Ready>; + type OnMessageFuture = futures_util::future::Ready<()>; + type OnDatagramFuture = F; + + #[inline] + fn on_query(&self, _req: Request) -> Self::OnQueryFuture { + futures_util::future::ready(None) + } + + #[inline] + fn on_message(&self, _req: Request) -> Self::OnMessageFuture { + futures_util::future::ready(()) + } + + #[inline] + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + (self.f)(req) + } +} diff --git a/network/src/util/mod.rs b/network/src/util/mod.rs new file mode 100644 index 000000000..c868a54aa --- /dev/null +++ b/network/src/util/mod.rs @@ -0,0 +1,50 @@ +pub use self::router::{Routable, Router, RouterBuilder}; +pub use self::traits::NetworkExt; + +use crate::types::PeerId; + +mod router; +mod traits; + +pub(crate) mod tl; + +#[macro_export] +macro_rules! match_tl_request { + ($req_body:expr, $(tag = $tag:expr,)? { + $($ty:path as $pat:pat => $expr:expr),*$(,)? 
+ }, $err:pat => $err_exr:expr) => { + '__match_req: { + let $err = $crate::match_tl_request!(@inner $req_body, $($tag)?, { + $( + <$ty>::TL_ID => match $crate::__internal::tl_proto::deserialize::<$ty>(&($req_body)) { + Ok($pat) => break '__match_req ($expr), + Err(e) => e, + } + )* + _ => $crate::__internal::tl_proto::TlError::UnknownConstructor, + }); + $err_exr + } + }; + + (@inner $req_body:expr, $tag:expr, $($rest:tt)*) => { + match $tag $($rest)* + }; + (@inner $req_body:expr, , $($rest:tt)*) => { + if ($req_body).len() >= 4 { + match ($req_body).as_ref().get_u32_le() $($rest)* + } else { + $crate::__internal::tl_proto::TlError::UnexpectedEof + } + }; +} + +pub fn check_peer_signature(peed_id: &PeerId, signature: &[u8; 64], data: &T) -> bool +where + T: tl_proto::TlWrite, +{ + let Some(public_key) = peed_id.as_public_key() else { + return false; + }; + public_key.verify(data, signature) +} diff --git a/network/src/util/router.rs b/network/src/util/router.rs new file mode 100644 index 000000000..7a445643b --- /dev/null +++ b/network/src/util/router.rs @@ -0,0 +1,156 @@ +use std::marker::PhantomData; +use std::sync::Arc; + +use tycho_util::futures::BoxFutureOrNoop; +use tycho_util::FastHashMap; + +use crate::types::{BoxService, Service, ServiceExt}; + +pub trait Routable { + #[inline] + fn query_ids(&self) -> impl IntoIterator { + std::iter::empty() + } + + #[inline] + fn message_ids(&self) -> impl IntoIterator { + std::iter::empty() + } + + #[inline] + fn datagram_ids(&self) -> impl IntoIterator { + std::iter::empty() + } +} + +pub struct RouterBuilder { + inner: Inner, +} + +impl RouterBuilder { + pub fn route(mut self, service: S) -> Self + where + S: Service + Routable + Send + Sync + 'static, + { + let index = self.inner.services.len(); + for id in service.query_ids() { + let prev = self.inner.query_handlers.insert(id, index); + assert!(prev.is_none(), "duplicate query id: {:08x}", id); + } + for id in service.message_ids() { + let prev = self.inner.message_handlers.insert(id, index); + assert!(prev.is_none(), "duplicate message id: {:08x}", id); + } + for id in service.datagram_ids() { + let prev = self.inner.datagram_handlers.insert(id, index); + assert!(prev.is_none(), "duplicate datagram id: {:08x}", id); + } + + self.inner.services.push(service.boxed()); + self + } + + pub fn build(self) -> Router { + Router { + inner: Arc::new(self.inner), + } + } +} + +impl Default for RouterBuilder { + fn default() -> Self { + Self { + inner: Inner { + services: Vec::new(), + query_handlers: FastHashMap::default(), + message_handlers: FastHashMap::default(), + datagram_handlers: FastHashMap::default(), + _response: PhantomData, + }, + } + } +} + +pub struct Router { + inner: Arc>, +} + +impl Router { + pub fn builder() -> RouterBuilder { + RouterBuilder::default() + } +} + +impl Clone for Router { + #[inline] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl Service for Router +where + Request: Send + AsRef<[u8]> + 'static, + Q: Send + 'static, +{ + type QueryResponse = Q; + type OnQueryFuture = BoxFutureOrNoop>; + type OnMessageFuture = BoxFutureOrNoop<()>; + type OnDatagramFuture = BoxFutureOrNoop<()>; + + fn on_query(&self, req: Request) -> Self::OnQueryFuture { + match find_handler(&req, &self.inner.query_handlers, &self.inner.services) { + Some(service) => BoxFutureOrNoop::Boxed(service.on_query(req)), + None => BoxFutureOrNoop::Noop, + } + } + + fn on_message(&self, req: Request) -> Self::OnMessageFuture { + match find_handler(&req, 
&self.inner.message_handlers, &self.inner.services) { + Some(service) => BoxFutureOrNoop::Boxed(service.on_message(req)), + None => BoxFutureOrNoop::Noop, + } + } + + fn on_datagram(&self, req: Request) -> Self::OnDatagramFuture { + match find_handler(&req, &self.inner.datagram_handlers, &self.inner.services) { + Some(service) => BoxFutureOrNoop::Boxed(service.on_datagram(req)), + None => BoxFutureOrNoop::Noop, + } + } +} + +fn find_handler<'a, T: AsRef<[u8]>, S>( + req: &T, + indices: &FastHashMap, + handlers: &'a [S], +) -> Option<&'a S> { + if let Some(id) = read_le_u32(req.as_ref()) { + if let Some(&index) = indices.get(&id) { + // NOTE: intentionally panics if index is out of bounds as it is + // an implementation error. + return Some(handlers.get(index).expect("index must be in bounds")); + } + } + None +} + +struct Inner { + services: Vec>, + query_handlers: FastHashMap, + message_handlers: FastHashMap, + datagram_handlers: FastHashMap, + _response: PhantomData, +} + +fn read_le_u32(buf: &[u8]) -> Option { + if buf.len() >= 4 { + let mut bytes = [0; 4]; + bytes.copy_from_slice(&buf[..4]); + Some(u32::from_le_bytes(bytes)) + } else { + None + } +} diff --git a/network/src/util/tl.rs b/network/src/util/tl.rs new file mode 100644 index 000000000..ee01d9ea4 --- /dev/null +++ b/network/src/util/tl.rs @@ -0,0 +1,43 @@ +use tl_proto::{TlError, TlPacket, TlRead, TlResult, TlWrite}; + +pub mod signature_ref { + use super::*; + + #[inline] + pub fn size_hint(signature: &[u8; 64]) -> usize { + signature.as_slice().max_size_hint() + } + + #[inline] + pub fn write(signature: &[u8; 64], packet: &mut P) { + signature.as_slice().write_to(packet); + } + + pub fn read<'a>(packet: &'a [u8], offset: &mut usize) -> TlResult<&'a [u8; 64]> { + <&tl_proto::BoundedBytes<64>>::read_from(packet, offset) + .and_then(|bytes| bytes.as_ref().try_into().map_err(|_e| TlError::InvalidData)) + } +} + +pub mod signature_owned { + use super::*; + + #[inline] + pub fn size_hint(signature: &[u8; 64]) -> usize { + signature.as_slice().max_size_hint() + } + + #[inline] + pub fn write(signature: &[u8; 64], packet: &mut P) { + signature.as_slice().write_to(packet); + } + + pub fn read(packet: &[u8], offset: &mut usize) -> TlResult> { + <&tl_proto::BoundedBytes<64>>::read_from(packet, offset).and_then(|bytes| { + let Ok::<[u8; 64], _>(bytes) = bytes.as_ref().try_into() else { + return Err(TlError::InvalidData); + }; + Ok(Box::new(bytes)) + }) + } +} diff --git a/network/src/util/traits.rs b/network/src/util/traits.rs new file mode 100644 index 000000000..fb0a06384 --- /dev/null +++ b/network/src/util/traits.rs @@ -0,0 +1,104 @@ +use std::future::Future; + +use anyhow::Result; + +use crate::network::{KnownPeer, Network, Peer}; +use crate::types::{PeerEvent, PeerId, Request, Response}; + +pub trait NetworkExt { + fn query( + &self, + peer_id: &PeerId, + request: Request, + ) -> impl Future> + Send; + + fn send(&self, peer_id: &PeerId, request: Request) -> impl Future> + Send; +} + +impl NetworkExt for Network { + async fn query(&self, peer_id: &PeerId, request: Request) -> Result { + on_connected_peer(self, Peer::rpc, peer_id, request).await + } + + async fn send(&self, peer_id: &PeerId, request: Request) -> Result<()> { + on_connected_peer(self, Peer::send_message, peer_id, request).await + } +} + +async fn on_connected_peer( + network: &Network, + f: F, + peer_id: &PeerId, + request: Request, +) -> Result +where + for<'a> F: PeerTask<'a, T>, +{ + use tokio::sync::broadcast::error::RecvError; + + let mut peer_events = 
network.subscribe()?; + + // Interact if already connected + if let Some(peer) = network.peer(peer_id) { + return f.call(&peer, request).await; + } + + match network.known_peers().get(peer_id) { + // Initiate a connection of it is a known peer + Some(KnownPeer { peer_info, .. }) => { + // TODO: try multiple addresses + let address = peer_info + .iter_addresses() + .next() + .cloned() + .expect("address list must have at least one item"); + + network.connect_with_peer_id(address, peer_id).await?; + } + // Error otherwise + None => anyhow::bail!("trying to interact with an unknown peer: {peer_id}"), + } + + loop { + match peer_events.recv().await { + Ok(PeerEvent::NewPeer(new_peer_id)) if &new_peer_id == peer_id => { + if let Some(peer) = network.peer(peer_id) { + return f.call(&peer, request).await; + } + } + Ok(_) => {} + Err(RecvError::Closed) => anyhow::bail!("network subscription closed"), + Err(RecvError::Lagged(_)) => { + peer_events = peer_events.resubscribe(); + + if let Some(peer) = network.peer(peer_id) { + return f.call(&peer, request).await; + } + } + } + + anyhow::ensure!( + network.known_peers().contains(peer_id), + "waiting for a connection to an unknown peer: {peer_id}", + ); + } +} + +trait PeerTask<'a, T> { + type Output: Future> + 'a; + + fn call(self, peer: &'a Peer, request: Request) -> Self::Output; +} + +impl<'a, T, F, Fut> PeerTask<'a, T> for F +where + F: FnOnce(&'a Peer, Request) -> Fut, + Fut: Future> + 'a, +{ + type Output = Fut; + + #[inline] + fn call(self, peer: &'a Peer, request: Request) -> Fut { + self(peer, request) + } +} diff --git a/network/tests/dht.rs b/network/tests/dht.rs new file mode 100644 index 000000000..cb80aa3da --- /dev/null +++ b/network/tests/dht.rs @@ -0,0 +1,188 @@ +//! Run tests with this env: +//! ```text +//! RUST_LOG=info,tycho_network=trace +//! 
``` + +use std::net::Ipv4Addr; +use std::sync::Arc; + +use anyhow::Result; +use everscale_crypto::ed25519; +use tl_proto::{TlRead, TlWrite}; +use tycho_network::{ + proto, Address, DhtClient, DhtService, FindValueError, Network, PeerId, PeerInfo, Router, +}; +use tycho_util::time::now_sec; + +struct Node { + network: Network, + dht: DhtClient, +} + +impl Node { + fn new(key: &ed25519::SecretKey) -> Result { + let keypair = everscale_crypto::ed25519::KeyPair::from(key); + + let (dht_client, dht) = DhtService::builder(keypair.public_key.into()).build(); + + let router = Router::builder().route(dht).build(); + + let network = Network::builder() + .with_private_key(key.to_bytes()) + .with_service_name("test-service") + .build((Ipv4Addr::LOCALHOST, 0), router) + .unwrap(); + + let dht = dht_client.build(network.clone()); + + Ok(Self { network, dht }) + } + + fn make_peer_info(key: &ed25519::SecretKey, address: Address) -> PeerInfo { + let keypair = ed25519::KeyPair::from(key); + let peer_id = PeerId::from(keypair.public_key); + + let now = now_sec(); + let mut node_info = PeerInfo { + id: peer_id, + address_list: vec![address].into_boxed_slice(), + created_at: now, + expires_at: u32::MAX, + signature: Box::new([0; 64]), + }; + *node_info.signature = keypair.sign(&node_info); + node_info + } +} + +fn make_network(node_count: usize) -> (Vec, Vec>) { + let keys = (0..node_count) + .map(|_| ed25519::SecretKey::generate(&mut rand::thread_rng())) + .collect::>(); + + let nodes = keys + .iter() + .map(Node::new) + .collect::>>() + .unwrap(); + + let bootstrap_info = std::iter::zip(&keys, &nodes) + .map(|(key, node)| Arc::new(Node::make_peer_info(key, node.network.local_addr().into()))) + .collect::>(); + for node in &nodes { + for info in &bootstrap_info { + node.dht.add_peer(info.clone()).unwrap(); + } + } + + (nodes, bootstrap_info) +} + +#[tokio::test] +async fn bootstrap_nodes_accessible() -> Result<()> { + tracing_subscriber::fmt::try_init().ok(); + tracing::info!("bootstrap_nodes_accessible"); + + let (nodes, _) = make_network(5); + + for i in 0..nodes.len() { + for j in 0..nodes.len() { + if i == j { + continue; + } + + let left = &nodes[i]; + let right = &nodes[j]; + left.dht.get_node_info(right.network.peer_id()).await?; + } + } + + Ok(()) +} + +#[tokio::test] +async fn bootstrap_nodes_store_value() -> Result<()> { + tracing_subscriber::fmt::try_init().ok(); + tracing::info!("bootstrap_nodes_store_value"); + + #[derive(Debug, Clone, PartialEq, Eq, TlWrite, TlRead)] + struct SomeValue(u32); + + const VALUE: SomeValue = SomeValue(123123); + + let (nodes, _) = make_network(5); + + // Store value + let first = &nodes[0].dht; + + first + .entry(proto::dht::PeerValueKeyName::NodeInfo) + .with_data(VALUE) + .with_time(now_sec()) + .store() + .await?; + + // Retrieve an existing value + let value = first + .entry(proto::dht::PeerValueKeyName::NodeInfo) + .find_value::(&first.network().peer_id()) + .await?; + assert_eq!(value, VALUE); + + // Retrieve a non-existing value + let res = first + .entry(proto::dht::PeerValueKeyName::NodeInfo) + .find_peer_value_raw(nodes[1].network.peer_id()) + .await; + assert!(matches!(res, Err(FindValueError::NotFound))); + + Ok(()) +} + +#[tokio::test] +async fn connect_new_node_to_bootstrap() -> Result<()> { + tracing_subscriber::fmt::try_init().ok(); + tracing::info!("connect_new_node_to_bootstrap"); + + #[derive(Debug, Clone, PartialEq, Eq, TlWrite, TlRead)] + struct SomeValue(u32); + + const VALUE: SomeValue = SomeValue(123123); + + let (bootstrap_nodes, 
global_config) = make_network(5); + + let node = Node::new(&ed25519::SecretKey::generate(&mut rand::thread_rng()))?; + for peer_info in &global_config { + node.dht.add_peer(peer_info.clone())?; + } + + // Ensure that the node is not known by the bootstrap nodes + let mut somebody_knows_the_peer = false; + for bootstrap_node in &bootstrap_nodes { + somebody_knows_the_peer |= bootstrap_node + .network + .known_peers() + .contains(node.network.peer_id()); + } + assert!(!somebody_knows_the_peer); + + // Store value and announce the peer info + node.dht + .entry(proto::dht::PeerValueKeyName::NodeInfo) + .with_data(VALUE) + .with_peer_info(true) + .store() + .await?; + + // The node must be known by some bootstrap nodes now + let mut somebody_knows_the_peer = false; + for bootstrap_node in &bootstrap_nodes { + somebody_knows_the_peer |= bootstrap_node + .network + .known_peers() + .contains(node.network.peer_id()); + } + assert!(somebody_knows_the_peer); + + Ok(()) +} diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 283edc6d7..000000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.74.0 \ No newline at end of file diff --git a/scripts/test_dht.py b/scripts/test_dht.py new file mode 100755 index 000000000..26a7f4e08 --- /dev/null +++ b/scripts/test_dht.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +import yaml +import subprocess +import os +import json + + +def generate_entrypoint_script(service_name, start_delay, params: str = ""): + script_content = f"""#!/bin/bash + # Introduce startup delay + sleep {start_delay} + export RUST_LOG="info,tycho_network=trace" + /app/network-node {params} + """ + script_path = f".scratch/entrypoints/{service_name}_entrypoint.sh" + os.makedirs(os.path.dirname(script_path), exist_ok=True) + with open(script_path, "w") as file: + file.write(script_content) + os.chmod(script_path, 0o755) # Make the script executable + + +def generate_docker_compose(services): + """ + Generates a Docker Compose file with specified services, IPs, and entrypoints. + """ + compose_dict = {"version": "3.7", "services": {}} + + for service, details in services.items(): + compose_dict["services"][service] = { + "image": details["image"], + "entrypoint": "/entrypoints/entrypoint.sh", + "volumes": [ + f"./entrypoints/{service}_entrypoint.sh:/entrypoints/entrypoint.sh", + "./global-config.json:/app/global-config.json", + ], + "networks": {"default": {"ipv4_address": details["ip"]}}, + } + + networks_dict = { + "networks": {"default": {"ipam": {"config": [{"subnet": "172.30.0.0/24"}]}}} + } + + compose_dict.update(networks_dict) + + with open(".scratch/docker-compose.yml", "w") as file: + yaml.dump(compose_dict, file) + + print("Docker Compose file and entrypoint scripts generated.") + + +def execute_command(command): + result = subprocess.run(command, shell=True, capture_output=True, text=True) + return result.stdout + + +def run_docker_compose(services): + """ + Runs the Docker Compose file and applies the specified start delays. 
+ """ + os.system("docker compose -f .scratch/docker-compose.yml up") + + # for service, details in services.items(): + # latency = details.get("latency", 0) + # if latency: + # print(f"Applying {latency}ms latency to {service}...") + # # Assuming eth0 as the default network interface inside the container + # container_id = ( + # subprocess.check_output(["docker", "ps", "-qf", f"name={service}"]) + # .decode() + # .strip() + # ) + # os.system( + # f"docker exec {container_id} tc qdisc add dev eth0 root netem delay {latency}ms" + # ) + + print("Docker Compose services started with specified delays and latencies.") + + +def main(): + # Example input + node_count = 5 + node_port = 25565 + + services = {} + bootstrap_peers = [] + + for i in range(node_count): + key = os.urandom(32).hex() + ip = f"172.30.0.{i + 10}" + + cmd = ( + f"cargo run --example network-node -- gendht '{ip}:{node_port}' --key {key}" + ) + dht_entry = json.loads(execute_command(cmd)) + bootstrap_peers.append(dht_entry) + + node_name = f"node-{i}" + services[node_name] = { + "image": "tycho-network", + "ip": ip, + } + generate_entrypoint_script( + node_name, + start_delay=0, + params=f"run '{ip}:{node_port}' --key {key} --global-config /app/global-config.json", + ) + + with open(".scratch/global-config.json", "w") as f: + json.dump({"bootstrap_peers": bootstrap_peers}, f, indent=2) + + generate_docker_compose(services) + print("To manually test the setup, run the following commands:") + print("docker compose -f .scratch/docker-compose.yml up") + run_docker_compose(services) + + +if __name__ == "__main__": + main() diff --git a/util/Cargo.toml b/util/Cargo.toml index 87ed39ac4..3e24d5ec7 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -6,8 +6,17 @@ description = "Shared utilities for node components." 
[dependencies]
# crates.io deps
+ahash = "0.8"
+castaway = "0.2"
+dashmap = "5.4"
+futures-util = "0.3"
+hex = "0.4"
+humantime = "2"
+rand = "0.8"
+serde = { version = "1.0", features = ["derive"] }
+tokio = { version = "1", default-features = false, features = ["time"] }

# local deps

[lints]
-workspace= true
\ No newline at end of file
+workspace = true
diff --git a/util/src/futures.rs b/util/src/futures.rs
new file mode 100644
index 000000000..66fd9c5fa
--- /dev/null
+++ b/util/src/futures.rs
@@ -0,0 +1,45 @@
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use futures_util::future::BoxFuture;
+use futures_util::{Future, FutureExt};
+
+pub enum BoxFutureOrNoop<T> {
+    Boxed(BoxFuture<'static, T>),
+    Noop,
+}
+
+impl<T> BoxFutureOrNoop<T> {
+    #[inline]
+    pub fn future<F>(f: F) -> Self
+    where
+        F: Future<Output = T> + Send + 'static,
+    {
+        match castaway::cast!(f, BoxFuture<'static, T>) {
+            Ok(f) => BoxFutureOrNoop::Boxed(f),
+            Err(f) => BoxFutureOrNoop::Boxed(f.boxed()),
+        }
+    }
+}
+
+impl Future for BoxFutureOrNoop<()> {
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        match self.get_mut() {
+            BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx),
+            BoxFutureOrNoop::Noop => std::task::Poll::Ready(()),
+        }
+    }
+}
+
+impl<T> Future for BoxFutureOrNoop<Option<T>> {
+    type Output = Option<T>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        match self.get_mut() {
+            BoxFutureOrNoop::Boxed(fut) => fut.poll_unpin(cx),
+            BoxFutureOrNoop::Noop => std::task::Poll::Ready(None),
+        }
+    }
+}
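A minimal usage sketch for `BoxFutureOrNoop` (illustration only, not part of the patch; `maybe_notify` and `caller` are hypothetical): the type lets an API hand back a boxed future when there is real work to do and a `Noop` that completes immediately otherwise.

    use tycho_util::futures::BoxFutureOrNoop;

    // Hypothetical helper: box the future only when a subscriber is registered.
    fn maybe_notify(subscribed: bool) -> BoxFutureOrNoop<()> {
        if subscribed {
            BoxFutureOrNoop::future(async {
                // ...send the notification here...
            })
        } else {
            BoxFutureOrNoop::Noop
        }
    }

    async fn caller() {
        maybe_notify(true).await;
        maybe_notify(false).await; // resolves immediately, no allocation
    }

The `future` constructor boxes the future unless it is already a `BoxFuture`, while the `Noop` arm avoids the allocation entirely.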
diff --git a/util/src/lib.rs b/util/src/lib.rs
index 8b1378917..7106dbf0d 100644
--- a/util/src/lib.rs
+++ b/util/src/lib.rs
@@ -1 +1,118 @@
+use std::collections::HashMap;
+use std::collections::HashSet;
+
+pub mod futures;
+pub mod serde_helpers;
+pub mod time;
+
+pub type FastDashMap<K, V> = dashmap::DashMap<K, V, ahash::RandomState>;
+pub type FastDashSet<K> = dashmap::DashSet<K, ahash::RandomState>;
+pub type FastHashMap<K, V> = HashMap<K, V, ahash::RandomState>;
+pub type FastHashSet<K> = HashSet<K, ahash::RandomState>;
+
+/// # Example
+///
+/// ```rust
+/// # use tycho_util::realloc_box_enum;
+/// enum Value {
+///     One(BigValue1),
+///     Two(BigValue2),
+/// }
+///
+/// struct BigValue1([u32; 10]);
+///
+/// struct BigValue2([u32; 7]);
+///
+/// fn convert_to_one(value: Box<Value>) -> Option<Box<BigValue1>> {
+///     realloc_box_enum!(value, {
+///         Value::One(value) => Box::new(value) => Some(value),
+///         _ => None,
+///     })
+/// }
+/// ```
+#[macro_export]
+macro_rules! realloc_box_enum {
+    ($value:expr, {
+        $target_variant:pat => Box::new($extracted:ident) => $target:expr,
+        $other_variant:pat => $other:expr,
+    }) => {{
+        let value: ::std::boxed::Box<_> = $value;
+        match ::core::convert::AsRef::as_ref(&value) {
+            #[allow(unused_variables)]
+            $target_variant => {
+                let $extracted = unsafe {
+                    $crate::__internal::realloc_box(value, |value| match value {
+                        $target_variant => $extracted,
+                        _ => unreachable!(),
+                    })
+                };
+                $target
+            }
+            $other_variant => $other,
+        }
+    }};
+}
+
+#[doc(hidden)]
+pub mod __internal {
+    /// # Safety
+    /// The following must be true:
+    /// - `T` must have the same layout as `R`
+    /// - `f` must not panic
+    pub unsafe fn realloc_box<T, F, R>(value: Box<T>, f: F) -> Box<R>
+    where
+        F: FnOnce(T) -> R,
+    {
+        assert!(std::mem::align_of::<T>() == std::mem::align_of::<R>());
+
+        let ptr = Box::into_raw(value);
+        let value = std::ptr::read(ptr);
+
+        let ptr = std::alloc::realloc(
+            ptr.cast::<u8>(),
+            std::alloc::Layout::new::<T>(),
+            std::mem::size_of::<R>(),
+        )
+        .cast::<R>();
+
+        if ptr.is_null() {
+            std::alloc::handle_alloc_error(std::alloc::Layout::new::<R>());
+        }
+
+        // NOTE: in case of panic, the memory will be leaked
+        std::ptr::write(ptr, f(value));
+
+        Box::from_raw(ptr)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    #[allow(dead_code)]
+    fn realloc_enum() {
+        enum Value {
+            One(BigValue1),
+            Two(BigValue2),
+        }
+
+        #[derive(Clone)]
+        struct BigValue1([u32; 10]);
+
+        #[derive(Clone)]
+        struct BigValue2([u32; 7]);
+
+        fn convert_to_one(value: Box<Value>) -> Option<Box<BigValue1>> {
+            realloc_box_enum!(value, {
+                Value::One(value) => Box::new(value) => Some(value),
+                _ => None,
+            })
+        }
+
+        let value = BigValue1([123; 10]);
+        let one = convert_to_one(Box::new(Value::One(value.clone())));
+        assert_eq!(one.unwrap().0, value.0);
+    }
+}
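A small sketch of how the `Fast*` aliases are typically constructed (illustration only, not part of the patch; it assumes the hasher parameter is `ahash::RandomState`): with a non-default hasher, `HashMap::new` is unavailable, so maps come from `Default` or `with_hasher`.

    use tycho_util::FastHashMap;

    fn main() {
        // `HashMap::new()` is only defined for the std hasher, so use `default()`.
        let mut peers = FastHashMap::<u32, &str>::default();
        peers.insert(1, "bootstrap");
        assert_eq!(peers.get(&1), Some(&"bootstrap"));
    }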
diff --git a/util/src/serde_helpers.rs b/util/src/serde_helpers.rs
new file mode 100644
index 000000000..bf38f98d5
--- /dev/null
+++ b/util/src/serde_helpers.rs
@@ -0,0 +1,312 @@
+use std::borrow::Cow;
+use std::marker::PhantomData;
+use std::str::FromStr;
+
+use serde::de::{Error, Expected, Visitor};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+pub mod socket_addr {
+    use std::net::SocketAddr;
+
+    use super::*;
+
+    pub fn serialize<S: Serializer>(value: &SocketAddr, serializer: S) -> Result<S::Ok, S::Error> {
+        if serializer.is_human_readable() {
+            serializer.collect_str(value)
+        } else {
+            value.serialize(serializer)
+        }
+    }
+
+    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<SocketAddr, D::Error> {
+        if deserializer.is_human_readable() {
+            deserializer.deserialize_str(StrVisitor::new())
+        } else {
+            SocketAddr::deserialize(deserializer)
+        }
+    }
+}
+
+pub mod humantime {
+    use std::time::{Duration, SystemTime};
+
+    use super::*;
+
+    pub fn serialize<T, S: Serializer>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        for<'a> Serde<&'a T>: Serialize,
+    {
+        Serde::from(value).serialize(serializer)
+    }
+
+    pub fn deserialize<'a, T, D: Deserializer<'a>>(deserializer: D) -> Result<T, D::Error>
+    where
+        Serde<T>: Deserialize<'a>,
+    {
+        Serde::deserialize(deserializer).map(Serde::into_inner)
+    }
+
+    pub struct Serde<T>(T);
+
+    impl<T> Serde<T> {
+        #[inline]
+        pub fn into_inner(self) -> T {
+            self.0
+        }
+    }
+
+    impl<T> From<T> for Serde<T> {
+        fn from(value: T) -> Serde<T> {
+            Serde(value)
+        }
+    }
+
+    impl<'de> Deserialize<'de> for Serde<Duration> {
+        fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Serde<Duration>, D::Error> {
+            struct V;
+
+            impl<'de2> Visitor<'de2> for V {
+                type Value = Duration;
+
+                fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                    f.write_str("a duration")
+                }
+
+                fn visit_str<E: Error>(self, v: &str) -> Result<Self::Value, E> {
+                    ::humantime::parse_duration(v)
+                        .map_err(|_e| E::invalid_value(serde::de::Unexpected::Str(v), &self))
+                }
+            }
+
+            d.deserialize_str(V).map(Serde)
+        }
+    }
+
+    impl<'de> Deserialize<'de> for Serde<SystemTime> {
+        fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Serde<SystemTime>, D::Error> {
+            struct V;
+
+            impl<'de2> Visitor<'de2> for V {
+                type Value = SystemTime;
+
+                fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                    f.write_str("a timestamp")
+                }
+
+                fn visit_str<E: Error>(self, v: &str) -> Result<Self::Value, E> {
+                    ::humantime::parse_rfc3339_weak(v)
+                        .map_err(|_e| E::invalid_value(serde::de::Unexpected::Str(v), &self))
+                }
+            }
+
+            d.deserialize_str(V).map(Serde)
+        }
+    }
+
+    impl<'de> Deserialize<'de> for Serde<Option<Duration>> {
+        fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Serde<Option<Duration>>, D::Error> {
+            match Option::<Serde<Duration>>::deserialize(d)? {
+                Some(Serde(v)) => Ok(Serde(Some(v))),
+                None => Ok(Serde(None)),
+            }
+        }
+    }
+
+    impl<'de> Deserialize<'de> for Serde<Option<SystemTime>> {
+        fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Serde<Option<SystemTime>>, D::Error> {
+            match Option::<Serde<SystemTime>>::deserialize(d)? {
+                Some(Serde(v)) => Ok(Serde(Some(v))),
+                None => Ok(Serde(None)),
+            }
+        }
+    }
+
+    impl<'a> Serialize for Serde<&'a Duration> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            serializer.collect_str(&::humantime::format_duration(*self.0))
+        }
+    }
+
+    impl Serialize for Serde<Duration> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            serializer.collect_str(&::humantime::format_duration(self.0))
+        }
+    }
+
+    impl<'a> Serialize for Serde<&'a SystemTime> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            serializer.collect_str(&::humantime::format_rfc3339(*self.0))
+        }
+    }
+
+    impl Serialize for Serde<SystemTime> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            ::humantime::format_rfc3339(self.0)
+                .to_string()
+                .serialize(serializer)
+        }
+    }
+
+    impl<'a> Serialize for Serde<&'a Option<Duration>> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            match *self.0 {
+                Some(v) => serializer.serialize_some(&Serde(v)),
+                None => serializer.serialize_none(),
+            }
+        }
+    }
+
+    impl Serialize for Serde<Option<Duration>> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            Serde(&self.0).serialize(serializer)
+        }
+    }
+
+    impl<'a> Serialize for Serde<&'a Option<SystemTime>> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            match *self.0 {
+                Some(v) => serializer.serialize_some(&Serde(v)),
+                None => serializer.serialize_none(),
+            }
+        }
+    }
+
+    impl Serialize for Serde<Option<SystemTime>> {
+        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+            Serde(&self.0).serialize(serializer)
+        }
+    }
+}
+
+#[derive(Deserialize)]
+#[repr(transparent)]
+pub struct BorrowedStr<'a>(#[serde(borrow)] pub Cow<'a, str>);
+
+pub struct StrVisitor<S>(PhantomData<S>);
+
+impl<S> StrVisitor<S> {
+    pub const fn new() -> Self {
+        Self(PhantomData)
+    }
+}
+
+impl<'de, S: FromStr> Visitor<'de> for StrVisitor<S>
+where
+    <S as FromStr>::Err: std::fmt::Display,
+{
+    type Value = S;
+
+    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "a string")
+    }
+
+    fn visit_str<E: Error>(self, value: &str) -> Result<Self::Value, E> {
+        value.parse::<S>().map_err(Error::custom)
+    }
+}
+
+pub struct BytesVisitor<const M: usize>;
+
+impl<'de, const M: usize> Visitor<'de> for BytesVisitor<M> {
+    type Value = [u8; M];
+
+    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_fmt(format_args!("a byte array of size {M}"))
+    }
+
+    fn visit_bytes<E: Error>(self, v: &[u8]) -> Result<Self::Value, E> {
+        v.try_into()
+            .map_err(|_e| Error::invalid_length(v.len(), &self))
+    }
+
+    fn visit_seq<A>(self, seq: A) -> Result<Self::Value, A::Error>
+    where
+        A: serde::de::SeqAccess<'de>,
+    {
+        struct SeqIter<'de, A, T> {
+            access: A,
+            marker: PhantomData<(&'de (), T)>,
+        }
+
+        impl<'de, A, T> SeqIter<'de, A, T> {
+            pub(crate) fn new(access: A) -> Self
+            where
+                A: serde::de::SeqAccess<'de>,
+            {
+                Self {
+                    access,
+                    marker: PhantomData,
+                }
+            }
+        }
+
+        impl<'de, A, T> Iterator for SeqIter<'de, A, T>
+        where
+            A: serde::de::SeqAccess<'de>,
+            T: Deserialize<'de>,
+        {
+            type Item = Result<T, A::Error>;
+
+            fn next(&mut self) -> Option<Self::Item> {
+                self.access.next_element().transpose()
+            }
+
+            fn size_hint(&self) -> (usize, Option<usize>) {
+                match self.access.size_hint() {
+                    Some(size) => (size, Some(size)),
+                    None => (0, None),
+                }
+            }
+        }
+
+        fn array_from_iterator<I, T, E, const N: usize>(
+            mut iter: I,
+            expected: &dyn Expected,
+        ) -> Result<[T; N], E>
+        where
+            I: Iterator<Item = Result<T, E>>,
+            E: Error,
+        {
+            use core::mem::MaybeUninit;
+
+            /// # Safety
+            /// The following must be true:
+            /// - The first `num` elements must be initialized.
+            unsafe fn drop_array_elems<T, const N: usize>(
+                num: usize,
+                mut arr: [MaybeUninit<T>; N],
+            ) {
+                arr[..num]
+                    .iter_mut()
+                    .for_each(|item| item.assume_init_drop());
+            }
+
+            // SAFETY: It is safe to assume that array of uninitialized values is initialized itself.
+            let mut arr: [MaybeUninit<T>; N] = unsafe { MaybeUninit::uninit().assume_init() };
+
+            // NOTE: Leaks memory on panic
+            for (i, elem) in arr[..].iter_mut().enumerate() {
+                *elem = match iter.next() {
+                    Some(Ok(value)) => MaybeUninit::new(value),
+                    Some(Err(err)) => {
+                        // SAFETY: Items until `i` were initialized.
+                        unsafe { drop_array_elems(i, arr) };
+                        return Err(err);
+                    }
+                    None => {
+                        // SAFETY: Items until `i` were initialized.
+                        unsafe { drop_array_elems(i, arr) };
+                        return Err(Error::invalid_length(i, expected));
+                    }
+                };
+            }
+
+            // Everything is initialized. Transmute the array to the initialized type.
+            // A normal transmute is not possible because of:
+            // https://github.com/rust-lang/rust/issues/61956
+            Ok(unsafe { std::mem::transmute_copy(&arr) })
+        }
+
+        array_from_iterator(SeqIter::new(seq), &self)
+    }
+}
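A sketch of how these helpers are wired up with `#[serde(with = ...)]` (illustration only, not part of the patch; `NodeConfig` is a hypothetical struct and `serde_json` is pulled in just for the demo):

    use std::net::SocketAddr;
    use std::time::Duration;

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct NodeConfig {
        #[serde(with = "tycho_util::serde_helpers::socket_addr")]
        listen_addr: SocketAddr,
        // Human-readable formats get "10s"-style strings via the `humantime` module.
        #[serde(with = "tycho_util::serde_helpers::humantime")]
        connect_timeout: Duration,
    }

    fn main() {
        let json = r#"{"listen_addr":"127.0.0.1:30000","connect_timeout":"10s"}"#;
        let config: NodeConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.connect_timeout, Duration::from_secs(10));
        assert_eq!(config.listen_addr.port(), 30000);
        println!("{}", serde_json::to_string(&config).unwrap());
    }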
diff --git a/util/src/time.rs b/util/src/time.rs
new file mode 100644
index 000000000..985712a76
--- /dev/null
+++ b/util/src/time.rs
@@ -0,0 +1,15 @@
+use std::time::Duration;
+
+use rand::Rng;
+
+pub fn now_sec() -> u32 {
+    std::time::SystemTime::now()
+        .duration_since(std::time::UNIX_EPOCH)
+        .unwrap()
+        .as_secs() as u32
+}
+
+pub fn shifted_interval(period: Duration, max_shift: Duration) -> tokio::time::Interval {
+    let shift = rand::thread_rng().gen_range(Duration::ZERO..max_shift);
+    tokio::time::interval_at(tokio::time::Instant::now() + shift, period + shift)
+}
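For context, a minimal consumer of the time helpers might look like the sketch below (not part of the patch; it assumes a binary with tokio's `rt` and `macros` features enabled, which `tycho-util` itself does not pull in):

    use std::time::Duration;

    use tycho_util::time::{now_sec, shifted_interval};

    #[tokio::main]
    async fn main() {
        println!("unix time: {}", now_sec());

        // Each node gets a random shift of up to 2s; the shift delays the first
        // tick and is also added to the period, so peers do not fire in lockstep.
        let mut interval = shifted_interval(Duration::from_secs(10), Duration::from_secs(2));
        for _ in 0..3 {
            interval.tick().await;
            // ...periodic maintenance work...
        }
    }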